nir: Drop "SSA" from NIR language
author     Alyssa Rosenzweig <alyssa@rosenzweig.io>
           Sat, 12 Aug 2023 20:17:15 +0000 (16:17 -0400)
committer  Alyssa Rosenzweig <alyssa@rosenzweig.io>
           Sat, 12 Aug 2023 20:44:41 +0000 (16:44 -0400)
Everything is SSA now.

   sed -e 's/nir_ssa_def/nir_def/g' \
       -e 's/nir_ssa_undef/nir_undef/g' \
       -e 's/nir_ssa_scalar/nir_scalar/g' \
       -e 's/nir_src_rewrite_ssa/nir_src_rewrite/g' \
       -e 's/nir_gather_ssa_types/nir_gather_types/g' \
       -i $(git grep -l nir | grep -v relnotes)

   git mv src/compiler/nir/nir_gather_ssa_types.c \
          src/compiler/nir/nir_gather_types.c

   ninja -C build/ clang-format
   cd src/compiler/nir && find *.c *.h -type f -exec clang-format -i \{} \;
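
To illustrate the resulting spellings (a minimal sketch, not part of this
commit; lower_fsin_example is a hypothetical pass fragment, only the renamed
type and builder names are real):

   #include "nir.h"
   #include "nir_builder.h"

   static nir_def *
   lower_fsin_example(nir_builder *b, nir_alu_instr *alu)
   {
      /* nir_ssa_def is now nir_def; nir_ssa_undef() is now nir_undef() */
      nir_def *src = alu->src[0].src.ssa;
      if (src->bit_size != 32)
         return nir_undef(b, src->num_components, src->bit_size);
      return nir_fsin(b, src);
   }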

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Acked-by: Emma Anholt <emma@anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24585>

492 files changed:
docs/_exts/nir.py
src/amd/common/ac_nir.c
src/amd/common/ac_nir.h
src/amd/common/ac_nir_cull.c
src/amd/common/ac_nir_lower_esgs_io_to_mem.c
src/amd/common/ac_nir_lower_global_access.c
src/amd/common/ac_nir_lower_image_opcodes_cdna.c
src/amd/common/ac_nir_lower_ngg.c
src/amd/common/ac_nir_lower_ps.c
src/amd/common/ac_nir_lower_resinfo.c
src/amd/common/ac_nir_lower_subdword_loads.c
src/amd/common/ac_nir_lower_taskmesh_io_to_mem.c
src/amd/common/ac_nir_lower_tess_io_to_mem.c
src/amd/common/ac_nir_lower_tex.c
src/amd/common/ac_surface.c
src/amd/common/ac_surface.h
src/amd/compiler/aco_instruction_selection.cpp
src/amd/compiler/aco_instruction_selection_setup.cpp
src/amd/llvm/ac_nir_to_llvm.c
src/amd/vulkan/meta/radv_meta.c
src/amd/vulkan/meta/radv_meta.h
src/amd/vulkan/meta/radv_meta_blit.c
src/amd/vulkan/meta/radv_meta_blit2d.c
src/amd/vulkan/meta/radv_meta_buffer.c
src/amd/vulkan/meta/radv_meta_bufimage.c
src/amd/vulkan/meta/radv_meta_clear.c
src/amd/vulkan/meta/radv_meta_copy_vrs_htile.c
src/amd/vulkan/meta/radv_meta_dcc_retile.c
src/amd/vulkan/meta/radv_meta_decompress.c
src/amd/vulkan/meta/radv_meta_etc_decode.c
src/amd/vulkan/meta/radv_meta_fast_clear.c
src/amd/vulkan/meta/radv_meta_fmask_copy.c
src/amd/vulkan/meta/radv_meta_fmask_expand.c
src/amd/vulkan/meta/radv_meta_resolve_cs.c
src/amd/vulkan/meta/radv_meta_resolve_fs.c
src/amd/vulkan/nir/radv_nir_apply_pipeline_layout.c
src/amd/vulkan/nir/radv_nir_lower_abi.c
src/amd/vulkan/nir/radv_nir_lower_fs_barycentric.c
src/amd/vulkan/nir/radv_nir_lower_fs_intrinsics.c
src/amd/vulkan/nir/radv_nir_lower_intrinsics_early.c
src/amd/vulkan/nir/radv_nir_lower_primitive_shading_rate.c
src/amd/vulkan/nir/radv_nir_lower_ray_queries.c
src/amd/vulkan/nir/radv_nir_lower_view_index.c
src/amd/vulkan/nir/radv_nir_lower_viewport_to_zero.c
src/amd/vulkan/nir/radv_nir_lower_vs_inputs.c
src/amd/vulkan/radv_device_generated_commands.c
src/amd/vulkan/radv_query.c
src/amd/vulkan/radv_rt_common.c
src/amd/vulkan/radv_rt_common.h
src/amd/vulkan/radv_rt_shader.c
src/amd/vulkan/radv_shader_info.c
src/asahi/compiler/agx_compile.c
src/asahi/compiler/agx_compiler.h
src/asahi/compiler/agx_nir_lower_address.c
src/asahi/compiler/agx_nir_lower_discard_zs_emit.c
src/asahi/compiler/agx_nir_lower_frag_sidefx.c
src/asahi/compiler/agx_nir_lower_interpolation.c
src/asahi/compiler/agx_nir_lower_load_mask.c
src/asahi/compiler/agx_nir_lower_sample_mask.c
src/asahi/compiler/agx_nir_lower_texture.c
src/asahi/compiler/agx_nir_lower_ubo.c
src/asahi/compiler/agx_nir_opt_preamble.c
src/asahi/lib/agx_meta.c
src/asahi/lib/agx_nir_format_helpers.h
src/asahi/lib/agx_nir_lower_alpha.c
src/asahi/lib/agx_nir_lower_msaa.c
src/asahi/lib/agx_nir_lower_sample_intrinsics.c
src/asahi/lib/agx_nir_lower_tilebuffer.c
src/asahi/lib/agx_nir_lower_vbo.c
src/broadcom/compiler/nir_to_vir.c
src/broadcom/compiler/v3d33_tex.c
src/broadcom/compiler/v3d40_tex.c
src/broadcom/compiler/v3d_compiler.h
src/broadcom/compiler/v3d_nir_lower_image_load_store.c
src/broadcom/compiler/v3d_nir_lower_io.c
src/broadcom/compiler/v3d_nir_lower_line_smooth.c
src/broadcom/compiler/v3d_nir_lower_load_store_bitsize.c
src/broadcom/compiler/v3d_nir_lower_logic_ops.c
src/broadcom/compiler/v3d_nir_lower_scratch.c
src/broadcom/compiler/v3d_nir_lower_txf_ms.c
src/broadcom/compiler/vir.c
src/broadcom/vulkan/v3dv_event.c
src/broadcom/vulkan/v3dv_meta_clear.c
src/broadcom/vulkan/v3dv_meta_copy.c
src/broadcom/vulkan/v3dv_pipeline.c
src/broadcom/vulkan/v3dv_query.c
src/compiler/clc/nir_lower_libclc.c
src/compiler/glsl/gl_nir_link_varyings.c
src/compiler/glsl/gl_nir_linker.c
src/compiler/glsl/gl_nir_lower_atomics.c
src/compiler/glsl/gl_nir_lower_blend_equation_advanced.c
src/compiler/glsl/gl_nir_lower_buffers.c
src/compiler/glsl/gl_nir_lower_images.c
src/compiler/glsl/gl_nir_lower_packed_varyings.c
src/compiler/glsl/gl_nir_lower_xfb_varying.c
src/compiler/glsl/gl_nir_opt_dead_builtin_varyings.c
src/compiler/glsl/glsl_to_nir.cpp
src/compiler/nir/meson.build
src/compiler/nir/nir.c
src/compiler/nir/nir.h
src/compiler/nir/nir_builder.c
src/compiler/nir/nir_builder.h
src/compiler/nir/nir_builder_opcodes_h.py
src/compiler/nir/nir_builtin_builder.c
src/compiler/nir/nir_builtin_builder.h
src/compiler/nir/nir_clone.c
src/compiler/nir/nir_control_flow.c
src/compiler/nir/nir_conversion_builder.h
src/compiler/nir/nir_deref.c
src/compiler/nir/nir_deref.h
src/compiler/nir/nir_divergence_analysis.c
src/compiler/nir/nir_format_convert.h
src/compiler/nir/nir_from_ssa.c
src/compiler/nir/nir_gather_info.c
src/compiler/nir/nir_gather_types.c [moved from src/compiler/nir/nir_gather_ssa_types.c with 98% similarity]
src/compiler/nir/nir_group_loads.c
src/compiler/nir/nir_inline_functions.c
src/compiler/nir/nir_inline_uniforms.c
src/compiler/nir/nir_instr_set.c
src/compiler/nir/nir_legacy.c
src/compiler/nir/nir_legacy.h
src/compiler/nir/nir_linking_helpers.c
src/compiler/nir/nir_liveness.c
src/compiler/nir/nir_loop_analyze.c
src/compiler/nir/nir_loop_analyze.h
src/compiler/nir/nir_lower_alpha_test.c
src/compiler/nir/nir_lower_alu.c
src/compiler/nir/nir_lower_alu_width.c
src/compiler/nir/nir_lower_array_deref_of_vec.c
src/compiler/nir/nir_lower_atomics_to_ssbo.c
src/compiler/nir/nir_lower_bit_size.c
src/compiler/nir/nir_lower_bitmap.c
src/compiler/nir/nir_lower_blend.c
src/compiler/nir/nir_lower_bool_to_bitsize.c
src/compiler/nir/nir_lower_bool_to_float.c
src/compiler/nir/nir_lower_bool_to_int32.c
src/compiler/nir/nir_lower_cl_images.c
src/compiler/nir/nir_lower_clamp_color_outputs.c
src/compiler/nir/nir_lower_clip.c
src/compiler/nir/nir_lower_clip_disable.c
src/compiler/nir/nir_lower_clip_halfz.c
src/compiler/nir/nir_lower_const_arrays_to_uniforms.c
src/compiler/nir/nir_lower_convert_alu_types.c
src/compiler/nir/nir_lower_discard_or_demote.c
src/compiler/nir/nir_lower_double_ops.c
src/compiler/nir/nir_lower_drawpixels.c
src/compiler/nir/nir_lower_fb_read.c
src/compiler/nir/nir_lower_flrp.c
src/compiler/nir/nir_lower_fp16_conv.c
src/compiler/nir/nir_lower_frag_coord_to_pixel_coord.c
src/compiler/nir/nir_lower_fragcolor.c
src/compiler/nir/nir_lower_fragcoord_wtrans.c
src/compiler/nir/nir_lower_frexp.c
src/compiler/nir/nir_lower_goto_ifs.c
src/compiler/nir/nir_lower_gs_intrinsics.c
src/compiler/nir/nir_lower_helper_writes.c
src/compiler/nir/nir_lower_idiv.c
src/compiler/nir/nir_lower_image.c
src/compiler/nir/nir_lower_image_atomics_to_global.c
src/compiler/nir/nir_lower_indirect_derefs.c
src/compiler/nir/nir_lower_input_attachments.c
src/compiler/nir/nir_lower_int64.c
src/compiler/nir/nir_lower_int_to_float.c
src/compiler/nir/nir_lower_interpolation.c
src/compiler/nir/nir_lower_io.c
src/compiler/nir/nir_lower_io_arrays_to_elements.c
src/compiler/nir/nir_lower_io_to_scalar.c
src/compiler/nir/nir_lower_io_to_temporaries.c
src/compiler/nir/nir_lower_io_to_vector.c
src/compiler/nir/nir_lower_is_helper_invocation.c
src/compiler/nir/nir_lower_load_const_to_scalar.c
src/compiler/nir/nir_lower_locals_to_regs.c
src/compiler/nir/nir_lower_mediump.c
src/compiler/nir/nir_lower_mem_access_bit_sizes.c
src/compiler/nir/nir_lower_memcpy.c
src/compiler/nir/nir_lower_multiview.c
src/compiler/nir/nir_lower_non_uniform_access.c
src/compiler/nir/nir_lower_packing.c
src/compiler/nir/nir_lower_passthrough_edgeflags.c
src/compiler/nir/nir_lower_patch_vertices.c
src/compiler/nir/nir_lower_phis_to_scalar.c
src/compiler/nir/nir_lower_pntc_ytransform.c
src/compiler/nir/nir_lower_point_size.c
src/compiler/nir/nir_lower_point_size_mov.c
src/compiler/nir/nir_lower_point_smooth.c
src/compiler/nir/nir_lower_poly_line_smooth.c
src/compiler/nir/nir_lower_printf.c
src/compiler/nir/nir_lower_readonly_images_to_tex.c
src/compiler/nir/nir_lower_reg_intrinsics_to_ssa.c
src/compiler/nir/nir_lower_robust_access.c
src/compiler/nir/nir_lower_samplers.c
src/compiler/nir/nir_lower_scratch.c
src/compiler/nir/nir_lower_shader_calls.c
src/compiler/nir/nir_lower_single_sampled.c
src/compiler/nir/nir_lower_ssbo.c
src/compiler/nir/nir_lower_subgroups.c
src/compiler/nir/nir_lower_system_values.c
src/compiler/nir/nir_lower_task_shader.c
src/compiler/nir/nir_lower_tess_coord_z.c
src/compiler/nir/nir_lower_tex.c
src/compiler/nir/nir_lower_tex_shadow.c
src/compiler/nir/nir_lower_texcoord_replace.c
src/compiler/nir/nir_lower_texcoord_replace_late.c
src/compiler/nir/nir_lower_two_sided_color.c
src/compiler/nir/nir_lower_ubo_vec4.c
src/compiler/nir/nir_lower_undef_to_zero.c
src/compiler/nir/nir_lower_uniforms_to_ubo.c
src/compiler/nir/nir_lower_variable_initializers.c
src/compiler/nir/nir_lower_vars_to_ssa.c
src/compiler/nir/nir_lower_vec3_to_vec4.c
src/compiler/nir/nir_lower_vec_to_regs.c
src/compiler/nir/nir_lower_viewport_transform.c
src/compiler/nir/nir_lower_wpos_center.c
src/compiler/nir/nir_lower_wpos_ytransform.c
src/compiler/nir/nir_lower_wrmasks.c
src/compiler/nir/nir_mod_analysis.c
src/compiler/nir/nir_move_vec_src_uses_to_dest.c
src/compiler/nir/nir_normalize_cubemap_coords.c
src/compiler/nir/nir_opt_access.c
src/compiler/nir/nir_opt_combine_stores.c
src/compiler/nir/nir_opt_comparison_pre.c
src/compiler/nir/nir_opt_conditional_discard.c
src/compiler/nir/nir_opt_constant_folding.c
src/compiler/nir/nir_opt_copy_prop_vars.c
src/compiler/nir/nir_opt_copy_propagate.c
src/compiler/nir/nir_opt_dce.c
src/compiler/nir/nir_opt_dead_cf.c
src/compiler/nir/nir_opt_fragdepth.c
src/compiler/nir/nir_opt_gcm.c
src/compiler/nir/nir_opt_idiv_const.c
src/compiler/nir/nir_opt_if.c
src/compiler/nir/nir_opt_intrinsics.c
src/compiler/nir/nir_opt_large_constants.c
src/compiler/nir/nir_opt_load_store_vectorize.c
src/compiler/nir/nir_opt_loop_unroll.c
src/compiler/nir/nir_opt_memcpy.c
src/compiler/nir/nir_opt_move.c
src/compiler/nir/nir_opt_offsets.c
src/compiler/nir/nir_opt_peephole_select.c
src/compiler/nir/nir_opt_phi_precision.c
src/compiler/nir/nir_opt_preamble.c
src/compiler/nir/nir_opt_ray_queries.c
src/compiler/nir/nir_opt_reassociate_bfi.c
src/compiler/nir/nir_opt_remove_phis.c
src/compiler/nir/nir_opt_shrink_stores.c
src/compiler/nir/nir_opt_shrink_vectors.c
src/compiler/nir/nir_opt_sink.c
src/compiler/nir/nir_opt_undef.c
src/compiler/nir/nir_opt_uniform_atomics.c
src/compiler/nir/nir_opt_vectorize.c
src/compiler/nir/nir_passthrough_gs.c
src/compiler/nir/nir_passthrough_tcs.c
src/compiler/nir/nir_phi_builder.c
src/compiler/nir/nir_phi_builder.h
src/compiler/nir/nir_print.c
src/compiler/nir/nir_range_analysis.c
src/compiler/nir/nir_range_analysis.h
src/compiler/nir/nir_repair_ssa.c
src/compiler/nir/nir_scale_fdiv.c
src/compiler/nir/nir_schedule.c
src/compiler/nir/nir_search.c
src/compiler/nir/nir_search_helpers.h
src/compiler/nir/nir_serialize.c
src/compiler/nir/nir_split_64bit_vec3_and_vec4.c
src/compiler/nir/nir_split_per_member_structs.c
src/compiler/nir/nir_split_vars.c
src/compiler/nir/nir_to_lcssa.c
src/compiler/nir/nir_trivialize_registers.c
src/compiler/nir/nir_validate.c
src/compiler/nir/tests/algebraic_tests.cpp
src/compiler/nir/tests/builder_tests.cpp
src/compiler/nir/tests/comparison_pre_tests.cpp
src/compiler/nir/tests/core_tests.cpp
src/compiler/nir/tests/dce_tests.cpp
src/compiler/nir/tests/load_store_vectorizer_tests.cpp
src/compiler/nir/tests/loop_analyze_tests.cpp
src/compiler/nir/tests/loop_unroll_tests.cpp
src/compiler/nir/tests/lower_alu_width_tests.cpp
src/compiler/nir/tests/mod_analysis_tests.cpp
src/compiler/nir/tests/negative_equal_tests.cpp
src/compiler/nir/tests/opt_if_tests.cpp
src/compiler/nir/tests/opt_shrink_vectors_tests.cpp
src/compiler/nir/tests/range_analysis_tests.cpp
src/compiler/nir/tests/serialize_tests.cpp
src/compiler/nir/tests/vars_tests.cpp
src/compiler/spirv/spirv_to_nir.c
src/compiler/spirv/vtn_alu.c
src/compiler/spirv/vtn_amd.c
src/compiler/spirv/vtn_cfg.c
src/compiler/spirv/vtn_glsl450.c
src/compiler/spirv/vtn_opencl.c
src/compiler/spirv/vtn_private.h
src/compiler/spirv/vtn_structured_cfg.c
src/compiler/spirv/vtn_subgroup.c
src/compiler/spirv/vtn_variables.c
src/freedreno/ir3/ir3.h
src/freedreno/ir3/ir3_compiler_nir.c
src/freedreno/ir3/ir3_context.c
src/freedreno/ir3/ir3_context.h
src/freedreno/ir3/ir3_nir.c
src/freedreno/ir3/ir3_nir.h
src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
src/freedreno/ir3/ir3_nir_lower_64b.c
src/freedreno/ir3/ir3_nir_lower_io_offsets.c
src/freedreno/ir3/ir3_nir_lower_layer_id.c
src/freedreno/ir3/ir3_nir_lower_load_barycentric_at_offset.c
src/freedreno/ir3/ir3_nir_lower_load_barycentric_at_sample.c
src/freedreno/ir3/ir3_nir_lower_tess.c
src/freedreno/ir3/ir3_nir_lower_tex_prefetch.c
src/freedreno/ir3/ir3_nir_lower_wide_load_store.c
src/freedreno/ir3/ir3_nir_opt_preamble.c
src/freedreno/vulkan/tu_clear_blit.cc
src/freedreno/vulkan/tu_nir_lower_multiview.cc
src/freedreno/vulkan/tu_shader.cc
src/gallium/auxiliary/gallivm/lp_bld_nir.c
src/gallium/auxiliary/nir/nir_draw_helpers.c
src/gallium/auxiliary/nir/nir_to_tgsi.c
src/gallium/auxiliary/nir/nir_to_tgsi_info.c
src/gallium/auxiliary/nir/tgsi_to_nir.c
src/gallium/drivers/asahi/agx_nir_lower_bindings.c
src/gallium/drivers/asahi/agx_nir_lower_sysvals.c
src/gallium/drivers/asahi/agx_streamout.c
src/gallium/drivers/crocus/crocus_program.c
src/gallium/drivers/d3d12/d3d12_blit.cpp
src/gallium/drivers/d3d12/d3d12_compute_transforms.cpp
src/gallium/drivers/d3d12/d3d12_gs_variant.cpp
src/gallium/drivers/d3d12/d3d12_lower_image_casts.c
src/gallium/drivers/d3d12/d3d12_lower_point_sprite.c
src/gallium/drivers/d3d12/d3d12_nir_passes.c
src/gallium/drivers/d3d12/d3d12_nir_passes.h
src/gallium/drivers/d3d12/d3d12_tcs_variant.cpp
src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c
src/gallium/drivers/etnaviv/etnaviv_compiler_nir.h
src/gallium/drivers/etnaviv/etnaviv_nir.c
src/gallium/drivers/etnaviv/etnaviv_nir_lower_source_mods.c
src/gallium/drivers/etnaviv/etnaviv_nir_lower_texture.c
src/gallium/drivers/etnaviv/etnaviv_nir_lower_ubo_to_uniform.c
src/gallium/drivers/etnaviv/tests/lower_ubo_tests.cpp
src/gallium/drivers/freedreno/a2xx/ir2_nir.c
src/gallium/drivers/freedreno/ir3/ir3_descriptor.c
src/gallium/drivers/iris/iris_program.c
src/gallium/drivers/lima/ir/gp/nir.c
src/gallium/drivers/lima/ir/lima_nir_lower_txp.c
src/gallium/drivers/lima/ir/lima_nir_lower_uniform_to_scalar.c
src/gallium/drivers/lima/ir/lima_nir_split_load_input.c
src/gallium/drivers/lima/ir/lima_nir_split_loads.c
src/gallium/drivers/lima/ir/pp/nir.c
src/gallium/drivers/nouveau/nv50/nv50_surface.c
src/gallium/drivers/panfrost/pan_nir_lower_sysvals.c
src/gallium/drivers/r600/sfn/sfn_instr_tex.cpp
src/gallium/drivers/r600/sfn/sfn_instrfactory.cpp
src/gallium/drivers/r600/sfn/sfn_instrfactory.h
src/gallium/drivers/r600/sfn/sfn_nir.cpp
src/gallium/drivers/r600/sfn/sfn_nir.h
src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp
src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp
src/gallium/drivers/r600/sfn/sfn_nir_lower_alu.cpp
src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
src/gallium/drivers/r600/sfn/sfn_nir_lower_tess_io.cpp
src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp
src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
src/gallium/drivers/r600/sfn/sfn_valuefactory.cpp
src/gallium/drivers/r600/sfn/sfn_valuefactory.h
src/gallium/drivers/radeonsi/si_nir_lower_abi.c
src/gallium/drivers/radeonsi/si_nir_lower_resource.c
src/gallium/drivers/radeonsi/si_nir_lower_vs_inputs.c
src/gallium/drivers/radeonsi/si_nir_optim.c
src/gallium/drivers/radeonsi/si_shader.c
src/gallium/drivers/radeonsi/si_shader_info.c
src/gallium/drivers/radeonsi/si_shader_internal.h
src/gallium/drivers/radeonsi/si_shader_nir.c
src/gallium/drivers/radeonsi/si_shaderlib_nir.c
src/gallium/drivers/v3d/v3d_blit.c
src/gallium/drivers/vc4/vc4_blit.c
src/gallium/drivers/vc4/vc4_nir_lower_blend.c
src/gallium/drivers/vc4/vc4_nir_lower_io.c
src/gallium/drivers/vc4/vc4_nir_lower_txf_ms.c
src/gallium/drivers/vc4/vc4_program.c
src/gallium/drivers/vc4/vc4_qir.h
src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
src/gallium/drivers/zink/zink_compiler.c
src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
src/gallium/frontends/clover/nir/invocation.cpp
src/gallium/frontends/lavapipe/lvp_inline_uniforms.c
src/gallium/frontends/lavapipe/lvp_lower_input_attachments.c
src/gallium/frontends/lavapipe/lvp_lower_vulkan_resource.c
src/gallium/frontends/lavapipe/lvp_pipeline.c
src/gallium/frontends/rusticl/rusticl_nir.c
src/imagination/rogue/nir/rogue_nir_lower_io.c
src/imagination/rogue/nir/rogue_nir_pfo.c
src/imagination/rogue/rogue_compile.c
src/intel/blorp/blorp.c
src/intel/blorp/blorp_blit.c
src/intel/blorp/blorp_clear.c
src/intel/blorp/blorp_nir_builder.h
src/intel/compiler/brw_fs.cpp
src/intel/compiler/brw_fs.h
src/intel/compiler/brw_fs_nir.cpp
src/intel/compiler/brw_kernel.c
src/intel/compiler/brw_mesh.cpp
src/intel/compiler/brw_nir.c
src/intel/compiler/brw_nir.h
src/intel/compiler/brw_nir_attribute_workarounds.c
src/intel/compiler/brw_nir_clamp_image_1d_2d_array_sizes.c
src/intel/compiler/brw_nir_clamp_per_vertex_loads.c
src/intel/compiler/brw_nir_lower_alpha_to_coverage.c
src/intel/compiler/brw_nir_lower_conversions.c
src/intel/compiler/brw_nir_lower_cs_intrinsics.c
src/intel/compiler/brw_nir_lower_intersection_shader.c
src/intel/compiler/brw_nir_lower_non_uniform_resource_intel.c
src/intel/compiler/brw_nir_lower_ray_queries.c
src/intel/compiler/brw_nir_lower_rt_intrinsics.c
src/intel/compiler/brw_nir_lower_shader_calls.c
src/intel/compiler/brw_nir_lower_shading_rate_output.c
src/intel/compiler/brw_nir_lower_sparse.c
src/intel/compiler/brw_nir_lower_storage_image.c
src/intel/compiler/brw_nir_opt_peephole_ffma.c
src/intel/compiler/brw_nir_opt_peephole_imul32x16.c
src/intel/compiler/brw_nir_rt.c
src/intel/compiler/brw_nir_rt_builder.h
src/intel/compiler/brw_nir_tcs_workarounds.c
src/intel/compiler/brw_vec4.h
src/intel/compiler/brw_vec4_nir.cpp
src/intel/vulkan/anv_internal_kernels.c
src/intel/vulkan/anv_mesh_perprim_wa.c
src/intel/vulkan/anv_nir_apply_pipeline_layout.c
src/intel/vulkan/anv_nir_compute_push_layout.c
src/intel/vulkan/anv_nir_lower_load_patch_vertices_in.c
src/intel/vulkan/anv_nir_lower_multiview.c
src/intel/vulkan/anv_nir_lower_resource_intel.c
src/intel/vulkan/anv_nir_lower_ubo_loads.c
src/intel/vulkan/anv_pipeline.c
src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c
src/intel/vulkan_hasvk/anv_nir_lower_multiview.c
src/intel/vulkan_hasvk/anv_nir_lower_ubo_loads.c
src/intel/vulkan_hasvk/anv_nir_lower_ycbcr_textures.c
src/mesa/main/ff_fragment_shader.c
src/mesa/main/ffvertex_prog.c
src/mesa/program/prog_to_nir.c
src/mesa/state_tracker/st_atifs_to_nir.c
src/mesa/state_tracker/st_cb_drawpixels.c
src/mesa/state_tracker/st_draw_hw_select.c
src/mesa/state_tracker/st_nir_builtins.c
src/mesa/state_tracker/st_nir_lower_builtin.c
src/mesa/state_tracker/st_nir_lower_fog.c
src/mesa/state_tracker/st_nir_lower_position_invariant.c
src/mesa/state_tracker/st_nir_lower_tex_src_plane.c
src/mesa/state_tracker/st_pbo.c
src/mesa/state_tracker/st_pbo_compute.c
src/microsoft/clc/clc_compiler.c
src/microsoft/clc/clc_nir.c
src/microsoft/compiler/dxil_nir.c
src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c
src/microsoft/compiler/dxil_nir_lower_int_samplers.c
src/microsoft/compiler/dxil_nir_lower_vs_vertex_conversion.c
src/microsoft/compiler/dxil_nir_tess.c
src/microsoft/compiler/nir_to_dxil.c
src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
src/microsoft/spirv_to_dxil/dxil_spirv_nir_lower_bindless.c
src/microsoft/vulkan/dzn_nir.c
src/nouveau/codegen/nv50_ir_from_nir.cpp
src/nouveau/vulkan/nvk_nir_lower_descriptors.c
src/nouveau/vulkan/nvk_query_pool.c
src/nouveau/vulkan/nvk_shader.c
src/panfrost/compiler/bi_lower_divergent_indirects.c
src/panfrost/compiler/bifrost_compile.c
src/panfrost/lib/pan_blend.c
src/panfrost/lib/pan_blitter.c
src/panfrost/lib/pan_indirect_dispatch.c
src/panfrost/midgard/compiler.h
src/panfrost/midgard/midgard_address.c
src/panfrost/midgard/midgard_compile.c
src/panfrost/midgard/midgard_errata_lod.c
src/panfrost/midgard/midgard_nir_lower_image_bitsize.c
src/panfrost/midgard/midgard_nir_type_csel.c
src/panfrost/midgard/nir_fuse_io_16.c
src/panfrost/util/pan_lower_64bit_intrin.c
src/panfrost/util/pan_lower_framebuffer.c
src/panfrost/util/pan_lower_helper_invocation.c
src/panfrost/util/pan_lower_sample_position.c
src/panfrost/util/pan_lower_store_component.c
src/panfrost/util/pan_lower_writeout.c
src/panfrost/util/pan_lower_xfb.c
src/panfrost/vulkan/panvk_vX_meta_clear.c
src/panfrost/vulkan/panvk_vX_meta_copy.c
src/panfrost/vulkan/panvk_vX_nir_lower_descriptors.c
src/panfrost/vulkan/panvk_vX_shader.c
src/vulkan/runtime/vk_meta_blit_resolve.c
src/vulkan/runtime/vk_meta_clear.c
src/vulkan/runtime/vk_meta_draw_rects.c
src/vulkan/runtime/vk_nir_convert_ycbcr.c
src/vulkan/runtime/vk_nir_convert_ycbcr.h

diff --git a/docs/_exts/nir.py b/docs/_exts/nir.py
index 7d641d7..28f8d2c 100644
@@ -40,7 +40,7 @@ import nir_opcodes
 OP_DESC_TEMPLATE = mako.template.Template("""
 <%
 def src_decl_list(num_srcs):
-   return ', '.join('nir_ssa_def *src' + str(i) for i in range(num_srcs))
+   return ', '.join('nir_def *src' + str(i) for i in range(num_srcs))
 
 def to_yn(b):
     return 'Y' if b else 'N'
@@ -68,7 +68,7 @@ ${textwrap.indent(op.const_expr, '    ')}
 
 **Builder function:**
 
-.. c:function:: nir_ssa_def *nir_${op.name}(nir_builder *, ${src_decl_list(op.num_inputs)})
+.. c:function:: nir_def *nir_${op.name}(nir_builder *, ${src_decl_list(op.num_inputs)})
 """)
 
 def parse_rst(state, parent, rst):
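
After this hunk the docs template emits builder prototypes with the new type
name; for a two-source opcode such as fadd the rendered directive would read
(a sketch of the generated output, assuming fadd's two inputs):

   nir_def *nir_fadd(nir_builder *, nir_def *src0, nir_def *src1)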
diff --git a/src/amd/common/ac_nir.c b/src/amd/common/ac_nir.c
index 63e73c5..2e4a09e 100644
@@ -10,7 +10,7 @@
 #include "nir_xfb_info.h"
 
 /* Load argument with index start from arg plus relative_index. */
-nir_ssa_def *
+nir_def *
 ac_nir_load_arg_at_offset(nir_builder *b, const struct ac_shader_args *ac_args,
                           struct ac_arg arg, unsigned relative_index)
 {
@@ -25,7 +25,7 @@ ac_nir_load_arg_at_offset(nir_builder *b, const struct ac_shader_args *ac_args,
 
 void
 ac_nir_store_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
-                 nir_ssa_def *val)
+                 nir_def *val)
 {
    assert(nir_cursor_current_block(b->cursor)->cf_node.parent->type == nir_cf_node_function);
 
@@ -35,11 +35,11 @@ ac_nir_store_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac
       nir_store_vector_arg_amd(b, val, .base = arg.arg_index);
 }
 
-nir_ssa_def *
+nir_def *
 ac_nir_unpack_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
                   unsigned rshift, unsigned bitwidth)
 {
-   nir_ssa_def *value = ac_nir_load_arg(b, ac_args, arg);
+   nir_def *value = ac_nir_load_arg(b, ac_args, arg);
    if (rshift == 0 && bitwidth == 32)
       return value;
    else if (rshift == 0)
@@ -57,11 +57,11 @@ is_sin_cos(const nir_instr *instr, UNUSED const void *_)
                                                 nir_instr_as_alu(instr)->op == nir_op_fcos);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_sin_cos(struct nir_builder *b, nir_instr *instr, UNUSED void *_)
 {
    nir_alu_instr *sincos = nir_instr_as_alu(instr);
-   nir_ssa_def *src = nir_fmul_imm(b, nir_ssa_for_alu_src(b, sincos, 0), 0.15915493667125702);
+   nir_def *src = nir_fmul_imm(b, nir_ssa_for_alu_src(b, sincos, 0), 0.15915493667125702);
    return sincos->op == nir_op_fsin ? nir_fsin_amd(b, src) : nir_fcos_amd(b, src);
 }
 
@@ -85,7 +85,7 @@ lower_intrinsic_to_arg(nir_builder *b, nir_instr *instr, void *state)
 
    lower_intrinsics_to_args_state *s = (lower_intrinsics_to_args_state *)state;
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-   nir_ssa_def *replacement = NULL;
+   nir_def *replacement = NULL;
    b->cursor = nir_after_instr(&intrin->instr);
 
    switch (intrin->intrinsic) {
@@ -134,7 +134,7 @@ lower_intrinsic_to_arg(nir_builder *b, nir_instr *instr, void *state)
    }
 
    assert(replacement);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, replacement);
+   nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
    nir_instr_remove(&intrin->instr);
    return true;
 }
@@ -155,15 +155,15 @@ ac_nir_lower_intrinsics_to_args(nir_shader *shader, const enum amd_gfx_level gfx
 }
 
 void
-ac_nir_store_var_components(nir_builder *b, nir_variable *var, nir_ssa_def *value,
+ac_nir_store_var_components(nir_builder *b, nir_variable *var, nir_def *value,
                             unsigned component, unsigned writemask)
 {
    /* component store */
    if (value->num_components != 4) {
-      nir_ssa_def *undef = nir_ssa_undef(b, 1, value->bit_size);
+      nir_def *undef = nir_undef(b, 1, value->bit_size);
 
       /* add undef component before and after value to form a vec4 */
-      nir_ssa_def *comp[4];
+      nir_def *comp[4];
       for (int i = 0; i < 4; i++) {
          comp[i] = (i >= component && i < component + value->num_components) ?
             nir_channel(b, value, i - component) : undef;
@@ -180,7 +180,7 @@ ac_nir_store_var_components(nir_builder *b, nir_variable *var, nir_ssa_def *valu
 }
 
 void
-ac_nir_export_primitive(nir_builder *b, nir_ssa_def *prim)
+ac_nir_export_primitive(nir_builder *b, nir_def *prim)
 {
    unsigned write_mask = BITFIELD_MASK(prim->num_components);
 
@@ -190,15 +190,15 @@ ac_nir_export_primitive(nir_builder *b, nir_ssa_def *prim)
                   .write_mask = write_mask);
 }
 
-static nir_ssa_def *
-get_export_output(nir_builder *b, nir_ssa_def **output)
+static nir_def *
+get_export_output(nir_builder *b, nir_def **output)
 {
-   nir_ssa_def *vec[4];
+   nir_def *vec[4];
    for (int i = 0; i < 4; i++) {
       if (output[i])
          vec[i] = nir_u2uN(b, output[i], 32);
       else
-         vec[i] = nir_ssa_undef(b, 1, 32);
+         vec[i] = nir_undef(b, 1, 32);
    }
 
    return nir_vec(b, vec, 4);
@@ -211,17 +211,17 @@ ac_nir_export_position(nir_builder *b,
                        bool no_param_export,
                        bool force_vrs,
                        uint64_t outputs_written,
-                       nir_ssa_def *(*outputs)[4])
+                       nir_def *(*outputs)[4])
 {
    nir_intrinsic_instr *exp[4];
    unsigned exp_num = 0;
 
-   nir_ssa_def *pos;
+   nir_def *pos;
    if (outputs_written & VARYING_BIT_POS) {
       pos = get_export_output(b, outputs[VARYING_SLOT_POS]);
    } else {
-      nir_ssa_def *zero = nir_imm_float(b, 0);
-      nir_ssa_def *one = nir_imm_float(b, 1);
+      nir_def *zero = nir_imm_float(b, 0);
+      nir_def *one = nir_imm_float(b, 1);
       pos = nir_vec4(b, zero, zero, zero, one);
    }
 
@@ -255,8 +255,8 @@ ac_nir_export_position(nir_builder *b,
       outputs_written &= ~VARYING_BIT_VIEWPORT;
 
    if ((outputs_written & mask) || force_vrs) {
-      nir_ssa_def *zero = nir_imm_float(b, 0);
-      nir_ssa_def *vec[4] = { zero, zero, zero, zero };
+      nir_def *zero = nir_imm_float(b, 0);
+      nir_def *vec[4] = { zero, zero, zero, zero };
       unsigned flags = 0;
       unsigned write_mask = 0;
 
@@ -270,13 +270,13 @@ ac_nir_export_position(nir_builder *b,
          write_mask |= BITFIELD_BIT(1);
       }
 
-      nir_ssa_def *rates = NULL;
+      nir_def *rates = NULL;
       if (outputs_written & VARYING_BIT_PRIMITIVE_SHADING_RATE) {
          rates = outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE][0];
       } else if (force_vrs) {
          /* If Pos.W != 1 (typical for non-GUI elements), use coarse shading. */
-         nir_ssa_def *pos_w = nir_channel(b, pos, 3);
-         nir_ssa_def *cond = nir_fneu_imm(b, pos_w, 1);
+         nir_def *pos_w = nir_channel(b, pos, 3);
+         nir_def *cond = nir_fneu_imm(b, pos_w, 1);
          rates = nir_bcsel(b, cond, nir_load_force_vrs_rates_amd(b), nir_imm_int(b, 0));
       }
 
@@ -293,7 +293,7 @@ ac_nir_export_position(nir_builder *b,
       if (outputs_written & VARYING_BIT_VIEWPORT) {
          if (gfx_level >= GFX9) {
             /* GFX9 has the layer in [10:0] and the viewport index in [19:16]. */
-            nir_ssa_def *v = nir_ishl_imm(b, outputs[VARYING_SLOT_VIEWPORT][0], 16);
+            nir_def *v = nir_ishl_imm(b, outputs[VARYING_SLOT_VIEWPORT][0], 16);
             vec[2] = nir_ior(b, vec[2], v);
             write_mask |= BITFIELD_BIT(2);
          } else {
@@ -322,12 +322,12 @@ ac_nir_export_position(nir_builder *b,
    }
 
    if (outputs_written & VARYING_BIT_CLIP_VERTEX) {
-      nir_ssa_def *vtx = get_export_output(b, outputs[VARYING_SLOT_CLIP_VERTEX]);
+      nir_def *vtx = get_export_output(b, outputs[VARYING_SLOT_CLIP_VERTEX]);
 
       /* Clip distance for clip vertex to each user clip plane. */
-      nir_ssa_def *clip_dist[8] = {0};
+      nir_def *clip_dist[8] = {0};
       u_foreach_bit (i, clip_cull_mask) {
-         nir_ssa_def *ucp = nir_load_user_clip_plane(b, .ucp_id = i);
+         nir_def *ucp = nir_load_user_clip_plane(b, .ucp_id = i);
          clip_dist[i] = nir_fdot4(b, vtx, ucp);
       }
 
@@ -365,9 +365,9 @@ ac_nir_export_parameters(nir_builder *b,
                          const uint8_t *param_offsets,
                          uint64_t outputs_written,
                          uint16_t outputs_written_16bit,
-                         nir_ssa_def *(*outputs)[4],
-                         nir_ssa_def *(*outputs_16bit_lo)[4],
-                         nir_ssa_def *(*outputs_16bit_hi)[4])
+                         nir_def *(*outputs)[4],
+                         nir_def *(*outputs_16bit_lo)[4],
+                         nir_def *(*outputs_16bit_hi)[4])
 {
    uint32_t exported_params = 0;
 
@@ -422,11 +422,11 @@ ac_nir_export_parameters(nir_builder *b,
       if (exported_params & BITFIELD_BIT(offset))
          continue;
 
-      nir_ssa_def *vec[4];
-      nir_ssa_def *undef = nir_ssa_undef(b, 1, 16);
+      nir_def *vec[4];
+      nir_def *undef = nir_undef(b, 1, 16);
       for (int i = 0; i < 4; i++) {
-         nir_ssa_def *lo = outputs_16bit_lo[slot][i] ? outputs_16bit_lo[slot][i] : undef;
-         nir_ssa_def *hi = outputs_16bit_hi[slot][i] ? outputs_16bit_hi[slot][i] : undef;
+         nir_def *lo = outputs_16bit_lo[slot][i] ? outputs_16bit_lo[slot][i] : undef;
+         nir_def *hi = outputs_16bit_hi[slot][i] ? outputs_16bit_hi[slot][i] : undef;
          vec[i] = nir_pack_32_2x16_split(b, lo, hi);
       }
 
@@ -443,10 +443,10 @@ ac_nir_export_parameters(nir_builder *b,
  * and emits a sequence that calculates the full offset of that instruction,
  * including a stride to the base and component offsets.
  */
-nir_ssa_def *
+nir_def *
 ac_nir_calc_io_offset(nir_builder *b,
                       nir_intrinsic_instr *intrin,
-                      nir_ssa_def *base_stride,
+                      nir_def *base_stride,
                       unsigned component_stride,
                       ac_nir_map_io_driver_location map_io)
 {
@@ -455,13 +455,13 @@ ac_nir_calc_io_offset(nir_builder *b,
    unsigned mapped_driver_location = map_io ? map_io(semantic) : base;
 
    /* base is the driver_location, which is in slots (1 slot = 4x4 bytes) */
-   nir_ssa_def *base_op = nir_imul_imm(b, base_stride, mapped_driver_location);
+   nir_def *base_op = nir_imul_imm(b, base_stride, mapped_driver_location);
 
    /* offset should be interpreted in relation to the base,
     * so the instruction effectively reads/writes another input/output
     * when it has an offset
     */
-   nir_ssa_def *offset_op = nir_imul(b, base_stride, nir_ssa_for_src(b, *nir_get_io_offset_src(intrin), 1));
+   nir_def *offset_op = nir_imul(b, base_stride, nir_ssa_for_src(b, *nir_get_io_offset_src(intrin), 1));
 
    /* component is in bytes */
    unsigned const_op = nir_intrinsic_component(intrin) * component_stride;
@@ -513,19 +513,19 @@ ac_nir_lower_indirect_derefs(nir_shader *shader,
 }
 
 struct shader_outputs {
-   nir_ssa_def *data[VARYING_SLOT_MAX][4];
-   nir_ssa_def *data_16bit_lo[16][4];
-   nir_ssa_def *data_16bit_hi[16][4];
+   nir_def *data[VARYING_SLOT_MAX][4];
+   nir_def *data_16bit_lo[16][4];
+   nir_def *data_16bit_hi[16][4];
 
    nir_alu_type (*type_16bit_lo)[4];
    nir_alu_type (*type_16bit_hi)[4];
 };
 
-static nir_ssa_def **
+static nir_def **
 get_output_and_type(struct shader_outputs *outputs, unsigned slot, bool high_16bits,
                     nir_alu_type **types)
 {
-   nir_ssa_def **data;
+   nir_def **data;
    nir_alu_type *type;
 
    /* Only VARYING_SLOT_VARn_16BIT slots need output type to convert 16bit output
@@ -554,38 +554,38 @@ static void
 emit_streamout(nir_builder *b, unsigned stream, nir_xfb_info *info,
                struct shader_outputs *outputs)
 {
-   nir_ssa_def *so_vtx_count = nir_ubfe_imm(b, nir_load_streamout_config_amd(b), 16, 7);
-   nir_ssa_def *tid = nir_load_subgroup_invocation(b);
+   nir_def *so_vtx_count = nir_ubfe_imm(b, nir_load_streamout_config_amd(b), 16, 7);
+   nir_def *tid = nir_load_subgroup_invocation(b);
 
    nir_push_if(b, nir_ilt(b, tid, so_vtx_count));
-   nir_ssa_def *so_write_index = nir_load_streamout_write_index_amd(b);
+   nir_def *so_write_index = nir_load_streamout_write_index_amd(b);
 
-   nir_ssa_def *so_buffers[NIR_MAX_XFB_BUFFERS];
-   nir_ssa_def *so_write_offset[NIR_MAX_XFB_BUFFERS];
+   nir_def *so_buffers[NIR_MAX_XFB_BUFFERS];
+   nir_def *so_write_offset[NIR_MAX_XFB_BUFFERS];
    u_foreach_bit(i, info->buffers_written) {
       so_buffers[i] = nir_load_streamout_buffer_amd(b, i);
 
       unsigned stride = info->buffers[i].stride;
-      nir_ssa_def *offset = nir_load_streamout_offset_amd(b, i);
+      nir_def *offset = nir_load_streamout_offset_amd(b, i);
       offset = nir_iadd(b, nir_imul_imm(b, nir_iadd(b, so_write_index, tid), stride),
                         nir_imul_imm(b, offset, 4));
       so_write_offset[i] = offset;
    }
 
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
+   nir_def *undef = nir_undef(b, 1, 32);
    for (unsigned i = 0; i < info->output_count; i++) {
       const nir_xfb_output_info *output = info->outputs + i;
       if (stream != info->buffer_to_stream[output->buffer])
          continue;
 
       nir_alu_type *output_type;
-      nir_ssa_def **output_data =
+      nir_def **output_data =
          get_output_and_type(outputs, output->location, output->high_16bits, &output_type);
 
-      nir_ssa_def *vec[4] = {undef, undef, undef, undef};
+      nir_def *vec[4] = {undef, undef, undef, undef};
       uint8_t mask = 0;
       u_foreach_bit(j, output->component_mask) {
-         nir_ssa_def *data = output_data[j];
+         nir_def *data = output_data[j];
 
          if (data) {
             if (data->bit_size < 32) {
@@ -606,8 +606,8 @@ emit_streamout(nir_builder *b, unsigned stream, nir_xfb_info *info,
          continue;
 
       unsigned buffer = output->buffer;
-      nir_ssa_def *data = nir_vec(b, vec, util_last_bit(mask));
-      nir_ssa_def *zero = nir_imm_int(b, 0);
+      nir_def *data = nir_vec(b, vec, util_last_bit(mask));
+      nir_def *zero = nir_imm_int(b, 0);
       nir_store_buffer_amd(b, data, so_buffers[buffer], so_write_offset[buffer], zero, zero,
                            .base = output->offset, .write_mask = mask,
                            .access = ACCESS_COHERENT | ACCESS_NON_TEMPORAL);
@@ -636,15 +636,15 @@ ac_nir_create_gs_copy_shader(const nir_shader *gs_nir,
    b.shader->info.outputs_written = gs_nir->info.outputs_written;
    b.shader->info.outputs_written_16bit = gs_nir->info.outputs_written_16bit;
 
-   nir_ssa_def *gsvs_ring = nir_load_ring_gsvs_amd(&b);
+   nir_def *gsvs_ring = nir_load_ring_gsvs_amd(&b);
 
    nir_xfb_info *info = gs_nir->xfb_info;
-   nir_ssa_def *stream_id = NULL;
+   nir_def *stream_id = NULL;
    if (!disable_streamout && info)
       stream_id = nir_ubfe_imm(&b, nir_load_streamout_config_amd(&b), 24, 2);
 
-   nir_ssa_def *vtx_offset = nir_imul_imm(&b, nir_load_vertex_id_zero_base(&b), 4);
-   nir_ssa_def *zero = nir_imm_zero(&b, 1, 32);
+   nir_def *vtx_offset = nir_imul_imm(&b, nir_load_vertex_id_zero_base(&b), 4);
+   nir_def *zero = nir_imm_zero(&b, 1, 32);
 
    for (unsigned stream = 0; stream < 4; stream++) {
       if (stream > 0 && (!stream_id || !(info->streams_written & BITFIELD_BIT(stream))))
@@ -672,8 +672,8 @@ ac_nir_create_gs_copy_shader(const nir_shader *gs_nir,
             /* clamp legacy color output */
             if (i == VARYING_SLOT_COL0 || i == VARYING_SLOT_COL1 ||
                 i == VARYING_SLOT_BFC0 || i == VARYING_SLOT_BFC1) {
-               nir_ssa_def *color = outputs.data[i][j];
-               nir_ssa_def *clamp = nir_load_clamp_vertex_color_amd(&b);
+               nir_def *color = outputs.data[i][j];
+               nir_def *clamp = nir_load_clamp_vertex_color_amd(&b);
                outputs.data[i][j] = nir_bcsel(&b, clamp, nir_fsat(&b, color), color);
             }
 
@@ -690,7 +690,7 @@ ac_nir_create_gs_copy_shader(const nir_shader *gs_nir,
             if (!has_lo_16bit && !has_hi_16bit)
                continue;
 
-            nir_ssa_def *data =
+            nir_def *data =
                nir_load_buffer_amd(&b, 1, 32, gsvs_ring, vtx_offset, zero, zero,
                                    .base = offset,
                                    .access = ACCESS_COHERENT | ACCESS_NON_TEMPORAL);
@@ -759,7 +759,7 @@ gather_outputs(nir_builder *b, nir_function_impl *impl, struct shader_outputs *o
          nir_io_semantics sem = nir_intrinsic_io_semantics(intrin);
 
          nir_alu_type *output_type;
-         nir_ssa_def **output_data =
+         nir_def **output_data =
             get_output_and_type(outputs, sem.location, sem.high_16bits, &output_type);
 
          u_foreach_bit (i, nir_intrinsic_write_mask(intrin)) {
@@ -841,12 +841,12 @@ ac_nir_gs_shader_query(nir_builder *b,
                        bool has_pipeline_stats_query,
                        unsigned num_vertices_per_primitive,
                        unsigned wave_size,
-                       nir_ssa_def *vertex_count[4],
-                       nir_ssa_def *primitive_count[4])
+                       nir_def *vertex_count[4],
+                       nir_def *primitive_count[4])
 {
-   nir_ssa_def *pipeline_query_enabled = NULL;
-   nir_ssa_def *prim_gen_query_enabled = NULL;
-   nir_ssa_def *shader_query_enabled = NULL;
+   nir_def *pipeline_query_enabled = NULL;
+   nir_def *prim_gen_query_enabled = NULL;
+   nir_def *shader_query_enabled = NULL;
    if (has_gen_prim_query) {
       prim_gen_query_enabled = nir_load_prim_gen_query_enabled_amd(b);
       if (has_pipeline_stats_query) {
@@ -865,31 +865,31 @@ ac_nir_gs_shader_query(nir_builder *b,
 
    nir_if *if_shader_query = nir_push_if(b, shader_query_enabled);
 
-   nir_ssa_def *active_threads_mask = nir_ballot(b, 1, wave_size, nir_imm_true(b));
-   nir_ssa_def *num_active_threads = nir_bit_count(b, active_threads_mask);
+   nir_def *active_threads_mask = nir_ballot(b, 1, wave_size, nir_imm_true(b));
+   nir_def *num_active_threads = nir_bit_count(b, active_threads_mask);
 
    /* Calculate the "real" number of emitted primitives from the emitted GS vertices and primitives.
     * GS emits points, line strips or triangle strips.
     * Real primitives are points, lines or triangles.
     */
-   nir_ssa_def *num_prims_in_wave[4] = {0};
+   nir_def *num_prims_in_wave[4] = {0};
    u_foreach_bit (i, b->shader->info.gs.active_stream_mask) {
       assert(vertex_count[i] && primitive_count[i]);
 
-      nir_ssa_scalar vtx_cnt = nir_get_ssa_scalar(vertex_count[i], 0);
-      nir_ssa_scalar prm_cnt = nir_get_ssa_scalar(primitive_count[i], 0);
+      nir_scalar vtx_cnt = nir_get_ssa_scalar(vertex_count[i], 0);
+      nir_scalar prm_cnt = nir_get_ssa_scalar(primitive_count[i], 0);
 
-      if (nir_ssa_scalar_is_const(vtx_cnt) && nir_ssa_scalar_is_const(prm_cnt)) {
-         unsigned gs_vtx_cnt = nir_ssa_scalar_as_uint(vtx_cnt);
-         unsigned gs_prm_cnt = nir_ssa_scalar_as_uint(prm_cnt);
+      if (nir_scalar_is_const(vtx_cnt) && nir_scalar_is_const(prm_cnt)) {
+         unsigned gs_vtx_cnt = nir_scalar_as_uint(vtx_cnt);
+         unsigned gs_prm_cnt = nir_scalar_as_uint(prm_cnt);
          unsigned total_prm_cnt = gs_vtx_cnt - gs_prm_cnt * (num_vertices_per_primitive - 1u);
          if (total_prm_cnt == 0)
             continue;
 
          num_prims_in_wave[i] = nir_imul_imm(b, num_active_threads, total_prm_cnt);
       } else {
-         nir_ssa_def *gs_vtx_cnt = vtx_cnt.def;
-         nir_ssa_def *gs_prm_cnt = prm_cnt.def;
+         nir_def *gs_vtx_cnt = vtx_cnt.def;
+         nir_def *gs_prm_cnt = prm_cnt.def;
          if (num_vertices_per_primitive > 1)
             gs_prm_cnt = nir_iadd(b, nir_imul_imm(b, gs_prm_cnt, -1u * (num_vertices_per_primitive - 1)), gs_vtx_cnt);
          num_prims_in_wave[i] = nir_reduce(b, gs_prm_cnt, .reduction_op = nir_op_iadd);
@@ -902,7 +902,7 @@ ac_nir_gs_shader_query(nir_builder *b,
       if (has_pipeline_stats_query) {
          nir_if *if_pipeline_query = nir_push_if(b, pipeline_query_enabled);
          {
-            nir_ssa_def *count = NULL;
+            nir_def *count = NULL;
 
             /* Add all streams' number to the same counter. */
             for (int i = 0; i < 4; i++) {
@@ -941,14 +941,14 @@ ac_nir_gs_shader_query(nir_builder *b,
 }
 
 typedef struct {
-   nir_ssa_def *outputs[64][4];
-   nir_ssa_def *outputs_16bit_lo[16][4];
-   nir_ssa_def *outputs_16bit_hi[16][4];
+   nir_def *outputs[64][4];
+   nir_def *outputs_16bit_lo[16][4];
+   nir_def *outputs_16bit_hi[16][4];
 
    ac_nir_gs_output_info *info;
 
-   nir_ssa_def *vertex_count[4];
-   nir_ssa_def *primitive_count[4];
+   nir_def *vertex_count[4];
+   nir_def *primitive_count[4];
 } lower_legacy_gs_state;
 
 static bool
@@ -968,7 +968,7 @@ lower_legacy_gs_store_output(nir_builder *b, nir_intrinsic_instr *intrin,
    unsigned write_mask = nir_intrinsic_write_mask(intrin);
    nir_io_semantics sem = nir_intrinsic_io_semantics(intrin);
 
-   nir_ssa_def **outputs;
+   nir_def **outputs;
    if (sem.location < VARYING_SLOT_VAR0_16BIT) {
       outputs = s->outputs[sem.location];
    } else {
@@ -979,7 +979,7 @@ lower_legacy_gs_store_output(nir_builder *b, nir_intrinsic_instr *intrin,
          outputs = s->outputs_16bit_lo[index];
    }
 
-   nir_ssa_def *store_val = intrin->src[0].ssa;
+   nir_def *store_val = intrin->src[0].ssa;
    /* 64bit output has been lowered to 32bit */
    assert(store_val->bit_size <= 32);
 
@@ -999,15 +999,15 @@ lower_legacy_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *in
    b->cursor = nir_before_instr(&intrin->instr);
 
    unsigned stream = nir_intrinsic_stream_id(intrin);
-   nir_ssa_def *vtxidx = intrin->src[0].ssa;
+   nir_def *vtxidx = intrin->src[0].ssa;
 
-   nir_ssa_def *gsvs_ring = nir_load_ring_gsvs_amd(b, .stream_id = stream);
-   nir_ssa_def *soffset = nir_load_ring_gs2vs_offset_amd(b);
+   nir_def *gsvs_ring = nir_load_ring_gsvs_amd(b, .stream_id = stream);
+   nir_def *soffset = nir_load_ring_gs2vs_offset_amd(b);
 
    unsigned offset = 0;
    u_foreach_bit64 (i, b->shader->info.outputs_written) {
       for (unsigned j = 0; j < 4; j++) {
-         nir_ssa_def *output = s->outputs[i][j];
+         nir_def *output = s->outputs[i][j];
          /* Next vertex emit need a new value, reset all outputs. */
          s->outputs[i][j] = NULL;
 
@@ -1022,10 +1022,10 @@ lower_legacy_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *in
          if (!output)
             continue;
 
-         nir_ssa_def *voffset = nir_ishl_imm(b, vtxidx, 2);
+         nir_def *voffset = nir_ishl_imm(b, vtxidx, 2);
 
          /* extend 8/16 bit to 32 bit, 64 bit has been lowered */
-         nir_ssa_def *data = nir_u2uN(b, output, 32);
+         nir_def *data = nir_u2uN(b, output, 32);
 
          nir_store_buffer_amd(b, data, gsvs_ring, voffset, soffset, nir_imm_int(b, 0),
                               .access = ACCESS_COHERENT | ACCESS_NON_TEMPORAL |
@@ -1038,8 +1038,8 @@ lower_legacy_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *in
 
    u_foreach_bit (i, b->shader->info.outputs_written_16bit) {
       for (unsigned j = 0; j < 4; j++) {
-         nir_ssa_def *output_lo = s->outputs_16bit_lo[i][j];
-         nir_ssa_def *output_hi = s->outputs_16bit_hi[i][j];
+         nir_def *output_lo = s->outputs_16bit_lo[i][j];
+         nir_def *output_hi = s->outputs_16bit_hi[i][j];
          /* Next vertex emit need a new value, reset all outputs. */
          s->outputs_16bit_lo[i][j] = NULL;
          s->outputs_16bit_hi[i][j] = NULL;
@@ -1062,12 +1062,12 @@ lower_legacy_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *in
             continue;
 
          if (!has_lo_16bit_out)
-            output_lo = nir_ssa_undef(b, 1, 16);
+            output_lo = nir_undef(b, 1, 16);
 
          if (!has_hi_16bit_out)
-            output_hi = nir_ssa_undef(b, 1, 16);
+            output_hi = nir_undef(b, 1, 16);
 
-         nir_ssa_def *voffset = nir_iadd_imm(b, vtxidx, base);
+         nir_def *voffset = nir_iadd_imm(b, vtxidx, base);
          voffset = nir_ishl_imm(b, voffset, 2);
 
          nir_store_buffer_amd(b, nir_pack_32_2x16_split(b, output_lo, output_hi),
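
Note in the ac_nir_gs_shader_query hunk above that nir_get_ssa_scalar() keeps
its old name: it does not contain the literal substring "nir_ssa_scalar", so
the sed rule skips it. A minimal sketch of the renamed scalar queries,
assuming def is an in-scope nir_def *:

   nir_scalar s = nir_get_ssa_scalar(def, 0);  /* helper name unchanged here */
   if (nir_scalar_is_const(s))
      printf("chan0 = %u\n", (unsigned)nir_scalar_as_uint(s));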
diff --git a/src/amd/common/ac_nir.h b/src/amd/common/ac_nir.h
index 5930544..ca825d3 100644
@@ -47,20 +47,20 @@ typedef struct nir_builder nir_builder;
 /* Executed by ac_nir_cull when the current primitive is accepted. */
 typedef void (*ac_nir_cull_accepted)(nir_builder *b, void *state);
 
-nir_ssa_def *
+nir_def *
 ac_nir_load_arg_at_offset(nir_builder *b, const struct ac_shader_args *ac_args,
                           struct ac_arg arg, unsigned relative_index);
 
-static inline nir_ssa_def *
+static inline nir_def *
 ac_nir_load_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg)
 {
    return ac_nir_load_arg_at_offset(b, ac_args, arg, 0);
 }
 
 void ac_nir_store_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
-                      nir_ssa_def *val);
+                      nir_def *val);
 
-nir_ssa_def *
+nir_def *
 ac_nir_unpack_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
                   unsigned rshift, unsigned bitwidth);
 
@@ -71,11 +71,11 @@ bool ac_nir_lower_intrinsics_to_args(nir_shader *shader, const enum amd_gfx_leve
                                      const struct ac_shader_args *ac_args);
 
 void
-ac_nir_store_var_components(nir_builder *b, nir_variable *var, nir_ssa_def *value,
+ac_nir_store_var_components(nir_builder *b, nir_variable *var, nir_def *value,
                             unsigned component, unsigned writemask);
 
 void
-ac_nir_export_primitive(nir_builder *b, nir_ssa_def *prim);
+ac_nir_export_primitive(nir_builder *b, nir_def *prim);
 
 void
 ac_nir_export_position(nir_builder *b,
@@ -84,21 +84,21 @@ ac_nir_export_position(nir_builder *b,
                        bool no_param_export,
                        bool force_vrs,
                        uint64_t outputs_written,
-                       nir_ssa_def *(*outputs)[4]);
+                       nir_def *(*outputs)[4]);
 
 void
 ac_nir_export_parameters(nir_builder *b,
                          const uint8_t *param_offsets,
                          uint64_t outputs_written,
                          uint16_t outputs_written_16bit,
-                         nir_ssa_def *(*outputs)[4],
-                         nir_ssa_def *(*outputs_16bit_lo)[4],
-                         nir_ssa_def *(*outputs_16bit_hi)[4]);
+                         nir_def *(*outputs)[4],
+                         nir_def *(*outputs_16bit_lo)[4],
+                         nir_def *(*outputs_16bit_hi)[4]);
 
-nir_ssa_def *
+nir_def *
 ac_nir_calc_io_offset(nir_builder *b,
                       nir_intrinsic_instr *intrin,
-                      nir_ssa_def *base_stride,
+                      nir_def *base_stride,
                       unsigned component_stride,
                       ac_nir_map_io_driver_location map_io);
 
@@ -206,10 +206,10 @@ ac_nir_lower_mesh_inputs_to_mem(nir_shader *shader,
                                 unsigned task_payload_entry_bytes,
                                 unsigned task_num_entries);
 
-nir_ssa_def *
+nir_def *
 ac_nir_cull_primitive(nir_builder *b,
-                      nir_ssa_def *initially_accepted,
-                      nir_ssa_def *pos[3][4],
+                      nir_def *initially_accepted,
+                      nir_def *pos[3][4],
                       unsigned num_vertices,
                       ac_nir_cull_accepted accept_func,
                       void *state);
@@ -262,8 +262,8 @@ ac_nir_gs_shader_query(nir_builder *b,
                        bool has_pipeline_stats_query,
                        unsigned num_vertices_per_primitive,
                        unsigned wave_size,
-                       nir_ssa_def *vertex_count[4],
-                       nir_ssa_def *primitive_count[4]);
+                       nir_def *vertex_count[4],
+                       nir_def *primitive_count[4]);
 
 void
 ac_nir_lower_legacy_gs(nir_shader *nir,
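
A caller of the renamed header entry points, sketched under the assumption
that b, ac_args and arg are in scope as in the prototypes above:

   nir_def *val  = ac_nir_load_arg(b, ac_args, arg);
   ac_nir_store_arg(b, ac_args, arg, nir_iadd_imm(b, val, 1));
   nir_def *bits = ac_nir_unpack_arg(b, ac_args, arg, 8, 4); /* bits [11:8] */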
diff --git a/src/amd/common/ac_nir_cull.c b/src/amd/common/ac_nir_cull.c
index a4961f1..1a5bcc9 100644
 
 typedef struct
 {
-   nir_ssa_def *w_reflection;
-   nir_ssa_def *all_w_negative;
-   nir_ssa_def *any_w_negative;
+   nir_def *w_reflection;
+   nir_def *all_w_negative;
+   nir_def *any_w_negative;
 } position_w_info;
 
 static void
-analyze_position_w(nir_builder *b, nir_ssa_def *pos[][4], unsigned num_vertices,
+analyze_position_w(nir_builder *b, nir_def *pos[][4], unsigned num_vertices,
                    position_w_info *w_info)
 {
    w_info->all_w_negative = nir_imm_true(b);
@@ -26,34 +26,34 @@ analyze_position_w(nir_builder *b, nir_ssa_def *pos[][4], unsigned num_vertices,
    w_info->any_w_negative = nir_imm_false(b);
 
    for (unsigned i = 0; i < num_vertices; ++i) {
-      nir_ssa_def *neg_w = nir_flt_imm(b, pos[i][3], 0.0f);
+      nir_def *neg_w = nir_flt_imm(b, pos[i][3], 0.0f);
       w_info->w_reflection = nir_ixor(b, neg_w, w_info->w_reflection);
       w_info->any_w_negative = nir_ior(b, neg_w, w_info->any_w_negative);
       w_info->all_w_negative = nir_iand(b, neg_w, w_info->all_w_negative);
    }
 }
 
-static nir_ssa_def *
-cull_face_triangle(nir_builder *b, nir_ssa_def *pos[3][4], const position_w_info *w_info)
+static nir_def *
+cull_face_triangle(nir_builder *b, nir_def *pos[3][4], const position_w_info *w_info)
 {
-   nir_ssa_def *det_t0 = nir_fsub(b, pos[2][0], pos[0][0]);
-   nir_ssa_def *det_t1 = nir_fsub(b, pos[1][1], pos[0][1]);
-   nir_ssa_def *det_t2 = nir_fsub(b, pos[0][0], pos[1][0]);
-   nir_ssa_def *det_t3 = nir_fsub(b, pos[0][1], pos[2][1]);
-   nir_ssa_def *det_p0 = nir_fmul(b, det_t0, det_t1);
-   nir_ssa_def *det_p1 = nir_fmul(b, det_t2, det_t3);
-   nir_ssa_def *det = nir_fsub(b, det_p0, det_p1);
+   nir_def *det_t0 = nir_fsub(b, pos[2][0], pos[0][0]);
+   nir_def *det_t1 = nir_fsub(b, pos[1][1], pos[0][1]);
+   nir_def *det_t2 = nir_fsub(b, pos[0][0], pos[1][0]);
+   nir_def *det_t3 = nir_fsub(b, pos[0][1], pos[2][1]);
+   nir_def *det_p0 = nir_fmul(b, det_t0, det_t1);
+   nir_def *det_p1 = nir_fmul(b, det_t2, det_t3);
+   nir_def *det = nir_fsub(b, det_p0, det_p1);
 
    det = nir_bcsel(b, w_info->w_reflection, nir_fneg(b, det), det);
 
-   nir_ssa_def *front_facing_ccw = nir_fgt_imm(b, det, 0.0f);
-   nir_ssa_def *zero_area = nir_feq_imm(b, det, 0.0f);
-   nir_ssa_def *ccw = nir_load_cull_ccw_amd(b);
-   nir_ssa_def *front_facing = nir_ieq(b, front_facing_ccw, ccw);
-   nir_ssa_def *cull_front = nir_load_cull_front_face_enabled_amd(b);
-   nir_ssa_def *cull_back = nir_load_cull_back_face_enabled_amd(b);
+   nir_def *front_facing_ccw = nir_fgt_imm(b, det, 0.0f);
+   nir_def *zero_area = nir_feq_imm(b, det, 0.0f);
+   nir_def *ccw = nir_load_cull_ccw_amd(b);
+   nir_def *front_facing = nir_ieq(b, front_facing_ccw, ccw);
+   nir_def *cull_front = nir_load_cull_front_face_enabled_amd(b);
+   nir_def *cull_back = nir_load_cull_back_face_enabled_amd(b);
 
-   nir_ssa_def *face_culled = nir_bcsel(b, front_facing, cull_front, cull_back);
+   nir_def *face_culled = nir_bcsel(b, front_facing, cull_front, cull_back);
    face_culled = nir_ior(b, face_culled, zero_area);
 
    /* Don't reject NaN and +/-infinity, these are tricky.
@@ -63,7 +63,7 @@ cull_face_triangle(nir_builder *b, nir_ssa_def *pos[3][4], const position_w_info
 }
 
 static void
-calc_bbox_triangle(nir_builder *b, nir_ssa_def *pos[3][4], nir_ssa_def *bbox_min[2], nir_ssa_def *bbox_max[2])
+calc_bbox_triangle(nir_builder *b, nir_def *pos[3][4], nir_def *bbox_min[2], nir_def *bbox_max[2])
 {
    for (unsigned chan = 0; chan < 2; ++chan) {
       bbox_min[chan] = nir_fmin(b, pos[0][chan], nir_fmin(b, pos[1][chan], pos[2][chan]));
@@ -71,10 +71,10 @@ calc_bbox_triangle(nir_builder *b, nir_ssa_def *pos[3][4], nir_ssa_def *bbox_min
    }
 }
 
-static nir_ssa_def *
-cull_frustrum(nir_builder *b, nir_ssa_def *bbox_min[2], nir_ssa_def *bbox_max[2])
+static nir_def *
+cull_frustrum(nir_builder *b, nir_def *bbox_min[2], nir_def *bbox_max[2])
 {
-   nir_ssa_def *prim_outside_view = nir_imm_false(b);
+   nir_def *prim_outside_view = nir_imm_false(b);
 
    for (unsigned chan = 0; chan < 2; ++chan) {
       prim_outside_view = nir_ior(b, prim_outside_view, nir_flt_imm(b, bbox_max[chan], -1.0f));
@@ -84,25 +84,25 @@ cull_frustrum(nir_builder *b, nir_ssa_def *bbox_min[2], nir_ssa_def *bbox_max[2]
    return prim_outside_view;
 }
 
-static nir_ssa_def *
-cull_small_primitive_triangle(nir_builder *b, nir_ssa_def *bbox_min[2], nir_ssa_def *bbox_max[2],
-                              nir_ssa_def *prim_is_small_else)
+static nir_def *
+cull_small_primitive_triangle(nir_builder *b, nir_def *bbox_min[2], nir_def *bbox_max[2],
+                              nir_def *prim_is_small_else)
 {
-   nir_ssa_def *prim_is_small = NULL;
+   nir_def *prim_is_small = NULL;
 
    nir_if *if_cull_small_prims = nir_push_if(b, nir_load_cull_small_primitives_enabled_amd(b));
    {
-      nir_ssa_def *vp = nir_load_viewport_xy_scale_and_offset(b);
-      nir_ssa_def *small_prim_precision = nir_load_cull_small_prim_precision_amd(b);
+      nir_def *vp = nir_load_viewport_xy_scale_and_offset(b);
+      nir_def *small_prim_precision = nir_load_cull_small_prim_precision_amd(b);
       prim_is_small = prim_is_small_else;
 
       for (unsigned chan = 0; chan < 2; ++chan) {
-         nir_ssa_def *vp_scale = nir_channel(b, vp, chan);
-         nir_ssa_def *vp_translate = nir_channel(b, vp, 2 + chan);
+         nir_def *vp_scale = nir_channel(b, vp, chan);
+         nir_def *vp_translate = nir_channel(b, vp, 2 + chan);
 
          /* Convert the position to screen-space coordinates. */
-         nir_ssa_def *min = nir_ffma(b, bbox_min[chan], vp_scale, vp_translate);
-         nir_ssa_def *max = nir_ffma(b, bbox_max[chan], vp_scale, vp_translate);
+         nir_def *min = nir_ffma(b, bbox_min[chan], vp_scale, vp_translate);
+         nir_def *max = nir_ffma(b, bbox_max[chan], vp_scale, vp_translate);
 
          /* Scale the bounding box according to precision. */
          min = nir_fsub(b, min, small_prim_precision);
@@ -112,7 +112,7 @@ cull_small_primitive_triangle(nir_builder *b, nir_ssa_def *bbox_min[2], nir_ssa_
          min = nir_fround_even(b, min);
          max = nir_fround_even(b, max);
 
-         nir_ssa_def *rounded_to_eq = nir_feq(b, min, max);
+         nir_def *rounded_to_eq = nir_feq(b, min, max);
          prim_is_small = nir_ior(b, prim_is_small, rounded_to_eq);
       }
    }
@@ -121,27 +121,27 @@ cull_small_primitive_triangle(nir_builder *b, nir_ssa_def *bbox_min[2], nir_ssa_
    return nir_if_phi(b, prim_is_small, prim_is_small_else);
 }
 
-static nir_ssa_def *
+static nir_def *
 ac_nir_cull_triangle(nir_builder *b,
-                     nir_ssa_def *initially_accepted,
-                     nir_ssa_def *pos[3][4],
+                     nir_def *initially_accepted,
+                     nir_def *pos[3][4],
                      position_w_info *w_info,
                      ac_nir_cull_accepted accept_func,
                      void *state)
 {
-   nir_ssa_def *accepted = initially_accepted;
+   nir_def *accepted = initially_accepted;
    accepted = nir_iand(b, accepted, nir_inot(b, w_info->all_w_negative));
    accepted = nir_iand(b, accepted, nir_inot(b, cull_face_triangle(b, pos, w_info)));
 
-   nir_ssa_def *bbox_accepted = NULL;
+   nir_def *bbox_accepted = NULL;
 
    nir_if *if_accepted = nir_push_if(b, accepted);
    {
-      nir_ssa_def *bbox_min[2] = {0}, *bbox_max[2] = {0};
+      nir_def *bbox_min[2] = {0}, *bbox_max[2] = {0};
       calc_bbox_triangle(b, pos, bbox_min, bbox_max);
 
-      nir_ssa_def *prim_outside_view = cull_frustrum(b, bbox_min, bbox_max);
-      nir_ssa_def *prim_invisible =
+      nir_def *prim_outside_view = cull_frustrum(b, bbox_min, bbox_max);
+      nir_def *prim_invisible =
          cull_small_primitive_triangle(b, bbox_min, bbox_max, prim_outside_view);
 
       bbox_accepted = nir_ior(b, nir_inot(b, prim_invisible), w_info->any_w_negative);
@@ -162,18 +162,18 @@ ac_nir_cull_triangle(nir_builder *b,
 }
 
 static void
-rotate_45degrees(nir_builder *b, nir_ssa_def *v[2])
+rotate_45degrees(nir_builder *b, nir_def *v[2])
 {
    /* sin(45) == cos(45) */
-   nir_ssa_def *sincos45 = nir_imm_float(b, 0.707106781);
+   nir_def *sincos45 = nir_imm_float(b, 0.707106781);
 
    /* x2  =  x*cos45 - y*sin45  =  x*sincos45 - y*sincos45
     * y2  =  x*sin45 + y*cos45  =  x*sincos45 + y*sincos45
     */
-   nir_ssa_def *first = nir_fmul(b, v[0], sincos45);
+   nir_def *first = nir_fmul(b, v[0], sincos45);
 
    /* Doing 2x ffma while duplicating the multiplication is 33% faster than fmul+fadd+fadd. */
-   nir_ssa_def *result[2] = {
+   nir_def *result[2] = {
       nir_ffma(b, nir_fneg(b, v[1]), sincos45, first),
       nir_ffma(b, v[1], sincos45, first),
    };
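
As a plain-C sketch of the identity in the comment above (illustrative only, not part of the patch; fmaf stands in for the hardware ffma): since sin(45°) == cos(45°), both rotated components can reuse the single product x*sincos45, so the rotation costs one multiply plus two fused multiply-adds instead of fmul+fadd+fadd.

   #include <math.h>

   /* Rotate v by 45 degrees in place, mirroring rotate_45degrees. */
   static void rotate_45degrees_scalar(float v[2])
   {
      const float sincos45 = 0.707106781f;     /* sin(45) == cos(45) */
      float first = v[0] * sincos45;           /* shared product x*sincos45 */
      float x2 = fmaf(-v[1], sincos45, first); /* x*cos45 - y*sin45 */
      float y2 = fmaf(v[1], sincos45, first);  /* x*sin45 + y*cos45 */
      v[0] = x2;
      v[1] = y2;
   }
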
@@ -182,26 +182,26 @@ rotate_45degrees(nir_builder *b, nir_ssa_def *v[2])
 }
 
 static void
-calc_bbox_line(nir_builder *b, nir_ssa_def *pos[3][4], nir_ssa_def *bbox_min[2], nir_ssa_def *bbox_max[2])
+calc_bbox_line(nir_builder *b, nir_def *pos[3][4], nir_def *bbox_min[2], nir_def *bbox_max[2])
 {
-   nir_ssa_def *clip_half_line_width = nir_load_clip_half_line_width_amd(b);
+   nir_def *clip_half_line_width = nir_load_clip_half_line_width_amd(b);
 
    for (unsigned chan = 0; chan < 2; ++chan) {
       bbox_min[chan] = nir_fmin(b, pos[0][chan], pos[1][chan]);
       bbox_max[chan] = nir_fmax(b, pos[0][chan], pos[1][chan]);
 
-      nir_ssa_def *width = nir_channel(b, clip_half_line_width, chan);
+      nir_def *width = nir_channel(b, clip_half_line_width, chan);
       bbox_min[chan] = nir_fsub(b, bbox_min[chan], width);
       bbox_max[chan] = nir_fadd(b, bbox_max[chan], width);
    }
 }
 
-static nir_ssa_def *
-cull_small_primitive_line(nir_builder *b, nir_ssa_def *pos[3][4],
-                          nir_ssa_def *bbox_min[2], nir_ssa_def *bbox_max[2],
-                          nir_ssa_def *prim_is_small_else)
+static nir_def *
+cull_small_primitive_line(nir_builder *b, nir_def *pos[3][4],
+                          nir_def *bbox_min[2], nir_def *bbox_max[2],
+                          nir_def *prim_is_small_else)
 {
-   nir_ssa_def *prim_is_small = NULL;
+   nir_def *prim_is_small = NULL;
 
    /* Small primitive filter - eliminate lines that are too small to affect a sample. */
    nir_if *if_cull_small_prims = nir_push_if(b, nir_load_cull_small_primitives_enabled_amd(b));
@@ -234,13 +234,13 @@ cull_small_primitive_line(nir_builder *b, nir_ssa_def *pos[3][4],
        * A good test is piglit/lineloop because it draws 10k subpixel lines in a circle.
        * It should contain no holes if this matches hw behavior.
        */
-      nir_ssa_def *v0[2], *v1[2];
-      nir_ssa_def *vp = nir_load_viewport_xy_scale_and_offset(b);
+      nir_def *v0[2], *v1[2];
+      nir_def *vp = nir_load_viewport_xy_scale_and_offset(b);
 
       /* Get vertex positions in pixels. */
       for (unsigned chan = 0; chan < 2; chan++) {
-         nir_ssa_def *vp_scale = nir_channel(b, vp, chan);
-         nir_ssa_def *vp_translate = nir_channel(b, vp, 2 + chan);
+         nir_def *vp_scale = nir_channel(b, vp, chan);
+         nir_def *vp_translate = nir_channel(b, vp, 2 + chan);
 
          v0[chan] = nir_ffma(b, pos[0][chan], vp_scale, vp_translate);
          v1[chan] = nir_ffma(b, pos[1][chan], vp_scale, vp_translate);
@@ -250,9 +250,9 @@ cull_small_primitive_line(nir_builder *b, nir_ssa_def *pos[3][4],
       rotate_45degrees(b, v0);
       rotate_45degrees(b, v1);
 
-      nir_ssa_def *small_prim_precision = nir_load_cull_small_prim_precision_amd(b);
+      nir_def *small_prim_precision = nir_load_cull_small_prim_precision_amd(b);
 
-      nir_ssa_def *rounded_to_eq[2];
+      nir_def *rounded_to_eq[2];
       for (unsigned chan = 0; chan < 2; chan++) {
          /* The width of each square is sqrt(0.5), so scale it to 1 because we want
           * round() to give us the position of the closest center of a square (diamond).
@@ -263,8 +263,8 @@ cull_small_primitive_line(nir_builder *b, nir_ssa_def *pos[3][4],
          /* Compute the bounding box around both vertices. We do this because we must
           * enlarge the line area by the precision of the rasterizer.
           */
-         nir_ssa_def *min = nir_fmin(b, v0[chan], v1[chan]);
-         nir_ssa_def *max = nir_fmax(b, v0[chan], v1[chan]);
+         nir_def *min = nir_fmin(b, v0[chan], v1[chan]);
+         nir_def *max = nir_fmax(b, v0[chan], v1[chan]);
 
          /* Enlarge the bounding box by the precision of the rasterizer. */
          min = nir_fsub(b, min, small_prim_precision);
@@ -287,27 +287,27 @@ cull_small_primitive_line(nir_builder *b, nir_ssa_def *pos[3][4],
    return nir_if_phi(b, prim_is_small, prim_is_small_else);
 }
 
-static nir_ssa_def *
+static nir_def *
 ac_nir_cull_line(nir_builder *b,
-                 nir_ssa_def *initially_accepted,
-                 nir_ssa_def *pos[3][4],
+                 nir_def *initially_accepted,
+                 nir_def *pos[3][4],
                  position_w_info *w_info,
                  ac_nir_cull_accepted accept_func,
                  void *state)
 {
-   nir_ssa_def *accepted = initially_accepted;
+   nir_def *accepted = initially_accepted;
    accepted = nir_iand(b, accepted, nir_inot(b, w_info->all_w_negative));
 
-   nir_ssa_def *bbox_accepted = NULL;
+   nir_def *bbox_accepted = NULL;
 
    nir_if *if_accepted = nir_push_if(b, accepted);
    {
-      nir_ssa_def *bbox_min[2] = {0}, *bbox_max[2] = {0};
+      nir_def *bbox_min[2] = {0}, *bbox_max[2] = {0};
       calc_bbox_line(b, pos, bbox_min, bbox_max);
 
       /* Frustum culling - eliminate lines that are fully outside the view. */
-      nir_ssa_def *prim_outside_view = cull_frustrum(b, bbox_min, bbox_max);
-      nir_ssa_def *prim_invisible =
+      nir_def *prim_outside_view = cull_frustrum(b, bbox_min, bbox_max);
+      nir_def *prim_invisible =
          cull_small_primitive_line(b, pos, bbox_min, bbox_max, prim_outside_view);
 
       bbox_accepted = nir_ior(b, nir_inot(b, prim_invisible), w_info->any_w_negative);
@@ -326,10 +326,10 @@ ac_nir_cull_line(nir_builder *b,
    return nir_if_phi(b, bbox_accepted, accepted);
 }
 
-nir_ssa_def *
+nir_def *
 ac_nir_cull_primitive(nir_builder *b,
-                      nir_ssa_def *initially_accepted,
-                      nir_ssa_def *pos[3][4],
+                      nir_def *initially_accepted,
+                      nir_def *pos[3][4],
                       unsigned num_vertices,
                       ac_nir_cull_accepted accept_func,
                       void *state)
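
For reference, the per-axis small-primitive test that cull_small_primitive_triangle builds in NIR above can be modeled in scalar C as follows (a sketch; the function name and scalar types are mine, and nearbyintf assumes the default round-to-nearest-even mode, matching nir_fround_even):

   #include <math.h>
   #include <stdbool.h>

   static bool prim_is_small_one_axis(float bbox_min, float bbox_max,
                                      float vp_scale, float vp_translate,
                                      float small_prim_precision)
   {
      /* Convert the clip-space bbox to screen-space coordinates. */
      float min = fmaf(bbox_min, vp_scale, vp_translate);
      float max = fmaf(bbox_max, vp_scale, vp_translate);

      /* Enlarge the bbox by the rasterizer precision. */
      min -= small_prim_precision;
      max += small_prim_precision;

      /* If both ends round to the same sample center, the primitive
       * cannot cover any sample on this axis and may be culled.
       */
      return nearbyintf(min) == nearbyintf(max);
   }
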
diff --git a/src/amd/common/ac_nir_lower_esgs_io_to_mem.c b/src/amd/common/ac_nir_lower_esgs_io_to_mem.c
index 851f511..ffe6557 100644
--- a/src/amd/common/ac_nir_lower_esgs_io_to_mem.c
+++ b/src/amd/common/ac_nir_lower_esgs_io_to_mem.c
@@ -36,8 +36,8 @@ typedef struct {
    bool gs_triangle_strip_adjacency_fix;
 } lower_esgs_io_state;
 
-static nir_ssa_def *
-emit_split_buffer_load(nir_builder *b, nir_ssa_def *desc, nir_ssa_def *v_off, nir_ssa_def *s_off,
+static nir_def *
+emit_split_buffer_load(nir_builder *b, nir_def *desc, nir_def *v_off, nir_def *s_off,
                        unsigned component_stride, unsigned num_components, unsigned bit_size)
 {
    unsigned total_bytes = num_components * bit_size / 8u;
@@ -45,7 +45,7 @@ emit_split_buffer_load(nir_builder *b, nir_ssa_def *desc, nir_ssa_def *v_off, ni
    unsigned remaining_bytes = total_bytes - full_dwords * 4u;
 
    /* Accommodate max number of split 64-bit loads */
-   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS * 2u];
+   nir_def *comps[NIR_MAX_VEC_COMPONENTS * 2u];
 
    /* Assume that 1x32-bit load is better than 1x16-bit + 1x8-bit */
    if (remaining_bytes == 3) {
@@ -53,7 +53,7 @@ emit_split_buffer_load(nir_builder *b, nir_ssa_def *desc, nir_ssa_def *v_off, ni
       full_dwords++;
    }
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    for (unsigned i = 0; i < full_dwords; ++i)
       comps[i] = nir_load_buffer_amd(b, 1, 32, desc, v_off, s_off, zero,
@@ -70,11 +70,11 @@ emit_split_buffer_load(nir_builder *b, nir_ssa_def *desc, nir_ssa_def *v_off, ni
 }
 
 static void
-emit_split_buffer_store(nir_builder *b, nir_ssa_def *d, nir_ssa_def *desc, nir_ssa_def *v_off, nir_ssa_def *s_off,
+emit_split_buffer_store(nir_builder *b, nir_def *d, nir_def *desc, nir_def *v_off, nir_def *s_off,
                         unsigned component_stride, unsigned num_components, unsigned bit_size,
                         unsigned writemask, bool swizzled, bool slc)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    while (writemask) {
       int start, count;
@@ -91,7 +91,7 @@ emit_split_buffer_store(nir_builder *b, nir_ssa_def *d, nir_ssa_def *desc, nir_s
          else if ((start_byte % 4) == 2)
             store_bytes = MIN2(store_bytes, 2);
 
-         nir_ssa_def *store_val = nir_extract_bits(b, &d, 1, start_byte * 8u, 1, store_bytes * 8u);
+         nir_def *store_val = nir_extract_bits(b, &d, 1, start_byte * 8u, 1, store_bytes * 8u);
          nir_store_buffer_amd(b, store_val, desc, v_off, s_off, zero,
                               .base = start_byte, .memory_modes = nir_var_shader_out,
                               .access = ACCESS_COHERENT |
@@ -153,19 +153,19 @@ lower_es_output_store(nir_builder *b,
    unsigned write_mask = nir_intrinsic_write_mask(intrin);
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *io_off = ac_nir_calc_io_offset(b, intrin, nir_imm_int(b, 16u), 4u, st->map_io);
+   nir_def *io_off = ac_nir_calc_io_offset(b, intrin, nir_imm_int(b, 16u), 4u, st->map_io);
 
    if (st->gfx_level <= GFX8) {
       /* GFX6-8: ES is a separate HW stage, data is passed from ES to GS in VRAM. */
-      nir_ssa_def *ring = nir_load_ring_esgs_amd(b);
-      nir_ssa_def *es2gs_off = nir_load_ring_es2gs_offset_amd(b);
+      nir_def *ring = nir_load_ring_esgs_amd(b);
+      nir_def *es2gs_off = nir_load_ring_es2gs_offset_amd(b);
       emit_split_buffer_store(b, intrin->src[0].ssa, ring, io_off, es2gs_off, 4u,
                               intrin->src[0].ssa->num_components, intrin->src[0].ssa->bit_size,
                               write_mask, true, true);
    } else {
       /* GFX9+: ES is merged into GS, data is passed through LDS. */
-      nir_ssa_def *vertex_idx = nir_load_local_invocation_index(b);
-      nir_ssa_def *off = nir_iadd(b, nir_imul_imm(b, vertex_idx, st->esgs_itemsize), io_off);
+      nir_def *vertex_idx = nir_load_local_invocation_index(b);
+      nir_def *off = nir_iadd(b, nir_imul_imm(b, vertex_idx, st->esgs_itemsize), io_off);
       nir_store_shared(b, intrin->src[0].ssa, off, .write_mask = write_mask);
    }
 
@@ -173,10 +173,10 @@ lower_es_output_store(nir_builder *b,
    return true;
 }
 
-static nir_ssa_def *
+static nir_def *
 gs_get_vertex_offset(nir_builder *b, lower_esgs_io_state *st, unsigned vertex_index)
 {
-   nir_ssa_def *origin = nir_load_gs_vertex_offset_amd(b, .base = vertex_index);
+   nir_def *origin = nir_load_gs_vertex_offset_amd(b, .base = vertex_index);
    if (!st->gs_triangle_strip_adjacency_fix)
       return origin;
 
@@ -190,33 +190,33 @@ gs_get_vertex_offset(nir_builder *b, lower_esgs_io_state *st, unsigned vertex_in
       /* 6 vertex offsets are packed into 3 VGPRs on GFX9+ */
       fixed_index = (vertex_index + 2) % 3;
    }
-   nir_ssa_def *fixed = nir_load_gs_vertex_offset_amd(b, .base = fixed_index);
+   nir_def *fixed = nir_load_gs_vertex_offset_amd(b, .base = fixed_index);
 
-   nir_ssa_def *prim_id = nir_load_primitive_id(b);
+   nir_def *prim_id = nir_load_primitive_id(b);
    /* Odd primitive ids use the fixed offset. */
-   nir_ssa_def *cond = nir_i2b(b, nir_iand_imm(b, prim_id, 1));
+   nir_def *cond = nir_i2b(b, nir_iand_imm(b, prim_id, 1));
    return nir_bcsel(b, cond, fixed, origin);
 }
 
-static nir_ssa_def *
+static nir_def *
 gs_per_vertex_input_vertex_offset_gfx6(nir_builder *b, lower_esgs_io_state *st,
                                        nir_src *vertex_src)
 {
    if (nir_src_is_const(*vertex_src))
       return gs_get_vertex_offset(b, st, nir_src_as_uint(*vertex_src));
 
-   nir_ssa_def *vertex_offset = gs_get_vertex_offset(b, st, 0);
+   nir_def *vertex_offset = gs_get_vertex_offset(b, st, 0);
 
    for (unsigned i = 1; i < b->shader->info.gs.vertices_in; ++i) {
-      nir_ssa_def *cond = nir_ieq_imm(b, vertex_src->ssa, i);
-      nir_ssa_def *elem = gs_get_vertex_offset(b, st, i);
+      nir_def *cond = nir_ieq_imm(b, vertex_src->ssa, i);
+      nir_def *elem = gs_get_vertex_offset(b, st, i);
       vertex_offset = nir_bcsel(b, cond, elem, vertex_offset);
    }
 
    return vertex_offset;
 }
 
-static nir_ssa_def *
+static nir_def *
 gs_per_vertex_input_vertex_offset_gfx9(nir_builder *b, lower_esgs_io_state *st,
                                        nir_src *vertex_src)
 {
@@ -226,11 +226,11 @@ gs_per_vertex_input_vertex_offset_gfx9(nir_builder *b, lower_esgs_io_state *st,
                           (vertex & 1u) * 16u, 16u);
    }
 
-   nir_ssa_def *vertex_offset = gs_get_vertex_offset(b, st, 0);
+   nir_def *vertex_offset = gs_get_vertex_offset(b, st, 0);
 
    for (unsigned i = 1; i < b->shader->info.gs.vertices_in; i++) {
-      nir_ssa_def *cond = nir_ieq_imm(b, vertex_src->ssa, i);
-      nir_ssa_def *elem = gs_get_vertex_offset(b, st, i / 2u * 2u);
+      nir_def *cond = nir_ieq_imm(b, vertex_src->ssa, i);
+      nir_def *elem = gs_get_vertex_offset(b, st, i / 2u * 2u);
       if (i % 2u)
          elem = nir_ishr_imm(b, elem, 16u);
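
The unpacking above follows from the packing noted earlier: six 16-bit ES vertex offsets stored two per 32-bit VGPR on GFX9+. A scalar sketch of the extraction (helper name is mine, not in the patch):

   #include <stdint.h>

   /* Offset i lives in dword i/2, in the high half when i is odd. */
   static uint32_t unpack_gs_vertex_offset(const uint32_t dwords[3], unsigned i)
   {
      uint32_t dw = dwords[i / 2u];
      return (i % 2u) ? (dw >> 16) : (dw & 0xffffu);
   }
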
 
@@ -240,13 +240,13 @@ gs_per_vertex_input_vertex_offset_gfx9(nir_builder *b, lower_esgs_io_state *st,
    return nir_iand_imm(b, vertex_offset, 0xffffu);
 }
 
-static nir_ssa_def *
+static nir_def *
 gs_per_vertex_input_offset(nir_builder *b,
                            lower_esgs_io_state *st,
                            nir_intrinsic_instr *instr)
 {
    nir_src *vertex_src = nir_get_io_arrayed_index_src(instr);
-   nir_ssa_def *vertex_offset = st->gfx_level >= GFX9
+   nir_def *vertex_offset = st->gfx_level >= GFX9
       ? gs_per_vertex_input_vertex_offset_gfx9(b, st, vertex_src)
       : gs_per_vertex_input_vertex_offset_gfx6(b, st, vertex_src);
 
@@ -257,25 +257,25 @@ gs_per_vertex_input_offset(nir_builder *b,
       vertex_offset = nir_imul(b, vertex_offset, nir_load_esgs_vertex_stride_amd(b));
 
    unsigned base_stride = st->gfx_level >= GFX9 ? 1 : 64 /* Wave size on GFX6-8 */;
-   nir_ssa_def *io_off = ac_nir_calc_io_offset(b, instr, nir_imm_int(b, base_stride * 4u), base_stride, st->map_io);
-   nir_ssa_def *off = nir_iadd(b, io_off, vertex_offset);
+   nir_def *io_off = ac_nir_calc_io_offset(b, instr, nir_imm_int(b, base_stride * 4u), base_stride, st->map_io);
+   nir_def *off = nir_iadd(b, io_off, vertex_offset);
    return nir_imul_imm(b, off, 4u);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_gs_per_vertex_input_load(nir_builder *b,
                                nir_instr *instr,
                                void *state)
 {
    lower_esgs_io_state *st = (lower_esgs_io_state *) state;
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-   nir_ssa_def *off = gs_per_vertex_input_offset(b, st, intrin);
+   nir_def *off = gs_per_vertex_input_offset(b, st, intrin);
 
    if (st->gfx_level >= GFX9)
       return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
 
    unsigned wave_size = 64u; /* GFX6-8 only support wave64 */
-   nir_ssa_def *ring = nir_load_ring_esgs_amd(b);
+   nir_def *ring = nir_load_ring_esgs_amd(b);
    return emit_split_buffer_load(b, ring, off, nir_imm_zero(b, 1, 32), 4u * wave_size,
                                  intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
 }
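
emit_split_buffer_load, used just above for the GFX6-8 VRAM path, follows the splitting rule defined at the top of this file: a load of N bytes becomes N/4 dword loads plus at most one 1- or 2-byte tail load, with a 3-byte tail rounded up to one extra dword. A small sketch of that plan (illustrative helper, not part of the patch):

   static unsigned split_plan(unsigned num_components, unsigned bit_size,
                              unsigned *out_full_dwords, unsigned *out_tail_bytes)
   {
      unsigned total_bytes = num_components * bit_size / 8u;
      unsigned full_dwords = total_bytes / 4u;
      unsigned tail_bytes = total_bytes - full_dwords * 4u;

      /* Assume that 1x32-bit load is better than 1x16-bit + 1x8-bit. */
      if (tail_bytes == 3) {
         tail_bytes = 0;
         full_dwords++;
      }

      *out_full_dwords = full_dwords;
      *out_tail_bytes = tail_bytes;
      return full_dwords + (tail_bytes ? 1 : 0); /* number of loads emitted */
   }
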
diff --git a/src/amd/common/ac_nir_lower_global_access.c b/src/amd/common/ac_nir_lower_global_access.c
index f0024f6..fbc358c 100644
--- a/src/amd/common/ac_nir_lower_global_access.c
+++ b/src/amd/common/ac_nir_lower_global_access.c
@@ -8,24 +8,24 @@
 #include "nir.h"
 #include "nir_builder.h"
 
-static nir_ssa_def *
-try_extract_additions(nir_builder *b, nir_ssa_scalar scalar, uint64_t *out_const,
-                      nir_ssa_def **out_offset)
+static nir_def *
+try_extract_additions(nir_builder *b, nir_scalar scalar, uint64_t *out_const,
+                      nir_def **out_offset)
 {
-   if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != nir_op_iadd)
+   if (!nir_scalar_is_alu(scalar) || nir_scalar_alu_op(scalar) != nir_op_iadd)
       return NULL;
 
    nir_alu_instr *alu = nir_instr_as_alu(scalar.def->parent_instr);
-   nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
-   nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
+   nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
+   nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);
 
    for (unsigned i = 0; i < 2; ++i) {
-      nir_ssa_scalar src = i ? src1 : src0;
-      if (nir_ssa_scalar_is_const(src)) {
-         *out_const += nir_ssa_scalar_as_uint(src);
-      } else if (nir_ssa_scalar_is_alu(src) && nir_ssa_scalar_alu_op(src) == nir_op_u2u64) {
-         nir_ssa_scalar offset_scalar = nir_ssa_scalar_chase_alu_src(src, 0);
-         nir_ssa_def *offset = nir_channel(b, offset_scalar.def, offset_scalar.comp);
+      nir_scalar src = i ? src1 : src0;
+      if (nir_scalar_is_const(src)) {
+         *out_const += nir_scalar_as_uint(src);
+      } else if (nir_scalar_is_alu(src) && nir_scalar_alu_op(src) == nir_op_u2u64) {
+         nir_scalar offset_scalar = nir_scalar_chase_alu_src(src, 0);
+         nir_def *offset = nir_channel(b, offset_scalar.def, offset_scalar.comp);
          if (*out_offset)
             *out_offset = nir_iadd(b, *out_offset, offset);
          else
@@ -34,13 +34,13 @@ try_extract_additions(nir_builder *b, nir_ssa_scalar scalar, uint64_t *out_const
          continue;
       }
 
-      nir_ssa_def *replace_src =
+      nir_def *replace_src =
          try_extract_additions(b, i == 1 ? src0 : src1, out_const, out_offset);
       return replace_src ? replace_src : nir_ssa_for_alu_src(b, alu, 1 - i);
    }
 
-   nir_ssa_def *replace_src0 = try_extract_additions(b, src0, out_const, out_offset);
-   nir_ssa_def *replace_src1 = try_extract_additions(b, src1, out_const, out_offset);
+   nir_def *replace_src0 = try_extract_additions(b, src0, out_const, out_offset);
+   nir_def *replace_src1 = try_extract_additions(b, src1, out_const, out_offset);
    if (!replace_src0 && !replace_src1)
       return NULL;
 
@@ -80,10 +80,10 @@ process_instr(nir_builder *b, nir_instr *instr, void *_)
    nir_src *addr_src = &intrin->src[addr_src_idx];
 
    uint64_t off_const = 0;
-   nir_ssa_def *offset = NULL;
-   nir_ssa_scalar src = {addr_src->ssa, 0};
+   nir_def *offset = NULL;
+   nir_scalar src = {addr_src->ssa, 0};
    b->cursor = nir_after_instr(addr_src->ssa->parent_instr);
-   nir_ssa_def *addr = try_extract_additions(b, src, &off_const, &offset);
+   nir_def *addr = try_extract_additions(b, src, &off_const, &offset);
    addr = addr ? addr : addr_src->ssa;
 
    b->cursor = nir_before_instr(&intrin->instr);
@@ -122,7 +122,7 @@ process_instr(nir_builder *b, nir_instr *instr, void *_)
 
    nir_builder_instr_insert(b, &new_intrin->instr);
    if (op != nir_intrinsic_store_global_amd)
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, &new_intrin->dest.ssa);
+      nir_def_rewrite_uses(&intrin->dest.ssa, &new_intrin->dest.ssa);
    nir_instr_remove(&intrin->instr);
 
    return true;
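
The recursion in try_extract_additions can be pictured on a toy expression tree (a simplified model that only peels constant addends; the real pass also extracts u2u64-converted 32-bit offsets and rewrites NIR sources):

   #include <stdint.h>

   typedef struct expr {
      enum { CONST, LEAF, ADD } kind;
      uint64_t value;         /* for CONST */
      struct expr *lhs, *rhs; /* for ADD */
   } expr;

   /* Fold every constant addend into *out_const; return what remains. */
   static expr *extract_additions(expr *e, uint64_t *out_const)
   {
      if (e->kind != ADD)
         return e;

      expr *l = extract_additions(e->lhs, out_const);
      expr *r = extract_additions(e->rhs, out_const);

      if (l->kind == CONST) {
         *out_const += l->value;
         return r;
      }
      if (r->kind == CONST) {
         *out_const += r->value;
         return l;
      }

      e->lhs = l;
      e->rhs = r;
      return e;
   }

In this model, ADD(ADD(base, CONST 12), CONST 4) returns base with *out_const == 16 — the shape process_instr exploits to fold constants into the new global-access intrinsic's offset.
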
diff --git a/src/amd/common/ac_nir_lower_image_opcodes_cdna.c b/src/amd/common/ac_nir_lower_image_opcodes_cdna.c
index 7260c21..85f4176 100644
--- a/src/amd/common/ac_nir_lower_image_opcodes_cdna.c
+++ b/src/amd/common/ac_nir_lower_image_opcodes_cdna.c
@@ -23,7 +23,7 @@
 #include "nir_builder.h"
 #include "amdgfxregs.h"
 
-static nir_ssa_def *get_field(nir_builder *b, nir_ssa_def *desc, unsigned index, unsigned mask)
+static nir_def *get_field(nir_builder *b, nir_def *desc, unsigned index, unsigned mask)
 {
    return nir_ubfe_imm(b, nir_channel(b, desc, index), ffs(mask) - 1, util_bitcount(mask));
 }
@@ -46,17 +46,17 @@ static unsigned get_coord_components(enum glsl_sampler_dim dim, bool is_array)
 /* Lower image coordinates to a buffer element index. Return UINT_MAX if the image coordinates
  * are out of bounds.
  */
-static nir_ssa_def *lower_image_coords(nir_builder *b, nir_ssa_def *desc, nir_ssa_def *coord,
+static nir_def *lower_image_coords(nir_builder *b, nir_def *desc, nir_def *coord,
                                        enum glsl_sampler_dim dim, bool is_array,
                                        bool handle_out_of_bounds)
 {
    unsigned num_coord_components = get_coord_components(dim, is_array);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    /* Get coordinates. */
-   nir_ssa_def *x = nir_channel(b, coord, 0);
-   nir_ssa_def *y = num_coord_components >= 2 ? nir_channel(b, coord, 1) : NULL;
-   nir_ssa_def *z = num_coord_components >= 3 ? nir_channel(b, coord, 2) : NULL;
+   nir_def *x = nir_channel(b, coord, 0);
+   nir_def *y = num_coord_components >= 2 ? nir_channel(b, coord, 1) : NULL;
+   nir_def *z = num_coord_components >= 3 ? nir_channel(b, coord, 2) : NULL;
 
    if (dim == GLSL_SAMPLER_DIM_1D && is_array) {
       z = y;
@@ -64,35 +64,35 @@ static nir_ssa_def *lower_image_coords(nir_builder *b, nir_ssa_def *desc, nir_ss
    }
 
    if (is_array) {
-      nir_ssa_def *first_layer = get_field(b, desc, 5, 0xffff0000);
+      nir_def *first_layer = get_field(b, desc, 5, 0xffff0000);
       z = nir_iadd(b, z, first_layer);
    }
 
    /* Compute the buffer element index. */
-   nir_ssa_def *index = x;
+   nir_def *index = x;
    if (y) {
-      nir_ssa_def *pitch = nir_channel(b, desc, 6);
+      nir_def *pitch = nir_channel(b, desc, 6);
       index = nir_iadd(b, index, nir_imul(b, pitch, y));
    }
    if (z) {
-      nir_ssa_def *slice_elements = nir_channel(b, desc, 7);
+      nir_def *slice_elements = nir_channel(b, desc, 7);
       index = nir_iadd(b, index, nir_imul(b, slice_elements, z));
    }
 
    /* Determine whether the coordinates are out of bounds. */
-   nir_ssa_def *out_of_bounds = NULL;
+   nir_def *out_of_bounds = NULL;
 
    if (handle_out_of_bounds) {
-      nir_ssa_def *width = get_field(b, desc, 4, 0xffff);
+      nir_def *width = get_field(b, desc, 4, 0xffff);
       out_of_bounds = nir_ior(b, nir_ilt(b, x, zero), nir_ige(b, x, width));
 
       if (y) {
-         nir_ssa_def *height = get_field(b, desc, 4, 0xffff0000);
+         nir_def *height = get_field(b, desc, 4, 0xffff0000);
          out_of_bounds = nir_ior(b, out_of_bounds,
                                  nir_ior(b, nir_ilt(b, y, zero), nir_ige(b, y, height)));
       }
       if (z) {
-         nir_ssa_def *depth = get_field(b, desc, 5, 0xffff);
+         nir_def *depth = get_field(b, desc, 5, 0xffff);
          out_of_bounds = nir_ior(b, out_of_bounds,
                                  nir_ior(b, nir_ilt(b, z, zero), nir_ige(b, z, depth)));
       }
@@ -104,12 +104,12 @@ static nir_ssa_def *lower_image_coords(nir_builder *b, nir_ssa_def *desc, nir_ss
    return index;
 }
 
-static nir_ssa_def *emulated_image_load(nir_builder *b, unsigned num_components, unsigned bit_size,
-                                        nir_ssa_def *desc, nir_ssa_def *coord,
+static nir_def *emulated_image_load(nir_builder *b, unsigned num_components, unsigned bit_size,
+                                        nir_def *desc, nir_def *coord,
                                         enum gl_access_qualifier access, enum glsl_sampler_dim dim,
                                         bool is_array, bool handle_out_of_bounds)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    return nir_load_buffer_amd(b, num_components, bit_size, nir_channels(b, desc, 0xf),
                               zero, zero,
@@ -120,11 +120,11 @@ static nir_ssa_def *emulated_image_load(nir_builder *b, unsigned num_components,
                               .access = access | ACCESS_USES_FORMAT_AMD);
 }
 
-static void emulated_image_store(nir_builder *b, nir_ssa_def *desc, nir_ssa_def *coord,
-                                 nir_ssa_def *data, enum gl_access_qualifier access,
+static void emulated_image_store(nir_builder *b, nir_def *desc, nir_def *coord,
+                                 nir_def *data, enum gl_access_qualifier access,
                                  enum glsl_sampler_dim dim, bool is_array)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    nir_store_buffer_amd(b, data, nir_channels(b, desc, 0xf), zero, zero,
                         lower_image_coords(b, desc, coord, dim, is_array, true),
@@ -134,7 +134,7 @@ static void emulated_image_store(nir_builder *b, nir_ssa_def *desc, nir_ssa_def
 }
 
 /* Return the width, height, or depth for dim=0,1,2. */
-static nir_ssa_def *get_dim(nir_builder *b, nir_ssa_def *desc, unsigned dim)
+static nir_def *get_dim(nir_builder *b, nir_def *desc, unsigned dim)
 {
    return get_field(b, desc, 4 + dim / 2, 0xffff << (16 * (dim % 2)));
 }
@@ -142,9 +142,9 @@ static nir_ssa_def *get_dim(nir_builder *b, nir_ssa_def *desc, unsigned dim)
 /* Lower txl with lod=0 to typed buffer loads. This is based on the equations in the GL spec.
  * This basically converts the tex opcode into 1 or more image_load opcodes.
  */
-static nir_ssa_def *emulated_tex_level_zero(nir_builder *b, unsigned num_components,
-                                            unsigned bit_size, nir_ssa_def *desc,
-                                            nir_ssa_def *sampler_desc, nir_ssa_def *coord_vec,
+static nir_def *emulated_tex_level_zero(nir_builder *b, unsigned num_components,
+                                            unsigned bit_size, nir_def *desc,
+                                            nir_def *sampler_desc, nir_def *coord_vec,
                                             enum glsl_sampler_dim sampler_dim, bool is_array)
 {
    const enum gl_access_qualifier access =
@@ -153,9 +153,9 @@ static nir_ssa_def *emulated_tex_level_zero(nir_builder *b, unsigned num_compone
    const unsigned num_dim_coords = num_coord_components - is_array;
    const unsigned array_comp = num_coord_components - 1;
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *fp_one = nir_imm_floatN_t(b, 1, bit_size);
-   nir_ssa_def *coord[3] = {0};
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *fp_one = nir_imm_floatN_t(b, 1, bit_size);
+   nir_def *coord[3] = {0};
 
    assert(num_coord_components <= 3);
    for (unsigned i = 0; i < num_coord_components; i++)
@@ -179,14 +179,14 @@ static nir_ssa_def *emulated_tex_level_zero(nir_builder *b, unsigned num_compone
     *
     * We assume that XY_MIN_FILTER and Z_FILTER are identical.
     */
-   nir_ssa_def *is_nearest =
+   nir_def *is_nearest =
       nir_ieq_imm(b, nir_iand_imm(b, nir_channel(b, sampler_desc, 2), 1 << 20), 0);
-   nir_ssa_def *result_nearest, *result_linear;
+   nir_def *result_nearest, *result_linear;
 
    nir_if *if_nearest = nir_push_if(b, is_nearest);
    {
       /* Nearest filter. */
-      nir_ssa_def *coord0[3] = {0};
+      nir_def *coord0[3] = {0};
       memcpy(coord0, coord, sizeof(coord));
 
       for (unsigned dim = 0; dim < num_dim_coords; dim++) {
@@ -205,9 +205,9 @@ static nir_ssa_def *emulated_tex_level_zero(nir_builder *b, unsigned num_compone
    nir_push_else(b, if_nearest);
    {
       /* Linear filter. */
-      nir_ssa_def *coord0[3] = {0};
-      nir_ssa_def *coord1[3] = {0};
-      nir_ssa_def *weight[3] = {0};
+      nir_def *coord0[3] = {0};
+      nir_def *coord1[3] = {0};
+      nir_def *weight[3] = {0};
 
       memcpy(coord0, coord, sizeof(coord));
 
@@ -231,10 +231,10 @@ static nir_ssa_def *emulated_tex_level_zero(nir_builder *b, unsigned num_compone
       /* Load all texels for the linear filter.
        * This is 2 texels for 1D, 4 texels for 2D, and 8 texels for 3D.
        */
-      nir_ssa_def *texel[8];
+      nir_def *texel[8];
 
       for (unsigned i = 0; i < (1 << num_dim_coords); i++) {
-         nir_ssa_def *texel_coord[3];
+         nir_def *texel_coord[3];
 
          /* Determine whether the current texel should use channels from coord0
           * or coord1. The i-th bit of the texel index determines that.
@@ -247,7 +247,7 @@ static nir_ssa_def *emulated_tex_level_zero(nir_builder *b, unsigned num_compone
             texel_coord[array_comp] = coord0[array_comp];
 
          /* Compute how much the texel contributes to the final result. */
-         nir_ssa_def *texel_weight = fp_one;
+         nir_def *texel_weight = fp_one;
          for (unsigned dim = 0; dim < num_dim_coords; dim++) {
             /* Let's see what "i" represents:
              *    Texel i=0 = 000
@@ -296,10 +296,10 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
       enum gl_access_qualifier access;
       enum glsl_sampler_dim dim;
       bool is_array;
-      nir_ssa_def *desc = NULL, *result = NULL;
+      nir_def *desc = NULL, *result = NULL;
       ASSERTED const char *intr_name;
 
-      nir_ssa_def *dst = &intr->dest.ssa;
+      nir_def *dst = &intr->dest.ssa;
       b->cursor = nir_before_instr(instr);
 
       switch (intr->intrinsic) {
@@ -359,7 +359,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
       case nir_intrinsic_bindless_image_load:
          result = emulated_image_load(b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size,
                                       desc, intr->src[1].ssa, access, dim, is_array, true);
-         nir_ssa_def_rewrite_uses_after(dst, result, instr);
+         nir_def_rewrite_uses_after(dst, result, instr);
          nir_instr_remove(instr);
          return true;
 
@@ -376,9 +376,9 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
    } else if (instr->type == nir_instr_type_tex) {
       nir_tex_instr *tex = nir_instr_as_tex(instr);
       nir_tex_instr *new_tex;
-      nir_ssa_def *coord = NULL, *desc = NULL, *sampler_desc = NULL, *result = NULL;
+      nir_def *coord = NULL, *desc = NULL, *sampler_desc = NULL, *result = NULL;
 
-      nir_ssa_def *dst = &tex->dest.ssa;
+      nir_def *dst = &tex->dest.ssa;
       b->cursor = nir_before_instr(instr);
 
       switch (tex->op) {
@@ -447,7 +447,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
                                          desc, coord,
                                          ACCESS_RESTRICT | ACCESS_NON_WRITEABLE | ACCESS_CAN_REORDER,
                                          tex->sampler_dim, tex->is_array, true);
-            nir_ssa_def_rewrite_uses_after(dst, result, instr);
+            nir_def_rewrite_uses_after(dst, result, instr);
             nir_instr_remove(instr);
             return true;
 
@@ -455,7 +455,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
          case nir_texop_txl:
             result = emulated_tex_level_zero(b, tex->dest.ssa.num_components, tex->dest.ssa.bit_size,
                                   desc, sampler_desc, coord, tex->sampler_dim, tex->is_array);
-            nir_ssa_def_rewrite_uses_after(dst, result, instr);
+            nir_def_rewrite_uses_after(dst, result, instr);
             nir_instr_remove(instr);
             return true;
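
The per-texel weights in emulated_tex_level_zero follow the GL linear-filter equations the file's comment cites: bit dim of the texel index selects coord0 or coord1 on that axis, and a texel's contribution is the product of per-axis weights. A sketch (assuming w[dim] is the fractional lerp factor toward coord1, which the elided part of the hunk computes):

   /* Weight of texel i in a (bi/tri)linear filter over num_dims axes. */
   static float texel_weight(unsigned i, const float w[3], unsigned num_dims)
   {
      float weight = 1.0f;
      for (unsigned dim = 0; dim < num_dims; dim++)
         weight *= (i & (1u << dim)) ? w[dim] : 1.0f - w[dim];
      return weight;
   }
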
 
diff --git a/src/amd/common/ac_nir_lower_ngg.c b/src/amd/common/ac_nir_lower_ngg.c
index 98673a2..c9cb7c5 100644
--- a/src/amd/common/ac_nir_lower_ngg.c
+++ b/src/amd/common/ac_nir_lower_ngg.c
@@ -24,14 +24,14 @@ enum {
 
 typedef struct
 {
-   nir_ssa_def *ssa;
+   nir_def *ssa;
    nir_variable *var;
 } reusable_nondeferred_variable;
 
 typedef struct
 {
    gl_varying_slot slot;
-   nir_ssa_def *chan[4];
+   nir_def *chan[4];
 } vs_output;
 
 typedef struct
@@ -52,7 +52,7 @@ typedef struct
    nir_variable *gs_exported_var;
    nir_variable *gs_vtx_indices_vars[3];
 
-   nir_ssa_def *vtx_addr[3];
+   nir_def *vtx_addr[3];
 
    struct u_vector reusable_nondeferred_variables;
 
@@ -77,9 +77,9 @@ typedef struct
    bool has_clipdist;
 
    /* outputs */
-   nir_ssa_def *outputs[VARYING_SLOT_MAX][4];
-   nir_ssa_def *outputs_16bit_lo[16][4];
-   nir_ssa_def *outputs_16bit_hi[16][4];
+   nir_def *outputs[VARYING_SLOT_MAX][4];
+   nir_def *outputs_16bit_lo[16][4];
+   nir_def *outputs_16bit_hi[16][4];
    shader_output_types output_types;
 } lower_ngg_nogs_state;
 
@@ -100,25 +100,25 @@ typedef struct
    int const_out_prmcnt[4];
    unsigned max_num_waves;
    unsigned num_vertices_per_primitive;
-   nir_ssa_def *lds_addr_gs_out_vtx;
-   nir_ssa_def *lds_addr_gs_scratch;
+   nir_def *lds_addr_gs_out_vtx;
+   nir_def *lds_addr_gs_scratch;
    unsigned lds_bytes_per_gs_out_vertex;
    unsigned lds_offs_primflags;
    bool output_compile_time_known;
    bool streamout_enabled;
    /* 32 bit outputs */
-   nir_ssa_def *outputs[VARYING_SLOT_MAX][4];
+   nir_def *outputs[VARYING_SLOT_MAX][4];
    gs_output_info output_info[VARYING_SLOT_MAX];
    /* 16 bit outputs */
-   nir_ssa_def *outputs_16bit_hi[16][4];
-   nir_ssa_def *outputs_16bit_lo[16][4];
+   nir_def *outputs_16bit_hi[16][4];
+   nir_def *outputs_16bit_lo[16][4];
    gs_output_info output_info_16bit_hi[16];
    gs_output_info output_info_16bit_lo[16];
    /* output types for both 32bit and 16bit */
    shader_output_types output_types;
    /* Count per stream. */
-   nir_ssa_def *vertex_count[4];
-   nir_ssa_def *primitive_count[4];
+   nir_def *vertex_count[4];
+   nir_def *primitive_count[4];
 } lower_ngg_gs_state;
 
 /* LDS layout of Mesh Shader workgroup info. */
@@ -195,7 +195,7 @@ typedef struct
    unsigned api_workgroup_size;
    unsigned hw_workgroup_size;
 
-   nir_ssa_def *workgroup_index;
+   nir_def *workgroup_index;
    nir_variable *out_variables[VARYING_SLOT_MAX * 4];
    nir_variable *primitive_count_var;
    nir_variable *vertex_count_var;
@@ -211,7 +211,7 @@ typedef struct
    } output_info[VARYING_SLOT_MAX];
 
    /* Used by outputs export. */
-   nir_ssa_def *outputs[VARYING_SLOT_MAX][4];
+   nir_def *outputs[VARYING_SLOT_MAX][4];
    uint32_t clipdist_enable_mask;
    const uint8_t *vs_output_param_offset;
    bool has_param_exports;
@@ -239,8 +239,8 @@ enum {
 };
 
 typedef struct {
-   nir_ssa_def *num_repacked_invocations;
-   nir_ssa_def *repacked_invocation_index;
+   nir_def *num_repacked_invocations;
+   nir_def *repacked_invocation_index;
 } wg_repack_result;
 
 /**
@@ -250,8 +250,8 @@ typedef struct {
  * We only care about the results from up to wave_id+1 lanes.
  * (Other lanes are not deactivated but their calculation is not used.)
  */
-static nir_ssa_def *
-summarize_repack(nir_builder *b, nir_ssa_def *packed_counts, unsigned num_lds_dwords)
+static nir_def *
+summarize_repack(nir_builder *b, nir_def *packed_counts, unsigned num_lds_dwords)
 {
    /* We'll use shift to filter out the bytes not needed by the current lane.
     *
@@ -269,37 +269,37 @@ summarize_repack(nir_builder *b, nir_ssa_def *packed_counts, unsigned num_lds_dw
     * then we sum them using v_sad_u8.
     */
 
-   nir_ssa_def *lane_id = nir_load_subgroup_invocation(b);
-   nir_ssa_def *shift = nir_iadd_imm(b, nir_imul_imm(b, lane_id, -4u), num_lds_dwords * 16);
+   nir_def *lane_id = nir_load_subgroup_invocation(b);
+   nir_def *shift = nir_iadd_imm(b, nir_imul_imm(b, lane_id, -4u), num_lds_dwords * 16);
    bool use_dot = b->shader->options->has_udot_4x8;
 
    if (num_lds_dwords == 1) {
-      nir_ssa_def *dot_op = !use_dot ? NULL : nir_ushr(b, nir_ushr(b, nir_imm_int(b, 0x01010101), shift), shift);
+      nir_def *dot_op = !use_dot ? NULL : nir_ushr(b, nir_ushr(b, nir_imm_int(b, 0x01010101), shift), shift);
 
       /* Broadcast the packed data we read from LDS (to the first 16 lanes, but we only care up to num_waves). */
-      nir_ssa_def *packed = nir_lane_permute_16_amd(b, packed_counts, nir_imm_int(b, 0), nir_imm_int(b, 0));
+      nir_def *packed = nir_lane_permute_16_amd(b, packed_counts, nir_imm_int(b, 0), nir_imm_int(b, 0));
 
       /* Horizontally add the packed bytes. */
       if (use_dot) {
          return nir_udot_4x8_uadd(b, packed, dot_op, nir_imm_int(b, 0));
       } else {
-         nir_ssa_def *sad_op = nir_ishl(b, nir_ishl(b, packed, shift), shift);
+         nir_def *sad_op = nir_ishl(b, nir_ishl(b, packed, shift), shift);
          return nir_sad_u8x4(b, sad_op, nir_imm_int(b, 0), nir_imm_int(b, 0));
       }
    } else if (num_lds_dwords == 2) {
-      nir_ssa_def *dot_op = !use_dot ? NULL : nir_ushr(b, nir_ushr(b, nir_imm_int64(b, 0x0101010101010101), shift), shift);
+      nir_def *dot_op = !use_dot ? NULL : nir_ushr(b, nir_ushr(b, nir_imm_int64(b, 0x0101010101010101), shift), shift);
 
       /* Broadcast the packed data we read from LDS (to the first 16 lanes, but we only care up to num_waves). */
-      nir_ssa_def *packed_dw0 = nir_lane_permute_16_amd(b, nir_unpack_64_2x32_split_x(b, packed_counts), nir_imm_int(b, 0), nir_imm_int(b, 0));
-      nir_ssa_def *packed_dw1 = nir_lane_permute_16_amd(b, nir_unpack_64_2x32_split_y(b, packed_counts), nir_imm_int(b, 0), nir_imm_int(b, 0));
+      nir_def *packed_dw0 = nir_lane_permute_16_amd(b, nir_unpack_64_2x32_split_x(b, packed_counts), nir_imm_int(b, 0), nir_imm_int(b, 0));
+      nir_def *packed_dw1 = nir_lane_permute_16_amd(b, nir_unpack_64_2x32_split_y(b, packed_counts), nir_imm_int(b, 0), nir_imm_int(b, 0));
 
       /* Horizontally add the packed bytes. */
       if (use_dot) {
-         nir_ssa_def *sum = nir_udot_4x8_uadd(b, packed_dw0, nir_unpack_64_2x32_split_x(b, dot_op), nir_imm_int(b, 0));
+         nir_def *sum = nir_udot_4x8_uadd(b, packed_dw0, nir_unpack_64_2x32_split_x(b, dot_op), nir_imm_int(b, 0));
          return nir_udot_4x8_uadd(b, packed_dw1, nir_unpack_64_2x32_split_y(b, dot_op), sum);
       } else {
-         nir_ssa_def *sad_op = nir_ishl(b, nir_ishl(b, nir_pack_64_2x32_split(b, packed_dw0, packed_dw1), shift), shift);
-         nir_ssa_def *sum = nir_sad_u8x4(b, nir_unpack_64_2x32_split_x(b, sad_op), nir_imm_int(b, 0), nir_imm_int(b, 0));
+         nir_def *sad_op = nir_ishl(b, nir_ishl(b, nir_pack_64_2x32_split(b, packed_dw0, packed_dw1), shift), shift);
+         nir_def *sum = nir_sad_u8x4(b, nir_unpack_64_2x32_split_x(b, sad_op), nir_imm_int(b, 0), nir_imm_int(b, 0));
          return nir_sad_u8x4(b, nir_unpack_64_2x32_split_y(b, sad_op), nir_imm_int(b, 0), sum);
       }
    } else {
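
A scalar model of the num_lds_dwords == 1 path above (a sketch; the function name is mine, and lane_id must be <= 4, mirroring the shader's "we only care up to wave_id+1 lanes"): with per-wave survivor counts {3, 1, 4, 2} packed one byte per wave, lanes 0..4 compute 0, 3, 4, 8 and 10 — each lane's exclusive prefix sum, with the workgroup total landing in lane num_waves.

   #include <stdint.h>

   static uint32_t summarize_repack_lane(uint32_t packed_counts, unsigned lane_id)
   {
      /* The shift is applied twice, so 32 - 8*lane_id bits in total:
       * the bytes of waves >= lane_id are discarded, the rest survive.
       */
      unsigned shift = 16 - 4 * lane_id;
      uint32_t kept = (uint32_t)(((uint64_t)packed_counts << shift) << shift);

      /* v_sad_u8 against 0 horizontally adds the remaining bytes. */
      uint32_t sum = 0;
      for (unsigned byte = 0; byte < 4; byte++)
         sum += (kept >> (8 * byte)) & 0xff;
      return sum;
   }
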
@@ -314,8 +314,8 @@ summarize_repack(nir_builder *b, nir_ssa_def *packed_counts, unsigned num_lds_dw
  * Assumes that all invocations in the workgroup are active (exec = -1).
  */
 static wg_repack_result
-repack_invocations_in_workgroup(nir_builder *b, nir_ssa_def *input_bool,
-                                nir_ssa_def *lds_addr_base, unsigned max_num_waves,
+repack_invocations_in_workgroup(nir_builder *b, nir_def *input_bool,
+                                nir_def *lds_addr_base, unsigned max_num_waves,
                                 unsigned wave_size)
 {
    /* Input boolean: 1 if the current invocation should survive the repack. */
@@ -326,8 +326,8 @@ repack_invocations_in_workgroup(nir_builder *b, nir_ssa_def *input_bool,
     * Implemented by a scalar instruction that simply counts the number of bits set in a 32/64-bit mask.
     */
 
-   nir_ssa_def *input_mask = nir_ballot(b, 1, wave_size, input_bool);
-   nir_ssa_def *surviving_invocations_in_current_wave = nir_bit_count(b, input_mask);
+   nir_def *input_mask = nir_ballot(b, 1, wave_size, input_bool);
+   nir_def *surviving_invocations_in_current_wave = nir_bit_count(b, input_mask);
 
    /* If we know at compile time that the workgroup has only 1 wave, no further steps are necessary. */
    if (max_num_waves == 1) {
@@ -351,9 +351,9 @@ repack_invocations_in_workgroup(nir_builder *b, nir_ssa_def *input_bool,
    const unsigned num_lds_dwords = DIV_ROUND_UP(max_num_waves, 4);
    assert(num_lds_dwords <= 2);
 
-   nir_ssa_def *wave_id = nir_load_subgroup_id(b);
-   nir_ssa_def *lds_offset = nir_iadd(b, lds_addr_base, wave_id);
-   nir_ssa_def *dont_care = nir_ssa_undef(b, 1, num_lds_dwords * 32);
+   nir_def *wave_id = nir_load_subgroup_id(b);
+   nir_def *lds_offset = nir_iadd(b, lds_addr_base, wave_id);
+   nir_def *dont_care = nir_undef(b, 1, num_lds_dwords * 32);
    nir_if *if_first_lane = nir_push_if(b, nir_elect(b, 1));
 
    nir_store_shared(b, nir_u2u8(b, surviving_invocations_in_current_wave), lds_offset);
@@ -361,7 +361,7 @@ repack_invocations_in_workgroup(nir_builder *b, nir_ssa_def *input_bool,
    nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                          .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
 
-   nir_ssa_def *packed_counts =
+   nir_def *packed_counts =
       nir_load_shared(b, 1, num_lds_dwords * 32, lds_addr_base, .align_mul = 8u);
 
    nir_pop_if(b, if_first_lane);
@@ -383,12 +383,12 @@ repack_invocations_in_workgroup(nir_builder *b, nir_ssa_def *input_bool,
     *   This is the total number of surviving invocations in the workgroup.
     */
 
-   nir_ssa_def *num_waves = nir_load_num_subgroups(b);
-   nir_ssa_def *sum = summarize_repack(b, packed_counts, num_lds_dwords);
+   nir_def *num_waves = nir_load_num_subgroups(b);
+   nir_def *sum = summarize_repack(b, packed_counts, num_lds_dwords);
 
-   nir_ssa_def *wg_repacked_index_base = nir_read_invocation(b, sum, wave_id);
-   nir_ssa_def *wg_num_repacked_invocations = nir_read_invocation(b, sum, num_waves);
-   nir_ssa_def *wg_repacked_index = nir_mbcnt_amd(b, input_mask, wg_repacked_index_base);
+   nir_def *wg_repacked_index_base = nir_read_invocation(b, sum, wave_id);
+   nir_def *wg_num_repacked_invocations = nir_read_invocation(b, sum, num_waves);
+   nir_def *wg_repacked_index = nir_mbcnt_amd(b, input_mask, wg_repacked_index_base);
 
    wg_repack_result r = {
       .num_repacked_invocations = wg_num_repacked_invocations,
@@ -398,17 +398,17 @@ repack_invocations_in_workgroup(nir_builder *b, nir_ssa_def *input_bool,
    return r;
 }
 
-static nir_ssa_def *
-pervertex_lds_addr(nir_builder *b, nir_ssa_def *vertex_idx, unsigned per_vtx_bytes)
+static nir_def *
+pervertex_lds_addr(nir_builder *b, nir_def *vertex_idx, unsigned per_vtx_bytes)
 {
    return nir_imul_imm(b, vertex_idx, per_vtx_bytes);
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_pack_ngg_prim_exp_arg(nir_builder *b, unsigned num_vertices_per_primitives,
-                           nir_ssa_def *vertex_indices[3], nir_ssa_def *is_null_prim)
+                           nir_def *vertex_indices[3], nir_def *is_null_prim)
 {
-   nir_ssa_def *arg = nir_load_initial_edgeflags_amd(b);
+   nir_def *arg = nir_load_initial_edgeflags_amd(b);
 
    for (unsigned i = 0; i < num_vertices_per_primitives; ++i) {
       assert(vertex_indices[i]);
@@ -427,8 +427,8 @@ emit_pack_ngg_prim_exp_arg(nir_builder *b, unsigned num_vertices_per_primitives,
 
 static void
 alloc_vertices_and_primitives(nir_builder *b,
-                              nir_ssa_def *num_vtx,
-                              nir_ssa_def *num_prim)
+                              nir_def *num_vtx,
+                              nir_def *num_prim)
 {
    /* The caller should only call this conditionally on wave 0.
     * The m0 message payload sent with AC_SENDMSG_GS_ALLOC_REQ is:
@@ -438,14 +438,14 @@ alloc_vertices_and_primitives(nir_builder *b,
     * - bits 12..22: number of primitives in group
     */
 
-   nir_ssa_def *m0 = nir_ior(b, nir_ishl_imm(b, num_prim, 12), num_vtx);
+   nir_def *m0 = nir_ior(b, nir_ishl_imm(b, num_prim, 12), num_vtx);
    nir_sendmsg_amd(b, m0, .base = AC_SENDMSG_GS_ALLOC_REQ);
 }
 
 static void
 alloc_vertices_and_primitives_gfx10_workaround(nir_builder *b,
-                                               nir_ssa_def *num_vtx,
-                                               nir_ssa_def *num_prim)
+                                               nir_def *num_vtx,
+                                               nir_def *num_prim)
 {
    /* HW workaround for a GPU hang with 100% culling on GFX10.
     * We always have to export at least 1 primitive.
@@ -453,14 +453,14 @@ alloc_vertices_and_primitives_gfx10_workaround(nir_builder *b,
     *
     * NOTE: We rely on the caller to also set the vertex count to 0 when the primitive count is 0.
     */
-   nir_ssa_def *is_prim_cnt_0 = nir_ieq_imm(b, num_prim, 0);
+   nir_def *is_prim_cnt_0 = nir_ieq_imm(b, num_prim, 0);
    nir_if *if_prim_cnt_0 = nir_push_if(b, is_prim_cnt_0);
    {
-      nir_ssa_def *one = nir_imm_int(b, 1);
+      nir_def *one = nir_imm_int(b, 1);
       alloc_vertices_and_primitives(b, one, one);
 
-      nir_ssa_def *tid = nir_load_subgroup_invocation(b);
-      nir_ssa_def *is_thread_0 = nir_ieq_imm(b, tid, 0);
+      nir_def *tid = nir_load_subgroup_invocation(b);
+      nir_def *is_thread_0 = nir_ieq_imm(b, tid, 0);
       nir_if *if_thread_0 = nir_push_if(b, is_thread_0);
       {
          /* The vertex indices are 0, 0, 0. */
@@ -492,7 +492,7 @@ ngg_nogs_init_vertex_indices_vars(nir_builder *b, nir_function_impl *impl, lower
    for (unsigned v = 0; v < s->options->num_vertices_per_primitive; ++v) {
       s->gs_vtx_indices_vars[v] = nir_local_variable_create(impl, glsl_uint_type(), "gs_vtx_addr");
 
-      nir_ssa_def *vtx = s->options->passthrough ?
+      nir_def *vtx = s->options->passthrough ?
          nir_ubfe_imm(b, nir_load_packed_passthrough_primitive_amd(b),
                       10 * v, 9) :
          nir_ubfe_imm(b, nir_load_gs_vertex_offset_amd(b, .base = v / 2u),
@@ -502,13 +502,13 @@ ngg_nogs_init_vertex_indices_vars(nir_builder *b, nir_function_impl *impl, lower
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_ngg_nogs_prim_exp_arg(nir_builder *b, lower_ngg_nogs_state *s)
 {
    if (s->options->passthrough) {
       return nir_load_packed_passthrough_primitive_amd(b);
    } else {
-      nir_ssa_def *vtx_idx[3] = {0};
+      nir_def *vtx_idx[3] = {0};
 
       for (unsigned v = 0; v < s->options->num_vertices_per_primitive; ++v)
          vtx_idx[v] = nir_load_var(b, s->gs_vtx_indices_vars[v]);
@@ -517,13 +517,13 @@ emit_ngg_nogs_prim_exp_arg(nir_builder *b, lower_ngg_nogs_state *s)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 has_input_vertex(nir_builder *b)
 {
    return nir_is_subgroup_invocation_lt_amd(b, nir_load_merged_wave_info_amd(b));
 }
 
-static nir_ssa_def *
+static nir_def *
 has_input_primitive(nir_builder *b)
 {
    return nir_is_subgroup_invocation_lt_amd(b,
@@ -542,7 +542,7 @@ nogs_prim_gen_query(nir_builder *b, lower_ngg_nogs_state *s)
       nir_if *if_elected = nir_push_if(b, nir_elect(b, 1));
       {
          /* Number of input primitives in the current wave. */
-         nir_ssa_def *num_input_prims = nir_ubfe_imm(b, nir_load_merged_wave_info_amd(b),
+         nir_def *num_input_prims = nir_ubfe_imm(b, nir_load_merged_wave_info_amd(b),
                                                      8, 8);
 
          /* Add to stream 0 primitive generated counter. */
@@ -554,7 +554,7 @@ nogs_prim_gen_query(nir_builder *b, lower_ngg_nogs_state *s)
 }
 
 static void
-emit_ngg_nogs_prim_export(nir_builder *b, lower_ngg_nogs_state *s, nir_ssa_def *arg)
+emit_ngg_nogs_prim_export(nir_builder *b, lower_ngg_nogs_state *s, nir_def *arg)
 {
    nir_if *if_gs_thread = nir_push_if(b, nir_load_var(b, s->gs_exported_var));
    {
@@ -570,7 +570,7 @@ emit_ngg_nogs_prim_export(nir_builder *b, lower_ngg_nogs_state *s, nir_ssa_def *
                             .memory_modes = nir_var_mem_shared);
 
          unsigned edge_flag_bits = ac_get_all_edge_flag_bits();
-         nir_ssa_def *mask = nir_imm_intN_t(b, ~edge_flag_bits, 32);
+         nir_def *mask = nir_imm_intN_t(b, ~edge_flag_bits, 32);
 
          unsigned edge_flag_offset = 0;
          if (s->streamout_enabled) {
@@ -581,9 +581,9 @@ emit_ngg_nogs_prim_export(nir_builder *b, lower_ngg_nogs_state *s, nir_ssa_def *
          }
 
          for (int i = 0; i < s->options->num_vertices_per_primitive; i++) {
-            nir_ssa_def *vtx_idx = nir_load_var(b, s->gs_vtx_indices_vars[i]);
-            nir_ssa_def *addr = pervertex_lds_addr(b, vtx_idx, s->pervertex_lds_bytes);
-            nir_ssa_def *edge = nir_load_shared(b, 1, 32, addr, .base = edge_flag_offset);
+            nir_def *vtx_idx = nir_load_var(b, s->gs_vtx_indices_vars[i]);
+            nir_def *addr = pervertex_lds_addr(b, vtx_idx, s->pervertex_lds_bytes);
+            nir_def *edge = nir_load_shared(b, 1, 32, addr, .base = edge_flag_offset);
             mask = nir_ior(b, mask, nir_ishl_imm(b, edge, 9 + i * 10));
          }
          arg = nir_iand(b, arg, mask);
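
My reading of the export argument layout manipulated above (a sketch, not a hardware spec quote): each vertex occupies a 10-bit field at bit 10*i — a 9-bit vertex index plus its edge flag at bit 9 + 10*i — which is why the code first clears all edge-flag bits via ~edge_flag_bits and then ORs the per-vertex flags back in from LDS.

   #include <stdint.h>

   static uint32_t pack_prim_exp_arg(const uint32_t vtx_idx[3],
                                     const uint32_t edge_flag[3],
                                     unsigned num_vertices)
   {
      uint32_t arg = 0;
      for (unsigned i = 0; i < num_vertices; i++)
         arg |= (vtx_idx[i] << (10 * i)) |            /* 9-bit vertex index */
                ((edge_flag[i] & 1) << (9 + 10 * i)); /* edge flag bit */
      return arg;
   }
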
@@ -597,7 +597,7 @@ emit_ngg_nogs_prim_export(nir_builder *b, lower_ngg_nogs_state *s, nir_ssa_def *
 static void
 emit_ngg_nogs_prim_id_store_shared(nir_builder *b, lower_ngg_nogs_state *s)
 {
-   nir_ssa_def *gs_thread =
+   nir_def *gs_thread =
       s->gs_accepted_var ? nir_load_var(b, s->gs_accepted_var) : has_input_primitive(b);
 
    nir_if *if_gs_thread = nir_push_if(b, gs_thread);
@@ -606,16 +606,16 @@ emit_ngg_nogs_prim_id_store_shared(nir_builder *b, lower_ngg_nogs_state *s)
        * corresponding to the ES thread of the provoking vertex.
        * It will be exported as a per-vertex attribute.
        */
-      nir_ssa_def *gs_vtx_indices[3];
+      nir_def *gs_vtx_indices[3];
       for (unsigned i = 0; i < s->options->num_vertices_per_primitive; i++)
          gs_vtx_indices[i] = nir_load_var(b, s->gs_vtx_indices_vars[i]);
 
-      nir_ssa_def *provoking_vertex = nir_load_provoking_vtx_in_prim_amd(b);
-      nir_ssa_def *provoking_vtx_idx = nir_select_from_ssa_def_array(
+      nir_def *provoking_vertex = nir_load_provoking_vtx_in_prim_amd(b);
+      nir_def *provoking_vtx_idx = nir_select_from_ssa_def_array(
          b, gs_vtx_indices, s->options->num_vertices_per_primitive, provoking_vertex);
 
-      nir_ssa_def *prim_id = nir_load_primitive_id(b);
-      nir_ssa_def *addr = pervertex_lds_addr(b, provoking_vtx_idx, s->pervertex_lds_bytes);
+      nir_def *prim_id = nir_load_primitive_id(b);
+      nir_def *addr = pervertex_lds_addr(b, provoking_vtx_idx, s->pervertex_lds_bytes);
 
       /* The primitive id is always stored at the end of the vertex's LDS space. */
       nir_store_shared(b, prim_id, addr, .base = s->pervertex_lds_bytes - 4);
@@ -626,12 +626,12 @@ emit_ngg_nogs_prim_id_store_shared(nir_builder *b, lower_ngg_nogs_state *s)
 static void
 emit_store_ngg_nogs_es_primitive_id(nir_builder *b, lower_ngg_nogs_state *s)
 {
-   nir_ssa_def *prim_id = NULL;
+   nir_def *prim_id = NULL;
 
    if (b->shader->info.stage == MESA_SHADER_VERTEX) {
       /* LDS address where the primitive ID is stored */
-      nir_ssa_def *thread_id_in_threadgroup = nir_load_local_invocation_index(b);
-      nir_ssa_def *addr =
+      nir_def *thread_id_in_threadgroup = nir_load_local_invocation_index(b);
+      nir_def *addr =
          pervertex_lds_addr(b, thread_id_in_threadgroup, s->pervertex_lds_bytes);
 
       /* Load primitive ID from LDS */
@@ -648,10 +648,10 @@ emit_store_ngg_nogs_es_primitive_id(nir_builder *b, lower_ngg_nogs_state *s)
 }
 
 static void
-add_clipdist_bit(nir_builder *b, nir_ssa_def *dist, unsigned index, nir_variable *mask)
+add_clipdist_bit(nir_builder *b, nir_def *dist, unsigned index, nir_variable *mask)
 {
-   nir_ssa_def *is_neg = nir_flt_imm(b, dist, 0);
-   nir_ssa_def *neg_mask = nir_ishl_imm(b, nir_b2i32(b, is_neg), index);
+   nir_def *is_neg = nir_flt_imm(b, dist, 0);
+   nir_def *neg_mask = nir_ishl_imm(b, nir_b2i32(b, is_neg), index);
    neg_mask = nir_ior(b, neg_mask, nir_load_var(b, mask));
    nir_store_var(b, mask, neg_mask, 1);
 }
@@ -681,7 +681,7 @@ remove_culling_shader_output(nir_builder *b, nir_instr *instr, void *state)
 
    unsigned writemask = nir_intrinsic_write_mask(intrin);
    unsigned component = nir_intrinsic_component(intrin);
-   nir_ssa_def *store_val = intrin->src[0].ssa;
+   nir_def *store_val = intrin->src[0].ssa;
 
    /* Position output - store the value to a variable, remove output store */
    nir_io_semantics io_sem = nir_intrinsic_io_semantics(intrin);
@@ -732,7 +732,7 @@ remove_culling_shader_outputs(nir_shader *culling_shader, lower_ngg_nogs_state *
 }
 
 static void
-rewrite_uses_to_var(nir_builder *b, nir_ssa_def *old_def, nir_variable *replacement_var, unsigned replacement_var_channel)
+rewrite_uses_to_var(nir_builder *b, nir_def *old_def, nir_variable *replacement_var, unsigned replacement_var_channel)
 {
    if (old_def->parent_instr->type == nir_instr_type_load_const)
       return;
@@ -741,21 +741,21 @@ rewrite_uses_to_var(nir_builder *b, nir_ssa_def *old_def, nir_variable *replacem
    if (b->cursor.instr->type == nir_instr_type_phi)
       b->cursor = nir_after_phis(old_def->parent_instr->block);
 
-   nir_ssa_def *pos_val_rep = nir_load_var(b, replacement_var);
-   nir_ssa_def *replacement = nir_channel(b, pos_val_rep, replacement_var_channel);
+   nir_def *pos_val_rep = nir_load_var(b, replacement_var);
+   nir_def *replacement = nir_channel(b, pos_val_rep, replacement_var_channel);
 
    if (old_def->num_components > 1) {
       /* old_def uses a swizzled vector component.
        * There is no way to replace the uses of just a single vector component,
        * so instead create a new vector and replace all uses of the old vector.
        */
-      nir_ssa_def *old_def_elements[NIR_MAX_VEC_COMPONENTS] = {0};
+      nir_def *old_def_elements[NIR_MAX_VEC_COMPONENTS] = {0};
       for (unsigned j = 0; j < old_def->num_components; ++j)
          old_def_elements[j] = nir_channel(b, old_def, j);
       replacement = nir_vec(b, old_def_elements, old_def->num_components);
    }
 
-   nir_ssa_def_rewrite_uses_after(old_def, replacement, replacement->parent_instr);
+   nir_def_rewrite_uses_after(old_def, replacement, replacement->parent_instr);
 }
 
 static bool
@@ -786,7 +786,7 @@ remove_extra_pos_output(nir_builder *b, nir_instr *instr, void *state)
     * try to avoid calculating it again by rewriting the usages
     * of the store components here.
     */
-   nir_ssa_def *store_val = intrin->src[0].ssa;
+   nir_def *store_val = intrin->src[0].ssa;
    unsigned store_pos_component = nir_intrinsic_component(intrin);
 
    nir_instr_remove(instr);
@@ -810,7 +810,7 @@ remove_extra_pos_output(nir_builder *b, nir_instr *instr, void *state)
          /* Remember the current components whose uses we wish to replace.
           * This is needed because rewriting one source can affect the others too.
           */
-         nir_ssa_def *vec_comps[NIR_MAX_VEC_COMPONENTS] = {0};
+         nir_def *vec_comps[NIR_MAX_VEC_COMPONENTS] = {0};
          for (unsigned i = 0; i < num_vec_src; i++)
             vec_comps[i] = alu->src[i].src.ssa;
 
@@ -849,8 +849,8 @@ remove_compacted_arg(lower_ngg_nogs_state *s, nir_builder *b, unsigned idx)
     * This will cause NIR's DCE to recognize the load and its phis as dead.
     */
    b->cursor = nir_before_instr(&s->overwrite_args->instr);
-   nir_ssa_def *undef_arg = nir_ssa_undef(b, 1, 32);
-   nir_ssa_def_rewrite_uses(s->overwrite_args->src[idx].ssa, undef_arg);
+   nir_def *undef_arg = nir_undef(b, 1, 32);
+   nir_def_rewrite_uses(s->overwrite_args->src[idx].ssa, undef_arg);
 
    s->compact_arg_stores[idx] = NULL;
    return true;
@@ -945,10 +945,10 @@ compact_vertices_after_culling(nir_builder *b,
                                lower_ngg_nogs_state *s,
                                nir_variable **repacked_variables,
                                nir_variable **gs_vtxaddr_vars,
-                               nir_ssa_def *invocation_index,
-                               nir_ssa_def *es_vertex_lds_addr,
-                               nir_ssa_def *es_exporter_tid,
-                               nir_ssa_def *num_live_vertices_in_workgroup,
+                               nir_def *invocation_index,
+                               nir_def *es_vertex_lds_addr,
+                               nir_def *es_exporter_tid,
+                               nir_def *num_live_vertices_in_workgroup,
                                unsigned pervertex_lds_bytes,
                                unsigned num_repacked_variables)
 {
@@ -959,18 +959,18 @@ compact_vertices_after_culling(nir_builder *b,
 
    nir_if *if_es_accepted = nir_push_if(b, nir_load_var(b, es_accepted_var));
    {
-      nir_ssa_def *exporter_addr = pervertex_lds_addr(b, es_exporter_tid, pervertex_lds_bytes);
+      nir_def *exporter_addr = pervertex_lds_addr(b, es_exporter_tid, pervertex_lds_bytes);
 
       /* Store the exporter thread's index to the LDS space of the current thread so GS threads can load it */
       nir_store_shared(b, nir_u2u8(b, es_exporter_tid), es_vertex_lds_addr, .base = lds_es_exporter_tid);
 
       /* Store the current thread's position output to the exporter thread's LDS space */
-      nir_ssa_def *pos = nir_load_var(b, position_value_var);
+      nir_def *pos = nir_load_var(b, position_value_var);
       nir_store_shared(b, pos, exporter_addr, .base = lds_es_pos_x);
 
       /* Store the current thread's repackable arguments to the exporter thread's LDS space */
       for (unsigned i = 0; i < num_repacked_variables; ++i) {
-         nir_ssa_def *arg_val = nir_load_var(b, repacked_variables[i]);
+         nir_def *arg_val = nir_load_var(b, repacked_variables[i]);
          nir_intrinsic_instr *store = nir_store_shared(b, arg_val, exporter_addr, .base = lds_es_arg_0 + 4u * i);
 
          s->compact_arg_stores[i] = &store->instr;
@@ -978,7 +978,7 @@ compact_vertices_after_culling(nir_builder *b,
 
       /* TES rel patch id does not cost extra dword */
       if (b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
-         nir_ssa_def *arg_val = nir_load_var(b, s->repacked_rel_patch_id);
+         nir_def *arg_val = nir_load_var(b, s->repacked_rel_patch_id);
          nir_intrinsic_instr *store =
             nir_store_shared(b, nir_u2u8(b, arg_val), exporter_addr,
                              .base = lds_es_tes_rel_patch_id);
@@ -995,46 +995,46 @@ compact_vertices_after_culling(nir_builder *b,
    nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                          .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
 
-   nir_ssa_def *es_survived = nir_ilt(b, invocation_index, num_live_vertices_in_workgroup);
+   nir_def *es_survived = nir_ilt(b, invocation_index, num_live_vertices_in_workgroup);
    nir_if *if_packed_es_thread = nir_push_if(b, es_survived);
    {
       /* Read position from the current ES thread's LDS space (written by the exported vertex's ES thread) */
-      nir_ssa_def *exported_pos = nir_load_shared(b, 4, 32, es_vertex_lds_addr, .base = lds_es_pos_x);
+      nir_def *exported_pos = nir_load_shared(b, 4, 32, es_vertex_lds_addr, .base = lds_es_pos_x);
       nir_store_var(b, position_value_var, exported_pos, 0xfu);
 
       /* Read the repacked arguments */
       for (unsigned i = 0; i < num_repacked_variables; ++i) {
-         nir_ssa_def *arg_val = nir_load_shared(b, 1, 32, es_vertex_lds_addr, .base = lds_es_arg_0 + 4u * i);
+         nir_def *arg_val = nir_load_shared(b, 1, 32, es_vertex_lds_addr, .base = lds_es_arg_0 + 4u * i);
          nir_store_var(b, repacked_variables[i], arg_val, 0x1u);
       }
 
       if (b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
-         nir_ssa_def *arg_val = nir_load_shared(b, 1, 8, es_vertex_lds_addr,
+         nir_def *arg_val = nir_load_shared(b, 1, 8, es_vertex_lds_addr,
                                                 .base = lds_es_tes_rel_patch_id);
          nir_store_var(b, s->repacked_rel_patch_id, nir_u2u32(b, arg_val), 0x1u);
       }
    }
    nir_push_else(b, if_packed_es_thread);
    {
-      nir_store_var(b, position_value_var, nir_ssa_undef(b, 4, 32), 0xfu);
+      nir_store_var(b, position_value_var, nir_undef(b, 4, 32), 0xfu);
       for (unsigned i = 0; i < num_repacked_variables; ++i)
-         nir_store_var(b, repacked_variables[i], nir_ssa_undef(b, 1, 32), 0x1u);
+         nir_store_var(b, repacked_variables[i], nir_undef(b, 1, 32), 0x1u);
    }
    nir_pop_if(b, if_packed_es_thread);
 
    nir_if *if_gs_accepted = nir_push_if(b, nir_load_var(b, gs_accepted_var));
    {
-      nir_ssa_def *exporter_vtx_indices[3] = {0};
+      nir_def *exporter_vtx_indices[3] = {0};
 
       /* Load the index of the ES threads that will export the current GS thread's vertices */
       for (unsigned v = 0; v < s->options->num_vertices_per_primitive; ++v) {
-         nir_ssa_def *vtx_addr = nir_load_var(b, gs_vtxaddr_vars[v]);
-         nir_ssa_def *exporter_vtx_idx = nir_load_shared(b, 1, 8, vtx_addr, .base = lds_es_exporter_tid);
+         nir_def *vtx_addr = nir_load_var(b, gs_vtxaddr_vars[v]);
+         nir_def *exporter_vtx_idx = nir_load_shared(b, 1, 8, vtx_addr, .base = lds_es_exporter_tid);
          exporter_vtx_indices[v] = nir_u2u32(b, exporter_vtx_idx);
          nir_store_var(b, s->gs_vtx_indices_vars[v], exporter_vtx_indices[v], 0x1);
       }
 
-      nir_ssa_def *prim_exp_arg =
+      nir_def *prim_exp_arg =
          emit_pack_ngg_prim_exp_arg(b, s->options->num_vertices_per_primitive,
                                     exporter_vtx_indices, NULL);
       nir_store_var(b, prim_exp_arg_var, prim_exp_arg, 0x1u);
@@ -1045,7 +1045,7 @@ compact_vertices_after_culling(nir_builder *b,
 }
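
/* Hypothetical illustration of the compaction above: with 8 invocations of
 * which only threads 1, 4 and 6 pass culling, their es_exporter_tid values
 * are 0, 1 and 2. Thread 1 writes its vertex data to slot 0, thread 4 to
 * slot 1 and thread 6 to slot 2; after the barrier, the first
 * num_live_vertices_in_workgroup (= 3) threads reload from their own slot
 * and become the exporting ES threads.
 */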
 
 static void
-analyze_shader_before_culling_walk(nir_ssa_def *ssa,
+analyze_shader_before_culling_walk(nir_def *ssa,
                                    uint8_t flag,
                                    lower_ngg_nogs_state *s)
 {
@@ -1131,7 +1131,7 @@ analyze_shader_before_culling(nir_shader *shader, lower_ngg_nogs_state *s)
                continue;
 
             nir_io_semantics io_sem = nir_intrinsic_io_semantics(intrin);
-            nir_ssa_def *store_val = intrin->src[0].ssa;
+            nir_def *store_val = intrin->src[0].ssa;
             uint8_t flag = io_sem.location == VARYING_SLOT_POS ? nggc_passflag_used_by_pos : nggc_passflag_used_by_other;
             analyze_shader_before_culling_walk(store_val, flag, s);
          }
@@ -1139,7 +1139,7 @@ analyze_shader_before_culling(nir_shader *shader, lower_ngg_nogs_state *s)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 find_reusable_ssa_def(nir_instr *instr)
 {
    /* Find instructions whose SSA definitions are used by both
@@ -1180,7 +1180,7 @@ find_reusable_ssa_def(nir_instr *instr)
 }
 
 static const struct glsl_type *
-glsl_uint_type_for_ssa(nir_ssa_def *ssa)
+glsl_uint_type_for_ssa(nir_def *ssa)
 {
    enum glsl_base_type base_type = GLSL_TYPE_UINT;
    switch (ssa->bit_size) {
@@ -1226,7 +1226,7 @@ save_reusable_variables(nir_builder *b, lower_ngg_nogs_state *s)
           * processes a different vertex in the top and bottom part of the shader.
           * Therefore, we only reuse uniform values.
           */
-         nir_ssa_def *ssa = find_reusable_ssa_def(instr);
+         nir_def *ssa = find_reusable_ssa_def(instr);
          if (!ssa)
             continue;
 
@@ -1256,8 +1256,8 @@ save_reusable_variables(nir_builder *b, lower_ngg_nogs_state *s)
                      ? nir_after_instr_and_phis(instr)
                      : nir_after_instr(instr);
          nir_store_var(b, saved->var, saved->ssa, BITFIELD_MASK(ssa->num_components));
-         nir_ssa_def *reloaded = nir_load_var(b, saved->var);
-         nir_ssa_def_rewrite_uses_after(ssa, reloaded, reloaded->parent_instr);
+         nir_def *reloaded = nir_load_var(b, saved->var);
+         nir_def_rewrite_uses_after(ssa, reloaded, reloaded->parent_instr);
       }
 
       /* Look at the next CF node. */
@@ -1348,7 +1348,7 @@ cull_primitive_accepted(nir_builder *b, void *state)
 
 static void
 clipdist_culling_es_part(nir_builder *b, lower_ngg_nogs_state *s,
-                         nir_ssa_def *es_vertex_lds_addr)
+                         nir_def *es_vertex_lds_addr)
 {
   /* No gl_ClipDistance is used, but user-defined clip planes are enabled. */
    if (s->options->user_clip_plane_enable_mask && !s->has_clipdist) {
@@ -1356,15 +1356,15 @@ clipdist_culling_es_part(nir_builder *b, lower_ngg_nogs_state *s,
       nir_variable *clip_vertex_var =
          b->shader->info.outputs_written & BITFIELD64_BIT(VARYING_SLOT_CLIP_VERTEX) ?
          s->clip_vertex_var : s->position_value_var;
-      nir_ssa_def *clip_vertex = nir_load_var(b, clip_vertex_var);
+      nir_def *clip_vertex = nir_load_var(b, clip_vertex_var);
 
      /* Clip against the user-defined clip planes. */
       for (unsigned i = 0; i < 8; i++) {
          if (!(s->options->user_clip_plane_enable_mask & BITFIELD_BIT(i)))
             continue;
 
-         nir_ssa_def *plane = nir_load_user_clip_plane(b, .ucp_id = i);
-         nir_ssa_def *dist = nir_fdot(b, clip_vertex, plane);
+         nir_def *plane = nir_load_user_clip_plane(b, .ucp_id = i);
+         nir_def *dist = nir_fdot(b, clip_vertex, plane);
          add_clipdist_bit(b, dist, i, s->clipdist_neg_mask_var);
       }
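      /* Hypothetical example: with user clip planes 0 and 3 enabled, a
       * vertex on the negative side of plane 3 only gets mask 0b00001000.
       * The GS thread later ANDs the masks of the primitive's vertices, so
       * a non-zero result means all vertices are behind the same plane and
       * the primitive can be rejected.
       */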
 
@@ -1373,7 +1373,7 @@ clipdist_culling_es_part(nir_builder *b, lower_ngg_nogs_state *s,
 
   /* Store clipdist_neg_mask to LDS for culling later in the GS thread. */
    if (s->has_clipdist) {
-      nir_ssa_def *mask = nir_load_var(b, s->clipdist_neg_mask_var);
+      nir_def *mask = nir_load_var(b, s->clipdist_neg_mask_var);
       nir_store_shared(b, nir_u2u8(b, mask), es_vertex_lds_addr,
                        .base = lds_es_clipdist_neg_mask);
    }
@@ -1463,7 +1463,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
     * The position output is stored into a temporary variable, and reloaded later.
     */
 
-   nir_ssa_def *es_thread = has_input_vertex(b);
+   nir_def *es_thread = has_input_vertex(b);
    nir_if *if_es_thread = nir_push_if(b, es_thread);
    {
       /* Initialize the position output variable to zeroes, in case not all VS/TES invocations store the output.
@@ -1484,7 +1484,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
             nir_store_var(b, repacked_variables[1], nir_load_instance_id(b), 0x1u);
       } else if (b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
          nir_store_var(b, s->repacked_rel_patch_id, nir_load_tess_rel_patch_id_amd(b), 0x1u);
-         nir_ssa_def *tess_coord = nir_load_tess_coord(b);
+         nir_def *tess_coord = nir_load_tess_coord(b);
          nir_store_var(b, repacked_variables[0], nir_channel(b, tess_coord, 0), 0x1u);
          nir_store_var(b, repacked_variables[1], nir_channel(b, tess_coord, 1), 0x1u);
          if (uses_tess_primitive_id)
@@ -1496,7 +1496,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
    nir_pop_if(b, if_es_thread);
 
    nir_store_var(b, s->es_accepted_var, es_thread, 0x1u);
-   nir_ssa_def *gs_thread = has_input_primitive(b);
+   nir_def *gs_thread = has_input_primitive(b);
    nir_store_var(b, s->gs_accepted_var, gs_thread, 0x1u);
 
    /* Remove all non-position outputs, and put the position output into the variable. */
@@ -1504,7 +1504,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
    remove_culling_shader_outputs(b->shader, s);
    b->cursor = nir_after_cf_list(&impl->body);
 
-   nir_ssa_def *lds_scratch_base = nir_load_lds_ngg_scratch_base_amd(b);
+   nir_def *lds_scratch_base = nir_load_lds_ngg_scratch_base_amd(b);
 
    /* Run culling algorithms if culling is enabled.
     *
@@ -1515,19 +1515,19 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
 
    nir_if *if_cull_en = nir_push_if(b, nir_load_cull_any_enabled_amd(b));
    {
-      nir_ssa_def *invocation_index = nir_load_local_invocation_index(b);
-      nir_ssa_def *es_vertex_lds_addr = pervertex_lds_addr(b, invocation_index, pervertex_lds_bytes);
+      nir_def *invocation_index = nir_load_local_invocation_index(b);
+      nir_def *es_vertex_lds_addr = pervertex_lds_addr(b, invocation_index, pervertex_lds_bytes);
 
       /* ES invocations store their vertex data to LDS for GS threads to read. */
       if_es_thread = nir_push_if(b, es_thread);
       if_es_thread->control = nir_selection_control_divergent_always_taken;
       {
          /* Store position components that are relevant to culling in LDS */
-         nir_ssa_def *pre_cull_pos = nir_load_var(b, s->position_value_var);
-         nir_ssa_def *pre_cull_w = nir_channel(b, pre_cull_pos, 3);
+         nir_def *pre_cull_pos = nir_load_var(b, s->position_value_var);
+         nir_def *pre_cull_w = nir_channel(b, pre_cull_pos, 3);
          nir_store_shared(b, pre_cull_w, es_vertex_lds_addr, .base = lds_es_pos_w);
-         nir_ssa_def *pre_cull_x_div_w = nir_fdiv(b, nir_channel(b, pre_cull_pos, 0), pre_cull_w);
-         nir_ssa_def *pre_cull_y_div_w = nir_fdiv(b, nir_channel(b, pre_cull_pos, 1), pre_cull_w);
+         nir_def *pre_cull_x_div_w = nir_fdiv(b, nir_channel(b, pre_cull_pos, 0), pre_cull_w);
+         nir_def *pre_cull_y_div_w = nir_fdiv(b, nir_channel(b, pre_cull_pos, 1), pre_cull_w);
          nir_store_shared(b, nir_vec2(b, pre_cull_x_div_w, pre_cull_y_div_w), es_vertex_lds_addr, .base = lds_es_pos_x);
 
          /* Clear out the ES accepted flag in LDS */
@@ -1548,12 +1548,12 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
       nir_if *if_gs_thread = nir_push_if(b, gs_thread);
       {
          /* Load vertex indices from input VGPRs */
-         nir_ssa_def *vtx_idx[3] = {0};
+         nir_def *vtx_idx[3] = {0};
          for (unsigned vertex = 0; vertex < s->options->num_vertices_per_primitive;
               ++vertex)
             vtx_idx[vertex] = nir_load_var(b, s->gs_vtx_indices_vars[vertex]);
 
-         nir_ssa_def *pos[3][4] = {0};
+         nir_def *pos[3][4] = {0};
 
          /* Load W positions of vertices first because the culling code will use these first */
          for (unsigned vtx = 0; vtx < s->options->num_vertices_per_primitive; ++vtx) {
@@ -1564,16 +1564,16 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
 
          /* Load the X/W, Y/W positions of vertices */
          for (unsigned vtx = 0; vtx < s->options->num_vertices_per_primitive; ++vtx) {
-            nir_ssa_def *xy = nir_load_shared(b, 2, 32, s->vtx_addr[vtx], .base = lds_es_pos_x);
+            nir_def *xy = nir_load_shared(b, 2, 32, s->vtx_addr[vtx], .base = lds_es_pos_x);
             pos[vtx][0] = nir_channel(b, xy, 0);
             pos[vtx][1] = nir_channel(b, xy, 1);
          }
 
-         nir_ssa_def *accepted_by_clipdist;
+         nir_def *accepted_by_clipdist;
          if (s->has_clipdist) {
-            nir_ssa_def *clipdist_neg_mask = nir_imm_intN_t(b, 0xff, 8);
+            nir_def *clipdist_neg_mask = nir_imm_intN_t(b, 0xff, 8);
             for (unsigned vtx = 0; vtx < s->options->num_vertices_per_primitive; ++vtx) {
-               nir_ssa_def *mask =
+               nir_def *mask =
                   nir_load_shared(b, 1, 8, s->vtx_addr[vtx],
                                   .base = lds_es_clipdist_neg_mask);
                clipdist_neg_mask = nir_iand(b, clipdist_neg_mask, mask);
@@ -1600,24 +1600,24 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
       if_es_thread = nir_push_if(b, es_thread);
       if_es_thread->control = nir_selection_control_divergent_always_taken;
       {
-         nir_ssa_def *accepted = nir_load_shared(b, 1, 8u, es_vertex_lds_addr, .base = lds_es_vertex_accepted, .align_mul = 4u);
-         nir_ssa_def *accepted_bool = nir_ine_imm(b, nir_u2u32(b, accepted), 0);
+         nir_def *accepted = nir_load_shared(b, 1, 8u, es_vertex_lds_addr, .base = lds_es_vertex_accepted, .align_mul = 4u);
+         nir_def *accepted_bool = nir_ine_imm(b, nir_u2u32(b, accepted), 0);
          nir_store_var(b, s->es_accepted_var, accepted_bool, 0x1u);
       }
       nir_pop_if(b, if_es_thread);
 
-      nir_ssa_def *es_accepted = nir_load_var(b, s->es_accepted_var);
+      nir_def *es_accepted = nir_load_var(b, s->es_accepted_var);
 
       /* Repack the vertices that survived the culling. */
       wg_repack_result rep = repack_invocations_in_workgroup(b, es_accepted, lds_scratch_base,
                                                              s->max_num_waves,
                                                              s->options->wave_size);
-      nir_ssa_def *num_live_vertices_in_workgroup = rep.num_repacked_invocations;
-      nir_ssa_def *es_exporter_tid = rep.repacked_invocation_index;
+      nir_def *num_live_vertices_in_workgroup = rep.num_repacked_invocations;
+      nir_def *es_exporter_tid = rep.repacked_invocation_index;
 
       /* If all vertices are culled, set primitive count to 0 as well. */
-      nir_ssa_def *num_exported_prims = nir_load_workgroup_num_input_primitives_amd(b);
-      nir_ssa_def *fully_culled = nir_ieq_imm(b, num_live_vertices_in_workgroup, 0u);
+      nir_def *num_exported_prims = nir_load_workgroup_num_input_primitives_amd(b);
+      nir_def *fully_culled = nir_ieq_imm(b, num_live_vertices_in_workgroup, 0u);
       num_exported_prims = nir_bcsel(b, fully_culled, nir_imm_int(b, 0u), num_exported_prims);
       nir_store_var(b, s->gs_exported_var, nir_iand(b, nir_inot(b, fully_culled), has_input_primitive(b)), 0x1u);
 
@@ -1646,8 +1646,8 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
      /* When culling is disabled at runtime, behave the same as a shader compiled without culling. */
       nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, nir_load_subgroup_id(b), 0));
       {
-         nir_ssa_def *vtx_cnt = nir_load_workgroup_num_input_vertices_amd(b);
-         nir_ssa_def *prim_cnt = nir_load_workgroup_num_input_primitives_amd(b);
+         nir_def *vtx_cnt = nir_load_workgroup_num_input_vertices_amd(b);
+         nir_def *prim_cnt = nir_load_workgroup_num_input_primitives_amd(b);
          alloc_vertices_and_primitives(b, vtx_cnt, prim_cnt);
       }
       nir_pop_if(b, if_wave_0);
@@ -1692,7 +1692,7 @@ ngg_nogs_store_edgeflag_to_lds(nir_builder *b, lower_ngg_nogs_state *s)
       return;
 
   /* Clamp the user edge flag to 1 for the later bit operations. */
-   nir_ssa_def *edgeflag = s->outputs[VARYING_SLOT_EDGE][0];
+   nir_def *edgeflag = s->outputs[VARYING_SLOT_EDGE][0];
    edgeflag = nir_umin(b, edgeflag, nir_imm_int(b, 1));
 
    /* user edge flag is stored at the beginning of a vertex if streamout is not enabled */
@@ -1703,8 +1703,8 @@ ngg_nogs_store_edgeflag_to_lds(nir_builder *b, lower_ngg_nogs_state *s)
       offset = packed_location * 16;
    }
 
-   nir_ssa_def *tid = nir_load_local_invocation_index(b);
-   nir_ssa_def *addr = pervertex_lds_addr(b, tid, s->pervertex_lds_bytes);
+   nir_def *tid = nir_load_local_invocation_index(b);
+   nir_def *addr = pervertex_lds_addr(b, tid, s->pervertex_lds_bytes);
 
    nir_store_shared(b, edgeflag, addr, .base = offset);
 }
@@ -1738,8 +1738,8 @@ ngg_nogs_store_xfb_outputs_to_lds(nir_builder *b, lower_ngg_nogs_state *s)
       }
    }
 
-   nir_ssa_def *tid = nir_load_local_invocation_index(b);
-   nir_ssa_def *addr = pervertex_lds_addr(b, tid, s->pervertex_lds_bytes);
+   nir_def *tid = nir_load_local_invocation_index(b);
+   nir_def *addr = pervertex_lds_addr(b, tid, s->pervertex_lds_bytes);
 
    u_foreach_bit64(slot, xfb_outputs) {
       unsigned packed_location =
@@ -1762,7 +1762,7 @@ ngg_nogs_store_xfb_outputs_to_lds(nir_builder *b, lower_ngg_nogs_state *s)
          *   Vulkan does not allow streamout outputs smaller than 32 bits.
           *   OpenGL puts 16bit outputs in VARYING_SLOT_VAR0_16BIT.
           */
-         nir_ssa_def *store_val = nir_vec(b, &s->outputs[slot][start], (unsigned)count);
+         nir_def *store_val = nir_vec(b, &s->outputs[slot][start], (unsigned)count);
          nir_store_shared(b, store_val, addr, .base = packed_location * 16 + start * 4);
       }
    }
@@ -1783,25 +1783,25 @@ ngg_nogs_store_xfb_outputs_to_lds(nir_builder *b, lower_ngg_nogs_state *s)
             mask_hi &= ~BITFIELD_BIT(i);
       }
 
-      nir_ssa_def **outputs_lo = s->outputs_16bit_lo[slot];
-      nir_ssa_def **outputs_hi = s->outputs_16bit_hi[slot];
-      nir_ssa_def *undef = nir_ssa_undef(b, 1, 16);
+      nir_def **outputs_lo = s->outputs_16bit_lo[slot];
+      nir_def **outputs_hi = s->outputs_16bit_hi[slot];
+      nir_def *undef = nir_undef(b, 1, 16);
 
       unsigned mask = mask_lo | mask_hi;
       while (mask) {
          int start, count;
          u_bit_scan_consecutive_range(&mask, &start, &count);
 
-         nir_ssa_def *values[4] = {0};
+         nir_def *values[4] = {0};
          for (int c = start; c < start + count; ++c) {
-            nir_ssa_def *lo = mask_lo & BITFIELD_BIT(c) ? outputs_lo[c] : undef;
-            nir_ssa_def *hi = mask_hi & BITFIELD_BIT(c) ? outputs_hi[c] : undef;
+            nir_def *lo = mask_lo & BITFIELD_BIT(c) ? outputs_lo[c] : undef;
+            nir_def *hi = mask_hi & BITFIELD_BIT(c) ? outputs_hi[c] : undef;
 
            /* Extend 8/16-bit values to 32 bits; 64-bit values have already been lowered. */
             values[c - start] = nir_pack_32_2x16_split(b, lo, hi);
          }
 
-         nir_ssa_def *store_val = nir_vec(b, values, (unsigned)count);
+         nir_def *store_val = nir_vec(b, values, (unsigned)count);
          nir_store_shared(b, store_val, addr, .base = packed_location * 16 + start * 4);
       }
    }
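
   /* For reference: nir_pack_32_2x16_split(b, lo, hi) yields lo | (hi << 16);
    * e.g. lo = 0x1234 and hi = 0xABCD pack to 0xABCD1234. Halves that were
    * never written are filled with an undef above.
    */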
@@ -1811,20 +1811,20 @@ static void
 ngg_build_streamout_buffer_info(nir_builder *b,
                                 nir_xfb_info *info,
                                 bool has_xfb_prim_query,
-                                nir_ssa_def *scratch_base,
-                                nir_ssa_def *tid_in_tg,
-                                nir_ssa_def *gen_prim[4],
-                                nir_ssa_def *prim_stride_ret[4],
-                                nir_ssa_def *so_buffer_ret[4],
-                                nir_ssa_def *buffer_offsets_ret[4],
-                                nir_ssa_def *emit_prim_ret[4])
+                                nir_def *scratch_base,
+                                nir_def *tid_in_tg,
+                                nir_def *gen_prim[4],
+                                nir_def *prim_stride_ret[4],
+                                nir_def *so_buffer_ret[4],
+                                nir_def *buffer_offsets_ret[4],
+                                nir_def *emit_prim_ret[4])
 {
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
+   nir_def *undef = nir_undef(b, 1, 32);
 
   /* radeonsi passes this value as a shader argument when the stage is VS.
    * Streamout needs an accurate number of vertices per primitive to write
    * the correct amount of data to the buffer.
    */
-   nir_ssa_def *num_vert_per_prim = nir_load_num_vertices_per_primitive_amd(b);
+   nir_def *num_vert_per_prim = nir_load_num_vertices_per_primitive_amd(b);
    for (unsigned buffer = 0; buffer < 4; buffer++) {
       if (!(info->buffers_written & BITFIELD_BIT(buffer)))
          continue;
@@ -1838,18 +1838,18 @@ ngg_build_streamout_buffer_info(nir_builder *b,
 
    nir_if *if_invocation_0 = nir_push_if(b, nir_ieq_imm(b, tid_in_tg, 0));
    {
-      nir_ssa_def *workgroup_buffer_sizes[4];
+      nir_def *workgroup_buffer_sizes[4];
       for (unsigned buffer = 0; buffer < 4; buffer++) {
          if (info->buffers_written & BITFIELD_BIT(buffer)) {
-            nir_ssa_def *buffer_size = nir_channel(b, so_buffer_ret[buffer], 2);
+            nir_def *buffer_size = nir_channel(b, so_buffer_ret[buffer], 2);
            /* In radeonsi, we may not know at compile time whether a
             * transform feedback buffer is bound, so check the buffer size
             * at runtime and skip the GDS update for unbound buffers.
             * Otherwise, a previous draw compiled with streamout but with no
             * feedback buffer bound would miss its GDS update, corrupting
             * the current draw's streamout.
             */
-            nir_ssa_def *buffer_valid = nir_ine_imm(b, buffer_size, 0);
-            nir_ssa_def *inc_buffer_size =
+            nir_def *buffer_valid = nir_ine_imm(b, buffer_size, 0);
+            nir_def *inc_buffer_size =
                nir_imul(b, gen_prim[info->buffer_to_stream[buffer]], prim_stride_ret[buffer]);
             workgroup_buffer_sizes[buffer] =
                nir_bcsel(b, buffer_valid, inc_buffer_size, nir_imm_int(b, 0));
@@ -1857,31 +1857,31 @@ ngg_build_streamout_buffer_info(nir_builder *b,
             workgroup_buffer_sizes[buffer] = undef;
       }
 
-      nir_ssa_def *ordered_id = nir_load_ordered_id_amd(b);
+      nir_def *ordered_id = nir_load_ordered_id_amd(b);
      /* Get the current global offset of each buffer and increase it by the
       * workgroup's buffer size. This is an ordered operation sorted by
       * ordered_id; each buffer's info is in one channel of a vec4.
        */
-      nir_ssa_def *buffer_offsets =
+      nir_def *buffer_offsets =
          nir_ordered_xfb_counter_add_amd(b, ordered_id, nir_vec(b, workgroup_buffer_sizes, 4),
                                          /* mask of buffers to update */
                                          .write_mask = info->buffers_written);
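      /* Illustrative (hypothetical) behavior: if three workgroups with
       * ordered_id 0, 1 and 2 each add 96 bytes for buffer 0, the returned
       * offsets are 0, 96 and 192 no matter which wave executes first,
       * i.e. an atomic fetch-add performed in ordered_id order.
       */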
 
-      nir_ssa_def *emit_prim[4];
-      memcpy(emit_prim, gen_prim, 4 * sizeof(nir_ssa_def *));
+      nir_def *emit_prim[4];
+      memcpy(emit_prim, gen_prim, 4 * sizeof(nir_def *));
 
-      nir_ssa_def *any_overflow = nir_imm_false(b);
-      nir_ssa_def *overflow_amount[4] = {undef, undef, undef, undef};
+      nir_def *any_overflow = nir_imm_false(b);
+      nir_def *overflow_amount[4] = {undef, undef, undef, undef};
 
       for (unsigned buffer = 0; buffer < 4; buffer++) {
          if (!(info->buffers_written & BITFIELD_BIT(buffer)))
             continue;
 
-         nir_ssa_def *buffer_size = nir_channel(b, so_buffer_ret[buffer], 2);
-         nir_ssa_def *buffer_offset = nir_channel(b, buffer_offsets, buffer);
-         nir_ssa_def *remain_size = nir_isub(b, buffer_size, buffer_offset);
-         nir_ssa_def *remain_prim = nir_idiv(b, remain_size, prim_stride_ret[buffer]);
-         nir_ssa_def *overflow = nir_ilt(b, buffer_size, buffer_offset);
+         nir_def *buffer_size = nir_channel(b, so_buffer_ret[buffer], 2);
+         nir_def *buffer_offset = nir_channel(b, buffer_offsets, buffer);
+         nir_def *remain_size = nir_isub(b, buffer_size, buffer_offset);
+         nir_def *remain_prim = nir_idiv(b, remain_size, prim_stride_ret[buffer]);
+         nir_def *overflow = nir_ilt(b, buffer_size, buffer_offset);
 
          any_overflow = nir_ior(b, any_overflow, overflow);
          overflow_amount[buffer] = nir_imax(b, nir_imm_int(b, 0),
@@ -1957,17 +1957,17 @@ ngg_build_streamout_buffer_info(nir_builder *b,
 
 static void
 ngg_build_streamout_vertex(nir_builder *b, nir_xfb_info *info,
-                           unsigned stream, nir_ssa_def *so_buffer[4],
-                           nir_ssa_def *buffer_offsets[4],
-                           nir_ssa_def *vtx_buffer_idx, nir_ssa_def *vtx_lds_addr,
+                           unsigned stream, nir_def *so_buffer[4],
+                           nir_def *buffer_offsets[4],
+                           nir_def *vtx_buffer_idx, nir_def *vtx_lds_addr,
                            shader_output_types *output_types)
 {
-   nir_ssa_def *vtx_buffer_offsets[4];
+   nir_def *vtx_buffer_offsets[4];
    for (unsigned buffer = 0; buffer < 4; buffer++) {
       if (!(info->buffers_written & BITFIELD_BIT(buffer)))
          continue;
 
-      nir_ssa_def *offset = nir_imul_imm(b, vtx_buffer_idx, info->buffers[buffer].stride);
+      nir_def *offset = nir_imul_imm(b, vtx_buffer_idx, info->buffers[buffer].stride);
       vtx_buffer_offsets[buffer] = nir_iadd(b, buffer_offsets[buffer], offset);
    }
 
@@ -1993,7 +1993,7 @@ ngg_build_streamout_vertex(nir_builder *b, nir_xfb_info *info,
 
       assert(u_bit_consecutive(out->component_offset, count) == out->component_mask);
 
-      nir_ssa_def *out_data =
+      nir_def *out_data =
          nir_load_shared(b, count, 32, vtx_lds_addr, .base = offset);
 
      /* Up-scaling 16-bit outputs to 32 bits.
@@ -2005,11 +2005,11 @@ ngg_build_streamout_vertex(nir_builder *b, nir_xfb_info *info,
        */
       if (out->location >= VARYING_SLOT_VAR0_16BIT) {
          unsigned index = out->location - VARYING_SLOT_VAR0_16BIT;
-         nir_ssa_def *values[4];
+         nir_def *values[4];
 
          for (int j = 0; j < count; j++) {
             unsigned c = out->component_offset + j;
-            nir_ssa_def *v = nir_channel(b, out_data, j);
+            nir_def *v = nir_channel(b, out_data, j);
             nir_alu_type t;
 
             if (out->high_16bits) {
@@ -2027,7 +2027,7 @@ ngg_build_streamout_vertex(nir_builder *b, nir_xfb_info *info,
          out_data = nir_vec(b, values, count);
       }
 
-      nir_ssa_def *zero = nir_imm_int(b, 0);
+      nir_def *zero = nir_imm_int(b, 0);
       nir_store_buffer_amd(b, out_data, so_buffer[out->buffer],
                            vtx_buffer_offsets[out->buffer],
                            zero, zero,
@@ -2042,16 +2042,16 @@ ngg_nogs_build_streamout(nir_builder *b, lower_ngg_nogs_state *s)
 {
    nir_xfb_info *info = b->shader->xfb_info;
 
-   nir_ssa_def *lds_scratch_base = nir_load_lds_ngg_scratch_base_amd(b);
+   nir_def *lds_scratch_base = nir_load_lds_ngg_scratch_base_amd(b);
 
   /* Get the global buffer offsets at which this workgroup will stream out data. */
-   nir_ssa_def *generated_prim = nir_load_workgroup_num_input_primitives_amd(b);
-   nir_ssa_def *gen_prim_per_stream[4] = {generated_prim, 0, 0, 0};
-   nir_ssa_def *emit_prim_per_stream[4] = {0};
-   nir_ssa_def *buffer_offsets[4] = {0};
-   nir_ssa_def *so_buffer[4] = {0};
-   nir_ssa_def *prim_stride[4] = {0};
-   nir_ssa_def *tid_in_tg = nir_load_local_invocation_index(b);
+   nir_def *generated_prim = nir_load_workgroup_num_input_primitives_amd(b);
+   nir_def *gen_prim_per_stream[4] = {generated_prim, 0, 0, 0};
+   nir_def *emit_prim_per_stream[4] = {0};
+   nir_def *buffer_offsets[4] = {0};
+   nir_def *so_buffer[4] = {0};
+   nir_def *prim_stride[4] = {0};
+   nir_def *tid_in_tg = nir_load_local_invocation_index(b);
    ngg_build_streamout_buffer_info(b, info, s->options->has_xfb_prim_query,
                                    lds_scratch_base, tid_in_tg,
                                    gen_prim_per_stream, prim_stride,
@@ -2062,15 +2062,15 @@ ngg_nogs_build_streamout(nir_builder *b, lower_ngg_nogs_state *s)
    nir_if *if_emit = nir_push_if(b, nir_ilt(b, tid_in_tg, emit_prim_per_stream[0]));
    {
       unsigned vtx_lds_stride = (b->shader->num_outputs * 4 + 1) * 4;
-      nir_ssa_def *num_vert_per_prim = nir_load_num_vertices_per_primitive_amd(b);
-      nir_ssa_def *vtx_buffer_idx = nir_imul(b, tid_in_tg, num_vert_per_prim);
+      nir_def *num_vert_per_prim = nir_load_num_vertices_per_primitive_amd(b);
+      nir_def *vtx_buffer_idx = nir_imul(b, tid_in_tg, num_vert_per_prim);
 
       for (unsigned i = 0; i < s->options->num_vertices_per_primitive; i++) {
          nir_if *if_valid_vertex =
             nir_push_if(b, nir_igt_imm(b, num_vert_per_prim, i));
          {
-            nir_ssa_def *vtx_lds_idx = nir_load_var(b, s->gs_vtx_indices_vars[i]);
-            nir_ssa_def *vtx_lds_addr = pervertex_lds_addr(b, vtx_lds_idx, vtx_lds_stride);
+            nir_def *vtx_lds_idx = nir_load_var(b, s->gs_vtx_indices_vars[i]);
+            nir_def *vtx_lds_addr = pervertex_lds_addr(b, vtx_lds_idx, vtx_lds_stride);
             ngg_build_streamout_vertex(b, info, 0, so_buffer, buffer_offsets,
                                        nir_iadd_imm(b, vtx_buffer_idx, i),
                                        vtx_lds_addr, &s->output_types);
@@ -2154,7 +2154,7 @@ ngg_nogs_gather_outputs(nir_builder *b, struct exec_list *cf_list, lower_ngg_nog
          nir_io_semantics sem = nir_intrinsic_io_semantics(intrin);
          unsigned slot = sem.location;
 
-         nir_ssa_def **output;
+         nir_def **output;
          nir_alu_type *type;
          if (slot >= VARYING_SLOT_VAR0_16BIT) {
             unsigned index = slot - VARYING_SLOT_VAR0_16BIT;
@@ -2189,16 +2189,16 @@ ngg_nogs_gather_outputs(nir_builder *b, struct exec_list *cf_list, lower_ngg_nog
 static unsigned
 gather_vs_outputs(nir_builder *b, vs_output *outputs,
                   const uint8_t *param_offsets,
-                  nir_ssa_def *(*data)[4],
-                  nir_ssa_def *(*data_16bit_lo)[4],
-                  nir_ssa_def *(*data_16bit_hi)[4])
+                  nir_def *(*data)[4],
+                  nir_def *(*data_16bit_lo)[4],
+                  nir_def *(*data_16bit_hi)[4])
 {
    unsigned num_outputs = 0;
    u_foreach_bit64 (slot, b->shader->info.outputs_written) {
       if (param_offsets[slot] > AC_EXP_PARAM_OFFSET_31)
          continue;
 
-      nir_ssa_def **output = data[slot];
+      nir_def **output = data[slot];
 
      /* Skip this output if nothing was written to it. */
       if (!output[0] && !output[1] && !output[2] && !output[3])
@@ -2206,7 +2206,7 @@ gather_vs_outputs(nir_builder *b, vs_output *outputs,
 
       outputs[num_outputs].slot = slot;
       for (int i = 0; i < 4; i++) {
-         nir_ssa_def *chan = output[i];
+         nir_def *chan = output[i];
          /* RADV implements 16-bit outputs as 32-bit with VARYING_SLOT_VAR0-31. */
          outputs[num_outputs].chan[i] = chan && chan->bit_size == 16 ? nir_u2u32(b, chan) : chan;
       }
@@ -2218,8 +2218,8 @@ gather_vs_outputs(nir_builder *b, vs_output *outputs,
       if (param_offsets[slot] > AC_EXP_PARAM_OFFSET_31)
          continue;
 
-      nir_ssa_def **output_lo = data_16bit_lo[i];
-      nir_ssa_def **output_hi = data_16bit_hi[i];
+      nir_def **output_lo = data_16bit_lo[i];
+      nir_def **output_hi = data_16bit_hi[i];
 
      /* Skip this output if nothing was written to it. */
       if (!output_lo[0] && !output_lo[1] && !output_lo[2] && !output_lo[3] &&
@@ -2229,10 +2229,10 @@ gather_vs_outputs(nir_builder *b, vs_output *outputs,
       vs_output *output = &outputs[num_outputs++];
       output->slot = slot;
 
-      nir_ssa_def *undef = nir_ssa_undef(b, 1, 16);
+      nir_def *undef = nir_undef(b, 1, 16);
       for (int j = 0; j < 4; j++) {
-         nir_ssa_def *lo = output_lo[j] ? output_lo[j] : undef;
-         nir_ssa_def *hi = output_hi[j] ? output_hi[j] : undef;
+         nir_def *lo = output_lo[j] ? output_lo[j] : undef;
+         nir_def *hi = output_hi[j] ? output_hi[j] : undef;
          if (output_lo[j] || output_hi[j])
             output->chan[j] = nir_pack_32_2x16_split(b, lo, hi);
          else
@@ -2246,7 +2246,7 @@ gather_vs_outputs(nir_builder *b, vs_output *outputs,
 static void
 create_vertex_param_phis(nir_builder *b, unsigned num_outputs, vs_output *outputs)
 {
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32); /* inserted at the start of the shader */
+   nir_def *undef = nir_undef(b, 1, 32); /* inserted at the start of the shader */
 
    for (unsigned i = 0; i < num_outputs; i++) {
       for (unsigned j = 0; j < 4; j++) {
@@ -2257,11 +2257,11 @@ create_vertex_param_phis(nir_builder *b, unsigned num_outputs, vs_output *output
 }
 
 static void
-export_vertex_params_gfx11(nir_builder *b, nir_ssa_def *export_tid, nir_ssa_def *num_export_threads,
+export_vertex_params_gfx11(nir_builder *b, nir_def *export_tid, nir_def *num_export_threads,
                            unsigned num_outputs, vs_output *outputs,
                            const uint8_t *vs_output_param_offset)
 {
-   nir_ssa_def *attr_rsrc = nir_load_ring_attr_amd(b);
+   nir_def *attr_rsrc = nir_load_ring_attr_amd(b);
 
    /* We should always store full vec4s in groups of 8 lanes for the best performance even if
     * some of them are garbage or have unused components, so align the number of export threads
@@ -2273,10 +2273,10 @@ export_vertex_params_gfx11(nir_builder *b, nir_ssa_def *export_tid, nir_ssa_def
    else
       nir_push_if(b, nir_ult(b, export_tid, num_export_threads));
 
-   nir_ssa_def *attr_offset = nir_load_ring_attr_offset_amd(b);
-   nir_ssa_def *vindex = nir_load_local_invocation_index(b);
-   nir_ssa_def *voffset = nir_imm_int(b, 0);
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
+   nir_def *attr_offset = nir_load_ring_attr_offset_amd(b);
+   nir_def *vindex = nir_load_local_invocation_index(b);
+   nir_def *voffset = nir_imm_int(b, 0);
+   nir_def *undef = nir_undef(b, 1, 32);
 
    uint32_t exported_params = 0;
 
@@ -2291,7 +2291,7 @@ export_vertex_params_gfx11(nir_builder *b, nir_ssa_def *export_tid, nir_ssa_def
       if (exported_params & BITFIELD_BIT(offset))
          continue;
 
-      nir_ssa_def *comp[4];
+      nir_def *comp[4];
       for (unsigned j = 0; j < 4; j++)
          comp[j] = outputs[i].chan[j] ? outputs[i].chan[j] : undef;
       nir_store_buffer_amd(b, nir_vec(b, comp, 4), attr_rsrc, voffset, attr_offset, vindex,
@@ -2388,8 +2388,8 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
          /* Allocate export space on wave 0 - confirm to the HW that we want to use all possible space */
          nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, nir_load_subgroup_id(b), 0));
          {
-            nir_ssa_def *vtx_cnt = nir_load_workgroup_num_input_vertices_amd(b);
-            nir_ssa_def *prim_cnt = nir_load_workgroup_num_input_primitives_amd(b);
+            nir_def *vtx_cnt = nir_load_workgroup_num_input_vertices_amd(b);
+            nir_def *prim_cnt = nir_load_workgroup_num_input_primitives_amd(b);
             alloc_vertices_and_primitives(b, vtx_cnt, prim_cnt);
          }
          nir_pop_if(b, if_wave_0);
@@ -2432,15 +2432,15 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
                             .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_shared);
    }
 
-   nir_ssa_def *es_thread =
+   nir_def *es_thread =
       options->can_cull ? nir_load_var(b, es_accepted_var) : has_input_vertex(b);
 
    /* Calculate the bit count here instead of below for lower SGPR usage and better ALU
     * scheduling.
     */
-   nir_ssa_def *num_es_threads = NULL;
+   nir_def *num_es_threads = NULL;
    if (state.options->gfx_level >= GFX11 && options->can_cull) {
-      nir_ssa_def *es_accepted_mask =
+      nir_def *es_accepted_mask =
          nir_ballot(b, 1, options->wave_size, nir_load_var(b, es_accepted_var));
       num_es_threads = nir_bit_count(b, es_accepted_mask);
    }
@@ -2469,7 +2469,7 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
        */
       b->cursor = nir_after_cf_list(&if_es_thread->then_list);
 
-      nir_ssa_def *pos_val = nir_load_var(b, state.position_value_var);
+      nir_def *pos_val = nir_load_var(b, state.position_value_var);
       for (int i = 0; i < 4; i++)
          state.outputs[VARYING_SLOT_POS][i] = nir_channel(b, pos_val, i);
    }
@@ -2597,52 +2597,52 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
  *
  * \return an LDS pointer to type {[N x i32], [4 x i8]}
  */
-static nir_ssa_def *
-ngg_gs_out_vertex_addr(nir_builder *b, nir_ssa_def *out_vtx_idx, lower_ngg_gs_state *s)
+static nir_def *
+ngg_gs_out_vertex_addr(nir_builder *b, nir_def *out_vtx_idx, lower_ngg_gs_state *s)
 {
    unsigned write_stride_2exp = ffs(MAX2(b->shader->info.gs.vertices_out, 1)) - 1;
 
    /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
    if (write_stride_2exp) {
-      nir_ssa_def *row = nir_ushr_imm(b, out_vtx_idx, 5);
-      nir_ssa_def *swizzle = nir_iand_imm(b, row, (1u << write_stride_2exp) - 1u);
+      nir_def *row = nir_ushr_imm(b, out_vtx_idx, 5);
+      nir_def *swizzle = nir_iand_imm(b, row, (1u << write_stride_2exp) - 1u);
       out_vtx_idx = nir_ixor(b, out_vtx_idx, swizzle);
    }
 
-   nir_ssa_def *out_vtx_offs = nir_imul_imm(b, out_vtx_idx, s->lds_bytes_per_gs_out_vertex);
+   nir_def *out_vtx_offs = nir_imul_imm(b, out_vtx_idx, s->lds_bytes_per_gs_out_vertex);
    return nir_iadd_nuw(b, out_vtx_offs, s->lds_addr_gs_out_vtx);
 }
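
/* Hypothetical worked example: with gs.vertices_out = 12 (= 2^2 * 3),
 * write_stride_2exp = ffs(12) - 1 = 2. For out_vtx_idx = 37:
 *   row     = 37 >> 5  = 1
 *   swizzle = 1 & 0b11 = 1
 *   idx     = 37 ^ 1   = 36
 *   addr    = 36 * lds_bytes_per_gs_out_vertex + lds_addr_gs_out_vtx
 * The XOR swizzle spreads power-of-two-strided writes across LDS banks.
 */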
 
-static nir_ssa_def *
-ngg_gs_emit_vertex_addr(nir_builder *b, nir_ssa_def *gs_vtx_idx, lower_ngg_gs_state *s)
+static nir_def *
+ngg_gs_emit_vertex_addr(nir_builder *b, nir_def *gs_vtx_idx, lower_ngg_gs_state *s)
 {
-   nir_ssa_def *tid_in_tg = nir_load_local_invocation_index(b);
-   nir_ssa_def *gs_out_vtx_base = nir_imul_imm(b, tid_in_tg, b->shader->info.gs.vertices_out);
-   nir_ssa_def *out_vtx_idx = nir_iadd_nuw(b, gs_out_vtx_base, gs_vtx_idx);
+   nir_def *tid_in_tg = nir_load_local_invocation_index(b);
+   nir_def *gs_out_vtx_base = nir_imul_imm(b, tid_in_tg, b->shader->info.gs.vertices_out);
+   nir_def *out_vtx_idx = nir_iadd_nuw(b, gs_out_vtx_base, gs_vtx_idx);
 
    return ngg_gs_out_vertex_addr(b, out_vtx_idx, s);
 }
 
 static void
-ngg_gs_clear_primflags(nir_builder *b, nir_ssa_def *num_vertices, unsigned stream, lower_ngg_gs_state *s)
+ngg_gs_clear_primflags(nir_builder *b, nir_def *num_vertices, unsigned stream, lower_ngg_gs_state *s)
 {
    char name[32];
    snprintf(name, sizeof(name), "clear_primflag_idx_%u", stream);
    nir_variable *clear_primflag_idx_var = nir_local_variable_create(b->impl, glsl_uint_type(), name);
 
-   nir_ssa_def *zero_u8 = nir_imm_zero(b, 1, 8);
+   nir_def *zero_u8 = nir_imm_zero(b, 1, 8);
    nir_store_var(b, clear_primflag_idx_var, num_vertices, 0x1u);
 
    nir_loop *loop = nir_push_loop(b);
    {
-      nir_ssa_def *clear_primflag_idx = nir_load_var(b, clear_primflag_idx_var);
+      nir_def *clear_primflag_idx = nir_load_var(b, clear_primflag_idx_var);
       nir_if *if_break = nir_push_if(b, nir_uge_imm(b, clear_primflag_idx, b->shader->info.gs.vertices_out));
       {
          nir_jump(b, nir_jump_break);
       }
       nir_push_else(b, if_break);
       {
-         nir_ssa_def *emit_vtx_addr = ngg_gs_emit_vertex_addr(b, clear_primflag_idx, s);
+         nir_def *emit_vtx_addr = ngg_gs_emit_vertex_addr(b, clear_primflag_idx, s);
          nir_store_shared(b, zero_u8, emit_vtx_addr, .base = s->lds_offs_primflags + stream);
          nir_store_var(b, clear_primflag_idx_var, nir_iadd_imm_nuw(b, clear_primflag_idx, 1), 0x1u);
       }
@@ -2663,7 +2663,7 @@ lower_ngg_gs_store_output(nir_builder *b, nir_intrinsic_instr *intrin, lower_ngg
 
    unsigned location = io_sem.location;
 
-   nir_ssa_def *store_val = intrin->src[0].ssa;
+   nir_def *store_val = intrin->src[0].ssa;
    nir_alu_type src_type = nir_intrinsic_src_type(intrin);
 
    /* Small bitsize components consume the same amount of space as 32-bit components,
@@ -2675,7 +2675,7 @@ lower_ngg_gs_store_output(nir_builder *b, nir_intrinsic_instr *intrin, lower_ngg
    assert(nir_alu_type_get_type_size(src_type) == store_val->bit_size);
 
    /* Get corresponding output variable and usage info. */
-   nir_ssa_def **output;
+   nir_def **output;
    nir_alu_type *type;
    gs_output_info *info;
    if (location >= VARYING_SLOT_VAR0_16BIT) {
@@ -2756,24 +2756,24 @@ lower_ngg_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *intri
       return true;
    }
 
-   nir_ssa_def *gs_emit_vtx_idx = intrin->src[0].ssa;
-   nir_ssa_def *current_vtx_per_prim = intrin->src[1].ssa;
-   nir_ssa_def *gs_emit_vtx_addr = ngg_gs_emit_vertex_addr(b, gs_emit_vtx_idx, s);
+   nir_def *gs_emit_vtx_idx = intrin->src[0].ssa;
+   nir_def *current_vtx_per_prim = intrin->src[1].ssa;
+   nir_def *gs_emit_vtx_addr = ngg_gs_emit_vertex_addr(b, gs_emit_vtx_idx, s);
 
    u_foreach_bit64(slot, b->shader->info.outputs_written) {
       unsigned packed_location = util_bitcount64((b->shader->info.outputs_written & BITFIELD64_MASK(slot)));
       gs_output_info *info = &s->output_info[slot];
-      nir_ssa_def **output = s->outputs[slot];
+      nir_def **output = s->outputs[slot];
 
       unsigned mask = gs_output_component_mask_with_stream(info, stream);
       while (mask) {
          int start, count;
          u_bit_scan_consecutive_range(&mask, &start, &count);
-         nir_ssa_def *values[4] = {0};
+         nir_def *values[4] = {0};
          for (int c = start; c < start + count; ++c) {
             if (!output[c]) {
               /* Nothing wrote to this output before. */
-               values[c - start] = nir_ssa_undef(b, 1, 32);
+               values[c - start] = nir_undef(b, 1, 32);
                continue;
             }
 
@@ -2781,7 +2781,7 @@ lower_ngg_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *intri
             values[c - start] = nir_u2uN(b, output[c], 32);
          }
 
-         nir_ssa_def *store_val = nir_vec(b, values, (unsigned)count);
+         nir_def *store_val = nir_vec(b, values, (unsigned)count);
          nir_store_shared(b, store_val, gs_emit_vtx_addr,
                           .base = packed_location * 16 + start * 4,
                           .align_mul = 4);
@@ -2801,22 +2801,22 @@ lower_ngg_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *intri
       unsigned mask_hi = gs_output_component_mask_with_stream(s->output_info_16bit_hi + slot, stream);
       unsigned mask = mask_lo | mask_hi;
 
-      nir_ssa_def **output_lo = s->outputs_16bit_lo[slot];
-      nir_ssa_def **output_hi = s->outputs_16bit_hi[slot];
-      nir_ssa_def *undef = nir_ssa_undef(b, 1, 16);
+      nir_def **output_lo = s->outputs_16bit_lo[slot];
+      nir_def **output_hi = s->outputs_16bit_hi[slot];
+      nir_def *undef = nir_undef(b, 1, 16);
 
       while (mask) {
          int start, count;
          u_bit_scan_consecutive_range(&mask, &start, &count);
-         nir_ssa_def *values[4] = {0};
+         nir_def *values[4] = {0};
          for (int c = start; c < start + count; ++c) {
-            nir_ssa_def *lo = output_lo[c] ? output_lo[c] : undef;
-            nir_ssa_def *hi = output_hi[c] ? output_hi[c] : undef;
+            nir_def *lo = output_lo[c] ? output_lo[c] : undef;
+            nir_def *hi = output_hi[c] ? output_hi[c] : undef;
 
             values[c - start] = nir_pack_32_2x16_split(b, lo, hi);
          }
 
-         nir_ssa_def *store_val = nir_vec(b, values, (unsigned)count);
+         nir_def *store_val = nir_vec(b, values, (unsigned)count);
          nir_store_shared(b, store_val, gs_emit_vtx_addr,
                           .base = packed_location * 16 + start * 4,
                           .align_mul = 4);
@@ -2834,18 +2834,18 @@ lower_ngg_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *intri
     * - bit 2: whether vertex is live (if culling is enabled: set after culling, otherwise always 1)
     */
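   /* Hypothetical example: a 5-vertex triangle strip with culling disabled
    * (live bit always set):
    *    current_vtx_per_prim:   0      1      2      3      4
    *    completes_prim:         0      0      1      1      1
    *    odd (bit 1):            -      -      0      1      0
    *    primflag:             0b100  0b100  0b101  0b111  0b101
    */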
 
-   nir_ssa_def *vertex_live_flag =
+   nir_def *vertex_live_flag =
       !stream && s->options->can_cull
          ? nir_ishl_imm(b, nir_b2i32(b, nir_inot(b, nir_load_cull_any_enabled_amd(b))), 2)
          : nir_imm_int(b, 0b100);
 
-   nir_ssa_def *completes_prim = nir_ige_imm(b, current_vtx_per_prim, s->num_vertices_per_primitive - 1);
-   nir_ssa_def *complete_flag = nir_b2i32(b, completes_prim);
+   nir_def *completes_prim = nir_ige_imm(b, current_vtx_per_prim, s->num_vertices_per_primitive - 1);
+   nir_def *complete_flag = nir_b2i32(b, completes_prim);
 
-   nir_ssa_def *prim_flag = nir_ior(b, vertex_live_flag, complete_flag);
+   nir_def *prim_flag = nir_ior(b, vertex_live_flag, complete_flag);
    if (s->num_vertices_per_primitive == 3) {
-      nir_ssa_def *odd = nir_iand(b, current_vtx_per_prim, complete_flag);
-      nir_ssa_def *odd_flag = nir_ishl_imm(b, odd, 1);
+      nir_def *odd = nir_iand(b, current_vtx_per_prim, complete_flag);
+      nir_def *odd_flag = nir_ishl_imm(b, odd, 1);
       prim_flag = nir_ior(b, prim_flag, odd_flag);
    }
 
@@ -2918,16 +2918,16 @@ lower_ngg_gs_intrinsics(nir_shader *shader, lower_ngg_gs_state *s)
 }
 
 static void
-ngg_gs_export_primitives(nir_builder *b, nir_ssa_def *max_num_out_prims, nir_ssa_def *tid_in_tg,
-                         nir_ssa_def *exporter_tid_in_tg, nir_ssa_def *primflag_0,
+ngg_gs_export_primitives(nir_builder *b, nir_def *max_num_out_prims, nir_def *tid_in_tg,
+                         nir_def *exporter_tid_in_tg, nir_def *primflag_0,
                          lower_ngg_gs_state *s)
 {
    nir_if *if_prim_export_thread = nir_push_if(b, nir_ilt(b, tid_in_tg, max_num_out_prims));
 
    /* Only bit 0 matters here - set it to 1 when the primitive should be null */
-   nir_ssa_def *is_null_prim = nir_ixor(b, primflag_0, nir_imm_int(b, -1u));
+   nir_def *is_null_prim = nir_ixor(b, primflag_0, nir_imm_int(b, -1u));
 
-   nir_ssa_def *vtx_indices[3] = {0};
+   nir_def *vtx_indices[3] = {0};
    vtx_indices[s->num_vertices_per_primitive - 1] = exporter_tid_in_tg;
    if (s->num_vertices_per_primitive >= 2)
       vtx_indices[s->num_vertices_per_primitive - 2] = nir_iadd_imm(b, exporter_tid_in_tg, -1);
@@ -2940,9 +2940,9 @@ ngg_gs_export_primitives(nir_builder *b, nir_ssa_def *max_num_out_prims, nir_ssa
       * make sure the vertex order is such that the front/back face orientation is correct and the provoking vertex is kept.
        */
 
-      nir_ssa_def *is_odd = nir_ubfe_imm(b, primflag_0, 1, 1);
-      nir_ssa_def *provoking_vertex_index = nir_load_provoking_vtx_in_prim_amd(b);
-      nir_ssa_def *provoking_vertex_first = nir_ieq_imm(b, provoking_vertex_index, 0);
+      nir_def *is_odd = nir_ubfe_imm(b, primflag_0, 1, 1);
+      nir_def *provoking_vertex_index = nir_load_provoking_vtx_in_prim_amd(b);
+      nir_def *provoking_vertex_first = nir_ieq_imm(b, provoking_vertex_index, 0);
 
       vtx_indices[0] = nir_bcsel(b, provoking_vertex_first, vtx_indices[0],
                                  nir_iadd(b, vtx_indices[0], is_odd));
@@ -2953,25 +2953,25 @@ ngg_gs_export_primitives(nir_builder *b, nir_ssa_def *max_num_out_prims, nir_ssa
                                  nir_isub(b, vtx_indices[2], is_odd), vtx_indices[2]);
    }
 
-   nir_ssa_def *arg = emit_pack_ngg_prim_exp_arg(b, s->num_vertices_per_primitive, vtx_indices,
+   nir_def *arg = emit_pack_ngg_prim_exp_arg(b, s->num_vertices_per_primitive, vtx_indices,
                                                  is_null_prim);
    ac_nir_export_primitive(b, arg);
    nir_pop_if(b, if_prim_export_thread);
 }
 
 static void
-ngg_gs_export_vertices(nir_builder *b, nir_ssa_def *max_num_out_vtx, nir_ssa_def *tid_in_tg,
-                       nir_ssa_def *out_vtx_lds_addr, lower_ngg_gs_state *s)
+ngg_gs_export_vertices(nir_builder *b, nir_def *max_num_out_vtx, nir_def *tid_in_tg,
+                       nir_def *out_vtx_lds_addr, lower_ngg_gs_state *s)
 {
    nir_if *if_vtx_export_thread = nir_push_if(b, nir_ilt(b, tid_in_tg, max_num_out_vtx));
-   nir_ssa_def *exported_out_vtx_lds_addr = out_vtx_lds_addr;
+   nir_def *exported_out_vtx_lds_addr = out_vtx_lds_addr;
 
    if (!s->output_compile_time_known) {
       /* Vertex compaction.
        * The current thread will export a vertex that was live in another invocation.
        * Load the index of the vertex that the current thread will have to export.
        */
-      nir_ssa_def *exported_vtx_idx = nir_load_shared(b, 1, 8, out_vtx_lds_addr, .base = s->lds_offs_primflags + 1);
+      nir_def *exported_vtx_idx = nir_load_shared(b, 1, 8, out_vtx_lds_addr, .base = s->lds_offs_primflags + 1);
       exported_out_vtx_lds_addr = ngg_gs_out_vertex_addr(b, nir_u2u32(b, exported_vtx_idx), s);
    }
 
@@ -2985,7 +2985,7 @@ ngg_gs_export_vertices(nir_builder *b, nir_ssa_def *max_num_out_vtx, nir_ssa_def
       while (mask) {
          int start, count;
          u_bit_scan_consecutive_range(&mask, &start, &count);
-         nir_ssa_def *load =
+         nir_def *load =
             nir_load_shared(b, count, 32, exported_out_vtx_lds_addr,
                             .base = packed_location * 16 + start * 4,
                             .align_mul = 4);
@@ -3010,13 +3010,13 @@ ngg_gs_export_vertices(nir_builder *b, nir_ssa_def *max_num_out_vtx, nir_ssa_def
       while (mask) {
          int start, count;
          u_bit_scan_consecutive_range(&mask, &start, &count);
-         nir_ssa_def *load =
+         nir_def *load =
             nir_load_shared(b, count, 32, exported_out_vtx_lds_addr,
                             .base = packed_location * 16 + start * 4,
                             .align_mul = 4);
 
          for (int j = 0; j < count; j++) {
-            nir_ssa_def *val = nir_channel(b, load, j);
+            nir_def *val = nir_channel(b, load, j);
             unsigned comp = start + j;
 
             if (mask_lo & BITFIELD_BIT(comp))
@@ -3068,8 +3068,8 @@ ngg_gs_export_vertices(nir_builder *b, nir_ssa_def *max_num_out_vtx, nir_ssa_def
 }
 
 static void
-ngg_gs_setup_vertex_compaction(nir_builder *b, nir_ssa_def *vertex_live, nir_ssa_def *tid_in_tg,
-                               nir_ssa_def *exporter_tid_in_tg, lower_ngg_gs_state *s)
+ngg_gs_setup_vertex_compaction(nir_builder *b, nir_def *vertex_live, nir_def *tid_in_tg,
+                               nir_def *exporter_tid_in_tg, lower_ngg_gs_state *s)
 {
    assert(vertex_live->bit_size == 1);
    nir_if *if_vertex_live = nir_push_if(b, vertex_live);
@@ -3079,22 +3079,22 @@ ngg_gs_setup_vertex_compaction(nir_builder *b, nir_ssa_def *vertex_live, nir_ssa
        * We reuse stream 1 of the primitive flag of the other thread's vertex for storing this.
        */
 
-      nir_ssa_def *exporter_lds_addr = ngg_gs_out_vertex_addr(b, exporter_tid_in_tg, s);
-      nir_ssa_def *tid_in_tg_u8 = nir_u2u8(b, tid_in_tg);
+      nir_def *exporter_lds_addr = ngg_gs_out_vertex_addr(b, exporter_tid_in_tg, s);
+      nir_def *tid_in_tg_u8 = nir_u2u8(b, tid_in_tg);
       nir_store_shared(b, tid_in_tg_u8, exporter_lds_addr, .base = s->lds_offs_primflags + 1);
    }
    nir_pop_if(b, if_vertex_live);
 }
 
-static nir_ssa_def *
-ngg_gs_load_out_vtx_primflag(nir_builder *b, unsigned stream, nir_ssa_def *tid_in_tg,
-                             nir_ssa_def *vtx_lds_addr, nir_ssa_def *max_num_out_vtx,
+static nir_def *
+ngg_gs_load_out_vtx_primflag(nir_builder *b, unsigned stream, nir_def *tid_in_tg,
+                             nir_def *vtx_lds_addr, nir_def *max_num_out_vtx,
                              lower_ngg_gs_state *s)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    nir_if *if_outvtx_thread = nir_push_if(b, nir_ilt(b, tid_in_tg, max_num_out_vtx));
-   nir_ssa_def *primflag = nir_load_shared(b, 1, 8, vtx_lds_addr,
+   nir_def *primflag = nir_load_shared(b, 1, 8, vtx_lds_addr,
                                            .base = s->lds_offs_primflags + stream);
    primflag = nir_u2u32(b, primflag);
    nir_pop_if(b, if_outvtx_thread);
@@ -3103,19 +3103,19 @@ ngg_gs_load_out_vtx_primflag(nir_builder *b, unsigned stream, nir_ssa_def *tid_i
 }
 
 static void
-ngg_gs_out_prim_all_vtxptr(nir_builder *b, nir_ssa_def *last_vtxidx, nir_ssa_def *last_vtxptr,
-                           nir_ssa_def *last_vtx_primflag, lower_ngg_gs_state *s,
-                           nir_ssa_def *vtxptr[3])
+ngg_gs_out_prim_all_vtxptr(nir_builder *b, nir_def *last_vtxidx, nir_def *last_vtxptr,
+                           nir_def *last_vtx_primflag, lower_ngg_gs_state *s,
+                           nir_def *vtxptr[3])
 {
    unsigned last_vtx = s->num_vertices_per_primitive - 1;
   vtxptr[last_vtx] = last_vtxptr;
 
    bool primitive_is_triangle = s->num_vertices_per_primitive == 3;
-   nir_ssa_def *is_odd = primitive_is_triangle ?
+   nir_def *is_odd = primitive_is_triangle ?
       nir_ubfe_imm(b, last_vtx_primflag, 1, 1) : NULL;
 
    for (unsigned i = 0; i < s->num_vertices_per_primitive - 1; i++) {
-      nir_ssa_def *vtxidx = nir_iadd_imm(b, last_vtxidx, -(last_vtx - i));
+      nir_def *vtxidx = nir_iadd_imm(b, last_vtxidx, -(last_vtx - i));
 
       /* Need to swap vertex 0 and vertex 1 when vertex 2 index is odd to keep
        * CW/CCW order for correct front/back face culling.
@@ -3127,9 +3127,9 @@ ngg_gs_out_prim_all_vtxptr(nir_builder *b, nir_ssa_def *last_vtxidx, nir_ssa_def
    }
 }
 
-static nir_ssa_def *
-ngg_gs_cull_primitive(nir_builder *b, nir_ssa_def *tid_in_tg, nir_ssa_def *max_vtxcnt,
-                      nir_ssa_def *out_vtx_lds_addr, nir_ssa_def *out_vtx_primflag_0,
+static nir_def *
+ngg_gs_cull_primitive(nir_builder *b, nir_def *tid_in_tg, nir_def *max_vtxcnt,
+                      nir_def *out_vtx_lds_addr, nir_def *out_vtx_primflag_0,
                       lower_ngg_gs_state *s)
 {
   /* Point culling is not enabled here; if it were, this function could be further optimized. */
@@ -3140,22 +3140,22 @@ ngg_gs_cull_primitive(nir_builder *b, nir_ssa_def *tid_in_tg, nir_ssa_def *max_v
    nir_store_var(b, primflag_var, out_vtx_primflag_0, 1);
 
   /* The last bit of the primflag indicates whether this is the final vertex of a primitive. */
-   nir_ssa_def *is_end_prim_vtx = nir_i2b(b, nir_iand_imm(b, out_vtx_primflag_0, 1));
-   nir_ssa_def *has_output_vertex = nir_ilt(b, tid_in_tg, max_vtxcnt);
-   nir_ssa_def *prim_enable = nir_iand(b, is_end_prim_vtx, has_output_vertex);
+   nir_def *is_end_prim_vtx = nir_i2b(b, nir_iand_imm(b, out_vtx_primflag_0, 1));
+   nir_def *has_output_vertex = nir_ilt(b, tid_in_tg, max_vtxcnt);
+   nir_def *prim_enable = nir_iand(b, is_end_prim_vtx, has_output_vertex);
 
    nir_if *if_prim_enable = nir_push_if(b, prim_enable);
    {
       /* Calculate the LDS address of every vertex in the current primitive. */
-      nir_ssa_def *vtxptr[3];
+      nir_def *vtxptr[3];
       ngg_gs_out_prim_all_vtxptr(b, tid_in_tg, out_vtx_lds_addr, out_vtx_primflag_0, s, vtxptr);
 
       /* Load the positions from LDS. */
-      nir_ssa_def *pos[3][4];
+      nir_def *pos[3][4];
       for (unsigned i = 0; i < s->num_vertices_per_primitive; i++) {
          /* VARYING_SLOT_POS == 0, so base won't count packed location */
          pos[i][3] = nir_load_shared(b, 1, 32, vtxptr[i], .base = 12); /* W */
-         nir_ssa_def *xy = nir_load_shared(b, 2, 32, vtxptr[i], .base = 0, .align_mul = 4);
+         nir_def *xy = nir_load_shared(b, 2, 32, vtxptr[i], .base = 0, .align_mul = 4);
          pos[i][0] = nir_channel(b, xy, 0);
          pos[i][1] = nir_channel(b, xy, 1);
 
@@ -3164,9 +3164,9 @@ ngg_gs_cull_primitive(nir_builder *b, nir_ssa_def *tid_in_tg, nir_ssa_def *max_v
       }
 
       /* TODO: support clipdist culling in GS */
-      nir_ssa_def *accepted_by_clipdist = nir_imm_true(b);
+      nir_def *accepted_by_clipdist = nir_imm_true(b);
 
-      nir_ssa_def *accepted = ac_nir_cull_primitive(
+      nir_def *accepted = ac_nir_cull_primitive(
          b, accepted_by_clipdist, pos, s->num_vertices_per_primitive, NULL, NULL);
 
       nir_if *if_rejected = nir_push_if(b, nir_inot(b, accepted));
@@ -3188,30 +3188,30 @@ ngg_gs_cull_primitive(nir_builder *b, nir_ssa_def *tid_in_tg, nir_ssa_def *max_v
                          .memory_modes = nir_var_mem_shared);
 
   /* Only dead vertices need a chance to be revived. */
-   nir_ssa_def *vtx_is_dead = nir_ieq_imm(b, nir_load_var(b, primflag_var), 0);
-   nir_ssa_def *vtx_update_primflag = nir_iand(b, vtx_is_dead, has_output_vertex);
+   nir_def *vtx_is_dead = nir_ieq_imm(b, nir_load_var(b, primflag_var), 0);
+   nir_def *vtx_update_primflag = nir_iand(b, vtx_is_dead, has_output_vertex);
    nir_if *if_update_primflag = nir_push_if(b, vtx_update_primflag);
    {
      /* Get the succeeding vertices' primflags to determine this vertex's liveness. */
       for (unsigned i = 1; i < s->num_vertices_per_primitive; i++) {
-         nir_ssa_def *vtxidx = nir_iadd_imm(b, tid_in_tg, i);
-         nir_ssa_def *not_overflow = nir_ilt(b, vtxidx, max_vtxcnt);
+         nir_def *vtxidx = nir_iadd_imm(b, tid_in_tg, i);
+         nir_def *not_overflow = nir_ilt(b, vtxidx, max_vtxcnt);
          nir_if *if_not_overflow = nir_push_if(b, not_overflow);
          {
-            nir_ssa_def *vtxptr = ngg_gs_out_vertex_addr(b, vtxidx, s);
-            nir_ssa_def *vtx_primflag =
+            nir_def *vtxptr = ngg_gs_out_vertex_addr(b, vtxidx, s);
+            nir_def *vtx_primflag =
                nir_load_shared(b, 1, 8, vtxptr, .base = s->lds_offs_primflags);
             vtx_primflag = nir_u2u32(b, vtx_primflag);
 
            /* If a succeeding vertex is a live end-of-primitive vertex, we need to set
             * the current thread's vertex liveness flag (bit 2).
             */
-            nir_ssa_def *has_prim = nir_i2b(b, nir_iand_imm(b, vtx_primflag, 1));
-            nir_ssa_def *vtx_live_flag =
+            nir_def *has_prim = nir_i2b(b, nir_iand_imm(b, vtx_primflag, 1));
+            nir_def *vtx_live_flag =
                nir_bcsel(b, has_prim, nir_imm_int(b, 0b100), nir_imm_int(b, 0));
 
             /* update this vertex's primflag */
-            nir_ssa_def *primflag = nir_load_var(b, primflag_var);
+            nir_def *primflag = nir_load_var(b, primflag_var);
             primflag = nir_ior(b, primflag, vtx_live_flag);
             nir_store_var(b, primflag_var, primflag, 1);
          }
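
Taken together, these hunks use a small per-vertex primflag byte kept in LDS. A plain-C summary of the bits as this diff uses them (a sketch of the visible encoding, not an authoritative definition):

   #include <stdbool.h>
   #include <stdint.h>

   /* Bit 0: this vertex completes (ends) a primitive. */
   static bool primflag_ends_prim(uint32_t f) { return f & 0x1; }

   /* Bit 1: triangle parity, used for the CW/CCW vertex swap. */
   static bool primflag_is_odd(uint32_t f)    { return (f >> 1) & 0x1; }

   /* Bit 2: liveness, set when a succeeding vertex still ends a live primitive. */
   static bool primflag_is_live(uint32_t f)   { return f & 0x4; }
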
@@ -3228,13 +3228,13 @@ ngg_gs_build_streamout(nir_builder *b, lower_ngg_gs_state *s)
 {
    nir_xfb_info *info = b->shader->xfb_info;
 
-   nir_ssa_def *tid_in_tg = nir_load_local_invocation_index(b);
-   nir_ssa_def *max_vtxcnt = nir_load_workgroup_num_input_vertices_amd(b);
-   nir_ssa_def *out_vtx_lds_addr = ngg_gs_out_vertex_addr(b, tid_in_tg, s);
-   nir_ssa_def *prim_live[4] = {0};
-   nir_ssa_def *gen_prim[4] = {0};
-   nir_ssa_def *export_seq[4] = {0};
-   nir_ssa_def *out_vtx_primflag[4] = {0};
+   nir_def *tid_in_tg = nir_load_local_invocation_index(b);
+   nir_def *max_vtxcnt = nir_load_workgroup_num_input_vertices_amd(b);
+   nir_def *out_vtx_lds_addr = ngg_gs_out_vertex_addr(b, tid_in_tg, s);
+   nir_def *prim_live[4] = {0};
+   nir_def *gen_prim[4] = {0};
+   nir_def *export_seq[4] = {0};
+   nir_def *out_vtx_primflag[4] = {0};
    for (unsigned stream = 0; stream < 4; stream++) {
       if (!(info->streams_written & BITFIELD_BIT(stream)))
          continue;
@@ -3248,7 +3248,7 @@ ngg_gs_build_streamout(nir_builder *b, lower_ngg_gs_state *s)
       prim_live[stream] = nir_i2b(b, nir_iand_imm(b, out_vtx_primflag[stream], 1));
 
       unsigned scratch_stride = ALIGN(s->max_num_waves, 4);
-      nir_ssa_def *scratch_base =
+      nir_def *scratch_base =
          nir_iadd_imm(b, s->lds_addr_gs_scratch, stream * scratch_stride);
 
      /* We want to export primitives to the streamout buffer in sequence,
@@ -3282,10 +3282,10 @@ ngg_gs_build_streamout(nir_builder *b, lower_ngg_gs_state *s)
                       .memory_modes = nir_var_mem_shared);
 
    /* Get global buffer offset where this workgroup will stream out data to. */
-   nir_ssa_def *emit_prim[4] = {0};
-   nir_ssa_def *buffer_offsets[4] = {0};
-   nir_ssa_def *so_buffer[4] = {0};
-   nir_ssa_def *prim_stride[4] = {0};
+   nir_def *emit_prim[4] = {0};
+   nir_def *buffer_offsets[4] = {0};
+   nir_def *so_buffer[4] = {0};
+   nir_def *prim_stride[4] = {0};
    ngg_build_streamout_buffer_info(b, info, s->options->has_xfb_prim_query,
                                    s->lds_addr_gs_scratch, tid_in_tg, gen_prim,
                                    prim_stride, so_buffer, buffer_offsets, emit_prim);
@@ -3294,15 +3294,15 @@ ngg_gs_build_streamout(nir_builder *b, lower_ngg_gs_state *s)
       if (!(info->streams_written & BITFIELD_BIT(stream)))
          continue;
 
-      nir_ssa_def *can_emit = nir_ilt(b, export_seq[stream], emit_prim[stream]);
+      nir_def *can_emit = nir_ilt(b, export_seq[stream], emit_prim[stream]);
       nir_if *if_emit = nir_push_if(b, nir_iand(b, can_emit, prim_live[stream]));
       {
          /* Get streamout buffer vertex index for the first vertex of this primitive. */
-         nir_ssa_def *vtx_buffer_idx =
+         nir_def *vtx_buffer_idx =
             nir_imul_imm(b, export_seq[stream], s->num_vertices_per_primitive);
 
         /* Get the LDS address of each vertex of this primitive. */
-         nir_ssa_def *exported_vtx_lds_addr[3];
+         nir_def *exported_vtx_lds_addr[3];
          ngg_gs_out_prim_all_vtxptr(b, tid_in_tg, out_vtx_lds_addr,
                                     out_vtx_primflag[stream], s,
                                     exported_vtx_lds_addr);
@@ -3323,10 +3323,10 @@ ngg_gs_build_streamout(nir_builder *b, lower_ngg_gs_state *s)
 static void
 ngg_gs_finale(nir_builder *b, lower_ngg_gs_state *s)
 {
-   nir_ssa_def *tid_in_tg = nir_load_local_invocation_index(b);
-   nir_ssa_def *max_vtxcnt = nir_load_workgroup_num_input_vertices_amd(b);
-   nir_ssa_def *max_prmcnt = max_vtxcnt; /* They are currently practically the same; both RADV and RadeonSI do this. */
-   nir_ssa_def *out_vtx_lds_addr = ngg_gs_out_vertex_addr(b, tid_in_tg, s);
+   nir_def *tid_in_tg = nir_load_local_invocation_index(b);
+   nir_def *max_vtxcnt = nir_load_workgroup_num_input_vertices_amd(b);
+   nir_def *max_prmcnt = max_vtxcnt; /* They are currently practically the same; both RADV and RadeonSI do this. */
+   nir_def *out_vtx_lds_addr = ngg_gs_out_vertex_addr(b, tid_in_tg, s);
 
    if (s->output_compile_time_known) {
       /* When the output is compile-time known, the GS writes all possible vertices and primitives it can.
@@ -3339,7 +3339,7 @@ ngg_gs_finale(nir_builder *b, lower_ngg_gs_state *s)
 
   /* Workgroup barrier already emitted; we can assume all GS output stores are done by now. */
 
-   nir_ssa_def *out_vtx_primflag_0 = ngg_gs_load_out_vtx_primflag(b, 0, tid_in_tg, out_vtx_lds_addr, max_vtxcnt, s);
+   nir_def *out_vtx_primflag_0 = ngg_gs_load_out_vtx_primflag(b, 0, tid_in_tg, out_vtx_lds_addr, max_vtxcnt, s);
 
    if (s->output_compile_time_known) {
       ngg_gs_export_primitives(b, max_vtxcnt, tid_in_tg, tid_in_tg, out_vtx_primflag_0, s);
@@ -3352,7 +3352,7 @@ ngg_gs_finale(nir_builder *b, lower_ngg_gs_state *s)
       nir_if *if_cull_en = nir_push_if(b, nir_load_cull_any_enabled_amd(b));
 
      /* The culling code will update the primflag. */
-      nir_ssa_def *updated_primflag =
+      nir_def *updated_primflag =
          ngg_gs_cull_primitive(b, tid_in_tg, max_vtxcnt, out_vtx_lds_addr,
                                out_vtx_primflag_0, s);
 
@@ -3366,15 +3366,15 @@ ngg_gs_finale(nir_builder *b, lower_ngg_gs_state *s)
    * requires that the invocations that export vertices are packed (i.e. compact).
     * To ensure this, we need to repack invocations that have a live vertex.
     */
-   nir_ssa_def *vertex_live = nir_ine_imm(b, out_vtx_primflag_0, 0);
+   nir_def *vertex_live = nir_ine_imm(b, out_vtx_primflag_0, 0);
    wg_repack_result rep = repack_invocations_in_workgroup(b, vertex_live, s->lds_addr_gs_scratch,
                                                           s->max_num_waves, s->options->wave_size);
 
-   nir_ssa_def *workgroup_num_vertices = rep.num_repacked_invocations;
-   nir_ssa_def *exporter_tid_in_tg = rep.repacked_invocation_index;
+   nir_def *workgroup_num_vertices = rep.num_repacked_invocations;
+   nir_def *exporter_tid_in_tg = rep.repacked_invocation_index;
 
    /* When the workgroup emits 0 total vertices, we also must export 0 primitives (otherwise the HW can hang). */
-   nir_ssa_def *any_output = nir_ine_imm(b, workgroup_num_vertices, 0);
+   nir_def *any_output = nir_ine_imm(b, workgroup_num_vertices, 0);
    max_prmcnt = nir_bcsel(b, any_output, max_prmcnt, nir_imm_int(b, 0));
 
    /* Allocate export space. We currently don't compact primitives, just use the maximum number. */
@@ -3541,8 +3541,8 @@ ac_ngg_get_scratch_lds_size(gl_shader_stage stage,
 
 static void
 ms_store_prim_indices(nir_builder *b,
-                      nir_ssa_def *val,
-                      nir_ssa_def *offset_src,
+                      nir_def *val,
+                      nir_def *offset_src,
                       lower_ngg_ms_state *s)
 {
    assert(val->num_components <= 3);
@@ -3559,9 +3559,9 @@ ms_store_prim_indices(nir_builder *b,
    nir_store_shared(b, nir_u2u8(b, val), offset_src, .base = s->layout.lds.indices_addr);
 }
 
-static nir_ssa_def *
+static nir_def *
 ms_load_prim_indices(nir_builder *b,
-                     nir_ssa_def *offset_src,
+                     nir_def *offset_src,
                      lower_ngg_ms_state *s)
 {
    if (!offset_src)
@@ -3572,25 +3572,25 @@ ms_load_prim_indices(nir_builder *b,
 
 static void
 ms_store_num_prims(nir_builder *b,
-                   nir_ssa_def *store_val,
+                   nir_def *store_val,
                    lower_ngg_ms_state *s)
 {
-   nir_ssa_def *addr = nir_imm_int(b, 0);
+   nir_def *addr = nir_imm_int(b, 0);
    nir_store_shared(b, nir_u2u32(b, store_val), addr, .base = s->layout.lds.workgroup_info_addr + lds_ms_num_prims);
 }
 
-static nir_ssa_def *
+static nir_def *
 ms_load_num_prims(nir_builder *b,
                   lower_ngg_ms_state *s)
 {
-   nir_ssa_def *addr = nir_imm_int(b, 0);
+   nir_def *addr = nir_imm_int(b, 0);
    return nir_load_shared(b, 1, 32, addr, .base = s->layout.lds.workgroup_info_addr + lds_ms_num_prims);
 }
 
 static void
 ms_store_cull_flag(nir_builder *b,
-                   nir_ssa_def *val,
-                   nir_ssa_def *offset_src,
+                   nir_def *val,
+                   nir_def *offset_src,
                    lower_ngg_ms_state *s)
 {
    assert(val->num_components == 1);
@@ -3607,19 +3607,19 @@ ms_store_cull_flag(nir_builder *b,
    nir_store_shared(b, nir_b2i8(b, val), offset_src, .base = s->layout.lds.cull_flags_addr);
 }
 
-static nir_ssa_def *
+static nir_def *
 ms_arrayed_output_base_addr(nir_builder *b,
-                            nir_ssa_def *arr_index,
+                            nir_def *arr_index,
                             unsigned driver_location,
                             unsigned num_arrayed_outputs)
 {
    /* Address offset of the array item (vertex or primitive). */
    unsigned arr_index_stride = num_arrayed_outputs * 16u;
-   nir_ssa_def *arr_index_off = nir_imul_imm(b, arr_index, arr_index_stride);
+   nir_def *arr_index_off = nir_imul_imm(b, arr_index, arr_index_stride);
 
    /* IO address offset within the vertex or primitive data. */
    unsigned io_offset = driver_location * 16u;
-   nir_ssa_def *io_off = nir_imm_int(b, io_offset);
+   nir_def *io_off = nir_imm_int(b, io_offset);
 
    return nir_iadd_nuw(b, arr_index_off, io_off);
 }
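
The base address above is plain strided addressing over 16-byte (vec4) slots; a scalar sketch of the same computation, with illustrative names:

   #include <stdint.h>

   /* Each array item (vertex or primitive) owns num_arrayed_outputs slots
    * of 16 bytes; driver_location selects the slot within the item.
    */
   static uint32_t arrayed_output_base_addr(uint32_t arr_index,
                                            uint32_t driver_location,
                                            uint32_t num_arrayed_outputs)
   {
      uint32_t arr_index_stride = num_arrayed_outputs * 16u;
      return arr_index * arr_index_stride + driver_location * 16u;
   }
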
@@ -3647,7 +3647,7 @@ update_ms_output_info(nir_intrinsic_instr *intrin,
    uint32_t write_mask = nir_intrinsic_write_mask(intrin);
    unsigned component_offset = nir_intrinsic_component(intrin);
 
-   nir_ssa_def *store_val = intrin->src[0].ssa;
+   nir_def *store_val = intrin->src[0].ssa;
    write_mask = util_widen_mask(write_mask, DIV_ROUND_UP(store_val->bit_size, 32));
    uint32_t components_mask = write_mask << component_offset;
 
@@ -3662,8 +3662,8 @@ update_ms_output_info(nir_intrinsic_instr *intrin,
    }
 }
 
-static nir_ssa_def *
-regroup_store_val(nir_builder *b, nir_ssa_def *store_val)
+static nir_def *
+regroup_store_val(nir_builder *b, nir_def *store_val)
 {
    /* Vulkan spec 15.1.4-15.1.5:
     *
@@ -3679,7 +3679,7 @@ regroup_store_val(nir_builder *b, nir_ssa_def *store_val)
 
    if (store_val->bit_size < 32) {
       assert(store_val->num_components <= 4);
-      nir_ssa_def *comps[4] = {0};
+      nir_def *comps[4] = {0};
       for (unsigned c = 0; c < store_val->num_components; ++c)
          comps[c] = nir_u2u32(b, nir_channel(b, store_val, c));
       return nir_vec(b, comps, store_val->num_components);
@@ -3688,8 +3688,8 @@ regroup_store_val(nir_builder *b, nir_ssa_def *store_val)
    return store_val;
 }
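
regroup_store_val widens each sub-dword component into its own dword so that components occupy separate 32-bit slots; regroup_load_val below undoes this on load. A plain-C sketch for 16-bit components (illustrative, not Mesa API):

   #include <stdint.h>

   /* Store side: zero-extend each 16-bit component into a 32-bit slot. */
   static void regroup_store_u16(const uint16_t *src, uint32_t *dst, unsigned n)
   {
      for (unsigned i = 0; i < n; i++)
         dst[i] = src[i];
   }

   /* Load side: truncate each 32-bit slot back to 16 bits. */
   static void regroup_load_u16(const uint32_t *src, uint16_t *dst, unsigned n)
   {
      for (unsigned i = 0; i < n; i++)
         dst[i] = (uint16_t)src[i];
   }
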
 
-static nir_ssa_def *
-regroup_load_val(nir_builder *b, nir_ssa_def *load, unsigned dest_bit_size)
+static nir_def *
+regroup_load_val(nir_builder *b, nir_def *load, unsigned dest_bit_size)
 {
    if (dest_bit_size == load->bit_size)
       return load;
@@ -3697,7 +3697,7 @@ regroup_load_val(nir_builder *b, nir_ssa_def *load, unsigned dest_bit_size)
   /* Small bitsize components are not stored contiguously; take care of that here. */
    unsigned num_components = load->num_components;
    assert(num_components <= 4);
-   nir_ssa_def *components[4] = {0};
+   nir_def *components[4] = {0};
    for (unsigned i = 0; i < num_components; ++i)
       components[i] = nir_u2uN(b, nir_channel(b, load, i), dest_bit_size);
 
@@ -3764,9 +3764,9 @@ ms_store_arrayed_output_intrin(nir_builder *b,
       assert(nir_src_as_uint(*nir_get_io_offset_src(intrin)) == 0);
       assert(nir_intrinsic_component(intrin) == 0);
 
-      nir_ssa_def *store_val = intrin->src[0].ssa;
-      nir_ssa_def *arr_index = nir_get_io_arrayed_index_src(intrin)->ssa;
-      nir_ssa_def *offset = nir_imul_imm(b, arr_index, s->vertices_per_prim);
+      nir_def *store_val = intrin->src[0].ssa;
+      nir_def *arr_index = nir_get_io_arrayed_index_src(intrin)->ssa;
+      nir_def *offset = nir_imul_imm(b, arr_index, s->vertices_per_prim);
       ms_store_prim_indices(b, store_val, offset, s);
       return;
    } else if (location == VARYING_SLOT_CULL_PRIMITIVE) {
@@ -3778,9 +3778,9 @@ ms_store_arrayed_output_intrin(nir_builder *b,
       assert(nir_intrinsic_component(intrin) == 0);
       assert(nir_intrinsic_write_mask(intrin) == 1);
 
-      nir_ssa_def *store_val = intrin->src[0].ssa;
-      nir_ssa_def *arr_index = nir_get_io_arrayed_index_src(intrin)->ssa;
-      nir_ssa_def *offset = nir_imul_imm(b, arr_index, s->vertices_per_prim);
+      nir_def *store_val = intrin->src[0].ssa;
+      nir_def *arr_index = nir_get_io_arrayed_index_src(intrin)->ssa;
+      nir_def *offset = nir_imul_imm(b, arr_index, s->vertices_per_prim);
       ms_store_cull_flag(b, store_val, offset, s);
       return;
    }
@@ -3799,21 +3799,21 @@ ms_store_arrayed_output_intrin(nir_builder *b,
    unsigned num_outputs = util_bitcount64(out->mask);
    unsigned const_off = out->addr + component_offset * 4;
 
-   nir_ssa_def *store_val = regroup_store_val(b, intrin->src[0].ssa);
-   nir_ssa_def *arr_index = nir_get_io_arrayed_index_src(intrin)->ssa;
-   nir_ssa_def *base_addr = ms_arrayed_output_base_addr(b, arr_index, driver_location, num_outputs);
-   nir_ssa_def *base_offset = nir_get_io_offset_src(intrin)->ssa;
-   nir_ssa_def *base_addr_off = nir_imul_imm(b, base_offset, 16u);
-   nir_ssa_def *addr = nir_iadd_nuw(b, base_addr, base_addr_off);
+   nir_def *store_val = regroup_store_val(b, intrin->src[0].ssa);
+   nir_def *arr_index = nir_get_io_arrayed_index_src(intrin)->ssa;
+   nir_def *base_addr = ms_arrayed_output_base_addr(b, arr_index, driver_location, num_outputs);
+   nir_def *base_offset = nir_get_io_offset_src(intrin)->ssa;
+   nir_def *base_addr_off = nir_imul_imm(b, base_offset, 16u);
+   nir_def *addr = nir_iadd_nuw(b, base_addr, base_addr_off);
 
    if (out_mode == ms_out_mode_lds) {
       nir_store_shared(b, store_val, addr, .base = const_off,
                      .write_mask = write_mask, .align_mul = 16,
                      .align_offset = const_off % 16);
    } else if (out_mode == ms_out_mode_scratch_ring) {
-      nir_ssa_def *ring = nir_load_ring_mesh_scratch_amd(b);
-      nir_ssa_def *off = nir_load_ring_mesh_scratch_offset_amd(b);
-      nir_ssa_def *zero = nir_imm_int(b, 0);
+      nir_def *ring = nir_load_ring_mesh_scratch_amd(b);
+      nir_def *off = nir_load_ring_mesh_scratch_offset_amd(b);
+      nir_def *zero = nir_imm_int(b, 0);
       nir_store_buffer_amd(b, store_val, ring, addr, off, zero,
                            .base = const_off,
                            .write_mask = write_mask,
@@ -3828,8 +3828,8 @@ ms_store_arrayed_output_intrin(nir_builder *b,
        */
       const nir_io_semantics io_sem = nir_intrinsic_io_semantics(intrin);
       unsigned param_offset = s->vs_output_param_offset[io_sem.location];
-      nir_ssa_def *ring = nir_load_ring_attr_amd(b);
-      nir_ssa_def *soffset = nir_load_ring_attr_offset_amd(b);
+      nir_def *ring = nir_load_ring_attr_amd(b);
+      nir_def *soffset = nir_load_ring_attr_offset_amd(b);
       nir_store_buffer_amd(b, store_val, ring, base_addr_off, soffset, arr_index,
                            .base = const_off + param_offset * 16,
                            .memory_modes = nir_var_shader_out,
@@ -3843,7 +3843,7 @@ ms_store_arrayed_output_intrin(nir_builder *b,
       }
 
       u_foreach_bit(comp, write_mask) {
-         nir_ssa_def *val = nir_channel(b, store_val, comp);
+         nir_def *val = nir_channel(b, store_val, comp);
          unsigned idx = location * 4 + comp + component_offset;
          nir_store_var(b, s->out_variables[idx], val, 0x1);
       }
@@ -3852,10 +3852,10 @@ ms_store_arrayed_output_intrin(nir_builder *b,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 ms_load_arrayed_output(nir_builder *b,
-                       nir_ssa_def *arr_index,
-                       nir_ssa_def *base_offset,
+                       nir_def *arr_index,
+                       nir_def *base_offset,
                        unsigned location,
                        unsigned component_offset,
                        unsigned num_components,
@@ -3872,24 +3872,24 @@ ms_load_arrayed_output(nir_builder *b,
    /* Use compacted driver location instead of the original. */
    unsigned driver_location = util_bitcount64(out->mask & u_bit_consecutive64(0, location));
 
-   nir_ssa_def *base_addr = ms_arrayed_output_base_addr(b, arr_index, driver_location, num_outputs);
-   nir_ssa_def *base_addr_off = nir_imul_imm(b, base_offset, 16);
-   nir_ssa_def *addr = nir_iadd_nuw(b, base_addr, base_addr_off);
+   nir_def *base_addr = ms_arrayed_output_base_addr(b, arr_index, driver_location, num_outputs);
+   nir_def *base_addr_off = nir_imul_imm(b, base_offset, 16);
+   nir_def *addr = nir_iadd_nuw(b, base_addr, base_addr_off);
 
    if (out_mode == ms_out_mode_lds) {
       return nir_load_shared(b, num_components, load_bit_size, addr, .align_mul = 16,
                              .align_offset = component_addr_off % 16,
                              .base = const_off);
    } else if (out_mode == ms_out_mode_scratch_ring) {
-      nir_ssa_def *ring = nir_load_ring_mesh_scratch_amd(b);
-      nir_ssa_def *off = nir_load_ring_mesh_scratch_offset_amd(b);
-      nir_ssa_def *zero = nir_imm_int(b, 0);
+      nir_def *ring = nir_load_ring_mesh_scratch_amd(b);
+      nir_def *off = nir_load_ring_mesh_scratch_offset_amd(b);
+      nir_def *zero = nir_imm_int(b, 0);
       return nir_load_buffer_amd(b, num_components, load_bit_size, ring, addr, off, zero,
                                  .base = const_off,
                                  .memory_modes = nir_var_shader_out,
                                  .access = ACCESS_COHERENT);
    } else if (out_mode == ms_out_mode_var) {
-      nir_ssa_def *arr[8] = {0};
+      nir_def *arr[8] = {0};
       unsigned num_32bit_components = num_components * load_bit_size / 32;
       for (unsigned comp = 0; comp < num_32bit_components; ++comp) {
          unsigned idx = location * 4 + comp + component_addr_off;
@@ -3903,13 +3903,13 @@ ms_load_arrayed_output(nir_builder *b,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 ms_load_arrayed_output_intrin(nir_builder *b,
                               nir_intrinsic_instr *intrin,
                               lower_ngg_ms_state *s)
 {
-   nir_ssa_def *arr_index = nir_get_io_arrayed_index_src(intrin)->ssa;
-   nir_ssa_def *base_offset = nir_get_io_offset_src(intrin)->ssa;
+   nir_def *arr_index = nir_get_io_arrayed_index_src(intrin)->ssa;
+   nir_def *base_offset = nir_get_io_offset_src(intrin)->ssa;
 
    unsigned location = nir_intrinsic_io_semantics(intrin).location;
    unsigned component_offset = nir_intrinsic_component(intrin);
@@ -3917,14 +3917,14 @@ ms_load_arrayed_output_intrin(nir_builder *b,
    unsigned num_components = intrin->dest.ssa.num_components;
    unsigned load_bit_size = MAX2(bit_size, 32);
 
-   nir_ssa_def *load =
+   nir_def *load =
       ms_load_arrayed_output(b, arr_index, base_offset, location, component_offset,
                              num_components, load_bit_size, s);
 
    return regroup_load_val(b, load, bit_size);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_ms_load_workgroup_index(nir_builder *b,
                               UNUSED nir_intrinsic_instr *intrin,
                               lower_ngg_ms_state *s)
@@ -3932,16 +3932,16 @@ lower_ms_load_workgroup_index(nir_builder *b,
    return s->workgroup_index;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_ms_set_vertex_and_primitive_count(nir_builder *b,
                                         nir_intrinsic_instr *intrin,
                                         lower_ngg_ms_state *s)
 {
    /* If either the number of vertices or primitives is zero, set both of them to zero. */
-   nir_ssa_def *num_vtx = nir_read_first_invocation(b, intrin->src[0].ssa);
-   nir_ssa_def *num_prm = nir_read_first_invocation(b, intrin->src[1].ssa);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *is_either_zero = nir_ieq(b, nir_umin(b, num_vtx, num_prm), zero);
+   nir_def *num_vtx = nir_read_first_invocation(b, intrin->src[0].ssa);
+   nir_def *num_prm = nir_read_first_invocation(b, intrin->src[1].ssa);
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *is_either_zero = nir_ieq(b, nir_umin(b, num_vtx, num_prm), zero);
    num_vtx = nir_bcsel(b, is_either_zero, zero, num_vtx);
    num_prm = nir_bcsel(b, is_either_zero, zero, num_prm);
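
The bcsel pair above is a branchless clamp: if either count is zero, both become zero. The scalar equivalent, as a sketch:

   #include <stdint.h>

   /* min(v, p) == 0 iff either count is zero; in that case zero both. */
   static void clamp_counts(uint32_t *num_vtx, uint32_t *num_prm)
   {
      uint32_t m = *num_vtx < *num_prm ? *num_vtx : *num_prm;
      if (m == 0)
         *num_vtx = *num_prm = 0;
   }
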
 
@@ -3951,7 +3951,7 @@ lower_ms_set_vertex_and_primitive_count(nir_builder *b,
    return NIR_LOWER_INSTR_PROGRESS_REPLACE;
 }
 
-static nir_ssa_def *
+static nir_def *
 update_ms_barrier(nir_builder *b,
                          nir_intrinsic_instr *intrin,
                          lower_ngg_ms_state *s)
@@ -3970,7 +3970,7 @@ update_ms_barrier(nir_builder *b,
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_ms_intrinsic(nir_builder *b, nir_instr *instr, void *state)
 {
    lower_ngg_ms_state *s = (lower_ngg_ms_state *) state;
@@ -4026,11 +4026,11 @@ lower_ms_intrinsics(nir_shader *shader, lower_ngg_ms_state *s)
 
 static void
 ms_emit_arrayed_outputs(nir_builder *b,
-                        nir_ssa_def *invocation_index,
+                        nir_def *invocation_index,
                         uint64_t mask,
                         lower_ngg_ms_state *s)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    u_foreach_bit64(slot, mask) {
       /* Should not occur here, handled separately. */
@@ -4042,7 +4042,7 @@ ms_emit_arrayed_outputs(nir_builder *b,
          int start_comp = 0, num_components = 1;
          u_bit_scan_consecutive_range(&component_mask, &start_comp, &num_components);
 
-         nir_ssa_def *load =
+         nir_def *load =
             ms_load_arrayed_output(b, invocation_index, zero, slot, start_comp,
                                    num_components, 32, s);
 
@@ -4087,7 +4087,7 @@ ms_emit_legacy_workgroup_index(nir_builder *b, lower_ngg_ms_state *s)
     * Due to the register programming of mesh shaders, this value is only filled for
     * the first invocation of the first wave. To let other waves know, we use LDS.
     */
-   nir_ssa_def *workgroup_index = nir_load_vertex_id_zero_base(b);
+   nir_def *workgroup_index = nir_load_vertex_id_zero_base(b);
 
    if (s->api_workgroup_size <= s->wave_size) {
       /* API workgroup is small, so we don't need to use LDS. */
@@ -4097,14 +4097,14 @@ ms_emit_legacy_workgroup_index(nir_builder *b, lower_ngg_ms_state *s)
 
    unsigned workgroup_index_lds_addr = s->layout.lds.workgroup_info_addr + lds_ms_wg_index;
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *dont_care = nir_ssa_undef(b, 1, 32);
-   nir_ssa_def *loaded_workgroup_index = NULL;
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *dont_care = nir_undef(b, 1, 32);
+   nir_def *loaded_workgroup_index = NULL;
 
    /* Use elect to make sure only 1 invocation uses LDS. */
    nir_if *if_elected = nir_push_if(b, nir_elect(b, 1));
    {
-      nir_ssa_def *wave_id = nir_load_subgroup_id(b);
+      nir_def *wave_id = nir_load_subgroup_id(b);
       nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, wave_id, 0));
       {
          nir_store_shared(b, workgroup_index, zero, .base = workgroup_index_lds_addr);
@@ -4134,8 +4134,8 @@ ms_emit_legacy_workgroup_index(nir_builder *b, lower_ngg_ms_state *s)
 static void
 set_ms_final_output_counts(nir_builder *b,
                            lower_ngg_ms_state *s,
-                           nir_ssa_def **out_num_prm,
-                           nir_ssa_def **out_num_vtx)
+                           nir_def **out_num_prm,
+                           nir_def **out_num_vtx)
 {
    /* The spec allows the numbers to be divergent, and in that case we need to
     * use the values from the first invocation. Also the HW requires us to set
@@ -4143,8 +4143,8 @@ set_ms_final_output_counts(nir_builder *b,
     *
     * These are already done by the lowering.
     */
-   nir_ssa_def *num_prm = nir_load_var(b, s->primitive_count_var);
-   nir_ssa_def *num_vtx = nir_load_var(b, s->vertex_count_var);
+   nir_def *num_prm = nir_load_var(b, s->primitive_count_var);
+   nir_def *num_vtx = nir_load_var(b, s->vertex_count_var);
 
    if (s->hw_workgroup_size <= s->wave_size) {
       /* Single-wave mesh shader workgroup. */
@@ -4163,7 +4163,7 @@ set_ms_final_output_counts(nir_builder *b,
     * currently doesn't support this.
     */
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, nir_load_subgroup_id(b), 0));
    {
@@ -4188,8 +4188,8 @@ set_ms_final_output_counts(nir_builder *b,
                             .memory_semantics = NIR_MEMORY_ACQ_REL,
                             .memory_modes = nir_var_mem_shared);
 
-      nir_ssa_def *prm_vtx = NULL;
-      nir_ssa_def *dont_care_2x32 = nir_ssa_undef(b, 2, 32);
+      nir_def *prm_vtx = NULL;
+      nir_def *dont_care_2x32 = nir_undef(b, 2, 32);
       nir_if *if_elected = nir_push_if(b, nir_elect(b, 1));
       {
          prm_vtx = nir_load_shared(b, 2, 32, zero,
@@ -4214,17 +4214,17 @@ static void
 ms_emit_attribute_ring_output_stores(nir_builder *b, const uint64_t outputs_mask,
                                      lower_ngg_ms_state *s)
 {
-   nir_ssa_def *idx = nir_load_local_invocation_index(b);
-   nir_ssa_def *ring = nir_load_ring_attr_amd(b);
-   nir_ssa_def *off = nir_load_ring_attr_offset_amd(b);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *idx = nir_load_local_invocation_index(b);
+   nir_def *ring = nir_load_ring_attr_amd(b);
+   nir_def *off = nir_load_ring_attr_offset_amd(b);
+   nir_def *zero = nir_imm_int(b, 0);
 
    u_foreach_bit64 (slot, outputs_mask) {
       if (s->vs_output_param_offset[slot] > AC_EXP_PARAM_OFFSET_31)
          continue;
 
-      nir_ssa_def *soffset = nir_iadd_imm(b, off, s->vs_output_param_offset[slot] * 16 * 32);
-      nir_ssa_def *store_val = nir_ssa_undef(b, 4, 32);
+      nir_def *soffset = nir_iadd_imm(b, off, s->vs_output_param_offset[slot] * 16 * 32);
+      nir_def *store_val = nir_undef(b, 4, 32);
       unsigned store_val_components = 0;
       for (unsigned c = 0; c < 4; ++c) {
          if (s->outputs[slot][c]) {
@@ -4242,11 +4242,11 @@ ms_emit_attribute_ring_output_stores(nir_builder *b, const uint64_t outputs_mask
 
 static void
 ms_emit_primitive_export(nir_builder *b,
-                         nir_ssa_def *prim_exp_arg_ch1,
+                         nir_def *prim_exp_arg_ch1,
                          uint64_t per_primitive_outputs,
                          lower_ngg_ms_state *s)
 {
-   nir_ssa_def *prim_exp_arg_ch2 = NULL;
+   nir_def *prim_exp_arg_ch2 = NULL;
 
    uint64_t export_as_prim_arg_slots =
       VARYING_BIT_LAYER |
@@ -4270,18 +4270,18 @@ ms_emit_primitive_export(nir_builder *b,
       prim_exp_arg_ch2 = nir_imm_int(b, 0);
 
       if (per_primitive_outputs & VARYING_BIT_LAYER) {
-         nir_ssa_def *layer =
+         nir_def *layer =
             nir_ishl_imm(b, s->outputs[VARYING_SLOT_LAYER][0], s->gfx_level >= GFX11 ? 0 : 17);
          prim_exp_arg_ch2 = nir_ior(b, prim_exp_arg_ch2, layer);
       }
 
       if (per_primitive_outputs & VARYING_BIT_VIEWPORT) {
-         nir_ssa_def *view = nir_ishl_imm(b, s->outputs[VARYING_SLOT_VIEWPORT][0], 20);
+         nir_def *view = nir_ishl_imm(b, s->outputs[VARYING_SLOT_VIEWPORT][0], 20);
          prim_exp_arg_ch2 = nir_ior(b, prim_exp_arg_ch2, view);
       }
 
       if (per_primitive_outputs & VARYING_BIT_PRIMITIVE_SHADING_RATE) {
-         nir_ssa_def *rate = s->outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE][0];
+         nir_def *rate = s->outputs[VARYING_SLOT_PRIMITIVE_SHADING_RATE][0];
          prim_exp_arg_ch2 = nir_ior(b, prim_exp_arg_ch2, rate);
       }
 
@@ -4292,7 +4292,7 @@ ms_emit_primitive_export(nir_builder *b,
       }
    }
 
-   nir_ssa_def *prim_exp_arg = prim_exp_arg_ch2 ?
+   nir_def *prim_exp_arg = prim_exp_arg_ch2 ?
       nir_vec2(b, prim_exp_arg_ch1, prim_exp_arg_ch2) : prim_exp_arg_ch1;
 
    ac_nir_export_primitive(b, prim_exp_arg);
@@ -4308,12 +4308,12 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
    nir_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
                          .memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_shader_out|nir_var_mem_shared);
 
-   nir_ssa_def *num_prm;
-   nir_ssa_def *num_vtx;
+   nir_def *num_prm;
+   nir_def *num_vtx;
 
    set_ms_final_output_counts(b, s, &num_prm, &num_vtx);
 
-   nir_ssa_def *invocation_index = nir_load_local_invocation_index(b);
+   nir_def *invocation_index = nir_load_local_invocation_index(b);
 
    /* Load vertex/primitive attributes from shared memory and
     * emit store_output intrinsics for them.
@@ -4324,7 +4324,7 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
     */
 
    /* Export vertices. */
-   nir_ssa_def *has_output_vertex = nir_ilt(b, invocation_index, num_vtx);
+   nir_def *has_output_vertex = nir_ilt(b, invocation_index, num_vtx);
    nir_if *if_has_output_vertex = nir_push_if(b, has_output_vertex);
    {
       const uint64_t per_vertex_outputs =
@@ -4355,7 +4355,7 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
    nir_pop_if(b, if_has_output_vertex);
 
    /* Export primitives. */
-   nir_ssa_def *has_output_primitive = nir_ilt(b, invocation_index, num_prm);
+   nir_def *has_output_primitive = nir_ilt(b, invocation_index, num_prm);
    nir_if *if_has_output_primitive = nir_push_if(b, has_output_primitive);
    {
       uint64_t per_primitive_outputs =
@@ -4371,12 +4371,12 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
       }
 
       /* Primitive connectivity data: describes which vertices the primitive uses. */
-      nir_ssa_def *prim_idx_addr = nir_imul_imm(b, invocation_index, s->vertices_per_prim);
-      nir_ssa_def *indices_loaded = NULL;
-      nir_ssa_def *cull_flag = NULL;
+      nir_def *prim_idx_addr = nir_imul_imm(b, invocation_index, s->vertices_per_prim);
+      nir_def *indices_loaded = NULL;
+      nir_def *cull_flag = NULL;
 
       if (s->layout.var.prm_attr.mask & BITFIELD64_BIT(VARYING_SLOT_PRIMITIVE_INDICES)) {
-         nir_ssa_def *indices[3] = {0};
+         nir_def *indices[3] = {0};
          for (unsigned c = 0; c < s->vertices_per_prim; ++c)
             indices[c] = nir_load_var(b, s->out_variables[VARYING_SLOT_PRIMITIVE_INDICES * 4 + c]);
          indices_loaded = nir_vec(b, indices, s->vertices_per_prim);
@@ -4387,7 +4387,7 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
       }
 
       if (s->uses_cull_flags) {
-         nir_ssa_def *loaded_cull_flag = NULL;
+         nir_def *loaded_cull_flag = NULL;
          if (s->layout.var.prm_attr.mask & BITFIELD64_BIT(VARYING_SLOT_CULL_PRIMITIVE))
             loaded_cull_flag = nir_load_var(b, s->out_variables[VARYING_SLOT_CULL_PRIMITIVE * 4]);
          else
@@ -4396,15 +4396,15 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
          cull_flag = nir_i2b(b, loaded_cull_flag);
       }
 
-      nir_ssa_def *indices[3];
-      nir_ssa_def *max_vtx_idx = nir_iadd_imm(b, num_vtx, -1u);
+      nir_def *indices[3];
+      nir_def *max_vtx_idx = nir_iadd_imm(b, num_vtx, -1u);
 
       for (unsigned i = 0; i < s->vertices_per_prim; ++i) {
          indices[i] = nir_channel(b, indices_loaded, i);
          indices[i] = nir_umin(b, indices[i], max_vtx_idx);
       }
 
-      nir_ssa_def *prim_exp_arg = emit_pack_ngg_prim_exp_arg(b, s->vertices_per_prim, indices,
+      nir_def *prim_exp_arg = emit_pack_ngg_prim_exp_arg(b, s->vertices_per_prim, indices,
                                                              cull_flag);
 
       ms_emit_primitive_export(b, prim_exp_arg, per_primitive_outputs, s);
@@ -4489,8 +4489,8 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
    b->cursor = nir_before_cf_list(&b->impl->body);
 
   /* Wrap the shader in an if to ensure that only the necessary number of lanes run it. */
-   nir_ssa_def *invocation_index = nir_load_local_invocation_index(b);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *invocation_index = nir_load_local_invocation_index(b);
+   nir_def *zero = nir_imm_int(b, 0);
 
    if (need_additional_barriers) {
      /* The first invocation stores 0 to the number of API waves in flight. */
@@ -4506,7 +4506,7 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
                             .memory_modes = nir_var_shader_out | nir_var_mem_shared);
    }
 
-   nir_ssa_def *has_api_ms_invocation = nir_ult_imm(b, invocation_index, s->api_workgroup_size);
+   nir_def *has_api_ms_invocation = nir_ult_imm(b, invocation_index, s->api_workgroup_size);
    nir_if *if_has_api_ms_invocation = nir_push_if(b, has_api_ms_invocation);
    {
       nir_cf_reinsert(&extracted, b->cursor);
@@ -4537,8 +4537,8 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
        * We do this by executing a barrier until the number of API waves
        * in flight becomes zero.
        */
-      nir_ssa_def *has_api_ms_ballot = nir_ballot(b, 1, s->wave_size, has_api_ms_invocation);
-      nir_ssa_def *wave_has_no_api_ms = nir_ieq_imm(b, has_api_ms_ballot, 0);
+      nir_def *has_api_ms_ballot = nir_ballot(b, 1, s->wave_size, has_api_ms_invocation);
+      nir_def *wave_has_no_api_ms = nir_ieq_imm(b, has_api_ms_ballot, 0);
       nir_if *if_wave_has_no_api_ms = nir_push_if(b, wave_has_no_api_ms);
       {
          nir_if *if_elected = nir_push_if(b, nir_elect(b, 1));
@@ -4550,7 +4550,7 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
                                      .memory_semantics = NIR_MEMORY_ACQ_REL,
                                      .memory_modes = nir_var_shader_out | nir_var_mem_shared);
 
-               nir_ssa_def *loaded = nir_load_shared(b, 1, 32, zero, .base = api_waves_in_flight_addr);
+               nir_def *loaded = nir_load_shared(b, 1, 32, zero, .base = api_waves_in_flight_addr);
                nir_if *if_break = nir_push_if(b, nir_ieq_imm(b, loaded, 0));
                {
                   nir_jump(b, nir_jump_break);
index 670e5ab..c5822e4 100644 (file)
@@ -21,7 +21,7 @@ typedef struct {
    bool lower_load_barycentric;
 
    /* Add one for dual source blend second output. */
-   nir_ssa_def *outputs[FRAG_RESULT_MAX + 1][4];
+   nir_def *outputs[FRAG_RESULT_MAX + 1][4];
    nir_alu_type output_types[FRAG_RESULT_MAX + 1];
 
    /* MAX_DRAW_BUFFERS for MRT export, 1 for MRTZ export */
@@ -85,52 +85,52 @@ init_interp_param(nir_builder *b, lower_ps_state *s)
     * contains fully-covered quads.
     */
    if (s->options->bc_optimize_for_persp || s->options->bc_optimize_for_linear) {
-      nir_ssa_def *bc_optimize = nir_load_barycentric_optimize_amd(b);
+      nir_def *bc_optimize = nir_load_barycentric_optimize_amd(b);
 
       if (s->options->bc_optimize_for_persp) {
-         nir_ssa_def *center =
+         nir_def *center =
             nir_load_barycentric_pixel(b, 32, .interp_mode = INTERP_MODE_SMOOTH);
-         nir_ssa_def *centroid =
+         nir_def *centroid =
             nir_load_barycentric_centroid(b, 32, .interp_mode = INTERP_MODE_SMOOTH);
 
-         nir_ssa_def *value = nir_bcsel(b, bc_optimize, center, centroid);
+         nir_def *value = nir_bcsel(b, bc_optimize, center, centroid);
          nir_store_var(b, s->persp_centroid, value, 0x3);
       }
 
       if (s->options->bc_optimize_for_linear) {
-         nir_ssa_def *center =
+         nir_def *center =
             nir_load_barycentric_pixel(b, 32, .interp_mode = INTERP_MODE_NOPERSPECTIVE);
-         nir_ssa_def *centroid =
+         nir_def *centroid =
             nir_load_barycentric_centroid(b, 32, .interp_mode = INTERP_MODE_NOPERSPECTIVE);
 
-         nir_ssa_def *value = nir_bcsel(b, bc_optimize, center, centroid);
+         nir_def *value = nir_bcsel(b, bc_optimize, center, centroid);
          nir_store_var(b, s->linear_centroid, value, 0x3);
       }
    }
 
    if (s->options->force_persp_sample_interp) {
-      nir_ssa_def *sample =
+      nir_def *sample =
          nir_load_barycentric_sample(b, 32, .interp_mode = INTERP_MODE_SMOOTH);
       nir_store_var(b, s->persp_center, sample, 0x3);
       nir_store_var(b, s->persp_centroid, sample, 0x3);
    }
 
    if (s->options->force_linear_sample_interp) {
-      nir_ssa_def *sample =
+      nir_def *sample =
          nir_load_barycentric_sample(b, 32, .interp_mode = INTERP_MODE_NOPERSPECTIVE);
       nir_store_var(b, s->linear_center, sample, 0x3);
       nir_store_var(b, s->linear_centroid, sample, 0x3);
    }
 
    if (s->options->force_persp_center_interp) {
-      nir_ssa_def *center =
+      nir_def *center =
          nir_load_barycentric_pixel(b, 32, .interp_mode = INTERP_MODE_SMOOTH);
       nir_store_var(b, s->persp_sample, center, 0x3);
       nir_store_var(b, s->persp_centroid, center, 0x3);
    }
 
    if (s->options->force_linear_center_interp) {
-      nir_ssa_def *center =
+      nir_def *center =
          nir_load_barycentric_pixel(b, 32, .interp_mode = INTERP_MODE_NOPERSPECTIVE);
       nir_store_var(b, s->linear_sample, center, 0x3);
       nir_store_var(b, s->linear_centroid, center, 0x3);
@@ -186,8 +186,8 @@ lower_ps_load_barycentric(nir_builder *b, nir_intrinsic_instr *intrin, lower_ps_
 
    b->cursor = nir_before_instr(&intrin->instr);
 
-   nir_ssa_def *replacement = nir_load_var(b, var);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, replacement);
+   nir_def *replacement = nir_load_var(b, var);
+   nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
 
    nir_instr_remove(&intrin->instr);
    return true;
@@ -200,7 +200,7 @@ gather_ps_store_output(nir_builder *b, nir_intrinsic_instr *intrin, lower_ps_sta
    unsigned write_mask = nir_intrinsic_write_mask(intrin);
    unsigned component = nir_intrinsic_component(intrin);
    nir_alu_type type = nir_intrinsic_src_type(intrin);
-   nir_ssa_def *store_val = intrin->src[0].ssa;
+   nir_def *store_val = intrin->src[0].ssa;
 
    b->cursor = nir_before_instr(&intrin->instr);
 
@@ -249,13 +249,13 @@ lower_ps_load_sample_mask_in(nir_builder *b, nir_intrinsic_instr *intrin, lower_
    b->cursor = nir_before_instr(&intrin->instr);
 
    uint32_t ps_iter_mask = ac_get_ps_iter_mask(s->options->ps_iter_samples);
-   nir_ssa_def *sampleid = nir_load_sample_id(b);
-   nir_ssa_def *submask = nir_ishl(b, nir_imm_int(b, ps_iter_mask), sampleid);
+   nir_def *sampleid = nir_load_sample_id(b);
+   nir_def *submask = nir_ishl(b, nir_imm_int(b, ps_iter_mask), sampleid);
 
-   nir_ssa_def *sample_mask = nir_load_sample_mask_in(b);
-   nir_ssa_def *replacement = nir_iand(b, sample_mask, submask);
+   nir_def *sample_mask = nir_load_sample_mask_in(b);
+   nir_def *replacement = nir_iand(b, sample_mask, submask);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, replacement);
+   nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
 
    nir_instr_remove(&intrin->instr);
    return true;
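
With sample-rate shading each invocation covers a single sample, so the lowered gl_SampleMaskIn keeps only that sample's bits. A scalar sketch of the computation above (the ps_iter_mask value comes from the driver's sample-iteration setup; names are illustrative):

   #include <stdint.h>

   static uint32_t lowered_sample_mask_in(uint32_t hw_sample_mask_in,
                                          uint32_t ps_iter_mask,
                                          uint32_t sample_id)
   {
      uint32_t submask = ps_iter_mask << sample_id; /* bits owned by this sample */
      return hw_sample_mask_in & submask;
   }
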
@@ -327,8 +327,8 @@ emit_ps_color_clamp_and_alpha_test(nir_builder *b, lower_ps_state *s)
          } else if (s->options->alpha_func == COMPARE_FUNC_NEVER) {
             nir_discard(b);
          } else if (s->outputs[slot][3]) {
-            nir_ssa_def *ref = nir_load_alpha_reference_amd(b);
-            nir_ssa_def *cond =
+            nir_def *ref = nir_load_alpha_reference_amd(b);
+            nir_def *cond =
                nir_compare_func(b, s->options->alpha_func, s->outputs[slot][3], ref);
             nir_discard_if(b, nir_inot(b, cond));
          }
@@ -341,16 +341,16 @@ emit_ps_mrtz_export(nir_builder *b, lower_ps_state *s)
 {
    uint64_t outputs_written = b->shader->info.outputs_written;
 
-   nir_ssa_def *mrtz_alpha = NULL;
+   nir_def *mrtz_alpha = NULL;
    if (s->options->alpha_to_coverage_via_mrtz) {
       mrtz_alpha = s->outputs[FRAG_RESULT_COLOR][3] ?
          s->outputs[FRAG_RESULT_COLOR][3] :
          s->outputs[FRAG_RESULT_DATA0][3];
    }
 
-   nir_ssa_def *depth = s->outputs[FRAG_RESULT_DEPTH][0];
-   nir_ssa_def *stencil = s->outputs[FRAG_RESULT_STENCIL][0];
-   nir_ssa_def *sample_mask = s->outputs[FRAG_RESULT_SAMPLE_MASK][0];
+   nir_def *depth = s->outputs[FRAG_RESULT_DEPTH][0];
+   nir_def *stencil = s->outputs[FRAG_RESULT_STENCIL][0];
+   nir_def *sample_mask = s->outputs[FRAG_RESULT_SAMPLE_MASK][0];
 
    if (s->options->kill_samplemask) {
       sample_mask = NULL;
@@ -371,8 +371,8 @@ emit_ps_mrtz_export(nir_builder *b, lower_ps_state *s)
                                  outputs_written & BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK),
                                  s->options->alpha_to_coverage_via_mrtz);
 
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
-   nir_ssa_def *outputs[4] = {undef, undef, undef, undef};
+   nir_def *undef = nir_undef(b, 1, 32);
+   nir_def *outputs[4] = {undef, undef, undef, undef};
    unsigned write_mask = 0;
    unsigned flags = 0;
 
@@ -465,22 +465,22 @@ emit_ps_color_export(nir_builder *b, lower_ps_state *s, gl_frag_result slot, uns
    bool enable_mrt_output_nan_fixup =
       s->options->enable_mrt_output_nan_fixup & BITFIELD_BIT(cbuf);
 
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
-   nir_ssa_def *outputs[4] = {undef, undef, undef, undef};
+   nir_def *undef = nir_undef(b, 1, 32);
+   nir_def *outputs[4] = {undef, undef, undef, undef};
    unsigned write_mask = 0;
    unsigned flags = 0;
 
    nir_alu_type base_type = nir_alu_type_get_base_type(type);
    unsigned type_size = nir_alu_type_get_type_size(type);
 
-   nir_ssa_def *data[4];
+   nir_def *data[4];
    memcpy(data, s->outputs[slot], sizeof(data));
 
    /* Replace NaN by zero (for 32-bit float formats) to fix game bugs if requested. */
    if (enable_mrt_output_nan_fixup && type == nir_type_float32) {
       for (int i = 0; i < 4; i++) {
          if (data[i]) {
-            nir_ssa_def *isnan = nir_fisnan(b, data[i]);
+            nir_def *isnan = nir_fisnan(b, data[i]);
             data[i] = nir_bcsel(b, isnan, nir_imm_float(b, 0), data[i]);
          }
       }
@@ -593,14 +593,14 @@ emit_ps_color_export(nir_builder *b, lower_ps_state *s, gl_frag_result slot, uns
       }
 
       for (int i = 0; i < 2; i++) {
-         nir_ssa_def *lo = data[i * 2];
-         nir_ssa_def *hi = data[i * 2 + 1];
+         nir_def *lo = data[i * 2];
+         nir_def *hi = data[i * 2 + 1];
          if (!lo && !hi)
             continue;
 
-         lo = lo ? lo : nir_ssa_undef(b, 1, type_size);
-         hi = hi ? hi : nir_ssa_undef(b, 1, type_size);
-         nir_ssa_def *vec = nir_vec2(b, lo, hi);
+         lo = lo ? lo : nir_undef(b, 1, type_size);
+         hi = hi ? hi : nir_undef(b, 1, type_size);
+         nir_def *vec = nir_vec2(b, lo, hi);
 
          outputs[i] = nir_build_alu1(b, pack_op, vec);
 
@@ -657,8 +657,8 @@ emit_ps_dual_src_blend_swizzle(nir_builder *b, lower_ps_state *s, unsigned first
    uint32_t mrt1_write_mask = nir_intrinsic_write_mask(mrt1_exp);
    uint32_t write_mask = mrt0_write_mask | mrt1_write_mask;
 
-   nir_ssa_def *mrt0_arg = mrt0_exp->src[0].ssa;
-   nir_ssa_def *mrt1_arg = mrt1_exp->src[0].ssa;
+   nir_def *mrt0_arg = mrt0_exp->src[0].ssa;
+   nir_def *mrt1_arg = mrt1_exp->src[0].ssa;
 
    /* Swizzle code is right before mrt0_exp. */
    b->cursor = nir_before_instr(&mrt0_exp->instr);
@@ -671,9 +671,9 @@ emit_ps_dual_src_blend_swizzle(nir_builder *b, lower_ps_state *s, unsigned first
       return;
    }
 
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
-   nir_ssa_def *arg0_vec[4] = {undef, undef, undef, undef};
-   nir_ssa_def *arg1_vec[4] = {undef, undef, undef, undef};
+   nir_def *undef = nir_undef(b, 1, 32);
+   nir_def *arg0_vec[4] = {undef, undef, undef, undef};
+   nir_def *arg1_vec[4] = {undef, undef, undef, undef};
 
    /* For illustration, originally
     *   lane0 export arg00 and arg01
@@ -684,17 +684,17 @@ emit_ps_dual_src_blend_swizzle(nir_builder *b, lower_ps_state *s, unsigned first
     *   lane1 export arg01 and arg11.
     */
    u_foreach_bit (i, write_mask) {
-      nir_ssa_def *arg0 = nir_channel(b, mrt0_arg, i);
-      nir_ssa_def *arg1 = nir_channel(b, mrt1_arg, i);
+      nir_def *arg0 = nir_channel(b, mrt0_arg, i);
+      nir_def *arg1 = nir_channel(b, mrt1_arg, i);
 
       /* swap odd,even lanes of arg0 */
       arg0 = nir_quad_swizzle_amd(b, arg0, .swizzle_mask = 0b10110001);
 
       /* swap even lanes between arg0 and arg1 */
-      nir_ssa_def *tid = nir_load_subgroup_invocation(b);
-      nir_ssa_def *is_even = nir_ieq_imm(b, nir_iand_imm(b, tid, 1), 0);
+      nir_def *tid = nir_load_subgroup_invocation(b);
+      nir_def *is_even = nir_ieq_imm(b, nir_iand_imm(b, tid, 1), 0);
 
-      nir_ssa_def *tmp = arg0;
+      nir_def *tmp = arg0;
       arg0 = nir_bcsel(b, is_even, arg1, arg0);
       arg1 = nir_bcsel(b, is_even, tmp, arg1);
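
The two steps above are a pairwise odd/even lane swap within arg0 followed by an arg0/arg1 exchange on even lanes. A wave-wide simulation in plain C (a sketch; the lane count is illustrative):

   #include <stdint.h>

   #define NUM_LANES 64 /* illustrative wave size */

   static void dual_src_swizzle(uint32_t arg0[NUM_LANES], uint32_t arg1[NUM_LANES])
   {
      /* Step 1: swap odd,even lanes of arg0 (the quad swizzle 0b10110001). */
      for (unsigned lane = 0; lane < NUM_LANES; lane += 2) {
         uint32_t t = arg0[lane];
         arg0[lane] = arg0[lane + 1];
         arg0[lane + 1] = t;
      }

      /* Step 2: swap even lanes between arg0 and arg1. */
      for (unsigned lane = 0; lane < NUM_LANES; lane += 2) {
         uint32_t t = arg0[lane];
         arg0[lane] = arg1[lane];
         arg1[lane] = t;
      }
   }
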
 
@@ -741,7 +741,7 @@ emit_ps_null_export(nir_builder *b, lower_ps_state *s)
       V_008DFC_SQ_EXP_MRT : V_008DFC_SQ_EXP_NULL;
 
    nir_intrinsic_instr *intrin =
-      nir_export_amd(b, nir_ssa_undef(b, 4, 32),
+      nir_export_amd(b, nir_undef(b, 4, 32),
                      .base = target,
                      .flags = AC_EXP_FLAG_VALID_MASK | AC_EXP_FLAG_DONE);
   /* Avoid the builder setting the write mask to 0xf. */
@@ -798,7 +798,7 @@ export_ps_outputs(nir_builder *b, lower_ps_state *s)
          unsigned target = get_ps_color_export_target(s);
 
          s->exp[s->exp_num++] =
-            nir_export_amd(b, nir_ssa_undef(b, 4, 32), .base = target);
+            nir_export_amd(b, nir_undef(b, 4, 32), .base = target);
       }
    } else {
       if (s->output_types[FRAG_RESULT_COLOR] != nir_type_invalid) {
index 42925b9..ce5b94f 100644 (file)
 #include "nir_builder.h"
 #include "amdgfxregs.h"
 
-static nir_ssa_def *get_field(nir_builder *b, nir_ssa_def *desc, unsigned index, unsigned mask)
+static nir_def *get_field(nir_builder *b, nir_def *desc, unsigned index, unsigned mask)
 {
    return nir_ubfe_imm(b, nir_channel(b, desc, index), ffs(mask) - 1, util_bitcount(mask));
 }
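
get_field extracts one register-style bitfield from a descriptor dword: ffs(mask) - 1 gives the shift and the popcount gives the width (the ~C_* arguments turn clear-masks into field masks). A standalone equivalent using GCC/Clang builtins (sketch):

   #include <stdint.h>
   #include <strings.h> /* ffs() */

   /* Extract the bitfield selected by a nonzero mask narrower than 32 bits. */
   static uint32_t get_field_u32(uint32_t dword, uint32_t mask)
   {
      unsigned shift = ffs(mask) - 1;
      unsigned width = __builtin_popcount(mask);
      return (dword >> shift) & ((1u << width) - 1);
   }
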
 
-static nir_ssa_def *handle_null_desc(nir_builder *b, nir_ssa_def *desc, nir_ssa_def *value)
+static nir_def *handle_null_desc(nir_builder *b, nir_def *desc, nir_def *value)
 {
-   nir_ssa_def *is_null = nir_ieq_imm(b, nir_channel(b, desc, 1), 0);
+   nir_def *is_null = nir_ieq_imm(b, nir_channel(b, desc, 1), 0);
    return nir_bcsel(b, is_null, nir_imm_int(b, 0), value);
 }
 
-static nir_ssa_def *query_samples(nir_builder *b, nir_ssa_def *desc, enum glsl_sampler_dim dim)
+static nir_def *query_samples(nir_builder *b, nir_def *desc, enum glsl_sampler_dim dim)
 {
-   nir_ssa_def *samples;
+   nir_def *samples;
 
    if (dim == GLSL_SAMPLER_DIM_MS) {
       /* LAST_LEVEL contains log2(num_samples). */
@@ -38,22 +38,22 @@ static nir_ssa_def *query_samples(nir_builder *b, nir_ssa_def *desc, enum glsl_s
    return handle_null_desc(b, desc, samples);
 }
 
-static nir_ssa_def *query_levels(nir_builder *b, nir_ssa_def *desc)
+static nir_def *query_levels(nir_builder *b, nir_def *desc)
 {
-   nir_ssa_def *base_level = get_field(b, desc, 3, ~C_00A00C_BASE_LEVEL);
-   nir_ssa_def *last_level = get_field(b, desc, 3, ~C_00A00C_LAST_LEVEL);
+   nir_def *base_level = get_field(b, desc, 3, ~C_00A00C_BASE_LEVEL);
+   nir_def *last_level = get_field(b, desc, 3, ~C_00A00C_LAST_LEVEL);
 
-   nir_ssa_def *levels = nir_iadd_imm(b, nir_isub(b, last_level, base_level), 1);
+   nir_def *levels = nir_iadd_imm(b, nir_isub(b, last_level, base_level), 1);
 
    return handle_null_desc(b, desc, levels);
 }
 
-static nir_ssa_def *
-lower_query_size(nir_builder *b, nir_ssa_def *desc, nir_src *lod,
+static nir_def *
+lower_query_size(nir_builder *b, nir_def *desc, nir_src *lod,
                  enum glsl_sampler_dim dim, bool is_array, enum amd_gfx_level gfx_level)
 {
    if (dim == GLSL_SAMPLER_DIM_BUF) {
-      nir_ssa_def *size = nir_channel(b, desc, 2);
+      nir_def *size = nir_channel(b, desc, 2);
 
       if (gfx_level == GFX8) {
          /* On GFX8, the descriptor contains the size in bytes,
@@ -72,14 +72,14 @@ lower_query_size(nir_builder *b, nir_ssa_def *desc, nir_src *lod,
    bool has_width = dim != GLSL_SAMPLER_DIM_CUBE;
    bool has_height = dim != GLSL_SAMPLER_DIM_1D;
    bool has_depth = dim == GLSL_SAMPLER_DIM_3D;
-   nir_ssa_def *width = NULL, *height = NULL, *layers = NULL, *base_array = NULL;
-   nir_ssa_def *last_array = NULL, *depth = NULL;
+   nir_def *width = NULL, *height = NULL, *layers = NULL, *base_array = NULL;
+   nir_def *last_array = NULL, *depth = NULL;
 
    /* Get the width, height, depth, layers. */
    if (gfx_level >= GFX10) {
       if (has_width) {
-         nir_ssa_def *width_lo = get_field(b, desc, 1, ~C_00A004_WIDTH_LO);
-         nir_ssa_def *width_hi = get_field(b, desc, 2, ~C_00A008_WIDTH_HI);
+         nir_def *width_lo = get_field(b, desc, 1, ~C_00A004_WIDTH_LO);
+         nir_def *width_hi = get_field(b, desc, 2, ~C_00A008_WIDTH_HI);
          /* Use iadd to get s_lshl2_add_u32 in the end. */
          width = nir_iadd(b, width_lo, nir_ishl_imm(b, width_hi, 2));
       }
@@ -115,8 +115,8 @@ lower_query_size(nir_builder *b, nir_ssa_def *desc, nir_src *lod,
     * the pitch for 2D. We need to set depth and last_array to 0 in that case.
     */
    if (gfx_level >= GFX10_3 && (has_depth || is_array)) {
-      nir_ssa_def *type = get_field(b, desc, 3, ~C_00A00C_TYPE);
-      nir_ssa_def *is_2d = nir_ieq_imm(b, type, V_008F1C_SQ_RSRC_IMG_2D);
+      nir_def *type = get_field(b, desc, 3, ~C_00A00C_TYPE);
+      nir_def *is_2d = nir_ieq_imm(b, type, V_008F1C_SQ_RSRC_IMG_2D);
 
       if (has_depth)
          depth = nir_bcsel(b, is_2d, nir_imm_int(b, 0), depth);
@@ -139,8 +139,8 @@ lower_query_size(nir_builder *b, nir_ssa_def *desc, nir_src *lod,
 
    /* Minify the dimensions according to base_level + lod. */
    if (dim != GLSL_SAMPLER_DIM_MS && dim != GLSL_SAMPLER_DIM_RECT) {
-      nir_ssa_def *base_level = get_field(b, desc, 3, ~C_00A00C_BASE_LEVEL);
-      nir_ssa_def *level = lod ? nir_iadd(b, base_level, lod->ssa) : base_level;
+      nir_def *base_level = get_field(b, desc, 3, ~C_00A00C_BASE_LEVEL);
+      nir_def *level = lod ? nir_iadd(b, base_level, lod->ssa) : base_level;
 
       if (has_width)
          width = nir_ushr(b, width, level);
@@ -165,16 +165,16 @@ lower_query_size(nir_builder *b, nir_ssa_def *desc, nir_src *lod,
 
    /* Special case for sliced storage 3D views which shouldn't be minified. */
    if (gfx_level >= GFX10 && has_depth) {
-      nir_ssa_def *uav3d =
+      nir_def *uav3d =
          nir_ieq_imm(b, get_field(b, desc, 5, ~C_00A014_ARRAY_PITCH), 1);
-      nir_ssa_def *layers_3d =
+      nir_def *layers_3d =
          nir_isub(b, get_field(b, desc, 4, ~C_00A010_DEPTH),
                      get_field(b, desc, 4, ~C_00A010_BASE_ARRAY));
       layers_3d = nir_iadd_imm(b, layers_3d, 1);
       depth = nir_bcsel(b, uav3d, layers_3d, depth);
    }
 
-   nir_ssa_def *result = NULL;
+   nir_def *result = NULL;
 
    /* Construct the result. */
    switch (dim) {
@@ -203,14 +203,14 @@ lower_query_size(nir_builder *b, nir_ssa_def *desc, nir_src *lod,
 static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
 {
    enum amd_gfx_level gfx_level = *(enum amd_gfx_level*)data;
-   nir_ssa_def *result = NULL, *dst = NULL;
+   nir_def *result = NULL, *dst = NULL;
 
    if (instr->type == nir_instr_type_intrinsic) {
       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
       const struct glsl_type *type;
       enum glsl_sampler_dim dim;
       bool is_array;
-      nir_ssa_def *desc = NULL;
+      nir_def *desc = NULL;
 
       dst = &intr->dest.ssa;
       b->cursor = nir_before_instr(instr);
@@ -265,7 +265,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
    } else if (instr->type == nir_instr_type_tex) {
       nir_tex_instr *tex = nir_instr_as_tex(instr);
       nir_tex_instr *new_tex;
-      nir_ssa_def *desc = NULL;
+      nir_def *desc = NULL;
       nir_src *lod = NULL;
 
       dst = &tex->dest.ssa;
@@ -326,7 +326,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
    if (!result)
       return false;
 
-   nir_ssa_def_rewrite_uses_after(dst, result, instr);
+   nir_def_rewrite_uses_after(dst, result, instr);
    nir_instr_remove(instr);
    return true;
 }
index af38090..5142a1f 100644 (file)
@@ -69,8 +69,8 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
    unsigned align_offset = nir_intrinsic_align_offset(intr) % align_mul;
 
    nir_src *src_offset = nir_get_io_offset_src(intr);
-   nir_ssa_def *offset = src_offset->ssa;
-   nir_ssa_def *result = &intr->dest.ssa;
+   nir_def *offset = src_offset->ssa;
+   nir_def *result = &intr->dest.ssa;
 
    /* Change the load to 32 bits per channel, update the channel count,
     * and increase the declared load alignment.
@@ -87,7 +87,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
       b->cursor = nir_after_instr(instr);
       result = nir_extract_bits(b, &result, 1, 0, num_components, bit_size);
 
-      nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, result,
+      nir_def_rewrite_uses_after(&intr->dest.ssa, result,
                                      result->parent_instr);
       return true;
    }
@@ -121,7 +121,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
       result = nir_extract_bits(b, &result, 1, comp_offset * bit_size,
                                 num_components, bit_size);
 
-      nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, result,
+      nir_def_rewrite_uses_after(&intr->dest.ssa, result,
                                      result->parent_instr);
       return true;
    }
@@ -138,10 +138,10 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
 
    /* We need to shift bits in the loaded vector by this number. */
    b->cursor = nir_after_instr(instr);
-   nir_ssa_def *shift = nir_ishl_imm(b, nir_iand_imm(b, offset, 0x3), 3);
-   nir_ssa_def *rev_shift32 = nir_isub_imm(b, 32, shift);
+   nir_def *shift = nir_ishl_imm(b, nir_iand_imm(b, offset, 0x3), 3);
+   nir_def *rev_shift32 = nir_isub_imm(b, 32, shift);
 
-   nir_ssa_def *elems[NIR_MAX_VEC_COMPONENTS];
+   nir_def *elems[NIR_MAX_VEC_COMPONENTS];
 
    /* "shift" can be only be one of: 0, 8, 16, 24
     *
@@ -170,7 +170,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
    if (intr->num_components >= 2) {
       /* Use the 64-bit algorithm as described above. */
       for (i = 0; i < intr->num_components / 2 - 1; i++) {
-         nir_ssa_def *qword1, *dword2;
+         nir_def *qword1, *dword2;
 
          qword1 = nir_pack_64_2x32_split(b,
                                          nir_channel(b, result, i * 2 + 0),
@@ -203,7 +203,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
    result = nir_vec(b, elems, intr->num_components);
    result = nir_extract_bits(b, &result, 1, 0, num_components, bit_size);
 
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, result,
+   nir_def_rewrite_uses_after(&intr->dest.ssa, result,
                                   result->parent_instr);
    return true;
 }
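
The byte-shift trick this pass builds in NIR reads more easily in scalar C. A minimal sketch, assuming the two dwords covering the unaligned value are already loaded (hypothetical helper, not part of the patch):

   #include <stdint.h>

   /* Reassemble a value that starts at an unaligned byte offset from two
    * dword-aligned loads. shift is (offset & 3) * 8, i.e. one of
    * 0, 8, 16, 24, matching the comment in the pass.
    */
   static uint32_t extract_unaligned(uint32_t lo, uint32_t hi, uint32_t byte_offset)
   {
      uint32_t shift = (byte_offset & 0x3) * 8;
      if (shift == 0)
         return lo; /* a shift by 32 is undefined in C, so special-case it */
      return (lo >> shift) | (hi << (32 - shift));
   }
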
index 2fa083d..61225b6 100644 (file)
@@ -22,25 +22,25 @@ typedef struct {
    unsigned num_entries;
 } lower_tsms_io_state;
 
-static nir_ssa_def *
+static nir_def *
 task_workgroup_index(nir_builder *b,
                      lower_tsms_io_state *s)
 {
-   nir_ssa_def *id = nir_load_workgroup_id(b, 32);
+   nir_def *id = nir_load_workgroup_id(b, 32);
 
-   nir_ssa_def *x = nir_channel(b, id, 0);
-   nir_ssa_def *y = nir_channel(b, id, 1);
-   nir_ssa_def *z = nir_channel(b, id, 2);
+   nir_def *x = nir_channel(b, id, 0);
+   nir_def *y = nir_channel(b, id, 1);
+   nir_def *z = nir_channel(b, id, 2);
 
-   nir_ssa_def *grid_size = nir_load_num_workgroups(b, 32);
-   nir_ssa_def *grid_size_x = nir_channel(b, grid_size, 0);
-   nir_ssa_def *grid_size_y = nir_channel(b, grid_size, 1);
+   nir_def *grid_size = nir_load_num_workgroups(b, 32);
+   nir_def *grid_size_x = nir_channel(b, grid_size, 0);
+   nir_def *grid_size_y = nir_channel(b, grid_size, 1);
 
    return nir_iadd(b, nir_imul(b, nir_imul(b, grid_size_x, grid_size_y), z),
                       nir_iadd(b, nir_imul(b, grid_size_x, y), x));
 }
 
-static nir_ssa_def *
+static nir_def *
 task_ring_entry_index(nir_builder *b,
                       lower_tsms_io_state *s)
 {
@@ -54,12 +54,12 @@ task_ring_entry_index(nir_builder *b,
     *   AND with num_entries - 1 to get the correct meaning.
     *   Note that num_entries must be a power of two.
     */
-   nir_ssa_def *ring_entry = nir_load_task_ring_entry_amd(b);
-   nir_ssa_def *idx = nir_iadd_nuw(b, ring_entry, task_workgroup_index(b, s));
+   nir_def *ring_entry = nir_load_task_ring_entry_amd(b);
+   nir_def *idx = nir_iadd_nuw(b, ring_entry, task_workgroup_index(b, s));
    return nir_iand_imm(b, idx, s->num_entries - 1);
 }
 
-static nir_ssa_def *
+static nir_def *
 task_draw_ready_bit(nir_builder *b,
                     lower_tsms_io_state *s)
 {
@@ -86,14 +86,14 @@ task_draw_ready_bit(nir_builder *b,
     * If the task shader doesn't write this bit, the HW hangs.
     */
 
-   nir_ssa_def *ring_entry = nir_load_task_ring_entry_amd(b);
-   nir_ssa_def *workgroup_index = task_workgroup_index(b, s);
+   nir_def *ring_entry = nir_load_task_ring_entry_amd(b);
+   nir_def *workgroup_index = task_workgroup_index(b, s);
 
-   nir_ssa_def *idx = nir_iadd_nuw(b, ring_entry, workgroup_index);
+   nir_def *idx = nir_iadd_nuw(b, ring_entry, workgroup_index);
    return nir_u2u8(b, nir_ubfe_imm(b, idx, util_bitcount(s->num_entries - 1), 1));
 }
 
-static nir_ssa_def *
+static nir_def *
 mesh_ring_entry_index(nir_builder *b,
                       lower_tsms_io_state *s)
 {
@@ -111,15 +111,15 @@ mesh_ring_entry_index(nir_builder *b,
 
 static void
 task_write_draw_ring(nir_builder *b,
-                     nir_ssa_def *store_val,
+                     nir_def *store_val,
                      unsigned const_off,
                      lower_tsms_io_state *s)
 {
-   nir_ssa_def *ptr = task_ring_entry_index(b, s);
-   nir_ssa_def *ring = nir_load_ring_task_draw_amd(b);
-   nir_ssa_def *scalar_off = nir_imul_imm(b, ptr, s->draw_entry_bytes);
-   nir_ssa_def *vector_off = nir_imm_int(b, 0);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *ptr = task_ring_entry_index(b, s);
+   nir_def *ring = nir_load_ring_task_draw_amd(b);
+   nir_def *scalar_off = nir_imul_imm(b, ptr, s->draw_entry_bytes);
+   nir_def *vector_off = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    nir_store_buffer_amd(b, store_val, ring, vector_off, scalar_off, zero,
                         .base = const_off, .memory_modes = nir_var_shader_out,
@@ -139,7 +139,7 @@ filter_task_intrinsics(const nir_instr *instr,
           intrin->intrinsic == nir_intrinsic_load_task_payload;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_task_launch_mesh_workgroups(nir_builder *b,
                                   nir_intrinsic_instr *intrin,
                                   lower_tsms_io_state *s)
@@ -160,13 +160,13 @@ lower_task_launch_mesh_workgroups(nir_builder *b,
                                          nir_var_mem_ssbo | nir_var_mem_global);
 
    /* On the first invocation, write the full draw ring entry. */
-   nir_ssa_def *invocation_index = nir_load_local_invocation_index(b);
+   nir_def *invocation_index = nir_load_local_invocation_index(b);
    nir_if *if_invocation_index_zero = nir_push_if(b, nir_ieq_imm(b, invocation_index, 0));
    {
-      nir_ssa_def *dimensions = intrin->src[0].ssa;
-      nir_ssa_def *x = nir_channel(b, dimensions, 0);
-      nir_ssa_def *y = nir_channel(b, dimensions, 1);
-      nir_ssa_def *z = nir_channel(b, dimensions, 2);
+      nir_def *dimensions = intrin->src[0].ssa;
+      nir_def *x = nir_channel(b, dimensions, 0);
+      nir_def *y = nir_channel(b, dimensions, 1);
+      nir_def *z = nir_channel(b, dimensions, 2);
 
       /* When either Y or Z is 0, also set X to 0.
        * Not necessary, but speeds up the job of the CP.
@@ -185,7 +185,7 @@ lower_task_launch_mesh_workgroups(nir_builder *b,
    return NIR_LOWER_INSTR_PROGRESS_REPLACE;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_task_payload_store(nir_builder *b,
                          nir_intrinsic_instr *intrin,
                          lower_tsms_io_state *s)
@@ -193,12 +193,12 @@ lower_task_payload_store(nir_builder *b,
    unsigned write_mask = nir_intrinsic_write_mask(intrin);
    unsigned base = nir_intrinsic_base(intrin);
 
-   nir_ssa_def *store_val = intrin->src[0].ssa;
-   nir_ssa_def *addr = intrin->src[1].ssa;
-   nir_ssa_def *ring = nir_load_ring_task_payload_amd(b);
-   nir_ssa_def *ptr = task_ring_entry_index(b, s);
-   nir_ssa_def *ring_off = nir_imul_imm(b, ptr, s->payload_entry_bytes);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *store_val = intrin->src[0].ssa;
+   nir_def *addr = intrin->src[1].ssa;
+   nir_def *ring = nir_load_ring_task_payload_amd(b);
+   nir_def *ptr = task_ring_entry_index(b, s);
+   nir_def *ring_off = nir_imul_imm(b, ptr, s->payload_entry_bytes);
+   nir_def *zero = nir_imm_int(b, 0);
 
    nir_store_buffer_amd(b, store_val, ring, addr, ring_off, zero, .base = base,
                         .write_mask = write_mask,
@@ -208,7 +208,7 @@ lower_task_payload_store(nir_builder *b,
    return NIR_LOWER_INSTR_PROGRESS_REPLACE;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_taskmesh_payload_load(nir_builder *b,
                             nir_intrinsic_instr *intrin,
                             lower_tsms_io_state *s)
@@ -217,22 +217,22 @@ lower_taskmesh_payload_load(nir_builder *b,
    unsigned num_components = intrin->dest.ssa.num_components;
    unsigned bit_size = intrin->dest.ssa.bit_size;
 
-   nir_ssa_def *ptr =
+   nir_def *ptr =
       b->shader->info.stage == MESA_SHADER_TASK ?
       task_ring_entry_index(b, s) :
       mesh_ring_entry_index(b, s);
 
-   nir_ssa_def *addr = intrin->src[0].ssa;
-   nir_ssa_def *ring = nir_load_ring_task_payload_amd(b);
-   nir_ssa_def *ring_off = nir_imul_imm(b, ptr, s->payload_entry_bytes);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *addr = intrin->src[0].ssa;
+   nir_def *ring = nir_load_ring_task_payload_amd(b);
+   nir_def *ring_off = nir_imul_imm(b, ptr, s->payload_entry_bytes);
+   nir_def *zero = nir_imm_int(b, 0);
 
    return nir_load_buffer_amd(b, num_components, bit_size, ring, addr, ring_off, zero, .base = base,
                               .memory_modes = nir_var_mem_task_payload,
                               .access = ACCESS_COHERENT);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_task_intrinsics(nir_builder *b,
                       nir_instr *instr,
                       void *state)
@@ -293,7 +293,7 @@ filter_mesh_input_load(const nir_instr *instr,
    return intrin->intrinsic == nir_intrinsic_load_task_payload;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_mesh_intrinsics(nir_builder *b,
                       nir_instr *instr,
                       void *state)
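
The ring arithmetic in this file is easier to follow in scalar form. A minimal sketch, assuming num_entries is a power of two as the comments require (hypothetical helpers, not part of the patch):

   #include <stdint.h>

   /* Flatten the 3D workgroup ID like task_workgroup_index. */
   static uint32_t workgroup_index(uint32_t x, uint32_t y, uint32_t z,
                                   uint32_t grid_x, uint32_t grid_y)
   {
      return grid_x * grid_y * z + grid_x * y + x;
   }

   /* Wrap into the ring like task_ring_entry_index. */
   static uint32_t ring_entry(uint32_t base, uint32_t wg_index, uint32_t num_entries)
   {
      return (base + wg_index) & (num_entries - 1);
   }

   /* task_draw_ready_bit: bit log2(num_entries) of the unwrapped index,
    * which flips on every trip around the ring. */
   static uint32_t draw_ready_bit(uint32_t base, uint32_t wg_index, uint32_t num_entries)
   {
      return ((base + wg_index) >> __builtin_ctz(num_entries)) & 1;
   }
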
index eb79653..71cbb53 100644 (file)
@@ -238,13 +238,13 @@ lower_ls_output_store(nir_builder *b,
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *vertex_idx = nir_load_local_invocation_index(b);
-   nir_ssa_def *base_off_var = nir_imul(b, vertex_idx, nir_load_lshs_vertex_stride_amd(b));
+   nir_def *vertex_idx = nir_load_local_invocation_index(b);
+   nir_def *base_off_var = nir_imul(b, vertex_idx, nir_load_lshs_vertex_stride_amd(b));
 
-   nir_ssa_def *io_off = ac_nir_calc_io_offset(b, intrin, nir_imm_int(b, 16u), 4u, st->map_io);
+   nir_def *io_off = ac_nir_calc_io_offset(b, intrin, nir_imm_int(b, 16u), 4u, st->map_io);
    unsigned write_mask = nir_intrinsic_write_mask(intrin);
 
-   nir_ssa_def *off = nir_iadd_nuw(b, base_off_var, io_off);
+   nir_def *off = nir_iadd_nuw(b, base_off_var, io_off);
    nir_store_shared(b, intrin->src[0].ssa, off, .write_mask = write_mask);
 
    /* NOTE: don't remove the store_output intrinsic on GFX9+ when tcs_in_out_eq,
@@ -285,27 +285,27 @@ filter_load_tcs_per_vertex_input(const nir_instr *instr,
    return !can_use_temps;
 }
 
-static nir_ssa_def *
+static nir_def *
 hs_per_vertex_input_lds_offset(nir_builder *b,
                                lower_tess_io_state *st,
                                nir_intrinsic_instr *instr)
 {
-   nir_ssa_def *tcs_in_vtxcnt = nir_load_patch_vertices_in(b);
-   nir_ssa_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
-   nir_ssa_def *vertex_index = nir_get_io_arrayed_index_src(instr)->ssa;
+   nir_def *tcs_in_vtxcnt = nir_load_patch_vertices_in(b);
+   nir_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
+   nir_def *vertex_index = nir_get_io_arrayed_index_src(instr)->ssa;
 
-   nir_ssa_def *stride = nir_load_lshs_vertex_stride_amd(b);
-   nir_ssa_def *tcs_in_patch_stride = nir_imul(b, tcs_in_vtxcnt, stride);
-   nir_ssa_def *vertex_index_off = nir_imul(b, vertex_index, stride);
+   nir_def *stride = nir_load_lshs_vertex_stride_amd(b);
+   nir_def *tcs_in_patch_stride = nir_imul(b, tcs_in_vtxcnt, stride);
+   nir_def *vertex_index_off = nir_imul(b, vertex_index, stride);
 
-   nir_ssa_def *tcs_in_current_patch_offset = nir_imul(b, rel_patch_id, tcs_in_patch_stride);
+   nir_def *tcs_in_current_patch_offset = nir_imul(b, rel_patch_id, tcs_in_patch_stride);
 
-   nir_ssa_def *io_offset = ac_nir_calc_io_offset(b, instr, nir_imm_int(b, 16u), 4u, st->map_io);
+   nir_def *io_offset = ac_nir_calc_io_offset(b, instr, nir_imm_int(b, 16u), 4u, st->map_io);
 
    return nir_iadd_nuw(b, nir_iadd_nuw(b, tcs_in_current_patch_offset, vertex_index_off), io_offset);
 }
 
-static nir_ssa_def *
+static nir_def *
 hs_output_lds_offset(nir_builder *b,
                      lower_tess_io_state *st,
                      nir_intrinsic_instr *intrin)
@@ -318,28 +318,28 @@ hs_output_lds_offset(nir_builder *b,
    unsigned pervertex_output_patch_size = b->shader->info.tess.tcs_vertices_out * output_vertex_size;
    unsigned output_patch_stride = pervertex_output_patch_size + st->tcs_num_reserved_patch_outputs * 16u;
 
-   nir_ssa_def *off = intrin
+   nir_def *off = intrin
                     ? ac_nir_calc_io_offset(b, intrin, nir_imm_int(b, 16u), 4u, st->map_io)
                     : nir_imm_int(b, 0);
 
-   nir_ssa_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
-   nir_ssa_def *patch_offset = nir_imul_imm(b, rel_patch_id, output_patch_stride);
+   nir_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
+   nir_def *patch_offset = nir_imul_imm(b, rel_patch_id, output_patch_stride);
 
-   nir_ssa_def *output_patch_offset;
+   nir_def *output_patch_offset;
    if (st->tcs_no_inputs_in_lds)
       output_patch_offset = patch_offset;
    else {
-      nir_ssa_def *tcs_in_vtxcnt = nir_load_patch_vertices_in(b);
-      nir_ssa_def *tcs_num_patches = nir_load_tcs_num_patches_amd(b);
-      nir_ssa_def *input_patch_size =
+      nir_def *tcs_in_vtxcnt = nir_load_patch_vertices_in(b);
+      nir_def *tcs_num_patches = nir_load_tcs_num_patches_amd(b);
+      nir_def *input_patch_size =
          nir_imul(b, tcs_in_vtxcnt, nir_load_lshs_vertex_stride_amd(b));
-      nir_ssa_def *output_patch0_offset = nir_imul(b, input_patch_size, tcs_num_patches);
+      nir_def *output_patch0_offset = nir_imul(b, input_patch_size, tcs_num_patches);
       output_patch_offset = nir_iadd_nuw(b, patch_offset, output_patch0_offset);
    }
 
    if (per_vertex) {
-      nir_ssa_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
-      nir_ssa_def *vertex_index_off = nir_imul_imm(b, vertex_index, output_vertex_size);
+      nir_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
+      nir_def *vertex_index_off = nir_imul_imm(b, vertex_index, output_vertex_size);
 
       off = nir_iadd_nuw(b, off, vertex_index_off);
       return nir_iadd_nuw(b, off, output_patch_offset);
@@ -349,51 +349,51 @@ hs_output_lds_offset(nir_builder *b,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 hs_per_vertex_output_vmem_offset(nir_builder *b,
                                  lower_tess_io_state *st,
                                  nir_intrinsic_instr *intrin)
 {
-   nir_ssa_def *out_vertices_per_patch = b->shader->info.stage == MESA_SHADER_TESS_CTRL
+   nir_def *out_vertices_per_patch = b->shader->info.stage == MESA_SHADER_TESS_CTRL
                                          ? nir_imm_int(b, b->shader->info.tess.tcs_vertices_out)
                                          : nir_load_patch_vertices_in(b);
 
-   nir_ssa_def *tcs_num_patches = nir_load_tcs_num_patches_amd(b);
-   nir_ssa_def *attr_stride = nir_imul(b, tcs_num_patches, nir_imul_imm(b, out_vertices_per_patch, 16u));
-   nir_ssa_def *io_offset = ac_nir_calc_io_offset(b, intrin, attr_stride, 4u, st->map_io);
+   nir_def *tcs_num_patches = nir_load_tcs_num_patches_amd(b);
+   nir_def *attr_stride = nir_imul(b, tcs_num_patches, nir_imul_imm(b, out_vertices_per_patch, 16u));
+   nir_def *io_offset = ac_nir_calc_io_offset(b, intrin, attr_stride, 4u, st->map_io);
 
-   nir_ssa_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
-   nir_ssa_def *patch_offset = nir_imul(b, rel_patch_id, nir_imul_imm(b, out_vertices_per_patch, 16u));
+   nir_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
+   nir_def *patch_offset = nir_imul(b, rel_patch_id, nir_imul_imm(b, out_vertices_per_patch, 16u));
 
-   nir_ssa_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
-   nir_ssa_def *vertex_index_off = nir_imul_imm(b, vertex_index, 16u);
+   nir_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
+   nir_def *vertex_index_off = nir_imul_imm(b, vertex_index, 16u);
 
    return nir_iadd_nuw(b, nir_iadd_nuw(b, patch_offset, vertex_index_off), io_offset);
 }
 
-static nir_ssa_def *
+static nir_def *
 hs_per_patch_output_vmem_offset(nir_builder *b,
                                 lower_tess_io_state *st,
                                 nir_intrinsic_instr *intrin,
                                 unsigned const_base_offset)
 {
-   nir_ssa_def *tcs_num_patches = nir_load_tcs_num_patches_amd(b);
-   nir_ssa_def *per_patch_data_offset = nir_load_hs_out_patch_data_offset_amd(b);
+   nir_def *tcs_num_patches = nir_load_tcs_num_patches_amd(b);
+   nir_def *per_patch_data_offset = nir_load_hs_out_patch_data_offset_amd(b);
 
-   nir_ssa_def * off = intrin
+   nir_def * off = intrin
                     ? ac_nir_calc_io_offset(b, intrin, nir_imul_imm(b, tcs_num_patches, 16u), 4u, st->map_io)
                     : nir_imm_int(b, 0);
 
    if (const_base_offset)
       off = nir_iadd_nuw(b, off, nir_imul_imm(b, tcs_num_patches, const_base_offset));
 
-   nir_ssa_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
-   nir_ssa_def *patch_offset = nir_imul_imm(b, rel_patch_id, 16u);
+   nir_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
+   nir_def *patch_offset = nir_imul_imm(b, rel_patch_id, 16u);
    off = nir_iadd_nuw(b, off, per_patch_data_offset);
    return nir_iadd_nuw(b, off, patch_offset);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_hs_per_vertex_input_load(nir_builder *b,
                                nir_instr *instr,
                                void *state)
@@ -401,11 +401,11 @@ lower_hs_per_vertex_input_load(nir_builder *b,
    lower_tess_io_state *st = (lower_tess_io_state *) state;
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-   nir_ssa_def *off = hs_per_vertex_input_lds_offset(b, st, intrin);
+   nir_def *off = hs_per_vertex_input_lds_offset(b, st, intrin);
    return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_hs_output_store(nir_builder *b,
                       nir_intrinsic_instr *intrin,
                       lower_tess_io_state *st)
@@ -414,7 +414,7 @@ lower_hs_output_store(nir_builder *b,
           intrin->intrinsic == nir_intrinsic_store_output);
 
    nir_io_semantics semantics = nir_intrinsic_io_semantics(intrin);
-   nir_ssa_def *store_val = intrin->src[0].ssa;
+   nir_def *store_val = intrin->src[0].ssa;
    unsigned component = nir_intrinsic_component(intrin);
    unsigned write_mask = nir_intrinsic_write_mask(intrin);
    bool is_tess_factor = semantics.location == VARYING_SLOT_TESS_LEVEL_INNER ||
@@ -437,24 +437,24 @@ lower_hs_output_store(nir_builder *b,
    }
 
    if (write_to_vmem) {
-      nir_ssa_def *vmem_off = intrin->intrinsic == nir_intrinsic_store_per_vertex_output
+      nir_def *vmem_off = intrin->intrinsic == nir_intrinsic_store_per_vertex_output
                             ? hs_per_vertex_output_vmem_offset(b, st, intrin)
                             : hs_per_patch_output_vmem_offset(b, st, intrin, 0);
 
-      nir_ssa_def *hs_ring_tess_offchip = nir_load_ring_tess_offchip_amd(b);
-      nir_ssa_def *offchip_offset = nir_load_ring_tess_offchip_offset_amd(b);
-      nir_ssa_def *zero = nir_imm_int(b, 0);
+      nir_def *hs_ring_tess_offchip = nir_load_ring_tess_offchip_amd(b);
+      nir_def *offchip_offset = nir_load_ring_tess_offchip_offset_amd(b);
+      nir_def *zero = nir_imm_int(b, 0);
       nir_store_buffer_amd(b, store_val, hs_ring_tess_offchip, vmem_off, offchip_offset, zero,
                            .write_mask = write_mask, .memory_modes = nir_var_shader_out,
                            .access = ACCESS_COHERENT);
    }
 
    if (write_to_lds) {
-      nir_ssa_def *lds_off = hs_output_lds_offset(b, st, intrin);
+      nir_def *lds_off = hs_output_lds_offset(b, st, intrin);
       nir_store_shared(b, store_val, lds_off, .write_mask = write_mask);
    }
 
-   nir_ssa_def *ret = NIR_LOWER_INSTR_PROGRESS_REPLACE;
+   nir_def *ret = NIR_LOWER_INSTR_PROGRESS_REPLACE;
 
    if (is_tess_factor && st->tcs_pass_tessfactors_by_reg) {
       if (st->tcs_emit_tess_factor_write) {
@@ -474,12 +474,12 @@ lower_hs_output_store(nir_builder *b,
    return ret;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_hs_output_load(nir_builder *b,
                      nir_intrinsic_instr *intrin,
                      lower_tess_io_state *st)
 {
-   nir_ssa_def *off = hs_output_lds_offset(b, st, intrin);
+   nir_def *off = hs_output_lds_offset(b, st, intrin);
    return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
 }
 
@@ -505,7 +505,7 @@ update_hs_barrier(nir_intrinsic_instr *intrin, lower_tess_io_state *st)
       nir_intrinsic_set_memory_scope(intrin, SCOPE_SUBGROUP);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_hs_output_access(nir_builder *b,
                        nir_instr *instr,
                        void *state)
@@ -571,7 +571,7 @@ hs_emit_write_tess_factors(nir_shader *shader,
                          .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_shared);
    }
 
-   nir_ssa_def *invocation_id = nir_load_invocation_id(b);
+   nir_def *invocation_id = nir_load_invocation_id(b);
 
    /* Only the 1st invocation of each patch needs to do this. */
    nir_if *invocation_id_zero = nir_push_if(b, nir_ieq_imm(b, invocation_id, 0));
@@ -586,8 +586,8 @@ hs_emit_write_tess_factors(nir_shader *shader,
    const bool tess_lvl_in_written = st->tcs_tess_lvl_in_loc >= 0;
    const bool tess_lvl_out_written = st->tcs_tess_lvl_out_loc >= 0;
 
-   nir_ssa_def *tessfactors_outer = NULL;
-   nir_ssa_def *tessfactors_inner = NULL;
+   nir_def *tessfactors_outer = NULL;
+   nir_def *tessfactors_inner = NULL;
    if (st->tcs_pass_tessfactors_by_reg) {
       if (tess_lvl_out_written) {
          tessfactors_outer = nir_load_var(b, st->tcs_tess_level_outer);
@@ -600,7 +600,7 @@ hs_emit_write_tess_factors(nir_shader *shader,
       }
    } else {
       /* Base LDS address of per-patch outputs in the current patch. */
-      nir_ssa_def *lds_base = hs_output_lds_offset(b, st, NULL);
+      nir_def *lds_base = hs_output_lds_offset(b, st, NULL);
 
       /* Load all tessellation factors (aka. tess levels) from LDS. */
       if (tess_lvl_out_written) {
@@ -621,18 +621,18 @@ hs_emit_write_tess_factors(nir_shader *shader,
       tessfactors_inner = nir_imm_zero(b, inner_comps, 32);
 
    /* The descriptor where tess factors have to be stored by the shader. */
-   nir_ssa_def *tessfactor_ring = nir_load_ring_tess_factors_amd(b);
+   nir_def *tessfactor_ring = nir_load_ring_tess_factors_amd(b);
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
-   nir_ssa_def *tess_factors_base = nir_load_ring_tess_factors_offset_amd(b);
-   nir_ssa_def *tess_factors_offset = nir_imul_imm(b, rel_patch_id, (inner_comps + outer_comps) * 4u);
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *rel_patch_id = nir_load_tess_rel_patch_id_amd(b);
+   nir_def *tess_factors_base = nir_load_ring_tess_factors_offset_amd(b);
+   nir_def *tess_factors_offset = nir_imul_imm(b, rel_patch_id, (inner_comps + outer_comps) * 4u);
    unsigned tess_factors_const_offset = 0;
 
    if (st->gfx_level <= GFX8) {
       /* Store the dynamic HS control word. */
       nir_if *rel_patch_id_zero = nir_push_if(b, nir_ieq_imm(b, rel_patch_id, 0));
-      nir_ssa_def *ctrlw = nir_imm_int(b, 0x80000000u);
+      nir_def *ctrlw = nir_imm_int(b, 0x80000000u);
       nir_store_buffer_amd(b, ctrlw, tessfactor_ring, zero, tess_factors_base, zero,
                            .access = ACCESS_COHERENT);
       tess_factors_const_offset += 4;
@@ -642,11 +642,11 @@ hs_emit_write_tess_factors(nir_shader *shader,
    /* Store tess factors for the tessellator */
    if (shader->info.tess._primitive_mode == TESS_PRIMITIVE_ISOLINES) {
       /* LINES reversal */
-      nir_ssa_def *t = nir_vec2(b, nir_channel(b, tessfactors_outer, 1), nir_channel(b, tessfactors_outer, 0));
+      nir_def *t = nir_vec2(b, nir_channel(b, tessfactors_outer, 1), nir_channel(b, tessfactors_outer, 0));
       nir_store_buffer_amd(b, t, tessfactor_ring, tess_factors_offset, tess_factors_base, zero,
                            .base = tess_factors_const_offset, .access = ACCESS_COHERENT);
    } else if (shader->info.tess._primitive_mode == TESS_PRIMITIVE_TRIANGLES) {
-      nir_ssa_def *t = nir_vec4(b, nir_channel(b, tessfactors_outer, 0), nir_channel(b, tessfactors_outer, 1),
+      nir_def *t = nir_vec4(b, nir_channel(b, tessfactors_outer, 0), nir_channel(b, tessfactors_outer, 1),
                                 nir_channel(b, tessfactors_outer, 2), nir_channel(b, tessfactors_inner, 0));
       nir_store_buffer_amd(b, t, tessfactor_ring, tess_factors_offset, tess_factors_base, zero,
                            .base = tess_factors_const_offset, .access = ACCESS_COHERENT);
@@ -659,11 +659,11 @@ hs_emit_write_tess_factors(nir_shader *shader,
 
    if (st->tes_reads_tessfactors) {
       /* Store to offchip for TES to read - only if TES actually reads them */
-      nir_ssa_def *hs_ring_tess_offchip = nir_load_ring_tess_offchip_amd(b);
-      nir_ssa_def *offchip_offset = nir_load_ring_tess_offchip_offset_amd(b);
+      nir_def *hs_ring_tess_offchip = nir_load_ring_tess_offchip_amd(b);
+      nir_def *offchip_offset = nir_load_ring_tess_offchip_offset_amd(b);
 
       if (tess_lvl_out_written) {
-         nir_ssa_def *vmem_off_outer =
+         nir_def *vmem_off_outer =
             hs_per_patch_output_vmem_offset(b, st, NULL, st->tcs_tess_lvl_out_loc);
 
          nir_store_buffer_amd(b, tessfactors_outer, hs_ring_tess_offchip,
@@ -673,7 +673,7 @@ hs_emit_write_tess_factors(nir_shader *shader,
       }
 
       if (inner_comps && tess_lvl_in_written) {
-         nir_ssa_def *vmem_off_inner =
+         nir_def *vmem_off_inner =
             hs_per_patch_output_vmem_offset(b, st, NULL, st->tcs_tess_lvl_in_loc);
 
          nir_store_buffer_amd(b, tessfactors_inner, hs_ring_tess_offchip,
@@ -688,7 +688,7 @@ hs_emit_write_tess_factors(nir_shader *shader,
    nir_metadata_preserve(impl, nir_metadata_none);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_tes_input_load(nir_builder *b,
                      nir_instr *instr,
                      void *state)
@@ -696,13 +696,13 @@ lower_tes_input_load(nir_builder *b,
    lower_tess_io_state *st = (lower_tess_io_state *) state;
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-   nir_ssa_def *offchip_ring = nir_load_ring_tess_offchip_amd(b);
-   nir_ssa_def *offchip_offset = nir_load_ring_tess_offchip_offset_amd(b);
-   nir_ssa_def *off = intrin->intrinsic == nir_intrinsic_load_per_vertex_input
+   nir_def *offchip_ring = nir_load_ring_tess_offchip_amd(b);
+   nir_def *offchip_offset = nir_load_ring_tess_offchip_offset_amd(b);
+   nir_def *off = intrin->intrinsic == nir_intrinsic_load_per_vertex_input
                     ? hs_per_vertex_output_vmem_offset(b, st, intrin)
                     : hs_per_patch_output_vmem_offset(b, st, intrin, 0);
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    return nir_load_buffer_amd(b, intrin->dest.ssa.num_components,
                               intrin->dest.ssa.bit_size, offchip_ring,
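
The LDS layout computed by hs_per_vertex_input_lds_offset above is [patch][vertex][attribute]. A minimal sketch with all strides in bytes (hypothetical helper, not part of the patch):

   static unsigned hs_input_lds_offset(unsigned rel_patch_id, unsigned vertex_index,
                                       unsigned vertices_per_patch,
                                       unsigned vertex_stride, unsigned io_offset)
   {
      unsigned patch_stride = vertices_per_patch * vertex_stride;
      return rel_patch_id * patch_stride + vertex_index * vertex_stride + io_offset;
   }
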
index faf307a..940e76e 100644 (file)
  * the selcoords major axis.
  */
 static void
-build_cube_select(nir_builder *b, nir_ssa_def *ma, nir_ssa_def *id, nir_ssa_def *deriv,
-                  nir_ssa_def **out_ma, nir_ssa_def **out_sc, nir_ssa_def **out_tc)
+build_cube_select(nir_builder *b, nir_def *ma, nir_def *id, nir_def *deriv,
+                  nir_def **out_ma, nir_def **out_sc, nir_def **out_tc)
 {
-   nir_ssa_def *deriv_x = nir_channel(b, deriv, 0);
-   nir_ssa_def *deriv_y = nir_channel(b, deriv, 1);
-   nir_ssa_def *deriv_z = nir_channel(b, deriv, 2);
+   nir_def *deriv_x = nir_channel(b, deriv, 0);
+   nir_def *deriv_y = nir_channel(b, deriv, 1);
+   nir_def *deriv_z = nir_channel(b, deriv, 2);
 
-   nir_ssa_def *is_ma_positive = nir_fge_imm(b, ma, 0.0);
-   nir_ssa_def *sgn_ma =
+   nir_def *is_ma_positive = nir_fge_imm(b, ma, 0.0);
+   nir_def *sgn_ma =
       nir_bcsel(b, is_ma_positive, nir_imm_float(b, 1.0), nir_imm_float(b, -1.0));
-   nir_ssa_def *neg_sgn_ma = nir_fneg(b, sgn_ma);
+   nir_def *neg_sgn_ma = nir_fneg(b, sgn_ma);
 
-   nir_ssa_def *is_ma_z = nir_fge_imm(b, id, 4.0);
-   nir_ssa_def *is_ma_y = nir_fge_imm(b, id, 2.0);
+   nir_def *is_ma_z = nir_fge_imm(b, id, 4.0);
+   nir_def *is_ma_y = nir_fge_imm(b, id, 2.0);
    is_ma_y = nir_iand(b, is_ma_y, nir_inot(b, is_ma_z));
-   nir_ssa_def *is_not_ma_x = nir_ior(b, is_ma_z, is_ma_y);
+   nir_def *is_not_ma_x = nir_ior(b, is_ma_z, is_ma_y);
 
    /* Select sc */
-   nir_ssa_def *tmp = nir_bcsel(b, is_not_ma_x, deriv_x, deriv_z);
-   nir_ssa_def *sgn =
+   nir_def *tmp = nir_bcsel(b, is_not_ma_x, deriv_x, deriv_z);
+   nir_def *sgn =
       nir_bcsel(b, is_ma_y, nir_imm_float(b, 1.0), nir_bcsel(b, is_ma_z, sgn_ma, neg_sgn_ma));
    *out_sc = nir_fmul(b, tmp, sgn);
 
@@ -69,10 +69,10 @@ build_cube_select(nir_builder *b, nir_ssa_def *ma, nir_ssa_def *id, nir_ssa_def
 }
 
 static void
-prepare_cube_coords(nir_builder *b, nir_tex_instr *tex, nir_ssa_def **coord, nir_src *ddx,
+prepare_cube_coords(nir_builder *b, nir_tex_instr *tex, nir_def **coord, nir_src *ddx,
                     nir_src *ddy, const ac_nir_lower_tex_options *options)
 {
-   nir_ssa_def *coords[NIR_MAX_VEC_COMPONENTS] = {0};
+   nir_def *coords[NIR_MAX_VEC_COMPONENTS] = {0};
    for (unsigned i = 0; i < (*coord)->num_components; i++)
       coords[i] = nir_channel(b, *coord, i);
 
@@ -98,12 +98,12 @@ prepare_cube_coords(nir_builder *b, nir_tex_instr *tex, nir_ssa_def **coord, nir
    if (tex->is_array && options->gfx_level <= GFX8 && coords[3])
       coords[3] = nir_fmax(b, coords[3], nir_imm_float(b, 0.0));
 
-   nir_ssa_def *cube_coords = nir_cube_amd(b, nir_vec(b, coords, 3));
-   nir_ssa_def *sc = nir_channel(b, cube_coords, 1);
-   nir_ssa_def *tc = nir_channel(b, cube_coords, 0);
-   nir_ssa_def *ma = nir_channel(b, cube_coords, 2);
-   nir_ssa_def *invma = nir_frcp(b, nir_fabs(b, ma));
-   nir_ssa_def *id = nir_channel(b, cube_coords, 3);
+   nir_def *cube_coords = nir_cube_amd(b, nir_vec(b, coords, 3));
+   nir_def *sc = nir_channel(b, cube_coords, 1);
+   nir_def *tc = nir_channel(b, cube_coords, 0);
+   nir_def *ma = nir_channel(b, cube_coords, 2);
+   nir_def *invma = nir_frcp(b, nir_fabs(b, ma));
+   nir_def *id = nir_channel(b, cube_coords, 3);
 
    if (ddx || ddy) {
       sc = nir_fmul(b, sc, invma);
@@ -132,13 +132,13 @@ prepare_cube_coords(nir_builder *b, nir_tex_instr *tex, nir_ssa_def **coord, nir
           * seems awfully quiet about how textureGrad for cube
           * maps should be handled.
           */
-         nir_ssa_def *deriv_ma, *deriv_sc, *deriv_tc;
+         nir_def *deriv_ma, *deriv_sc, *deriv_tc;
          build_cube_select(b, ma, id, i ? ddy->ssa : ddx->ssa, &deriv_ma, &deriv_sc, &deriv_tc);
 
          deriv_ma = nir_fmul(b, deriv_ma, invma);
 
-         nir_ssa_def *x = nir_fsub(b, nir_fmul(b, deriv_sc, invma), nir_fmul(b, deriv_ma, sc));
-         nir_ssa_def *y = nir_fsub(b, nir_fmul(b, deriv_tc, invma), nir_fmul(b, deriv_ma, tc));
+         nir_def *x = nir_fsub(b, nir_fmul(b, deriv_sc, invma), nir_fmul(b, deriv_ma, sc));
+         nir_def *y = nir_fsub(b, nir_fmul(b, deriv_tc, invma), nir_fmul(b, deriv_ma, tc));
 
          nir_instr_rewrite_src_ssa(&tex->instr, i ? ddy : ddx, nir_vec2(b, x, y));
       }
@@ -159,20 +159,20 @@ prepare_cube_coords(nir_builder *b, nir_tex_instr *tex, nir_ssa_def **coord, nir
 }
 
 static bool
-lower_array_layer_round_even(nir_builder *b, nir_tex_instr *tex, nir_ssa_def **coords)
+lower_array_layer_round_even(nir_builder *b, nir_tex_instr *tex, nir_def **coords)
 {
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
    if (coord_index < 0 || nir_tex_instr_src_type(tex, coord_index) != nir_type_float)
       return false;
 
    unsigned layer = tex->coord_components - 1;
-   nir_ssa_def *rounded_layer = nir_fround_even(b, nir_channel(b, *coords, layer));
+   nir_def *rounded_layer = nir_fround_even(b, nir_channel(b, *coords, layer));
    *coords = nir_vector_insert_imm(b, *coords, rounded_layer, layer);
    return true;
 }
 
 static bool
-lower_tex_coords(nir_builder *b, nir_tex_instr *tex, nir_ssa_def **coords,
+lower_tex_coords(nir_builder *b, nir_tex_instr *tex, nir_def **coords,
                  const ac_nir_lower_tex_options *options)
 {
    bool progress = false;
@@ -190,11 +190,11 @@ lower_tex_coords(nir_builder *b, nir_tex_instr *tex, nir_ssa_def **coords,
    nir_src *ddy = ddy_idx >= 0 ? &tex->src[ddy_idx].src : NULL;
 
    if (tex->sampler_dim == GLSL_SAMPLER_DIM_1D) {
-      nir_ssa_def *y =
+      nir_def *y =
          nir_imm_floatN_t(b, tex->op == nir_texop_txf ? 0.0 : 0.5, (*coords)->bit_size);
       if (tex->is_array && (*coords)->num_components > 1) {
-         nir_ssa_def *x = nir_channel(b, *coords, 0);
-         nir_ssa_def *idx = nir_channel(b, *coords, 1);
+         nir_def *x = nir_channel(b, *coords, 0);
+         nir_def *idx = nir_channel(b, *coords, 1);
          *coords = nir_vec3(b, x, y, idx);
       } else {
          *coords = nir_vec2(b, *coords, y);
@@ -203,12 +203,12 @@ lower_tex_coords(nir_builder *b, nir_tex_instr *tex, nir_ssa_def **coords,
       int offset_src = nir_tex_instr_src_index(tex, nir_tex_src_offset);
       if (offset_src >= 0) {
          nir_src *offset = &tex->src[offset_src].src;
-         nir_ssa_def *zero = nir_imm_intN_t(b, 0, offset->ssa->bit_size);
+         nir_def *zero = nir_imm_intN_t(b, 0, offset->ssa->bit_size);
          nir_instr_rewrite_src_ssa(&tex->instr, offset, nir_vec2(b, offset->ssa, zero));
       }
 
       if (ddx || ddy) {
-         nir_ssa_def *def = nir_vec2(b, ddx->ssa, nir_imm_floatN_t(b, 0.0, ddx->ssa->bit_size));
+         nir_def *def = nir_vec2(b, ddx->ssa, nir_imm_floatN_t(b, 0.0, ddx->ssa->bit_size));
          nir_instr_rewrite_src_ssa(&tex->instr, ddx, def);
          def = nir_vec2(b, ddy->ssa, nir_imm_floatN_t(b, 0.0, ddy->ssa->bit_size));
          nir_instr_rewrite_src_ssa(&tex->instr, ddy, def);
@@ -233,7 +233,7 @@ lower_tex(nir_builder *b, nir_instr *instr, void *options_)
       return false;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *coords = tex->src[coord_idx].src.ssa;
+   nir_def *coords = tex->src[coord_idx].src.ssa;
    if (lower_tex_coords(b, tex, &coords, options)) {
       tex->coord_components = coords->num_components;
       nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[coord_idx].src, coords);
@@ -249,12 +249,12 @@ typedef struct {
 } coord_info;
 
 static bool
-can_move_coord(nir_ssa_scalar scalar, coord_info *info)
+can_move_coord(nir_scalar scalar, coord_info *info)
 {
    if (scalar.def->bit_size != 32)
       return false;
 
-   if (nir_ssa_scalar_is_const(scalar))
+   if (nir_scalar_is_const(scalar))
       return true;
 
    if (scalar.def->parent_instr->type != nir_instr_type_intrinsic)
@@ -270,8 +270,8 @@ can_move_coord(nir_ssa_scalar scalar, coord_info *info)
    if (intrin->intrinsic != nir_intrinsic_load_interpolated_input)
       return false;
 
-   nir_ssa_scalar coord_x = nir_ssa_scalar_resolved(intrin->src[0].ssa, 0);
-   nir_ssa_scalar coord_y = nir_ssa_scalar_resolved(intrin->src[0].ssa, 1);
+   nir_scalar coord_x = nir_scalar_resolved(intrin->src[0].ssa, 0);
+   nir_scalar coord_y = nir_scalar_resolved(intrin->src[0].ssa, 1);
    if (coord_x.def->parent_instr->type != nir_instr_type_intrinsic || coord_x.comp != 0 ||
        coord_y.def->parent_instr->type != nir_instr_type_intrinsic || coord_y.comp != 1)
       return false;
@@ -297,22 +297,22 @@ struct move_tex_coords_state {
    nir_builder toplevel_b;
 };
 
-static nir_ssa_def *
-build_coordinate(struct move_tex_coords_state *state, nir_ssa_scalar scalar, coord_info info)
+static nir_def *
+build_coordinate(struct move_tex_coords_state *state, nir_scalar scalar, coord_info info)
 {
    nir_builder *b = &state->toplevel_b;
 
-   if (nir_ssa_scalar_is_const(scalar))
-      return nir_imm_intN_t(b, nir_ssa_scalar_as_uint(scalar), scalar.def->bit_size);
+   if (nir_scalar_is_const(scalar))
+      return nir_imm_intN_t(b, nir_scalar_as_uint(scalar), scalar.def->bit_size);
 
    ASSERTED nir_src offset = *nir_get_io_offset_src(info.load);
    assert(nir_src_is_const(offset) && !nir_src_as_uint(offset));
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *res;
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *res;
    if (info.bary) {
       enum glsl_interp_mode interp_mode = nir_intrinsic_interp_mode(info.bary);
-      nir_ssa_def *bary = nir_load_system_value(b, info.bary->intrinsic, interp_mode, 2, 32);
+      nir_def *bary = nir_load_system_value(b, info.bary->intrinsic, interp_mode, 2, 32);
       res = nir_load_interpolated_input(b, 1, 32, bary, zero);
    } else {
       res = nir_load_input(b, 1, 32, zero);
@@ -351,11 +351,11 @@ move_tex_coords(struct move_tex_coords_state *state, nir_function_impl *impl, ni
       return false;
 
    nir_tex_src *src = &tex->src[nir_tex_instr_src_index(tex, nir_tex_src_coord)];
-   nir_ssa_scalar components[NIR_MAX_VEC_COMPONENTS];
+   nir_scalar components[NIR_MAX_VEC_COMPONENTS];
    coord_info infos[NIR_MAX_VEC_COMPONENTS];
    bool can_move_all = true;
    for (unsigned i = 0; i < tex->coord_components; i++) {
-      components[i] = nir_ssa_scalar_resolved(src->src.ssa, i);
+      components[i] = nir_scalar_resolved(src->src.ssa, i);
       can_move_all &= can_move_coord(components[i], &infos[i]);
    }
    if (!can_move_all)
@@ -386,7 +386,7 @@ move_tex_coords(struct move_tex_coords_state *state, nir_function_impl *impl, ni
    for (unsigned i = 0; i < tex->coord_components; i++)
       components[i] = nir_get_ssa_scalar(build_coordinate(state, components[i], infos[i]), 0);
 
-   nir_ssa_def *linear_vgpr = nir_vec_scalars(&state->toplevel_b, components, tex->coord_components);
+   nir_def *linear_vgpr = nir_vec_scalars(&state->toplevel_b, components, tex->coord_components);
    lower_tex_coords(&state->toplevel_b, tex, &linear_vgpr, state->options);
 
    linear_vgpr = nir_strict_wqm_coord_amd(&state->toplevel_b, linear_vgpr, coord_base * 4);
@@ -421,25 +421,25 @@ move_fddxy(struct move_tex_coords_state *state, nir_function_impl *impl, nir_alu
    }
 
    unsigned num_components = instr->dest.dest.ssa.num_components;
-   nir_ssa_scalar components[NIR_MAX_VEC_COMPONENTS];
+   nir_scalar components[NIR_MAX_VEC_COMPONENTS];
    coord_info infos[NIR_MAX_VEC_COMPONENTS];
    bool can_move_all = true;
    for (unsigned i = 0; i < num_components; i++) {
-      components[i] = nir_ssa_scalar_chase_alu_src(nir_get_ssa_scalar(&instr->dest.dest.ssa, i), 0);
-      components[i] = nir_ssa_scalar_chase_movs(components[i]);
+      components[i] = nir_scalar_chase_alu_src(nir_get_ssa_scalar(&instr->dest.dest.ssa, i), 0);
+      components[i] = nir_scalar_chase_movs(components[i]);
       can_move_all &= can_move_coord(components[i], &infos[i]);
    }
    if (!can_move_all || state->num_wqm_vgprs + num_components > state->options->max_wqm_vgprs)
       return false;
 
    for (unsigned i = 0; i < num_components; i++) {
-      nir_ssa_def *def = build_coordinate(state, components[i], infos[i]);
+      nir_def *def = build_coordinate(state, components[i], infos[i]);
       components[i] = nir_get_ssa_scalar(def, 0);
    }
 
-   nir_ssa_def *def = nir_vec_scalars(&state->toplevel_b, components, num_components);
+   nir_def *def = nir_vec_scalars(&state->toplevel_b, components, num_components);
    def = nir_build_alu1(&state->toplevel_b, instr->op, def);
-   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, def);
+   nir_def_rewrite_uses(&instr->dest.dest.ssa, def);
 
    state->num_wqm_vgprs += num_components;
 
index 6f5356c..0c4591a 100644 (file)
@@ -3312,16 +3312,16 @@ void ac_surface_print_info(FILE *out, const struct radeon_info *info,
    }
 }
 
-static nir_ssa_def *gfx10_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
+static nir_def *gfx10_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
                                                    struct gfx9_meta_equation *equation,
                                                    int blkSizeBias, unsigned blkStart,
-                                                   nir_ssa_def *meta_pitch, nir_ssa_def *meta_slice_size,
-                                                   nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z,
-                                                   nir_ssa_def *pipe_xor,
-                                                   nir_ssa_def **bit_position)
+                                                   nir_def *meta_pitch, nir_def *meta_slice_size,
+                                                   nir_def *x, nir_def *y, nir_def *z,
+                                                   nir_def *pipe_xor,
+                                                   nir_def **bit_position)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *one = nir_imm_int(b, 1);
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *one = nir_imm_int(b, 1);
 
    assert(info->gfx_level >= GFX10);
 
@@ -3329,17 +3329,17 @@ static nir_ssa_def *gfx10_nir_meta_addr_from_coord(nir_builder *b, const struct
    unsigned meta_block_height_log2 = util_logbase2(equation->meta_block_height);
    unsigned blkSizeLog2 = meta_block_width_log2 + meta_block_height_log2 + blkSizeBias;
 
-   nir_ssa_def *coord[] = {x, y, z, 0};
-   nir_ssa_def *address = zero;
+   nir_def *coord[] = {x, y, z, 0};
+   nir_def *address = zero;
 
    for (unsigned i = blkStart; i < blkSizeLog2 + 1; i++) {
-      nir_ssa_def *v = zero;
+      nir_def *v = zero;
 
       for (unsigned c = 0; c < 4; c++) {
          unsigned index = i * 4 + c - (blkStart * 4);
          if (equation->u.gfx10_bits[index]) {
             unsigned mask = equation->u.gfx10_bits[index];
-            nir_ssa_def *bits = coord[c];
+            nir_def *bits = coord[c];
 
             while (mask)
                v = nir_ixor(b, v, nir_iand(b, nir_ushr_imm(b, bits, u_bit_scan(&mask)), one));
@@ -3352,11 +3352,11 @@ static nir_ssa_def *gfx10_nir_meta_addr_from_coord(nir_builder *b, const struct
    unsigned blkMask = (1 << blkSizeLog2) - 1;
    unsigned pipeMask = (1 << G_0098F8_NUM_PIPES(info->gb_addr_config)) - 1;
    unsigned m_pipeInterleaveLog2 = 8 + G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config);
-   nir_ssa_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
-   nir_ssa_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
-   nir_ssa_def *pb = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
-   nir_ssa_def *blkIndex = nir_iadd(b, nir_imul(b, yb, pb), xb);
-   nir_ssa_def *pipeXor = nir_iand_imm(b, nir_ishl_imm(b, nir_iand_imm(b, pipe_xor, pipeMask),
+   nir_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
+   nir_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
+   nir_def *pb = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
+   nir_def *blkIndex = nir_iadd(b, nir_imul(b, yb, pb), xb);
+   nir_def *pipeXor = nir_iand_imm(b, nir_ishl_imm(b, nir_iand_imm(b, pipe_xor, pipeMask),
                                                        m_pipeInterleaveLog2), blkMask);
 
    if (bit_position)
@@ -3367,15 +3367,15 @@ static nir_ssa_def *gfx10_nir_meta_addr_from_coord(nir_builder *b, const struct
                    nir_ixor(b, nir_ushr(b, address, one), pipeXor));
 }
 
-static nir_ssa_def *gfx9_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
+static nir_def *gfx9_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
                                                   struct gfx9_meta_equation *equation,
-                                                  nir_ssa_def *meta_pitch, nir_ssa_def *meta_height,
-                                                  nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z,
-                                                  nir_ssa_def *sample, nir_ssa_def *pipe_xor,
-                                                  nir_ssa_def **bit_position)
+                                                  nir_def *meta_pitch, nir_def *meta_height,
+                                                  nir_def *x, nir_def *y, nir_def *z,
+                                                  nir_def *sample, nir_def *pipe_xor,
+                                                  nir_def **bit_position)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *one = nir_imm_int(b, 1);
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *one = nir_imm_int(b, 1);
 
    assert(info->gfx_level >= GFX9);
 
@@ -3385,32 +3385,32 @@ static nir_ssa_def *gfx9_nir_meta_addr_from_coord(nir_builder *b, const struct r
 
    unsigned m_pipeInterleaveLog2 = 8 + G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config);
    unsigned numPipeBits = equation->u.gfx9.num_pipe_bits;
-   nir_ssa_def *pitchInBlock = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
-   nir_ssa_def *sliceSizeInBlock = nir_imul(b, nir_ushr_imm(b, meta_height, meta_block_height_log2),
+   nir_def *pitchInBlock = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
+   nir_def *sliceSizeInBlock = nir_imul(b, nir_ushr_imm(b, meta_height, meta_block_height_log2),
                                             pitchInBlock);
 
-   nir_ssa_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
-   nir_ssa_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
-   nir_ssa_def *zb = nir_ushr_imm(b, z, meta_block_depth_log2);
+   nir_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
+   nir_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
+   nir_def *zb = nir_ushr_imm(b, z, meta_block_depth_log2);
 
-   nir_ssa_def *blockIndex = nir_iadd(b, nir_iadd(b, nir_imul(b, zb, sliceSizeInBlock),
+   nir_def *blockIndex = nir_iadd(b, nir_iadd(b, nir_imul(b, zb, sliceSizeInBlock),
                                                   nir_imul(b, yb, pitchInBlock)), xb);
-   nir_ssa_def *coords[] = {x, y, z, sample, blockIndex};
+   nir_def *coords[] = {x, y, z, sample, blockIndex};
 
-   nir_ssa_def *address = zero;
+   nir_def *address = zero;
    unsigned num_bits = equation->u.gfx9.num_bits;
    assert(num_bits <= 32);
 
    /* Compute the address up until the last bit that doesn't use the block index. */
    for (unsigned i = 0; i < num_bits - 1; i++) {
-      nir_ssa_def *xor = zero;
+      nir_def *xor = zero;
 
       for (unsigned c = 0; c < 5; c++) {
          if (equation->u.gfx9.bit[i].coord[c].dim >= 5)
             continue;
 
          assert(equation->u.gfx9.bit[i].coord[c].ord < 32);
-         nir_ssa_def *ison =
+         nir_def *ison =
             nir_iand(b, nir_ushr_imm(b, coords[equation->u.gfx9.bit[i].coord[c].dim],
                                      equation->u.gfx9.bit[i].coord[c].ord), one);
 
@@ -3429,17 +3429,17 @@ static nir_ssa_def *gfx9_nir_meta_addr_from_coord(nir_builder *b, const struct r
    if (bit_position)
       *bit_position = nir_ishl_imm(b, nir_iand_imm(b, address, 1), 2);
 
-   nir_ssa_def *pipeXor = nir_iand_imm(b, pipe_xor, (1 << numPipeBits) - 1);
+   nir_def *pipeXor = nir_iand_imm(b, pipe_xor, (1 << numPipeBits) - 1);
    return nir_ixor(b, nir_ushr(b, address, one),
                    nir_ishl_imm(b, pipeXor, m_pipeInterleaveLog2));
 }
 
-nir_ssa_def *ac_nir_dcc_addr_from_coord(nir_builder *b, const struct radeon_info *info,
+nir_def *ac_nir_dcc_addr_from_coord(nir_builder *b, const struct radeon_info *info,
                                         unsigned bpe, struct gfx9_meta_equation *equation,
-                                        nir_ssa_def *dcc_pitch, nir_ssa_def *dcc_height,
-                                        nir_ssa_def *dcc_slice_size,
-                                        nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z,
-                                        nir_ssa_def *sample, nir_ssa_def *pipe_xor)
+                                        nir_def *dcc_pitch, nir_def *dcc_height,
+                                        nir_def *dcc_slice_size,
+                                        nir_def *x, nir_def *y, nir_def *z,
+                                        nir_def *sample, nir_def *pipe_xor)
 {
    if (info->gfx_level >= GFX10) {
       unsigned bpp_log2 = util_logbase2(bpe);
@@ -3454,15 +3454,15 @@ nir_ssa_def *ac_nir_dcc_addr_from_coord(nir_builder *b, const struct radeon_info
    }
 }
 
-nir_ssa_def *ac_nir_cmask_addr_from_coord(nir_builder *b, const struct radeon_info *info,
+nir_def *ac_nir_cmask_addr_from_coord(nir_builder *b, const struct radeon_info *info,
                                         struct gfx9_meta_equation *equation,
-                                        nir_ssa_def *cmask_pitch, nir_ssa_def *cmask_height,
-                                        nir_ssa_def *cmask_slice_size,
-                                        nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z,
-                                        nir_ssa_def *pipe_xor,
-                                        nir_ssa_def **bit_position)
+                                        nir_def *cmask_pitch, nir_def *cmask_height,
+                                        nir_def *cmask_slice_size,
+                                        nir_def *x, nir_def *y, nir_def *z,
+                                        nir_def *pipe_xor,
+                                        nir_def **bit_position)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    if (info->gfx_level >= GFX10) {
       return gfx10_nir_meta_addr_from_coord(b, info, equation, -7, 1,
@@ -3475,12 +3475,12 @@ nir_ssa_def *ac_nir_cmask_addr_from_coord(nir_builder *b, const struct radeon_in
    }
 }
 
-nir_ssa_def *ac_nir_htile_addr_from_coord(nir_builder *b, const struct radeon_info *info,
+nir_def *ac_nir_htile_addr_from_coord(nir_builder *b, const struct radeon_info *info,
                                           struct gfx9_meta_equation *equation,
-                                          nir_ssa_def *htile_pitch,
-                                          nir_ssa_def *htile_slice_size,
-                                          nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z,
-                                          nir_ssa_def *pipe_xor)
+                                          nir_def *htile_pitch,
+                                          nir_def *htile_slice_size,
+                                          nir_def *x, nir_def *y, nir_def *z,
+                                          nir_def *pipe_xor)
 {
    return gfx10_nir_meta_addr_from_coord(b, info, equation, -4, 2,
                                             htile_pitch, htile_slice_size,
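
Every address bit produced by the loops above is the parity (XOR) of coordinate bits selected by a per-channel mask. A scalar sketch of one GFX10-style bit (hypothetical helper, not part of the patch):

   #include <stdint.h>

   static uint32_t meta_addr_bit(const uint32_t coord[4], const uint16_t masks[4])
   {
      uint32_t v = 0;
      for (unsigned c = 0; c < 4; c++)
         v ^= __builtin_popcount(coord[c] & masks[c]) & 1; /* bit parity */
      return v;
   }
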
index 5076479..08ce3cb 100644 (file)
@@ -490,27 +490,27 @@ unsigned ac_get_cb_number_type(enum pipe_format format);
 unsigned ac_get_cb_format(enum amd_gfx_level gfx_level, enum pipe_format format);
 
 #ifdef AC_SURFACE_INCLUDE_NIR
-nir_ssa_def *ac_nir_dcc_addr_from_coord(nir_builder *b, const struct radeon_info *info,
+nir_def *ac_nir_dcc_addr_from_coord(nir_builder *b, const struct radeon_info *info,
                                         unsigned bpe, struct gfx9_meta_equation *equation,
-                                        nir_ssa_def *dcc_pitch, nir_ssa_def *dcc_height,
-                                        nir_ssa_def *dcc_slice_size,
-                                        nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z,
-                                        nir_ssa_def *sample, nir_ssa_def *pipe_xor);
+                                        nir_def *dcc_pitch, nir_def *dcc_height,
+                                        nir_def *dcc_slice_size,
+                                        nir_def *x, nir_def *y, nir_def *z,
+                                        nir_def *sample, nir_def *pipe_xor);
 
-nir_ssa_def *ac_nir_cmask_addr_from_coord(nir_builder *b, const struct radeon_info *info,
+nir_def *ac_nir_cmask_addr_from_coord(nir_builder *b, const struct radeon_info *info,
                                         struct gfx9_meta_equation *equation,
-                                        nir_ssa_def *cmask_pitch, nir_ssa_def *cmask_height,
-                                        nir_ssa_def *cmask_slice_size,
-                                        nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z,
-                                        nir_ssa_def *pipe_xor,
-                                        nir_ssa_def **bit_position);
+                                        nir_def *cmask_pitch, nir_def *cmask_height,
+                                        nir_def *cmask_slice_size,
+                                        nir_def *x, nir_def *y, nir_def *z,
+                                        nir_def *pipe_xor,
+                                        nir_def **bit_position);
 
-nir_ssa_def *ac_nir_htile_addr_from_coord(nir_builder *b, const struct radeon_info *info,
+nir_def *ac_nir_htile_addr_from_coord(nir_builder *b, const struct radeon_info *info,
                                           struct gfx9_meta_equation *equation,
-                                          nir_ssa_def *htile_pitch,
-                                          nir_ssa_def *htile_slice_size,
-                                          nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z,
-                                          nir_ssa_def *pipe_xor);
+                                          nir_def *htile_pitch,
+                                          nir_def *htile_slice_size,
+                                          nir_def *x, nir_def *y, nir_def *z,
+                                          nir_def *pipe_xor);
 #endif
 
 #ifdef __cplusplus
index f4bcadd..77c63c4 100644 (file)
@@ -128,7 +128,7 @@ append_logical_end(Block* b)
 }
 
 Temp
-get_ssa_temp(struct isel_context* ctx, nir_ssa_def* def)
+get_ssa_temp(struct isel_context* ctx, nir_def* def)
 {
    uint32_t id = ctx->first_temp_id + def->index;
    return Temp(id, ctx->program->temp_rc[id]);
@@ -576,7 +576,7 @@ byte_align_vector(isel_context* ctx, Temp vec, Operand offset, Temp dst, unsigne
 }
 
 Temp
-get_ssa_temp_tex(struct isel_context* ctx, nir_ssa_def* def, bool is_16bit)
+get_ssa_temp_tex(struct isel_context* ctx, nir_def* def, bool is_16bit)
 {
    RegClass rc = RegClass::get(RegType::vgpr, (is_16bit ? 2 : 4) * def->num_components);
    Temp tmp = get_ssa_temp(ctx, def);
@@ -806,8 +806,7 @@ get_alu_src_vop3p(struct isel_context* ctx, nir_alu_src src)
 uint32_t
 get_alu_src_ub(isel_context* ctx, nir_alu_instr* instr, int src_idx)
 {
-   nir_ssa_scalar scalar =
-      nir_ssa_scalar{instr->src[src_idx].src.ssa, instr->src[src_idx].swizzle[0]};
+   nir_scalar scalar = nir_scalar{instr->src[src_idx].src.ssa, instr->src[src_idx].swizzle[0]};
    return nir_unsigned_upper_bound(ctx->shader, ctx->range_ht, scalar, &ctx->ub_config);
 }
 
@@ -6131,7 +6130,7 @@ visit_image_load(isel_context* ctx, nir_intrinsic_instr* instr)
 
    unsigned result_size = instr->dest.ssa.num_components - is_sparse;
    unsigned expand_mask =
-      nir_ssa_def_components_read(&instr->dest.ssa) & u_bit_consecutive(0, result_size);
+      nir_def_components_read(&instr->dest.ssa) & u_bit_consecutive(0, result_size);
    expand_mask = MAX2(expand_mask, 1); /* this can be zero in the case of sparse image loads */
    if (dim == GLSL_SAMPLER_DIM_BUF)
       expand_mask = (1u << util_last_bit(expand_mask)) - 1u;
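
A minimal sketch of the expand_mask widening just above (hypothetical helper, not part of the patch): buffer loads cannot skip components, so the mask of components actually read is rounded up to a contiguous run starting at component 0.

   static unsigned buffer_expand_mask(unsigned components_read, unsigned result_size)
   {
      unsigned mask = components_read & ((1u << result_size) - 1u);
      if (!mask)
         mask = 1; /* sparse image loads may read no components */
      unsigned last = 32 - __builtin_clz(mask); /* util_last_bit */
      return (1u << last) - 1u;
   }
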
@@ -6311,9 +6310,9 @@ visit_image_store(isel_context* ctx, nir_intrinsic_instr* instr)
     */
    if (instr->src[3].ssa->bit_size == 32 || instr->src[3].ssa->bit_size == 16) {
       for (uint32_t i = 0; i < instr->num_components; i++) {
-         nir_ssa_scalar comp = nir_ssa_scalar_resolved(instr->src[3].ssa, i);
-         if ((nir_ssa_scalar_is_const(comp) && nir_ssa_scalar_as_uint(comp) == 0) ||
-             nir_ssa_scalar_is_undef(comp))
+         nir_scalar comp = nir_scalar_resolved(instr->src[3].ssa, i);
+         if ((nir_scalar_is_const(comp) && nir_scalar_as_uint(comp) == 0) ||
+             nir_scalar_is_undef(comp))
             dmask &= ~BITFIELD_BIT(i);
       }
 
@@ -6444,7 +6443,7 @@ translate_buffer_image_atomic_op(const nir_atomic_op op, aco_opcode* buf_op, aco
 void
 visit_image_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
 {
-   bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
+   bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
    const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
    bool is_array = nir_intrinsic_image_array(instr);
    Builder bld(ctx->program, ctx->block);
@@ -6586,7 +6585,7 @@ void
 visit_atomic_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
+   bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
    Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa));
 
    const nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
@@ -6788,7 +6787,7 @@ void
 visit_global_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
 {
    Builder bld(ctx->program, ctx->block);
-   bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
+   bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
    Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
 
    const nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
@@ -7177,7 +7176,7 @@ emit_barrier(isel_context* ctx, nir_intrinsic_instr* instr)
 void
 visit_load_shared(isel_context* ctx, nir_intrinsic_instr* instr)
 {
-   // TODO: implement sparse reads using ds_read2_b32 and nir_ssa_def_components_read()
+   // TODO: implement sparse reads using ds_read2_b32 and nir_def_components_read()
    Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
    Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
    Builder bld(ctx->program, ctx->block);
@@ -7294,7 +7293,7 @@ visit_shared_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
    default: unreachable("Unhandled shared atomic intrinsic");
    }
 
-   bool return_previous = !nir_ssa_def_is_unused(&instr->dest.ssa);
+   bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
 
    aco_opcode op;
    if (data.size() == 1) {
@@ -9102,7 +9101,7 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
 }
 
 void
-get_const_vec(nir_ssa_def* vec, nir_const_value* cv[4])
+get_const_vec(nir_def* vec, nir_const_value* cv[4])
 {
    if (vec->parent_instr->type != nir_instr_type_alu)
       return;
@@ -9339,7 +9338,7 @@ visit_tex(isel_context* ctx, nir_tex_instr* instr)
    }
 
    /* Build tex instruction */
-   unsigned dmask = nir_ssa_def_components_read(&instr->dest.ssa) & 0xf;
+   unsigned dmask = nir_def_components_read(&instr->dest.ssa) & 0xf;
    if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
       dmask = u_bit_consecutive(0, util_last_bit(dmask));
    if (instr->is_sparse)
@@ -9746,7 +9745,7 @@ visit_tex(isel_context* ctx, nir_tex_instr* instr)
 }
 
 Operand
-get_phi_operand(isel_context* ctx, nir_ssa_def* ssa, RegClass rc, bool logical)
+get_phi_operand(isel_context* ctx, nir_def* ssa, RegClass rc, bool logical)
 {
    Temp tmp = get_ssa_temp(ctx, ssa);
    if (ssa->parent_instr->type == nir_instr_type_ssa_undef) {
@@ -9772,7 +9771,7 @@ visit_phi(isel_context* ctx, nir_phi_instr* instr)
    aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
 
    /* we want a sorted list of sources, since the predecessor list is also sorted */
-   std::map<unsigned, nir_ssa_def*> phi_src;
+   std::map<unsigned, nir_def*> phi_src;
    nir_foreach_phi_src (src, instr)
       phi_src[src->pred->index] = src->src.ssa;
 
@@ -9782,7 +9781,7 @@ visit_phi(isel_context* ctx, nir_phi_instr* instr)
       (std::max(exec_list_length(&instr->srcs), (unsigned)preds.size()) + 1) * sizeof(Operand));
    unsigned num_defined = 0;
    unsigned cur_pred_idx = 0;
-   for (std::pair<unsigned, nir_ssa_def*> src : phi_src) {
+   for (std::pair<unsigned, nir_def*> src : phi_src) {
       if (cur_pred_idx < preds.size()) {
          /* handle missing preds (IF merges with discard/break) and extra preds
           * (loop exit with discard) */
@@ -9857,7 +9856,7 @@ visit_phi(isel_context* ctx, nir_phi_instr* instr)
 }
 
 void
-visit_undef(isel_context* ctx, nir_ssa_undef_instr* instr)
+visit_undef(isel_context* ctx, nir_undef_instr* instr)
 {
    Temp dst = get_ssa_temp(ctx, &instr->def);
 
index cc2089c..21b63ef 100644 (file)
@@ -66,7 +66,7 @@ is_block_reachable(nir_function_impl* impl, nir_block* known_reachable, nir_bloc
 
 /* Check whether the given SSA def is only used by cross-lane instructions. */
 bool
-only_used_by_cross_lane_instrs(nir_ssa_def* ssa, bool follow_phis = true)
+only_used_by_cross_lane_instrs(nir_def* ssa, bool follow_phis = true)
 {
    nir_foreach_use (src, ssa) {
       switch (src->parent_instr->type) {
@@ -178,13 +178,13 @@ sanitize_cf_list(nir_function_impl* impl, struct exec_list* cf_list)
 }
 
 void
-apply_nuw_to_ssa(isel_context* ctx, nir_ssa_def* ssa)
+apply_nuw_to_ssa(isel_context* ctx, nir_def* ssa)
 {
-   nir_ssa_scalar scalar;
+   nir_scalar scalar;
    scalar.def = ssa;
    scalar.comp = 0;
 
-   if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != nir_op_iadd)
+   if (!nir_scalar_is_alu(scalar) || nir_scalar_alu_op(scalar) != nir_op_iadd)
       return;
 
    nir_alu_instr* add = nir_instr_as_alu(ssa->parent_instr);
@@ -192,11 +192,11 @@ apply_nuw_to_ssa(isel_context* ctx, nir_ssa_def* ssa)
    if (add->no_unsigned_wrap)
       return;
 
-   nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
-   nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
+   nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
+   nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);
 
-   if (nir_ssa_scalar_is_const(src0)) {
-      nir_ssa_scalar tmp = src0;
+   if (nir_scalar_is_const(src0)) {
+      nir_scalar tmp = src0;
       src0 = src1;
       src1 = tmp;
    }
index 0cb9ef3..e2d1bdb 100644 (file)
@@ -41,7 +41,7 @@ struct ac_nir_context {
    LLVMBasicBlockRef break_block;
 };
 
-static LLVMTypeRef get_def_type(struct ac_nir_context *ctx, const nir_ssa_def *def)
+static LLVMTypeRef get_def_type(struct ac_nir_context *ctx, const nir_def *def)
 {
    LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
    if (def->num_components > 1) {
@@ -1471,7 +1471,7 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te
    assert((!args->tfe || !args->d16) && "unsupported");
 
    if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
-      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
+      unsigned mask = nir_def_components_read(&instr->dest.ssa);
 
       /* Buffers don't support A16. */
       if (args->a16)
@@ -2326,7 +2326,7 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
    args.tfe = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
 
    if (dim == GLSL_SAMPLER_DIM_BUF) {
-      unsigned num_channels = util_last_bit(nir_ssa_def_components_read(&instr->dest.ssa));
+      unsigned num_channels = util_last_bit(nir_def_components_read(&instr->dest.ssa));
       if (instr->dest.ssa.bit_size == 64)
          num_channels = num_channels < 4 ? 2 : 4;
       LLVMValueRef rsrc, vindex;
@@ -4133,7 +4133,7 @@ static void phi_post_pass(struct ac_nir_context *ctx)
    }
 }
 
-static bool is_def_used_in_an_export(const nir_ssa_def *def)
+static bool is_def_used_in_an_export(const nir_def *def)
 {
    nir_foreach_use (use_src, def) {
       if (use_src->parent_instr->type == nir_instr_type_intrinsic) {
@@ -4150,7 +4150,7 @@ static bool is_def_used_in_an_export(const nir_ssa_def *def)
    return false;
 }
 
-static void visit_ssa_undef(struct ac_nir_context *ctx, const nir_ssa_undef_instr *instr)
+static void visit_ssa_undef(struct ac_nir_context *ctx, const nir_undef_instr *instr)
 {
    unsigned num_components = instr->def.num_components;
    LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
index 963f1ea..eb8aa3d 100644 (file)
@@ -618,7 +618,7 @@ radv_meta_build_nir_vs_generate_vertices(struct radv_device *dev)
 
    nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_VERTEX, "meta_vs_gen_verts");
 
-   nir_ssa_def *outvec = nir_gen_rect_vertices(&b, NULL, NULL);
+   nir_def *outvec = nir_gen_rect_vertices(&b, NULL, NULL);
 
    v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4, "gl_Position");
    v_position->data.location = VARYING_SLOT_POS;
@@ -636,10 +636,10 @@ radv_meta_build_nir_fs_noop(struct radv_device *dev)
 
 void
 radv_meta_build_resolve_shader_core(struct radv_device *device, nir_builder *b, bool is_integer, int samples,
-                                    nir_variable *input_img, nir_variable *color, nir_ssa_def *img_coord)
+                                    nir_variable *input_img, nir_variable *color, nir_def *img_coord)
 {
    nir_deref_instr *input_img_deref = nir_build_deref_var(b, input_img);
-   nir_ssa_def *sample0 = nir_txf_ms_deref(b, input_img_deref, img_coord, nir_imm_int(b, 0));
+   nir_def *sample0 = nir_txf_ms_deref(b, input_img_deref, img_coord, nir_imm_int(b, 0));
 
    if (is_integer || samples <= 1) {
       nir_store_var(b, color, sample0, 0xf);
@@ -647,13 +647,13 @@ radv_meta_build_resolve_shader_core(struct radv_device *device, nir_builder *b,
    }
 
    if (device->physical_device->use_fmask) {
-      nir_ssa_def *all_same = nir_samples_identical_deref(b, input_img_deref, img_coord);
+      nir_def *all_same = nir_samples_identical_deref(b, input_img_deref, img_coord);
       nir_push_if(b, nir_inot(b, all_same));
    }
 
-   nir_ssa_def *accum = sample0;
+   nir_def *accum = sample0;
    for (int i = 1; i < samples; i++) {
-      nir_ssa_def *sample = nir_txf_ms_deref(b, input_img_deref, img_coord, nir_imm_int(b, i));
+      nir_def *sample = nir_txf_ms_deref(b, input_img_deref, img_coord, nir_imm_int(b, i));
       accum = nir_fadd(b, accum, sample);
    }
 
@@ -667,21 +667,21 @@ radv_meta_build_resolve_shader_core(struct radv_device *device, nir_builder *b,
    }
 }
 
-nir_ssa_def *
+nir_def *
 radv_meta_load_descriptor(nir_builder *b, unsigned desc_set, unsigned binding)
 {
-   nir_ssa_def *rsrc = nir_vulkan_resource_index(b, 3, 32, nir_imm_int(b, 0), .desc_set = desc_set, .binding = binding);
+   nir_def *rsrc = nir_vulkan_resource_index(b, 3, 32, nir_imm_int(b, 0), .desc_set = desc_set, .binding = binding);
    return nir_trim_vector(b, rsrc, 2);
 }
 
-nir_ssa_def *
+nir_def *
 get_global_ids(nir_builder *b, unsigned num_components)
 {
    unsigned mask = BITFIELD_MASK(num_components);
 
-   nir_ssa_def *local_ids = nir_channels(b, nir_load_local_invocation_id(b), mask);
-   nir_ssa_def *block_ids = nir_channels(b, nir_load_workgroup_id(b, 32), mask);
-   nir_ssa_def *block_size =
+   nir_def *local_ids = nir_channels(b, nir_load_local_invocation_id(b), mask);
+   nir_def *block_ids = nir_channels(b, nir_load_workgroup_id(b, 32), mask);
+   nir_def *block_size =
       nir_channels(b,
                    nir_imm_ivec4(b, b->shader->info.workgroup_size[0], b->shader->info.workgroup_size[1],
                                  b->shader->info.workgroup_size[2], 0),
@@ -691,9 +691,9 @@ get_global_ids(nir_builder *b, unsigned num_components)
 }
 
 void
-radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
+radv_break_on_count(nir_builder *b, nir_variable *var, nir_def *count)
 {
-   nir_ssa_def *counter = nir_load_var(b, var);
+   nir_def *counter = nir_load_var(b, var);
 
    nir_push_if(b, nir_uge(b, counter, count));
    nir_jump(b, nir_jump_break);
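
For context: this helper bounds builder-generated loops, and its tail (the nir_pop_if plus the counter increment) falls outside the hunk. A minimal usage sketch under that assumption, not taken from this patch:

   nir_variable *i = nir_local_variable_create(b.impl, glsl_int_type(), "i");
   nir_store_var(b, i, nir_imm_int(b, 0), 0x1);
   nir_push_loop(b);
   {
      radv_break_on_count(b, i, count); /* breaks once i >= count, then bumps i */
      /* ...per-iteration work... */
   }
   nir_pop_loop(b, NULL);
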
index da38b04..98c8e71 100644 (file)
@@ -261,13 +261,13 @@ nir_shader *radv_meta_build_nir_vs_generate_vertices(struct radv_device *dev);
 nir_shader *radv_meta_build_nir_fs_noop(struct radv_device *dev);
 
 void radv_meta_build_resolve_shader_core(struct radv_device *device, nir_builder *b, bool is_integer, int samples,
-                                         nir_variable *input_img, nir_variable *color, nir_ssa_def *img_coord);
+                                         nir_variable *input_img, nir_variable *color, nir_def *img_coord);
 
-nir_ssa_def *radv_meta_load_descriptor(nir_builder *b, unsigned desc_set, unsigned binding);
+nir_def *radv_meta_load_descriptor(nir_builder *b, unsigned desc_set, unsigned binding);
 
-nir_ssa_def *get_global_ids(nir_builder *b, unsigned num_components);
+nir_def *get_global_ids(nir_builder *b, unsigned num_components);
 
-void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count);
+void radv_break_on_count(nir_builder *b, nir_variable *var, nir_def *count);
 
 #ifdef __cplusplus
 }
index 3a874b3..a9e494e 100644 (file)
@@ -47,14 +47,14 @@ build_nir_vertex_shader(struct radv_device *dev)
    tex_pos_out->data.location = VARYING_SLOT_VAR0;
    tex_pos_out->data.interpolation = INTERP_MODE_SMOOTH;
 
-   nir_ssa_def *outvec = nir_gen_rect_vertices(&b, NULL, NULL);
+   nir_def *outvec = nir_gen_rect_vertices(&b, NULL, NULL);
 
    nir_store_var(&b, pos_out, outvec, 0xf);
 
-   nir_ssa_def *src_box = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
-   nir_ssa_def *src0_z = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
+   nir_def *src_box = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
+   nir_def *src0_z = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
 
-   nir_ssa_def *vertex_id = nir_load_vertex_id_zero_base(&b);
+   nir_def *vertex_id = nir_load_vertex_id_zero_base(&b);
 
    /* vertex 0 - src0_x, src0_y, src0_z */
    /* vertex 1 - src0_x, src1_y, src0_z */
    /* vertex 2 - src1_x, src0_y, src0_z */
    /* so channel 0 is vertex_id != 2 ? src_x : src_x + w
       channel 1 is vertex_id != 1 ? src_y : src_y + h */
 
-   nir_ssa_def *c0cmp = nir_ine_imm(&b, vertex_id, 2);
-   nir_ssa_def *c1cmp = nir_ine_imm(&b, vertex_id, 1);
+   nir_def *c0cmp = nir_ine_imm(&b, vertex_id, 2);
+   nir_def *c1cmp = nir_ine_imm(&b, vertex_id, 1);
 
-   nir_ssa_def *comp[4];
+   nir_def *comp[4];
    comp[0] = nir_bcsel(&b, c0cmp, nir_channel(&b, src_box, 0), nir_channel(&b, src_box, 2));
 
    comp[1] = nir_bcsel(&b, c1cmp, nir_channel(&b, src_box, 1), nir_channel(&b, src_box, 3));
    comp[2] = src0_z;
    comp[3] = nir_imm_float(&b, 1.0);
-   nir_ssa_def *out_tex_vec = nir_vec(&b, comp, 4);
+   nir_def *out_tex_vec = nir_vec(&b, comp, 4);
    nir_store_var(&b, tex_pos_out, out_tex_vec, 0xf);
    return b.shader;
 }
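
Spelled out, the two bcsels select the rectangle corners per vertex (src_box channels are x0, y0, x1, y1); a worked expansion, not part of the diff:

   vertex_id 0 -> (x0, y0)   /* c0cmp true,  c1cmp true  */
   vertex_id 1 -> (x0, y1)   /* c0cmp true,  c1cmp false */
   vertex_id 2 -> (x1, y0)   /* c0cmp false, c1cmp true  */
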
@@ -89,7 +89,7 @@ build_nir_copy_fragment_shader(struct radv_device *dev, enum glsl_sampler_dim te
     * position.
     */
    unsigned swz[] = {0, (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 1), 2};
-   nir_ssa_def *const tex_pos =
+   nir_def *const tex_pos =
       nir_swizzle(&b, nir_load_var(&b, tex_pos_in), swz, (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 3));
 
    const struct glsl_type *sampler_type =
@@ -99,7 +99,7 @@ build_nir_copy_fragment_shader(struct radv_device *dev, enum glsl_sampler_dim te
    sampler->data.binding = 0;
 
    nir_deref_instr *tex_deref = nir_build_deref_var(&b, sampler);
-   nir_ssa_def *color = nir_tex_deref(&b, tex_deref, tex_deref, tex_pos);
+   nir_def *color = nir_tex_deref(&b, tex_deref, tex_deref, tex_pos);
 
    nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_color");
    color_out->data.location = FRAG_RESULT_DATA0;
@@ -121,7 +121,7 @@ build_nir_copy_fragment_shader_depth(struct radv_device *dev, enum glsl_sampler_
     * position.
     */
    unsigned swz[] = {0, (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 1), 2};
-   nir_ssa_def *const tex_pos =
+   nir_def *const tex_pos =
       nir_swizzle(&b, nir_load_var(&b, tex_pos_in), swz, (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 3));
 
    const struct glsl_type *sampler_type =
@@ -131,7 +131,7 @@ build_nir_copy_fragment_shader_depth(struct radv_device *dev, enum glsl_sampler_
    sampler->data.binding = 0;
 
    nir_deref_instr *tex_deref = nir_build_deref_var(&b, sampler);
-   nir_ssa_def *color = nir_tex_deref(&b, tex_deref, tex_deref, tex_pos);
+   nir_def *color = nir_tex_deref(&b, tex_deref, tex_deref, tex_pos);
 
    nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_color");
    color_out->data.location = FRAG_RESULT_DEPTH;
@@ -153,7 +153,7 @@ build_nir_copy_fragment_shader_stencil(struct radv_device *dev, enum glsl_sample
     * position.
     */
    unsigned swz[] = {0, (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 1), 2};
-   nir_ssa_def *const tex_pos =
+   nir_def *const tex_pos =
       nir_swizzle(&b, nir_load_var(&b, tex_pos_in), swz, (tex_dim == GLSL_SAMPLER_DIM_1D ? 2 : 3));
 
    const struct glsl_type *sampler_type =
@@ -163,7 +163,7 @@ build_nir_copy_fragment_shader_stencil(struct radv_device *dev, enum glsl_sample
    sampler->data.binding = 0;
 
    nir_deref_instr *tex_deref = nir_build_deref_var(&b, sampler);
-   nir_ssa_def *color = nir_tex_deref(&b, tex_deref, tex_deref, tex_pos);
+   nir_def *color = nir_tex_deref(&b, tex_deref, tex_deref, tex_pos);
 
    nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_color");
    color_out->data.location = FRAG_RESULT_STENCIL;
index 209902f..d9d701c 100644 (file)
@@ -375,11 +375,11 @@ build_nir_vertex_shader(struct radv_device *device)
    tex_pos_out->data.location = VARYING_SLOT_VAR0;
    tex_pos_out->data.interpolation = INTERP_MODE_SMOOTH;
 
-   nir_ssa_def *outvec = nir_gen_rect_vertices(&b, NULL, NULL);
+   nir_def *outvec = nir_gen_rect_vertices(&b, NULL, NULL);
    nir_store_var(&b, pos_out, outvec, 0xf);
 
-   nir_ssa_def *src_box = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
-   nir_ssa_def *vertex_id = nir_load_vertex_id_zero_base(&b);
+   nir_def *src_box = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
+   nir_def *vertex_id = nir_load_vertex_id_zero_base(&b);
 
    /* vertex 0 - src_x, src_y */
    /* vertex 1 - src_x, src_y+h */
@@ -387,22 +387,22 @@ build_nir_vertex_shader(struct radv_device *device)
    /* so channel 0 is vertex_id != 2 ? src_x : src_x + w
       channel 1 is vertex_id != 1 ? src_y : src_y + h */
 
-   nir_ssa_def *c0cmp = nir_ine_imm(&b, vertex_id, 2);
-   nir_ssa_def *c1cmp = nir_ine_imm(&b, vertex_id, 1);
+   nir_def *c0cmp = nir_ine_imm(&b, vertex_id, 2);
+   nir_def *c1cmp = nir_ine_imm(&b, vertex_id, 1);
 
-   nir_ssa_def *comp[2];
+   nir_def *comp[2];
    comp[0] = nir_bcsel(&b, c0cmp, nir_channel(&b, src_box, 0), nir_channel(&b, src_box, 2));
 
    comp[1] = nir_bcsel(&b, c1cmp, nir_channel(&b, src_box, 1), nir_channel(&b, src_box, 3));
-   nir_ssa_def *out_tex_vec = nir_vec(&b, comp, 2);
+   nir_def *out_tex_vec = nir_vec(&b, comp, 2);
    nir_store_var(&b, tex_pos_out, out_tex_vec, 0x3);
    return b.shader;
 }
 
-typedef nir_ssa_def *(*texel_fetch_build_func)(struct nir_builder *, struct radv_device *, nir_ssa_def *, bool, bool);
+typedef nir_def *(*texel_fetch_build_func)(struct nir_builder *, struct radv_device *, nir_def *, bool, bool);
 
-static nir_ssa_def *
-build_nir_texel_fetch(struct nir_builder *b, struct radv_device *device, nir_ssa_def *tex_pos, bool is_3d,
+static nir_def *
+build_nir_texel_fetch(struct nir_builder *b, struct radv_device *device, nir_def *tex_pos, bool is_3d,
                       bool is_multisampled)
 {
    enum glsl_sampler_dim dim = is_3d             ? GLSL_SAMPLER_DIM_3D
@@ -413,12 +413,12 @@ build_nir_texel_fetch(struct nir_builder *b, struct radv_device *device, nir_ssa
    sampler->data.descriptor_set = 0;
    sampler->data.binding = 0;
 
-   nir_ssa_def *tex_pos_3d = NULL;
-   nir_ssa_def *sample_idx = NULL;
+   nir_def *tex_pos_3d = NULL;
+   nir_def *sample_idx = NULL;
    if (is_3d) {
-      nir_ssa_def *layer = nir_load_push_constant(b, 1, 32, nir_imm_int(b, 0), .base = 16, .range = 4);
+      nir_def *layer = nir_load_push_constant(b, 1, 32, nir_imm_int(b, 0), .base = 16, .range = 4);
 
-      nir_ssa_def *chans[3];
+      nir_def *chans[3];
       chans[0] = nir_channel(b, tex_pos, 0);
       chans[1] = nir_channel(b, tex_pos, 1);
       chans[2] = layer;
@@ -437,8 +437,8 @@ build_nir_texel_fetch(struct nir_builder *b, struct radv_device *device, nir_ssa
    }
 }
 
-static nir_ssa_def *
-build_nir_buffer_fetch(struct nir_builder *b, struct radv_device *device, nir_ssa_def *tex_pos, bool is_3d,
+static nir_def *
+build_nir_buffer_fetch(struct nir_builder *b, struct radv_device *device, nir_def *tex_pos, bool is_3d,
                        bool is_multisampled)
 {
    const struct glsl_type *sampler_type = glsl_sampler_type(GLSL_SAMPLER_DIM_BUF, false, false, GLSL_TYPE_UINT);
@@ -446,10 +446,10 @@ build_nir_buffer_fetch(struct nir_builder *b, struct radv_device *device, nir_ss
    sampler->data.descriptor_set = 0;
    sampler->data.binding = 0;
 
-   nir_ssa_def *width = nir_load_push_constant(b, 1, 32, nir_imm_int(b, 0), .base = 16, .range = 4);
+   nir_def *width = nir_load_push_constant(b, 1, 32, nir_imm_int(b, 0), .base = 16, .range = 4);
 
-   nir_ssa_def *pos_x = nir_channel(b, tex_pos, 0);
-   nir_ssa_def *pos_y = nir_channel(b, tex_pos, 1);
+   nir_def *pos_x = nir_channel(b, tex_pos, 0);
+   nir_def *pos_y = nir_channel(b, tex_pos, 1);
    pos_y = nir_imul(b, pos_y, width);
    pos_x = nir_iadd(b, pos_x, pos_y);
 
@@ -477,10 +477,10 @@ build_nir_copy_fragment_shader(struct radv_device *device, texel_fetch_build_fun
    nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_color");
    color_out->data.location = FRAG_RESULT_DATA0;
 
-   nir_ssa_def *pos_int = nir_f2i32(&b, nir_load_var(&b, tex_pos_in));
-   nir_ssa_def *tex_pos = nir_trim_vector(&b, pos_int, 2);
+   nir_def *pos_int = nir_f2i32(&b, nir_load_var(&b, tex_pos_in));
+   nir_def *tex_pos = nir_trim_vector(&b, pos_int, 2);
 
-   nir_ssa_def *color = txf_func(&b, device, tex_pos, is_3d, is_multisampled);
+   nir_def *color = txf_func(&b, device, tex_pos, is_3d, is_multisampled);
    nir_store_var(&b, color_out, color, 0xf);
 
    b.shader->info.fs.uses_sample_shading = is_multisampled;
@@ -502,10 +502,10 @@ build_nir_copy_fragment_shader_depth(struct radv_device *device, texel_fetch_bui
    nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_color");
    color_out->data.location = FRAG_RESULT_DEPTH;
 
-   nir_ssa_def *pos_int = nir_f2i32(&b, nir_load_var(&b, tex_pos_in));
-   nir_ssa_def *tex_pos = nir_trim_vector(&b, pos_int, 2);
+   nir_def *pos_int = nir_f2i32(&b, nir_load_var(&b, tex_pos_in));
+   nir_def *tex_pos = nir_trim_vector(&b, pos_int, 2);
 
-   nir_ssa_def *color = txf_func(&b, device, tex_pos, is_3d, is_multisampled);
+   nir_def *color = txf_func(&b, device, tex_pos, is_3d, is_multisampled);
    nir_store_var(&b, color_out, color, 0x1);
 
    b.shader->info.fs.uses_sample_shading = is_multisampled;
@@ -527,10 +527,10 @@ build_nir_copy_fragment_shader_stencil(struct radv_device *device, texel_fetch_b
    nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_color");
    color_out->data.location = FRAG_RESULT_STENCIL;
 
-   nir_ssa_def *pos_int = nir_f2i32(&b, nir_load_var(&b, tex_pos_in));
-   nir_ssa_def *tex_pos = nir_trim_vector(&b, pos_int, 2);
+   nir_def *pos_int = nir_f2i32(&b, nir_load_var(&b, tex_pos_in));
+   nir_def *tex_pos = nir_trim_vector(&b, pos_int, 2);
 
-   nir_ssa_def *color = txf_func(&b, device, tex_pos, is_3d, is_multisampled);
+   nir_def *color = txf_func(&b, device, tex_pos, is_3d, is_multisampled);
    nir_store_var(&b, color_out, color, 0x1);
 
    b.shader->info.fs.uses_sample_shading = is_multisampled;
index 5cef44d..7415618 100644 (file)
@@ -10,17 +10,17 @@ build_buffer_fill_shader(struct radv_device *dev)
    nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_fill");
    b.shader->info.workgroup_size[0] = 64;
 
-   nir_ssa_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
-   nir_ssa_def *buffer_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
-   nir_ssa_def *max_offset = nir_channel(&b, pconst, 2);
-   nir_ssa_def *data = nir_swizzle(&b, nir_channel(&b, pconst, 3), (unsigned[]){0, 0, 0, 0}, 4);
+   nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
+   nir_def *buffer_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
+   nir_def *max_offset = nir_channel(&b, pconst, 2);
+   nir_def *data = nir_swizzle(&b, nir_channel(&b, pconst, 3), (unsigned[]){0, 0, 0, 0}, 4);
 
-   nir_ssa_def *global_id = nir_iadd(
+   nir_def *global_id = nir_iadd(
       &b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b, 32), 0), b.shader->info.workgroup_size[0]),
       nir_load_local_invocation_index(&b));
 
-   nir_ssa_def *offset = nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset);
-   nir_ssa_def *dst_addr = nir_iadd(&b, buffer_addr, nir_u2u64(&b, offset));
+   nir_def *offset = nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset);
+   nir_def *dst_addr = nir_iadd(&b, buffer_addr, nir_u2u64(&b, offset));
    nir_build_store_global(&b, data, dst_addr, .align_mul = 4);
 
    return b.shader;
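
The nir_imin clamp makes the last workgroup's out-of-range invocations rewrite the final 16-byte chunk rather than store past the buffer; the copy shader below relies on the same trick. A sketch of the matching dispatch math (hypothetical helper, not in this patch):

   /* One 16-byte store per invocation, 64 invocations per workgroup. */
   static uint32_t
   fill_workgroup_count(uint64_t size)
   {
      return DIV_ROUND_UP(DIV_ROUND_UP(size, 16), 64);
   }
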
@@ -32,18 +32,18 @@ build_buffer_copy_shader(struct radv_device *dev)
    nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_copy");
    b.shader->info.workgroup_size[0] = 64;
 
-   nir_ssa_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
-   nir_ssa_def *max_offset = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
-   nir_ssa_def *src_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
-   nir_ssa_def *dst_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b1100));
+   nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
+   nir_def *max_offset = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
+   nir_def *src_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
+   nir_def *dst_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b1100));
 
-   nir_ssa_def *global_id = nir_iadd(
+   nir_def *global_id = nir_iadd(
       &b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b, 32), 0), b.shader->info.workgroup_size[0]),
       nir_load_local_invocation_index(&b));
 
-   nir_ssa_def *offset = nir_u2u64(&b, nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset));
+   nir_def *offset = nir_u2u64(&b, nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset));
 
-   nir_ssa_def *data = nir_build_load_global(&b, 4, 32, nir_iadd(&b, src_addr, offset), .align_mul = 4);
+   nir_def *data = nir_build_load_global(&b, 4, 32, nir_iadd(&b, src_addr, offset), .align_mul = 4);
    nir_build_store_global(&b, data, nir_iadd(&b, dst_addr, offset), .align_mul = 4);
 
    return b.shader;
index fa395fb..fddfac9 100644 (file)
@@ -46,24 +46,24 @@ build_nir_itob_compute_shader(struct radv_device *dev, bool is_3d)
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *global_id = get_global_ids(&b, is_3d ? 3 : 2);
+   nir_def *global_id = get_global_ids(&b, is_3d ? 3 : 2);
 
-   nir_ssa_def *offset = nir_load_push_constant(&b, is_3d ? 3 : 2, 32, nir_imm_int(&b, 0), .range = is_3d ? 12 : 8);
-   nir_ssa_def *stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
+   nir_def *offset = nir_load_push_constant(&b, is_3d ? 3 : 2, 32, nir_imm_int(&b, 0), .range = is_3d ? 12 : 8);
+   nir_def *stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
 
-   nir_ssa_def *img_coord = nir_iadd(&b, global_id, offset);
-   nir_ssa_def *outval =
+   nir_def *img_coord = nir_iadd(&b, global_id, offset);
+   nir_def *outval =
       nir_txf_deref(&b, nir_build_deref_var(&b, input_img), nir_trim_vector(&b, img_coord, 2 + is_3d), NULL);
 
-   nir_ssa_def *pos_x = nir_channel(&b, global_id, 0);
-   nir_ssa_def *pos_y = nir_channel(&b, global_id, 1);
+   nir_def *pos_x = nir_channel(&b, global_id, 0);
+   nir_def *pos_y = nir_channel(&b, global_id, 1);
 
-   nir_ssa_def *tmp = nir_imul(&b, pos_y, stride);
+   nir_def *tmp = nir_imul(&b, pos_y, stride);
    tmp = nir_iadd(&b, tmp, pos_x);
 
-   nir_ssa_def *coord = nir_replicate(&b, tmp, 4);
+   nir_def *coord = nir_replicate(&b, tmp, 4);
 
-   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_ssa_undef(&b, 1, 32), outval,
+   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), outval,
                          nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
 
    return b.shader;
@@ -196,26 +196,25 @@ build_nir_btoi_compute_shader(struct radv_device *dev, bool is_3d)
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *global_id = get_global_ids(&b, is_3d ? 3 : 2);
+   nir_def *global_id = get_global_ids(&b, is_3d ? 3 : 2);
 
-   nir_ssa_def *offset = nir_load_push_constant(&b, is_3d ? 3 : 2, 32, nir_imm_int(&b, 0), .range = is_3d ? 12 : 8);
-   nir_ssa_def *stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
+   nir_def *offset = nir_load_push_constant(&b, is_3d ? 3 : 2, 32, nir_imm_int(&b, 0), .range = is_3d ? 12 : 8);
+   nir_def *stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
 
-   nir_ssa_def *pos_x = nir_channel(&b, global_id, 0);
-   nir_ssa_def *pos_y = nir_channel(&b, global_id, 1);
+   nir_def *pos_x = nir_channel(&b, global_id, 0);
+   nir_def *pos_y = nir_channel(&b, global_id, 1);
 
-   nir_ssa_def *buf_coord = nir_imul(&b, pos_y, stride);
+   nir_def *buf_coord = nir_imul(&b, pos_y, stride);
    buf_coord = nir_iadd(&b, buf_coord, pos_x);
 
-   nir_ssa_def *coord = nir_iadd(&b, global_id, offset);
-   nir_ssa_def *outval = nir_txf_deref(&b, nir_build_deref_var(&b, input_img), buf_coord, NULL);
+   nir_def *coord = nir_iadd(&b, global_id, offset);
+   nir_def *outval = nir_txf_deref(&b, nir_build_deref_var(&b, input_img), buf_coord, NULL);
 
-   nir_ssa_def *img_coord =
-      nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1),
-               is_3d ? nir_channel(&b, coord, 2) : nir_ssa_undef(&b, 1, 32), nir_ssa_undef(&b, 1, 32));
+   nir_def *img_coord = nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1),
+                                 is_3d ? nir_channel(&b, coord, 2) : nir_undef(&b, 1, 32), nir_undef(&b, 1, 32));
 
-   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_ssa_undef(&b, 1, 32),
-                         outval, nir_imm_int(&b, 0), .image_dim = dim);
+   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), outval,
+                         nir_imm_int(&b, 0), .image_dim = dim);
 
    return b.shader;
 }
@@ -344,31 +343,31 @@ build_nir_btoi_r32g32b32_compute_shader(struct radv_device *dev)
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 2);
+   nir_def *global_id = get_global_ids(&b, 2);
 
-   nir_ssa_def *offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
-   nir_ssa_def *pitch = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 8), .range = 12);
-   nir_ssa_def *stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
+   nir_def *offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
+   nir_def *pitch = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 8), .range = 12);
+   nir_def *stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
 
-   nir_ssa_def *pos_x = nir_channel(&b, global_id, 0);
-   nir_ssa_def *pos_y = nir_channel(&b, global_id, 1);
+   nir_def *pos_x = nir_channel(&b, global_id, 0);
+   nir_def *pos_y = nir_channel(&b, global_id, 1);
 
-   nir_ssa_def *buf_coord = nir_imul(&b, pos_y, stride);
+   nir_def *buf_coord = nir_imul(&b, pos_y, stride);
    buf_coord = nir_iadd(&b, buf_coord, pos_x);
 
-   nir_ssa_def *img_coord = nir_iadd(&b, global_id, offset);
+   nir_def *img_coord = nir_iadd(&b, global_id, offset);
 
-   nir_ssa_def *global_pos = nir_iadd(&b, nir_imul(&b, nir_channel(&b, img_coord, 1), pitch),
-                                      nir_imul_imm(&b, nir_channel(&b, img_coord, 0), 3));
+   nir_def *global_pos = nir_iadd(&b, nir_imul(&b, nir_channel(&b, img_coord, 1), pitch),
+                                  nir_imul_imm(&b, nir_channel(&b, img_coord, 0), 3));
 
-   nir_ssa_def *outval = nir_txf_deref(&b, nir_build_deref_var(&b, input_img), buf_coord, NULL);
+   nir_def *outval = nir_txf_deref(&b, nir_build_deref_var(&b, input_img), buf_coord, NULL);
 
    for (int chan = 0; chan < 3; chan++) {
-      nir_ssa_def *local_pos = nir_iadd_imm(&b, global_pos, chan);
+      nir_def *local_pos = nir_iadd_imm(&b, global_pos, chan);
 
-      nir_ssa_def *coord = nir_replicate(&b, local_pos, 4);
+      nir_def *coord = nir_replicate(&b, local_pos, 4);
 
-      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_ssa_undef(&b, 1, 32),
+      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32),
                             nir_channel(&b, outval, chan), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
    }
 
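Presumably because the 96-bit formats lack direct image-store support, the shader addresses the destination as a tightly packed R32 buffer, three dwords per texel. A worked example of the addressing above (illustrative):

   /* Texel (x, y) with a row pitch of `pitch` dwords lands at dwords:
    *    pitch * y + 3 * x + 0   (R)
    *    pitch * y + 3 * x + 1   (G)
    *    pitch * y + 3 * x + 2   (B)
    */
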
@@ -472,18 +471,17 @@ build_nir_itoi_compute_shader(struct radv_device *dev, bool is_3d, int samples)
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *global_id = get_global_ids(&b, is_3d ? 3 : 2);
+   nir_def *global_id = get_global_ids(&b, is_3d ? 3 : 2);
 
-   nir_ssa_def *src_offset = nir_load_push_constant(&b, is_3d ? 3 : 2, 32, nir_imm_int(&b, 0), .range = is_3d ? 12 : 8);
-   nir_ssa_def *dst_offset =
-      nir_load_push_constant(&b, is_3d ? 3 : 2, 32, nir_imm_int(&b, 12), .range = is_3d ? 24 : 20);
+   nir_def *src_offset = nir_load_push_constant(&b, is_3d ? 3 : 2, 32, nir_imm_int(&b, 0), .range = is_3d ? 12 : 8);
+   nir_def *dst_offset = nir_load_push_constant(&b, is_3d ? 3 : 2, 32, nir_imm_int(&b, 12), .range = is_3d ? 24 : 20);
 
-   nir_ssa_def *src_coord = nir_iadd(&b, global_id, src_offset);
+   nir_def *src_coord = nir_iadd(&b, global_id, src_offset);
    nir_deref_instr *input_img_deref = nir_build_deref_var(&b, input_img);
 
-   nir_ssa_def *dst_coord = nir_iadd(&b, global_id, dst_offset);
+   nir_def *dst_coord = nir_iadd(&b, global_id, dst_offset);
 
-   nir_ssa_def *tex_vals[8];
+   nir_def *tex_vals[8];
    if (is_multisampled) {
       for (uint32_t i = 0; i < samples; i++) {
          tex_vals[i] = nir_txf_ms_deref(&b, input_img_deref, nir_trim_vector(&b, src_coord, 2), nir_imm_int(&b, i));
@@ -492,9 +490,8 @@ build_nir_itoi_compute_shader(struct radv_device *dev, bool is_3d, int samples)
       tex_vals[0] = nir_txf_deref(&b, input_img_deref, nir_trim_vector(&b, src_coord, 2 + is_3d), nir_imm_int(&b, 0));
    }
 
-   nir_ssa_def *img_coord =
-      nir_vec4(&b, nir_channel(&b, dst_coord, 0), nir_channel(&b, dst_coord, 1),
-               is_3d ? nir_channel(&b, dst_coord, 2) : nir_ssa_undef(&b, 1, 32), nir_ssa_undef(&b, 1, 32));
+   nir_def *img_coord = nir_vec4(&b, nir_channel(&b, dst_coord, 0), nir_channel(&b, dst_coord, 1),
+                                 is_3d ? nir_channel(&b, dst_coord, 2) : nir_undef(&b, 1, 32), nir_undef(&b, 1, 32));
 
    for (uint32_t i = 0; i < samples; i++) {
       nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_imm_int(&b, i),
@@ -641,34 +638,34 @@ build_nir_itoi_r32g32b32_compute_shader(struct radv_device *dev)
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 2);
+   nir_def *global_id = get_global_ids(&b, 2);
 
-   nir_ssa_def *src_offset = nir_load_push_constant(&b, 3, 32, nir_imm_int(&b, 0), .range = 12);
-   nir_ssa_def *dst_offset = nir_load_push_constant(&b, 3, 32, nir_imm_int(&b, 12), .range = 24);
+   nir_def *src_offset = nir_load_push_constant(&b, 3, 32, nir_imm_int(&b, 0), .range = 12);
+   nir_def *dst_offset = nir_load_push_constant(&b, 3, 32, nir_imm_int(&b, 12), .range = 24);
 
-   nir_ssa_def *src_stride = nir_channel(&b, src_offset, 2);
-   nir_ssa_def *dst_stride = nir_channel(&b, dst_offset, 2);
+   nir_def *src_stride = nir_channel(&b, src_offset, 2);
+   nir_def *dst_stride = nir_channel(&b, dst_offset, 2);
 
-   nir_ssa_def *src_img_coord = nir_iadd(&b, global_id, src_offset);
-   nir_ssa_def *dst_img_coord = nir_iadd(&b, global_id, dst_offset);
+   nir_def *src_img_coord = nir_iadd(&b, global_id, src_offset);
+   nir_def *dst_img_coord = nir_iadd(&b, global_id, dst_offset);
 
-   nir_ssa_def *src_global_pos = nir_iadd(&b, nir_imul(&b, nir_channel(&b, src_img_coord, 1), src_stride),
-                                          nir_imul_imm(&b, nir_channel(&b, src_img_coord, 0), 3));
+   nir_def *src_global_pos = nir_iadd(&b, nir_imul(&b, nir_channel(&b, src_img_coord, 1), src_stride),
+                                      nir_imul_imm(&b, nir_channel(&b, src_img_coord, 0), 3));
 
-   nir_ssa_def *dst_global_pos = nir_iadd(&b, nir_imul(&b, nir_channel(&b, dst_img_coord, 1), dst_stride),
-                                          nir_imul_imm(&b, nir_channel(&b, dst_img_coord, 0), 3));
+   nir_def *dst_global_pos = nir_iadd(&b, nir_imul(&b, nir_channel(&b, dst_img_coord, 1), dst_stride),
+                                      nir_imul_imm(&b, nir_channel(&b, dst_img_coord, 0), 3));
 
    for (int chan = 0; chan < 3; chan++) {
       /* src */
-      nir_ssa_def *src_local_pos = nir_iadd_imm(&b, src_global_pos, chan);
-      nir_ssa_def *outval = nir_txf_deref(&b, nir_build_deref_var(&b, input_img), src_local_pos, NULL);
+      nir_def *src_local_pos = nir_iadd_imm(&b, src_global_pos, chan);
+      nir_def *outval = nir_txf_deref(&b, nir_build_deref_var(&b, input_img), src_local_pos, NULL);
 
       /* dst */
-      nir_ssa_def *dst_local_pos = nir_iadd_imm(&b, dst_global_pos, chan);
+      nir_def *dst_local_pos = nir_iadd_imm(&b, dst_global_pos, chan);
 
-      nir_ssa_def *dst_coord = nir_replicate(&b, dst_local_pos, 4);
+      nir_def *dst_coord = nir_replicate(&b, dst_local_pos, 4);
 
-      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, dst_coord, nir_ssa_undef(&b, 1, 32),
+      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, dst_coord, nir_undef(&b, 1, 32),
                             nir_channel(&b, outval, 0), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
    }
 
@@ -769,16 +766,16 @@ build_nir_cleari_compute_shader(struct radv_device *dev, bool is_3d, int samples
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 0;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 2);
+   nir_def *global_id = get_global_ids(&b, 2);
 
-   nir_ssa_def *clear_val = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
-   nir_ssa_def *layer = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 16), .range = 20);
+   nir_def *clear_val = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
+   nir_def *layer = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 16), .range = 20);
 
-   nir_ssa_def *comps[4];
+   nir_def *comps[4];
    comps[0] = nir_channel(&b, global_id, 0);
    comps[1] = nir_channel(&b, global_id, 1);
    comps[2] = layer;
-   comps[3] = nir_ssa_undef(&b, 1, 32);
+   comps[3] = nir_undef(&b, 1, 32);
    global_id = nir_vec(&b, comps, 4);
 
    for (uint32_t i = 0; i < samples; i++) {
@@ -917,22 +914,22 @@ build_nir_cleari_r32g32b32_compute_shader(struct radv_device *dev)
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 0;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 2);
+   nir_def *global_id = get_global_ids(&b, 2);
 
-   nir_ssa_def *clear_val = nir_load_push_constant(&b, 3, 32, nir_imm_int(&b, 0), .range = 12);
-   nir_ssa_def *stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
+   nir_def *clear_val = nir_load_push_constant(&b, 3, 32, nir_imm_int(&b, 0), .range = 12);
+   nir_def *stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
 
-   nir_ssa_def *global_x = nir_channel(&b, global_id, 0);
-   nir_ssa_def *global_y = nir_channel(&b, global_id, 1);
+   nir_def *global_x = nir_channel(&b, global_id, 0);
+   nir_def *global_y = nir_channel(&b, global_id, 1);
 
-   nir_ssa_def *global_pos = nir_iadd(&b, nir_imul(&b, global_y, stride), nir_imul_imm(&b, global_x, 3));
+   nir_def *global_pos = nir_iadd(&b, nir_imul(&b, global_y, stride), nir_imul_imm(&b, global_x, 3));
 
    for (unsigned chan = 0; chan < 3; chan++) {
-      nir_ssa_def *local_pos = nir_iadd_imm(&b, global_pos, chan);
+      nir_def *local_pos = nir_iadd_imm(&b, global_pos, chan);
 
-      nir_ssa_def *coord = nir_replicate(&b, local_pos, 4);
+      nir_def *coord = nir_replicate(&b, local_pos, 4);
 
-      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_ssa_undef(&b, 1, 32),
+      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32),
                             nir_channel(&b, clear_val, chan), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
    }
 
index 30e0047..4e97082 100644 (file)
@@ -45,24 +45,24 @@ build_color_shaders(struct radv_device *dev, struct nir_shader **out_vs, struct
    nir_variable *vs_out_pos = nir_variable_create(vs_b.shader, nir_var_shader_out, position_type, "gl_Position");
    vs_out_pos->data.location = VARYING_SLOT_POS;
 
-   nir_ssa_def *in_color_load = nir_load_push_constant(&fs_b, 4, 32, nir_imm_int(&fs_b, 0), .range = 16);
+   nir_def *in_color_load = nir_load_push_constant(&fs_b, 4, 32, nir_imm_int(&fs_b, 0), .range = 16);
 
    nir_variable *fs_out_color = nir_variable_create(fs_b.shader, nir_var_shader_out, color_type, "f_color");
    fs_out_color->data.location = FRAG_RESULT_DATA0 + frag_output;
 
    nir_store_var(&fs_b, fs_out_color, in_color_load, 0xf);
 
-   nir_ssa_def *outvec = nir_gen_rect_vertices(&vs_b, NULL, NULL);
+   nir_def *outvec = nir_gen_rect_vertices(&vs_b, NULL, NULL);
    nir_store_var(&vs_b, vs_out_pos, outvec, 0xf);
 
    const struct glsl_type *layer_type = glsl_int_type();
    nir_variable *vs_out_layer = nir_variable_create(vs_b.shader, nir_var_shader_out, layer_type, "v_layer");
    vs_out_layer->data.location = VARYING_SLOT_LAYER;
    vs_out_layer->data.interpolation = INTERP_MODE_FLAT;
-   nir_ssa_def *inst_id = nir_load_instance_id(&vs_b);
-   nir_ssa_def *base_instance = nir_load_base_instance(&vs_b);
+   nir_def *inst_id = nir_load_instance_id(&vs_b);
+   nir_def *base_instance = nir_load_base_instance(&vs_b);
 
-   nir_ssa_def *layer_id = nir_iadd(&vs_b, inst_id, base_instance);
+   nir_def *layer_id = nir_iadd(&vs_b, inst_id, base_instance);
    nir_store_var(&vs_b, vs_out_layer, layer_id, 0x1);
 
    *out_vs = vs_b.shader;
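
VARYING_SLOT_LAYER is fed from instance id plus base instance, so a layered clear can run as one instance per target layer with firstInstance selecting the base layer. A hedged sketch of the matching draw (standard vkCmdDraw; the 3-vertex count assumes the usual oversized-triangle rect emitted by nir_gen_rect_vertices):

   vkCmdDraw(cmd_buffer, 3 /* vertices */, layer_count, 0, base_layer);
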
@@ -376,9 +376,9 @@ build_depthstencil_shader(struct radv_device *dev, struct nir_shader **out_vs, s
    nir_variable *vs_out_pos = nir_variable_create(vs_b.shader, nir_var_shader_out, position_out_type, "gl_Position");
    vs_out_pos->data.location = VARYING_SLOT_POS;
 
-   nir_ssa_def *z;
+   nir_def *z;
    if (unrestricted) {
-      nir_ssa_def *in_color_load = nir_load_push_constant(&fs_b, 1, 32, nir_imm_int(&fs_b, 0), .range = 4);
+      nir_def *in_color_load = nir_load_push_constant(&fs_b, 1, 32, nir_imm_int(&fs_b, 0), .range = 4);
 
       nir_variable *fs_out_depth = nir_variable_create(fs_b.shader, nir_var_shader_out, glsl_int_type(), "f_depth");
       fs_out_depth->data.location = FRAG_RESULT_DEPTH;
@@ -389,17 +389,17 @@ build_depthstencil_shader(struct radv_device *dev, struct nir_shader **out_vs, s
       z = nir_load_push_constant(&vs_b, 1, 32, nir_imm_int(&vs_b, 0), .range = 4);
    }
 
-   nir_ssa_def *outvec = nir_gen_rect_vertices(&vs_b, z, NULL);
+   nir_def *outvec = nir_gen_rect_vertices(&vs_b, z, NULL);
    nir_store_var(&vs_b, vs_out_pos, outvec, 0xf);
 
    const struct glsl_type *layer_type = glsl_int_type();
    nir_variable *vs_out_layer = nir_variable_create(vs_b.shader, nir_var_shader_out, layer_type, "v_layer");
    vs_out_layer->data.location = VARYING_SLOT_LAYER;
    vs_out_layer->data.interpolation = INTERP_MODE_FLAT;
-   nir_ssa_def *inst_id = nir_load_instance_id(&vs_b);
-   nir_ssa_def *base_instance = nir_load_base_instance(&vs_b);
+   nir_def *inst_id = nir_load_instance_id(&vs_b);
+   nir_def *base_instance = nir_load_base_instance(&vs_b);
 
-   nir_ssa_def *layer_id = nir_iadd(&vs_b, inst_id, base_instance);
+   nir_def *layer_id = nir_iadd(&vs_b, inst_id, base_instance);
    nir_store_var(&vs_b, vs_out_layer, layer_id, 0x1);
 
    *out_vs = vs_b.shader;
@@ -808,19 +808,19 @@ build_clear_htile_mask_shader(struct radv_device *dev)
    nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_clear_htile_mask");
    b.shader->info.workgroup_size[0] = 64;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 1);
+   nir_def *global_id = get_global_ids(&b, 1);
 
-   nir_ssa_def *offset = nir_imul_imm(&b, global_id, 16);
+   nir_def *offset = nir_imul_imm(&b, global_id, 16);
    offset = nir_channel(&b, offset, 0);
 
-   nir_ssa_def *buf = radv_meta_load_descriptor(&b, 0, 0);
+   nir_def *buf = radv_meta_load_descriptor(&b, 0, 0);
 
-   nir_ssa_def *constants = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
+   nir_def *constants = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
 
-   nir_ssa_def *load = nir_load_ssbo(&b, 4, 32, buf, offset, .align_mul = 16);
+   nir_def *load = nir_load_ssbo(&b, 4, 32, buf, offset, .align_mul = 16);
 
    /* data = (data & ~htile_mask) | (htile_value & htile_mask) */
-   nir_ssa_def *data = nir_iand(&b, load, nir_channel(&b, constants, 1));
+   nir_def *data = nir_iand(&b, load, nir_channel(&b, constants, 1));
    data = nir_ior(&b, data, nir_channel(&b, constants, 0));
 
    nir_store_ssbo(&b, data, buf, offset, .access = ACCESS_NON_READABLE, .align_mul = 16);
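
The two push-constant dwords presumably arrive pre-combined from the host, channel 0 as (htile_value & htile_mask) and channel 1 as ~htile_mask, so the shader only needs an AND and an OR per dword. Scalar equivalent of the update (illustrative, not in the patch):

   static inline uint32_t
   htile_masked_update(uint32_t data, uint32_t htile_value, uint32_t htile_mask)
   {
      /* Keep bits outside the mask, take the new value's bits inside it. */
      return (data & ~htile_mask) | (htile_value & htile_mask);
   }
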
@@ -906,29 +906,29 @@ build_clear_dcc_comp_to_single_shader(struct radv_device *dev, bool is_msaa)
    b.shader->info.workgroup_size[0] = 8;
    b.shader->info.workgroup_size[1] = 8;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 3);
+   nir_def *global_id = get_global_ids(&b, 3);
 
    /* Load the dimensions in pixels of a block that gets compressed to one DCC byte. */
-   nir_ssa_def *dcc_block_size = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
+   nir_def *dcc_block_size = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
 
    /* Compute the coordinates. */
-   nir_ssa_def *coord = nir_trim_vector(&b, global_id, 2);
+   nir_def *coord = nir_trim_vector(&b, global_id, 2);
    coord = nir_imul(&b, coord, dcc_block_size);
    coord = nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), nir_channel(&b, global_id, 2),
-                    nir_ssa_undef(&b, 1, 32));
+                    nir_undef(&b, 1, 32));
 
    nir_variable *output_img = nir_variable_create(b.shader, nir_var_image, img_type, "out_img");
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 0;
 
    /* Load the clear color values. */
-   nir_ssa_def *clear_values = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 8), .range = 8);
+   nir_def *clear_values = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 8), .range = 8);
 
-   nir_ssa_def *data = nir_vec4(&b, nir_channel(&b, clear_values, 0), nir_channel(&b, clear_values, 1),
-                                nir_channel(&b, clear_values, 1), nir_channel(&b, clear_values, 1));
+   nir_def *data = nir_vec4(&b, nir_channel(&b, clear_values, 0), nir_channel(&b, clear_values, 1),
+                            nir_channel(&b, clear_values, 1), nir_channel(&b, clear_values, 1));
 
    /* Store the clear color values. */
-   nir_ssa_def *sample_id = is_msaa ? nir_imm_int(&b, 0) : nir_ssa_undef(&b, 1, 32);
+   nir_def *sample_id = is_msaa ? nir_imm_int(&b, 0) : nir_undef(&b, 1, 32);
    nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, sample_id, data, nir_imm_int(&b, 0),
                          .image_dim = dim, .image_array = true);
 
index 6af3920..02b3be7 100644 (file)
@@ -47,22 +47,22 @@ build_copy_vrs_htile_shader(struct radv_device *device, struct radeon_surf *surf
    b.shader->info.workgroup_size[1] = 8;
 
    /* Get coordinates. */
-   nir_ssa_def *global_id = get_global_ids(&b, 2);
+   nir_def *global_id = get_global_ids(&b, 2);
 
-   nir_ssa_def *offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
+   nir_def *offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
 
    /* Scale the coordinates by the 8x8 HTILE block size and add the base offset. */
-   nir_ssa_def *coord = nir_iadd(&b, nir_imul_imm(&b, global_id, 8), offset);
+   nir_def *coord = nir_iadd(&b, nir_imul_imm(&b, global_id, 8), offset);
 
    /* Load constants. */
-   nir_ssa_def *constants = nir_load_push_constant(&b, 3, 32, nir_imm_int(&b, 8), .range = 20);
-   nir_ssa_def *htile_pitch = nir_channel(&b, constants, 0);
-   nir_ssa_def *htile_slice_size = nir_channel(&b, constants, 1);
-   nir_ssa_def *read_htile_value = nir_channel(&b, constants, 2);
+   nir_def *constants = nir_load_push_constant(&b, 3, 32, nir_imm_int(&b, 8), .range = 20);
+   nir_def *htile_pitch = nir_channel(&b, constants, 0);
+   nir_def *htile_slice_size = nir_channel(&b, constants, 1);
+   nir_def *read_htile_value = nir_channel(&b, constants, 2);
 
    /* Get the HTILE addr from coordinates. */
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
-   nir_ssa_def *htile_addr =
+   nir_def *zero = nir_imm_int(&b, 0);
+   nir_def *htile_addr =
       ac_nir_htile_addr_from_coord(&b, &device->physical_device->rad_info, &surf->u.gfx9.zs.htile_equation, htile_pitch,
                                    htile_slice_size, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), zero, zero);
 
@@ -73,7 +73,7 @@ build_copy_vrs_htile_shader(struct radv_device *device, struct radeon_surf *surf
    input_vrs_img->data.binding = 0;
 
    /* Load the VRS rates from the 2D image. */
-   nir_ssa_def *value = nir_txf_deref(&b, nir_build_deref_var(&b, input_vrs_img), global_id, NULL);
+   nir_def *value = nir_txf_deref(&b, nir_build_deref_var(&b, input_vrs_img), global_id, NULL);
 
    /* Extract the X/Y rates and clamp them because the maximum supported VRS rate is 2x2 (1x1 in
     * hardware).
@@ -81,17 +81,17 @@ build_copy_vrs_htile_shader(struct radv_device *device, struct radeon_surf *surf
     * VRS rate X = min(value >> 2, 1)
     * VRS rate Y = min(value & 3, 1)
     */
-   nir_ssa_def *x_rate = nir_ushr_imm(&b, nir_channel(&b, value, 0), 2);
+   nir_def *x_rate = nir_ushr_imm(&b, nir_channel(&b, value, 0), 2);
    x_rate = nir_umin(&b, x_rate, nir_imm_int(&b, 1));
 
-   nir_ssa_def *y_rate = nir_iand_imm(&b, nir_channel(&b, value, 0), 3);
+   nir_def *y_rate = nir_iand_imm(&b, nir_channel(&b, value, 0), 3);
    y_rate = nir_umin(&b, y_rate, nir_imm_int(&b, 1));
 
    /* Compute the final VRS rate. */
-   nir_ssa_def *vrs_rates = nir_ior(&b, nir_ishl_imm(&b, y_rate, 10), nir_ishl_imm(&b, x_rate, 6));
+   nir_def *vrs_rates = nir_ior(&b, nir_ishl_imm(&b, y_rate, 10), nir_ishl_imm(&b, x_rate, 6));
 
    /* Load the HTILE buffer descriptor. */
-   nir_ssa_def *htile_buf = radv_meta_load_descriptor(&b, 0, 1);
+   nir_def *htile_buf = radv_meta_load_descriptor(&b, 0, 1);
 
    /* Load the HTILE value if requested, otherwise use the default value. */
    nir_variable *htile_value = nir_local_variable_create(b.impl, glsl_int_type(), "htile_value");
@@ -99,7 +99,7 @@ build_copy_vrs_htile_shader(struct radv_device *device, struct radeon_surf *surf
    nir_push_if(&b, nir_ieq_imm(&b, read_htile_value, 1));
    {
       /* Load the existing 32-bit HTILE value for this 8x8 pixel area. */
-      nir_ssa_def *input_value = nir_load_ssbo(&b, 1, 32, htile_buf, htile_addr);
+      nir_def *input_value = nir_load_ssbo(&b, 1, 32, htile_buf, htile_addr);
 
       /* Clear the 4-bit VRS rates. */
       nir_store_var(&b, htile_value, nir_iand_imm(&b, input_value, 0xfffff33f), 0x1);
@@ -111,7 +111,7 @@ build_copy_vrs_htile_shader(struct radv_device *device, struct radeon_surf *surf
    nir_pop_if(&b, NULL);
 
    /* Set the VRS rates loaded from the image. */
-   nir_ssa_def *output_value = nir_ior(&b, nir_load_var(&b, htile_value), vrs_rates);
+   nir_def *output_value = nir_ior(&b, nir_load_var(&b, htile_value), vrs_rates);
 
    /* Store the updated 32-bit HTILE value, which contains the VRS rates. */
    nir_store_ssbo(&b, output_value, htile_buf, htile_addr, .access = ACCESS_NON_READABLE);
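
Taken together, the extraction and packing reduce to this per-pixel scalar (a sketch; MIN2 is the usual Mesa macro):

   static inline uint32_t
   vrs_rates_to_htile(uint32_t value)
   {
      uint32_t x_rate = MIN2(value >> 2, 1); /* clamp to the 2x2 maximum */
      uint32_t y_rate = MIN2(value & 3, 1);
      return (y_rate << 10) | (x_rate << 6); /* the 4 VRS rate bits in HTILE */
   }
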
index 72987c9..4613d2d 100644 (file)
@@ -37,13 +37,13 @@ build_dcc_retile_compute_shader(struct radv_device *dev, struct radeon_surf *sur
    b.shader->info.workgroup_size[0] = 8;
    b.shader->info.workgroup_size[1] = 8;
 
-   nir_ssa_def *src_dcc_size = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
-   nir_ssa_def *src_dcc_pitch = nir_channels(&b, src_dcc_size, 1);
-   nir_ssa_def *src_dcc_height = nir_channels(&b, src_dcc_size, 2);
+   nir_def *src_dcc_size = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
+   nir_def *src_dcc_pitch = nir_channels(&b, src_dcc_size, 1);
+   nir_def *src_dcc_height = nir_channels(&b, src_dcc_size, 2);
 
-   nir_ssa_def *dst_dcc_size = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 8), .range = 8);
-   nir_ssa_def *dst_dcc_pitch = nir_channels(&b, dst_dcc_size, 1);
-   nir_ssa_def *dst_dcc_height = nir_channels(&b, dst_dcc_size, 2);
+   nir_def *dst_dcc_size = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 8), .range = 8);
+   nir_def *dst_dcc_pitch = nir_channels(&b, dst_dcc_size, 1);
+   nir_def *dst_dcc_height = nir_channels(&b, dst_dcc_size, 2);
    nir_variable *input_dcc = nir_variable_create(b.shader, nir_var_uniform, buf_type, "dcc_in");
    input_dcc->data.descriptor_set = 0;
    input_dcc->data.binding = 0;
@@ -51,25 +51,25 @@ build_dcc_retile_compute_shader(struct radv_device *dev, struct radeon_surf *sur
    output_dcc->data.descriptor_set = 0;
    output_dcc->data.binding = 1;
 
-   nir_ssa_def *input_dcc_ref = &nir_build_deref_var(&b, input_dcc)->dest.ssa;
-   nir_ssa_def *output_dcc_ref = &nir_build_deref_var(&b, output_dcc)->dest.ssa;
+   nir_def *input_dcc_ref = &nir_build_deref_var(&b, input_dcc)->dest.ssa;
+   nir_def *output_dcc_ref = &nir_build_deref_var(&b, output_dcc)->dest.ssa;
 
-   nir_ssa_def *coord = get_global_ids(&b, 2);
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
+   nir_def *coord = get_global_ids(&b, 2);
+   nir_def *zero = nir_imm_int(&b, 0);
    coord =
       nir_imul(&b, coord, nir_imm_ivec2(&b, surf->u.gfx9.color.dcc_block_width, surf->u.gfx9.color.dcc_block_height));
 
-   nir_ssa_def *src = ac_nir_dcc_addr_from_coord(
-      &b, &dev->physical_device->rad_info, surf->bpe, &surf->u.gfx9.color.dcc_equation, src_dcc_pitch, src_dcc_height,
-      zero, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), zero, zero, zero);
-   nir_ssa_def *dst = ac_nir_dcc_addr_from_coord(
+   nir_def *src = ac_nir_dcc_addr_from_coord(&b, &dev->physical_device->rad_info, surf->bpe,
+                                             &surf->u.gfx9.color.dcc_equation, src_dcc_pitch, src_dcc_height, zero,
+                                             nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), zero, zero, zero);
+   nir_def *dst = ac_nir_dcc_addr_from_coord(
       &b, &dev->physical_device->rad_info, surf->bpe, &surf->u.gfx9.color.display_dcc_equation, dst_dcc_pitch,
       dst_dcc_height, zero, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), zero, zero, zero);
 
-   nir_ssa_def *dcc_val = nir_image_deref_load(&b, 1, 32, input_dcc_ref, nir_vec4(&b, src, src, src, src),
-                                               nir_ssa_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = dim);
+   nir_def *dcc_val = nir_image_deref_load(&b, 1, 32, input_dcc_ref, nir_vec4(&b, src, src, src, src),
+                                           nir_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = dim);
 
-   nir_image_deref_store(&b, output_dcc_ref, nir_vec4(&b, dst, dst, dst, dst), nir_ssa_undef(&b, 1, 32), dcc_val,
+   nir_image_deref_store(&b, output_dcc_ref, nir_vec4(&b, dst, dst, dst, dst), nir_undef(&b, 1, 32), dcc_val,
                          nir_imm_int(&b, 0), .image_dim = dim);
 
    return b.shader;
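
In scalar form the retile is a gather/scatter of one DCC byte per block between the two swizzle equations; a rough shape (dcc_addr_* stand in for the ac_nir_dcc_addr_from_coord math and are hypothetical):

   static void
   retile_dcc(uint8_t *dst, const uint8_t *src, uint32_t w, uint32_t h,
              uint32_t block_w, uint32_t block_h)
   {
      for (uint32_t y = 0; y < h; y += block_h)
         for (uint32_t x = 0; x < w; x += block_w)
            dst[dcc_addr_display(x, y)] = src[dcc_addr_src(x, y)];
   }
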
index 0a3bbe1..1a0b51e 100644 (file)
@@ -51,16 +51,15 @@ build_expand_depth_stencil_compute_shader(struct radv_device *dev)
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
-   nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
-   nir_ssa_def *block_size = nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
-                                           b.shader->info.workgroup_size[2], 0);
+   nir_def *invoc_id = nir_load_local_invocation_id(&b);
+   nir_def *wg_id = nir_load_workgroup_id(&b, 32);
+   nir_def *block_size = nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+                                       b.shader->info.workgroup_size[2], 0);
 
-   nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
+   nir_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
 
-   nir_ssa_def *data =
-      nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, global_id,
-                           nir_ssa_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
+   nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, global_id,
+                                        nir_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
 
    /* We need a SCOPE_DEVICE memory_scope because ACO will avoid
     * creating a vmcnt(0) because it expects the L1 cache to keep memory
@@ -69,7 +68,7 @@ build_expand_depth_stencil_compute_shader(struct radv_device *dev)
    nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
                .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
 
-   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_ssa_undef(&b, 1, 32), data,
+   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_undef(&b, 1, 32), data,
                          nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
    return b.shader;
 }
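
The global_id built above is the usual compute-shader identity; in scalar
terms (a sketch, not part of the patch):

   /* per component i of the 3D invocation */
   global_id[i] = workgroup_id[i] * workgroup_size[i] + local_invocation_id[i];
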
index c770620..4f88869 100644
  *  - the EAC shader doesn't handle SNORM correctly, so this version fixes that.
  */
 
-static nir_ssa_def *
-flip_endian(nir_builder *b, nir_ssa_def *src, unsigned cnt)
+static nir_def *
+flip_endian(nir_builder *b, nir_def *src, unsigned cnt)
 {
-   nir_ssa_def *v[2];
+   nir_def *v[2];
    for (unsigned i = 0; i < cnt; ++i) {
-      nir_ssa_def *intermediate[4];
-      nir_ssa_def *chan = cnt == 1 ? src : nir_channel(b, src, i);
+      nir_def *intermediate[4];
+      nir_def *chan = cnt == 1 ? src : nir_channel(b, src, i);
       for (unsigned j = 0; j < 4; ++j)
          intermediate[j] = nir_ubfe_imm(b, chan, 8 * j, 8);
       v[i] = nir_ior(b, nir_ior(b, nir_ishl_imm(b, intermediate[0], 24), nir_ishl_imm(b, intermediate[1], 16)),
@@ -56,14 +56,14 @@ flip_endian(nir_builder *b, nir_ssa_def *src, unsigned cnt)
    return cnt == 1 ? v[0] : nir_vec(b, v, cnt);
 }
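
Per channel, flip_endian reassembles the ubfe'd bytes into a plain 32-bit
byte swap; a scalar C sketch of one channel:

   uint32_t swapped = ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
                      ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);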
 
-static nir_ssa_def *
-etc1_color_modifier_lookup(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+etc1_color_modifier_lookup(nir_builder *b, nir_def *x, nir_def *y)
 {
    const unsigned table[8][2] = {{2, 8}, {5, 17}, {9, 29}, {13, 42}, {18, 60}, {24, 80}, {33, 106}, {47, 183}};
-   nir_ssa_def *upper = nir_ieq_imm(b, y, 1);
-   nir_ssa_def *result = NULL;
+   nir_def *upper = nir_ieq_imm(b, y, 1);
+   nir_def *result = NULL;
    for (unsigned i = 0; i < 8; ++i) {
-      nir_ssa_def *tmp = nir_bcsel(b, upper, nir_imm_int(b, table[i][1]), nir_imm_int(b, table[i][0]));
+      nir_def *tmp = nir_bcsel(b, upper, nir_imm_int(b, table[i][1]), nir_imm_int(b, table[i][0]));
       if (result)
          result = nir_bcsel(b, nir_ieq_imm(b, x, i), tmp, result);
       else
@@ -72,11 +72,11 @@ etc1_color_modifier_lookup(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    return result;
 }
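
The loop above emits a branch-free nir_bcsel chain instead of an indexed
load, keeping the lookup in registers; the scalar equivalent is a plain
table lookup (hypothetical helper, for reference only):

   static int etc1_color_modifier(int x, bool upper)
   {
      static const int table[8][2] = {{2, 8},   {5, 17},  {9, 29},   {13, 42},
                                      {18, 60}, {24, 80}, {33, 106}, {47, 183}};
      return table[x][upper ? 1 : 0];
   }

etc2_distance_lookup and etc1_alpha_modifier_lookup below use the same
bcsel-chain pattern over their own tables.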
 
-static nir_ssa_def *
-etc2_distance_lookup(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+etc2_distance_lookup(nir_builder *b, nir_def *x)
 {
    const unsigned table[8] = {3, 6, 11, 16, 23, 32, 41, 64};
-   nir_ssa_def *result = NULL;
+   nir_def *result = NULL;
    for (unsigned i = 0; i < 8; ++i) {
       if (result)
          result = nir_bcsel(b, nir_ieq_imm(b, x, i), nir_imm_int(b, table[i]), result);
@@ -86,14 +86,14 @@ etc2_distance_lookup(nir_builder *b, nir_ssa_def *x)
    return result;
 }
 
-static nir_ssa_def *
-etc1_alpha_modifier_lookup(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+etc1_alpha_modifier_lookup(nir_builder *b, nir_def *x, nir_def *y)
 {
    const unsigned table[16] = {0xe852, 0xc962, 0xc741, 0xc531, 0xb752, 0xa862, 0xa763, 0xa742,
                                0x9751, 0x9741, 0x9731, 0x9641, 0x9632, 0x9210, 0x8753, 0x8642};
-   nir_ssa_def *result = NULL;
+   nir_def *result = NULL;
    for (unsigned i = 0; i < 16; ++i) {
-      nir_ssa_def *tmp = nir_imm_int(b, table[i]);
+      nir_def *tmp = nir_imm_int(b, table[i]);
       if (result)
          result = nir_bcsel(b, nir_ieq_imm(b, x, i), tmp, result);
       else
@@ -102,45 +102,44 @@ etc1_alpha_modifier_lookup(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    return nir_ubfe(b, result, nir_imul_imm(b, y, 4), nir_imm_int(b, 4));
 }
 
-static nir_ssa_def *
-etc_extend(nir_builder *b, nir_ssa_def *v, int bits)
+static nir_def *
+etc_extend(nir_builder *b, nir_def *v, int bits)
 {
    if (bits == 4)
       return nir_imul_imm(b, v, 0x11);
    return nir_ior(b, nir_ishl_imm(b, v, 8 - bits), nir_ushr_imm(b, v, bits - (8 - bits)));
 }
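
etc_extend widens an n-bit channel to 8 bits by bit replication; a scalar
sketch with a hypothetical name:

   static unsigned etc_extend_scalar(unsigned v, int bits)
   {
      if (bits == 4)
         return v * 0x11;   /* abcd -> abcdabcd, e.g. 0xA -> 0xAA */
      /* note bits - (8 - bits) == 2 * bits - 8 */
      return (v << (8 - bits)) | (v >> (2 * bits - 8));
   }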
 
-static nir_ssa_def *
-decode_etc2_alpha(struct nir_builder *b, nir_ssa_def *alpha_payload, nir_ssa_def *linear_pixel, bool eac,
-                  nir_ssa_def *is_signed)
+static nir_def *
+decode_etc2_alpha(struct nir_builder *b, nir_def *alpha_payload, nir_def *linear_pixel, bool eac, nir_def *is_signed)
 {
    alpha_payload = flip_endian(b, alpha_payload, 2);
-   nir_ssa_def *alpha_x = nir_channel(b, alpha_payload, 1);
-   nir_ssa_def *alpha_y = nir_channel(b, alpha_payload, 0);
-   nir_ssa_def *bit_offset = nir_isub_imm(b, 45, nir_imul_imm(b, linear_pixel, 3));
-   nir_ssa_def *base = nir_ubfe_imm(b, alpha_y, 24, 8);
-   nir_ssa_def *multiplier = nir_ubfe_imm(b, alpha_y, 20, 4);
-   nir_ssa_def *table = nir_ubfe_imm(b, alpha_y, 16, 4);
+   nir_def *alpha_x = nir_channel(b, alpha_payload, 1);
+   nir_def *alpha_y = nir_channel(b, alpha_payload, 0);
+   nir_def *bit_offset = nir_isub_imm(b, 45, nir_imul_imm(b, linear_pixel, 3));
+   nir_def *base = nir_ubfe_imm(b, alpha_y, 24, 8);
+   nir_def *multiplier = nir_ubfe_imm(b, alpha_y, 20, 4);
+   nir_def *table = nir_ubfe_imm(b, alpha_y, 16, 4);
 
    if (eac) {
-      nir_ssa_def *signed_base = nir_ibfe_imm(b, alpha_y, 24, 8);
+      nir_def *signed_base = nir_ibfe_imm(b, alpha_y, 24, 8);
       signed_base = nir_imul_imm(b, signed_base, 8);
       base = nir_iadd_imm(b, nir_imul_imm(b, base, 8), 4);
       base = nir_bcsel(b, is_signed, signed_base, base);
       multiplier = nir_imax(b, nir_imul_imm(b, multiplier, 8), nir_imm_int(b, 1));
    }
 
-   nir_ssa_def *lsb_index = nir_ubfe(b, nir_bcsel(b, nir_uge_imm(b, bit_offset, 32), alpha_y, alpha_x),
-                                     nir_iand_imm(b, bit_offset, 31), nir_imm_int(b, 2));
+   nir_def *lsb_index = nir_ubfe(b, nir_bcsel(b, nir_uge_imm(b, bit_offset, 32), alpha_y, alpha_x),
+                                 nir_iand_imm(b, bit_offset, 31), nir_imm_int(b, 2));
    bit_offset = nir_iadd_imm(b, bit_offset, 2);
-   nir_ssa_def *msb = nir_ubfe(b, nir_bcsel(b, nir_uge_imm(b, bit_offset, 32), alpha_y, alpha_x),
-                               nir_iand_imm(b, bit_offset, 31), nir_imm_int(b, 1));
-   nir_ssa_def *mod = nir_ixor(b, etc1_alpha_modifier_lookup(b, table, lsb_index), nir_iadd_imm(b, msb, -1));
-   nir_ssa_def *a = nir_iadd(b, base, nir_imul(b, mod, multiplier));
-
-   nir_ssa_def *low_bound = nir_imm_int(b, 0);
-   nir_ssa_def *high_bound = nir_imm_int(b, 255);
-   nir_ssa_def *final_mult = nir_imm_float(b, 1 / 255.0);
+   nir_def *msb = nir_ubfe(b, nir_bcsel(b, nir_uge_imm(b, bit_offset, 32), alpha_y, alpha_x),
+                           nir_iand_imm(b, bit_offset, 31), nir_imm_int(b, 1));
+   nir_def *mod = nir_ixor(b, etc1_alpha_modifier_lookup(b, table, lsb_index), nir_iadd_imm(b, msb, -1));
+   nir_def *a = nir_iadd(b, base, nir_imul(b, mod, multiplier));
+
+   nir_def *low_bound = nir_imm_int(b, 0);
+   nir_def *high_bound = nir_imm_int(b, 255);
+   nir_def *final_mult = nir_imm_float(b, 1 / 255.0);
    if (eac) {
       low_bound = nir_bcsel(b, is_signed, nir_imm_int(b, -1023), low_bound);
       high_bound = nir_bcsel(b, is_signed, nir_imm_int(b, 1023), nir_imm_int(b, 2047));
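
With the eac scaling above in place, this hunk's arithmetic is the usual
EAC reconstruction; a scalar sketch, assuming the result is then clamped
to the low_bound/high_bound pair (the clamp falls outside this hunk's
context):

   int a = base + modifier * multiplier;
   a = CLAMP(a, low_bound, high_bound);   /* 0..255 for ETC2 alpha;
                                             0..2047 or -1023..1023 for EAC */
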
@@ -177,55 +176,55 @@ build_shader(struct radv_device *dev)
    output_img_3d->data.descriptor_set = 0;
    output_img_3d->data.binding = 1;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 3);
+   nir_def *global_id = get_global_ids(&b, 3);
 
-   nir_ssa_def *consts = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
-   nir_ssa_def *consts2 = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 0, .range = 4);
-   nir_ssa_def *offset = nir_channels(&b, consts, 7);
-   nir_ssa_def *format = nir_channel(&b, consts, 3);
-   nir_ssa_def *image_type = nir_channel(&b, consts2, 0);
-   nir_ssa_def *is_3d = nir_ieq_imm(&b, image_type, VK_IMAGE_TYPE_3D);
-   nir_ssa_def *coord = nir_iadd(&b, global_id, offset);
-   nir_ssa_def *src_coord = nir_vec3(&b, nir_ushr_imm(&b, nir_channel(&b, coord, 0), 2),
-                                     nir_ushr_imm(&b, nir_channel(&b, coord, 1), 2), nir_channel(&b, coord, 2));
+   nir_def *consts = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
+   nir_def *consts2 = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 0, .range = 4);
+   nir_def *offset = nir_channels(&b, consts, 7);
+   nir_def *format = nir_channel(&b, consts, 3);
+   nir_def *image_type = nir_channel(&b, consts2, 0);
+   nir_def *is_3d = nir_ieq_imm(&b, image_type, VK_IMAGE_TYPE_3D);
+   nir_def *coord = nir_iadd(&b, global_id, offset);
+   nir_def *src_coord = nir_vec3(&b, nir_ushr_imm(&b, nir_channel(&b, coord, 0), 2),
+                                 nir_ushr_imm(&b, nir_channel(&b, coord, 1), 2), nir_channel(&b, coord, 2));
 
    nir_variable *payload_var = nir_variable_create(b.shader, nir_var_shader_temp, glsl_vec4_type(), "payload");
    nir_push_if(&b, is_3d);
    {
-      nir_ssa_def *color = nir_txf_deref(&b, nir_build_deref_var(&b, input_img_3d), src_coord, nir_imm_int(&b, 0));
+      nir_def *color = nir_txf_deref(&b, nir_build_deref_var(&b, input_img_3d), src_coord, nir_imm_int(&b, 0));
       nir_store_var(&b, payload_var, color, 0xf);
    }
    nir_push_else(&b, NULL);
    {
-      nir_ssa_def *color = nir_txf_deref(&b, nir_build_deref_var(&b, input_img_2d), src_coord, nir_imm_int(&b, 0));
+      nir_def *color = nir_txf_deref(&b, nir_build_deref_var(&b, input_img_2d), src_coord, nir_imm_int(&b, 0));
       nir_store_var(&b, payload_var, color, 0xf);
    }
    nir_pop_if(&b, NULL);
 
-   nir_ssa_def *pixel_coord = nir_iand_imm(&b, nir_channels(&b, coord, 3), 3);
-   nir_ssa_def *linear_pixel =
+   nir_def *pixel_coord = nir_iand_imm(&b, nir_channels(&b, coord, 3), 3);
+   nir_def *linear_pixel =
       nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, pixel_coord, 0), 4), nir_channel(&b, pixel_coord, 1));
 
-   nir_ssa_def *payload = nir_load_var(&b, payload_var);
+   nir_def *payload = nir_load_var(&b, payload_var);
    nir_variable *color = nir_variable_create(b.shader, nir_var_shader_temp, glsl_vec4_type(), "color");
    nir_store_var(&b, color, nir_imm_vec4(&b, 1.0, 0.0, 0.0, 1.0), 0xf);
    nir_push_if(&b, nir_ilt_imm(&b, format, VK_FORMAT_EAC_R11_UNORM_BLOCK));
    {
-      nir_ssa_def *alpha_bits_8 = nir_ige_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK);
-      nir_ssa_def *alpha_bits_1 = nir_iand(&b, nir_ige_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK),
-                                           nir_ilt_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK));
+      nir_def *alpha_bits_8 = nir_ige_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK);
+      nir_def *alpha_bits_1 = nir_iand(&b, nir_ige_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK),
+                                       nir_ilt_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK));
 
-      nir_ssa_def *color_payload =
+      nir_def *color_payload =
          nir_bcsel(&b, alpha_bits_8, nir_channels(&b, payload, 0xC), nir_channels(&b, payload, 3));
       color_payload = flip_endian(&b, color_payload, 2);
-      nir_ssa_def *color_y = nir_channel(&b, color_payload, 0);
-      nir_ssa_def *color_x = nir_channel(&b, color_payload, 1);
-      nir_ssa_def *flip = nir_test_mask(&b, color_y, 1);
-      nir_ssa_def *subblock =
+      nir_def *color_y = nir_channel(&b, color_payload, 0);
+      nir_def *color_x = nir_channel(&b, color_payload, 1);
+      nir_def *flip = nir_test_mask(&b, color_y, 1);
+      nir_def *subblock =
          nir_ushr_imm(&b, nir_bcsel(&b, flip, nir_channel(&b, pixel_coord, 1), nir_channel(&b, pixel_coord, 0)), 1);
 
       nir_variable *punchthrough = nir_variable_create(b.shader, nir_var_shader_temp, glsl_bool_type(), "punchthrough");
-      nir_ssa_def *punchthrough_init = nir_iand(&b, alpha_bits_1, nir_inot(&b, nir_test_mask(&b, color_y, 2)));
+      nir_def *punchthrough_init = nir_iand(&b, alpha_bits_1, nir_inot(&b, nir_test_mask(&b, color_y, 2)));
       nir_store_var(&b, punchthrough, punchthrough_init, 0x1);
 
       nir_variable *etc1_compat = nir_variable_create(b.shader, nir_var_shader_temp, glsl_bool_type(), "etc1_compat");
@@ -249,13 +248,13 @@ build_shader(struct radv_device *dev)
       nir_variable *base_rgb = nir_variable_create(b.shader, nir_var_shader_temp, uvec3_type, "base_rgb");
       nir_store_var(&b, rgb_result, nir_imm_ivec3(&b, 255, 0, 0), 0x7);
 
-      nir_ssa_def *msb = nir_iand_imm(&b, nir_ushr(&b, color_x, nir_iadd_imm(&b, linear_pixel, 15)), 2);
-      nir_ssa_def *lsb = nir_iand_imm(&b, nir_ushr(&b, color_x, linear_pixel), 1);
+      nir_def *msb = nir_iand_imm(&b, nir_ushr(&b, color_x, nir_iadd_imm(&b, linear_pixel, 15)), 2);
+      nir_def *lsb = nir_iand_imm(&b, nir_ushr(&b, color_x, linear_pixel), 1);
 
       nir_push_if(&b, nir_iand(&b, nir_inot(&b, alpha_bits_1), nir_inot(&b, nir_test_mask(&b, color_y, 2))));
       {
          nir_store_var(&b, etc1_compat, nir_imm_true(&b), 1);
-         nir_ssa_def *tmp[3];
+         nir_def *tmp[3];
          for (unsigned i = 0; i < 3; ++i)
             tmp[i] = etc_extend(
                &b,
@@ -266,29 +265,29 @@ build_shader(struct radv_device *dev)
       }
       nir_push_else(&b, NULL);
       {
-         nir_ssa_def *rb = nir_ubfe_imm(&b, color_y, 27, 5);
-         nir_ssa_def *rd = nir_ibfe_imm(&b, color_y, 24, 3);
-         nir_ssa_def *gb = nir_ubfe_imm(&b, color_y, 19, 5);
-         nir_ssa_def *gd = nir_ibfe_imm(&b, color_y, 16, 3);
-         nir_ssa_def *bb = nir_ubfe_imm(&b, color_y, 11, 5);
-         nir_ssa_def *bd = nir_ibfe_imm(&b, color_y, 8, 3);
-         nir_ssa_def *r1 = nir_iadd(&b, rb, rd);
-         nir_ssa_def *g1 = nir_iadd(&b, gb, gd);
-         nir_ssa_def *b1 = nir_iadd(&b, bb, bd);
+         nir_def *rb = nir_ubfe_imm(&b, color_y, 27, 5);
+         nir_def *rd = nir_ibfe_imm(&b, color_y, 24, 3);
+         nir_def *gb = nir_ubfe_imm(&b, color_y, 19, 5);
+         nir_def *gd = nir_ibfe_imm(&b, color_y, 16, 3);
+         nir_def *bb = nir_ubfe_imm(&b, color_y, 11, 5);
+         nir_def *bd = nir_ibfe_imm(&b, color_y, 8, 3);
+         nir_def *r1 = nir_iadd(&b, rb, rd);
+         nir_def *g1 = nir_iadd(&b, gb, gd);
+         nir_def *b1 = nir_iadd(&b, bb, bd);
 
          nir_push_if(&b, nir_ugt_imm(&b, r1, 31));
          {
-            nir_ssa_def *r0 =
+            nir_def *r0 =
                nir_ior(&b, nir_ubfe_imm(&b, color_y, 24, 2), nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 27, 2), 2));
-            nir_ssa_def *g0 = nir_ubfe_imm(&b, color_y, 20, 4);
-            nir_ssa_def *b0 = nir_ubfe_imm(&b, color_y, 16, 4);
-            nir_ssa_def *r2 = nir_ubfe_imm(&b, color_y, 12, 4);
-            nir_ssa_def *g2 = nir_ubfe_imm(&b, color_y, 8, 4);
-            nir_ssa_def *b2 = nir_ubfe_imm(&b, color_y, 4, 4);
-            nir_ssa_def *da =
+            nir_def *g0 = nir_ubfe_imm(&b, color_y, 20, 4);
+            nir_def *b0 = nir_ubfe_imm(&b, color_y, 16, 4);
+            nir_def *r2 = nir_ubfe_imm(&b, color_y, 12, 4);
+            nir_def *g2 = nir_ubfe_imm(&b, color_y, 8, 4);
+            nir_def *b2 = nir_ubfe_imm(&b, color_y, 4, 4);
+            nir_def *da =
                nir_ior(&b, nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 2, 2), 1), nir_iand_imm(&b, color_y, 1));
-            nir_ssa_def *dist = etc2_distance_lookup(&b, da);
-            nir_ssa_def *index = nir_ior(&b, lsb, msb);
+            nir_def *dist = etc2_distance_lookup(&b, da);
+            nir_def *index = nir_ior(&b, lsb, msb);
 
             nir_store_var(&b, punchthrough,
                           nir_iand(&b, nir_load_var(&b, punchthrough), nir_ieq_imm(&b, nir_iadd(&b, lsb, msb), 2)),
@@ -300,8 +299,8 @@ build_shader(struct radv_device *dev)
             nir_push_else(&b, NULL);
             {
 
-               nir_ssa_def *tmp = nir_iadd(&b, etc_extend(&b, nir_vec3(&b, r2, g2, b2), 4),
-                                           nir_imul(&b, dist, nir_isub_imm(&b, 2, index)));
+               nir_def *tmp = nir_iadd(&b, etc_extend(&b, nir_vec3(&b, r2, g2, b2), 4),
+                                       nir_imul(&b, dist, nir_isub_imm(&b, 2, index)));
                nir_store_var(&b, rgb_result, tmp, 0x7);
             }
             nir_pop_if(&b, NULL);
@@ -309,23 +308,22 @@ build_shader(struct radv_device *dev)
          nir_push_else(&b, NULL);
          nir_push_if(&b, nir_ugt_imm(&b, g1, 31));
          {
-            nir_ssa_def *r0 = nir_ubfe_imm(&b, color_y, 27, 4);
-            nir_ssa_def *g0 = nir_ior(&b, nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 24, 3), 1),
-                                      nir_iand_imm(&b, nir_ushr_imm(&b, color_y, 20), 1));
-            nir_ssa_def *b0 =
+            nir_def *r0 = nir_ubfe_imm(&b, color_y, 27, 4);
+            nir_def *g0 = nir_ior(&b, nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 24, 3), 1),
+                                  nir_iand_imm(&b, nir_ushr_imm(&b, color_y, 20), 1));
+            nir_def *b0 =
                nir_ior(&b, nir_ubfe_imm(&b, color_y, 15, 3), nir_iand_imm(&b, nir_ushr_imm(&b, color_y, 16), 8));
-            nir_ssa_def *r2 = nir_ubfe_imm(&b, color_y, 11, 4);
-            nir_ssa_def *g2 = nir_ubfe_imm(&b, color_y, 7, 4);
-            nir_ssa_def *b2 = nir_ubfe_imm(&b, color_y, 3, 4);
-            nir_ssa_def *da = nir_iand_imm(&b, color_y, 4);
-            nir_ssa_def *db = nir_iand_imm(&b, color_y, 1);
-            nir_ssa_def *d = nir_iadd(&b, da, nir_imul_imm(&b, db, 2));
-            nir_ssa_def *d0 = nir_iadd(&b, nir_ishl_imm(&b, r0, 16), nir_iadd(&b, nir_ishl_imm(&b, g0, 8), b0));
-            nir_ssa_def *d2 = nir_iadd(&b, nir_ishl_imm(&b, r2, 16), nir_iadd(&b, nir_ishl_imm(&b, g2, 8), b2));
+            nir_def *r2 = nir_ubfe_imm(&b, color_y, 11, 4);
+            nir_def *g2 = nir_ubfe_imm(&b, color_y, 7, 4);
+            nir_def *b2 = nir_ubfe_imm(&b, color_y, 3, 4);
+            nir_def *da = nir_iand_imm(&b, color_y, 4);
+            nir_def *db = nir_iand_imm(&b, color_y, 1);
+            nir_def *d = nir_iadd(&b, da, nir_imul_imm(&b, db, 2));
+            nir_def *d0 = nir_iadd(&b, nir_ishl_imm(&b, r0, 16), nir_iadd(&b, nir_ishl_imm(&b, g0, 8), b0));
+            nir_def *d2 = nir_iadd(&b, nir_ishl_imm(&b, r2, 16), nir_iadd(&b, nir_ishl_imm(&b, g2, 8), b2));
             d = nir_bcsel(&b, nir_uge(&b, d0, d2), nir_iadd_imm(&b, d, 1), d);
-            nir_ssa_def *dist = etc2_distance_lookup(&b, d);
-            nir_ssa_def *base =
-               nir_bcsel(&b, nir_ine_imm(&b, msb, 0), nir_vec3(&b, r2, g2, b2), nir_vec3(&b, r0, g0, b0));
+            nir_def *dist = etc2_distance_lookup(&b, d);
+            nir_def *base = nir_bcsel(&b, nir_ine_imm(&b, msb, 0), nir_vec3(&b, r2, g2, b2), nir_vec3(&b, r0, g0, b0));
             base = etc_extend(&b, base, 4);
             base = nir_iadd(&b, base, nir_imul(&b, dist, nir_isub_imm(&b, 1, nir_imul_imm(&b, lsb, 2))));
             nir_store_var(&b, rgb_result, base, 0x7);
@@ -336,19 +334,19 @@ build_shader(struct radv_device *dev)
          nir_push_else(&b, NULL);
          nir_push_if(&b, nir_ugt_imm(&b, b1, 31));
          {
-            nir_ssa_def *r0 = nir_ubfe_imm(&b, color_y, 25, 6);
-            nir_ssa_def *g0 =
+            nir_def *r0 = nir_ubfe_imm(&b, color_y, 25, 6);
+            nir_def *g0 =
                nir_ior(&b, nir_ubfe_imm(&b, color_y, 17, 6), nir_iand_imm(&b, nir_ushr_imm(&b, color_y, 18), 0x40));
-            nir_ssa_def *b0 = nir_ior(
+            nir_def *b0 = nir_ior(
                &b, nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 11, 2), 3),
                nir_ior(&b, nir_iand_imm(&b, nir_ushr_imm(&b, color_y, 11), 0x20), nir_ubfe_imm(&b, color_y, 7, 3)));
-            nir_ssa_def *rh =
+            nir_def *rh =
                nir_ior(&b, nir_iand_imm(&b, color_y, 1), nir_ishl_imm(&b, nir_ubfe_imm(&b, color_y, 2, 5), 1));
-            nir_ssa_def *rv = nir_ubfe_imm(&b, color_x, 13, 6);
-            nir_ssa_def *gh = nir_ubfe_imm(&b, color_x, 25, 7);
-            nir_ssa_def *gv = nir_ubfe_imm(&b, color_x, 6, 7);
-            nir_ssa_def *bh = nir_ubfe_imm(&b, color_x, 19, 6);
-            nir_ssa_def *bv = nir_ubfe_imm(&b, color_x, 0, 6);
+            nir_def *rv = nir_ubfe_imm(&b, color_x, 13, 6);
+            nir_def *gh = nir_ubfe_imm(&b, color_x, 25, 7);
+            nir_def *gv = nir_ubfe_imm(&b, color_x, 6, 7);
+            nir_def *bh = nir_ubfe_imm(&b, color_x, 19, 6);
+            nir_def *bv = nir_ubfe_imm(&b, color_x, 0, 6);
 
             r0 = etc_extend(&b, r0, 6);
             g0 = etc_extend(&b, g0, 7);
@@ -360,11 +358,9 @@ build_shader(struct radv_device *dev)
             bh = etc_extend(&b, bh, 6);
             bv = etc_extend(&b, bv, 6);
 
-            nir_ssa_def *rgb = nir_vec3(&b, r0, g0, b0);
-            nir_ssa_def *dx =
-               nir_imul(&b, nir_isub(&b, nir_vec3(&b, rh, gh, bh), rgb), nir_channel(&b, pixel_coord, 0));
-            nir_ssa_def *dy =
-               nir_imul(&b, nir_isub(&b, nir_vec3(&b, rv, gv, bv), rgb), nir_channel(&b, pixel_coord, 1));
+            nir_def *rgb = nir_vec3(&b, r0, g0, b0);
+            nir_def *dx = nir_imul(&b, nir_isub(&b, nir_vec3(&b, rh, gh, bh), rgb), nir_channel(&b, pixel_coord, 0));
+            nir_def *dy = nir_imul(&b, nir_isub(&b, nir_vec3(&b, rv, gv, bv), rgb), nir_channel(&b, pixel_coord, 1));
             rgb = nir_iadd(&b, rgb, nir_ishr_imm(&b, nir_iadd_imm(&b, nir_iadd(&b, dx, dy), 2), 2));
             nir_store_var(&b, rgb_result, rgb, 0x7);
             nir_store_var(&b, punchthrough, nir_imm_false(&b), 0x1);
@@ -372,8 +368,8 @@ build_shader(struct radv_device *dev)
          nir_push_else(&b, NULL);
          {
             nir_store_var(&b, etc1_compat, nir_imm_true(&b), 1);
-            nir_ssa_def *subblock_b = nir_ine_imm(&b, subblock, 0);
-            nir_ssa_def *tmp[] = {
+            nir_def *subblock_b = nir_ine_imm(&b, subblock, 0);
+            nir_def *tmp[] = {
                nir_bcsel(&b, subblock_b, r1, rb),
                nir_bcsel(&b, subblock_b, g1, gb),
                nir_bcsel(&b, subblock_b, b1, bb),
@@ -387,14 +383,14 @@ build_shader(struct radv_device *dev)
       nir_pop_if(&b, NULL);
       nir_push_if(&b, nir_load_var(&b, etc1_compat));
       {
-         nir_ssa_def *etc1_table_index =
+         nir_def *etc1_table_index =
             nir_ubfe(&b, color_y, nir_isub_imm(&b, 5, nir_imul_imm(&b, subblock, 3)), nir_imm_int(&b, 3));
-         nir_ssa_def *sgn = nir_isub_imm(&b, 1, msb);
+         nir_def *sgn = nir_isub_imm(&b, 1, msb);
          sgn = nir_bcsel(&b, nir_load_var(&b, punchthrough), nir_imul(&b, sgn, lsb), sgn);
          nir_store_var(&b, punchthrough,
                        nir_iand(&b, nir_load_var(&b, punchthrough), nir_ieq_imm(&b, nir_iadd(&b, lsb, msb), 2)), 0x1);
-         nir_ssa_def *off = nir_imul(&b, etc1_color_modifier_lookup(&b, etc1_table_index, lsb), sgn);
-         nir_ssa_def *result = nir_iadd(&b, nir_load_var(&b, base_rgb), off);
+         nir_def *off = nir_imul(&b, etc1_color_modifier_lookup(&b, etc1_table_index, lsb), sgn);
+         nir_def *result = nir_iadd(&b, nir_load_var(&b, base_rgb), off);
          nir_store_var(&b, rgb_result, result, 0x7);
       }
       nir_pop_if(&b, NULL);
@@ -404,7 +400,7 @@ build_shader(struct radv_device *dev)
          nir_store_var(&b, rgb_result, nir_imm_ivec3(&b, 0, 0, 0), 0x7);
       }
       nir_pop_if(&b, NULL);
-      nir_ssa_def *col[4];
+      nir_def *col[4];
       for (unsigned i = 0; i < 3; ++i)
          col[i] = nir_fdiv_imm(&b, nir_i2f32(&b, nir_channel(&b, nir_load_var(&b, rgb_result), i)), 255.0);
       col[3] = nir_load_var(&b, alpha_result);
@@ -412,9 +408,9 @@ build_shader(struct radv_device *dev)
    }
    nir_push_else(&b, NULL);
    { /* EAC */
-      nir_ssa_def *is_signed = nir_ior(&b, nir_ieq_imm(&b, format, VK_FORMAT_EAC_R11_SNORM_BLOCK),
-                                       nir_ieq_imm(&b, format, VK_FORMAT_EAC_R11G11_SNORM_BLOCK));
-      nir_ssa_def *val[4];
+      nir_def *is_signed = nir_ior(&b, nir_ieq_imm(&b, format, VK_FORMAT_EAC_R11_SNORM_BLOCK),
+                                   nir_ieq_imm(&b, format, VK_FORMAT_EAC_R11G11_SNORM_BLOCK));
+      nir_def *val[4];
       for (int i = 0; i < 2; ++i) {
          val[i] = decode_etc2_alpha(&b, nir_channels(&b, payload, 3 << (2 * i)), linear_pixel, true, is_signed);
       }
@@ -424,18 +420,18 @@ build_shader(struct radv_device *dev)
    }
    nir_pop_if(&b, NULL);
 
-   nir_ssa_def *outval = nir_load_var(&b, color);
-   nir_ssa_def *img_coord = nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1),
-                                     nir_channel(&b, coord, 2), nir_ssa_undef(&b, 1, 32));
+   nir_def *outval = nir_load_var(&b, color);
+   nir_def *img_coord = nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), nir_channel(&b, coord, 2),
+                                 nir_undef(&b, 1, 32));
 
    nir_push_if(&b, is_3d);
    {
-      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_3d)->dest.ssa, img_coord, nir_ssa_undef(&b, 1, 32),
+      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_3d)->dest.ssa, img_coord, nir_undef(&b, 1, 32),
                             outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_3D);
    }
    nir_push_else(&b, NULL);
    {
-      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_2d)->dest.ssa, img_coord, nir_ssa_undef(&b, 1, 32),
+      nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_2d)->dest.ssa, img_coord, nir_undef(&b, 1, 32),
                             outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true);
    }
    nir_pop_if(&b, NULL);
index 3bfabd6..6cd3575 100644
@@ -52,13 +52,12 @@ build_dcc_decompress_compute_shader(struct radv_device *dev)
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 2);
-   nir_ssa_def *img_coord = nir_vec4(&b, nir_channel(&b, global_id, 0), nir_channel(&b, global_id, 1),
-                                     nir_ssa_undef(&b, 1, 32), nir_ssa_undef(&b, 1, 32));
+   nir_def *global_id = get_global_ids(&b, 2);
+   nir_def *img_coord = nir_vec4(&b, nir_channel(&b, global_id, 0), nir_channel(&b, global_id, 1), nir_undef(&b, 1, 32),
+                                 nir_undef(&b, 1, 32));
 
-   nir_ssa_def *data =
-      nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, img_coord,
-                           nir_ssa_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
+   nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, img_coord,
+                                        nir_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
 
    /* We need a SCOPE_DEVICE memory_scope because ACO will avoid
     * creating a vmcnt(0) because it expects the L1 cache to keep memory
@@ -67,7 +66,7 @@ build_dcc_decompress_compute_shader(struct radv_device *dev)
    nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
                .memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
 
-   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_ssa_undef(&b, 1, 32), data,
+   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), data,
                          nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
    return b.shader;
 }
index 0bae313..aadfa92 100644
@@ -42,28 +42,28 @@ build_fmask_copy_compute_shader(struct radv_device *dev, int samples)
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
-   nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
-   nir_ssa_def *block_size = nir_imm_ivec3(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
-                                           b.shader->info.workgroup_size[2]);
+   nir_def *invoc_id = nir_load_local_invocation_id(&b);
+   nir_def *wg_id = nir_load_workgroup_id(&b, 32);
+   nir_def *block_size = nir_imm_ivec3(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
+                                       b.shader->info.workgroup_size[2]);
 
-   nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
+   nir_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
 
    /* Get coordinates. */
-   nir_ssa_def *src_coord = nir_trim_vector(&b, global_id, 2);
-   nir_ssa_def *dst_coord = nir_vec4(&b, nir_channel(&b, src_coord, 0), nir_channel(&b, src_coord, 1),
-                                     nir_ssa_undef(&b, 1, 32), nir_ssa_undef(&b, 1, 32));
+   nir_def *src_coord = nir_trim_vector(&b, global_id, 2);
+   nir_def *dst_coord = nir_vec4(&b, nir_channel(&b, src_coord, 0), nir_channel(&b, src_coord, 1), nir_undef(&b, 1, 32),
+                                 nir_undef(&b, 1, 32));
 
    nir_tex_src frag_mask_srcs[] = {{
       .src_type = nir_tex_src_coord,
       .src = nir_src_for_ssa(src_coord),
    }};
-   nir_ssa_def *frag_mask =
+   nir_def *frag_mask =
       nir_build_tex_deref_instr(&b, nir_texop_fragment_mask_fetch_amd, nir_build_deref_var(&b, input_img), NULL,
                                 ARRAY_SIZE(frag_mask_srcs), frag_mask_srcs);
 
    /* Get the maximum sample used in this fragment. */
-   nir_ssa_def *max_sample_index = nir_imm_int(&b, 0);
+   nir_def *max_sample_index = nir_imm_int(&b, 0);
    for (uint32_t s = 0; s < samples; s++) {
       /* max_sample_index = MAX2(max_sample_index, (frag_mask >> (s * 4)) & 0xf) */
       max_sample_index = nir_umax(&b, max_sample_index,
@@ -75,7 +75,7 @@ build_fmask_copy_compute_shader(struct radv_device *dev, int samples)
 
    nir_loop *loop = nir_push_loop(&b);
    {
-      nir_ssa_def *sample_id = nir_load_var(&b, counter);
+      nir_def *sample_id = nir_load_var(&b, counter);
 
       nir_tex_src frag_fetch_srcs[] = {{
                                           .src_type = nir_tex_src_coord,
@@ -85,9 +85,8 @@ build_fmask_copy_compute_shader(struct radv_device *dev, int samples)
                                           .src_type = nir_tex_src_ms_index,
                                           .src = nir_src_for_ssa(sample_id),
                                        }};
-      nir_ssa_def *outval =
-         nir_build_tex_deref_instr(&b, nir_texop_fragment_fetch_amd, nir_build_deref_var(&b, input_img), NULL,
-                                   ARRAY_SIZE(frag_fetch_srcs), frag_fetch_srcs);
+      nir_def *outval = nir_build_tex_deref_instr(&b, nir_texop_fragment_fetch_amd, nir_build_deref_var(&b, input_img),
+                                                  NULL, ARRAY_SIZE(frag_fetch_srcs), frag_fetch_srcs);
 
       nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, dst_coord, sample_id, outval,
                             nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_MS);
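
FMASK packs one 4-bit sample index per fragment slot, which is exactly
what the MAX2 expression in the comment above decodes; a scalar sketch
(hypothetical helper):

   static unsigned fmask_sample_index(unsigned frag_mask, unsigned s)
   {
      return (frag_mask >> (s * 4)) & 0xf;
   }
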
index a316a4e..8b9a738 100644
@@ -48,17 +48,17 @@ build_fmask_expand_compute_shader(struct radv_device *device, int samples)
    output_img->data.access = ACCESS_NON_READABLE;
 
    nir_deref_instr *input_img_deref = nir_build_deref_var(&b, input_img);
-   nir_ssa_def *output_img_deref = &nir_build_deref_var(&b, output_img)->dest.ssa;
+   nir_def *output_img_deref = &nir_build_deref_var(&b, output_img)->dest.ssa;
 
-   nir_ssa_def *tex_coord = get_global_ids(&b, 3);
+   nir_def *tex_coord = get_global_ids(&b, 3);
 
-   nir_ssa_def *tex_vals[8];
+   nir_def *tex_vals[8];
    for (uint32_t i = 0; i < samples; i++) {
       tex_vals[i] = nir_txf_ms_deref(&b, input_img_deref, tex_coord, nir_imm_int(&b, i));
    }
 
-   nir_ssa_def *img_coord = nir_vec4(&b, nir_channel(&b, tex_coord, 0), nir_channel(&b, tex_coord, 1),
-                                     nir_channel(&b, tex_coord, 2), nir_ssa_undef(&b, 1, 32));
+   nir_def *img_coord = nir_vec4(&b, nir_channel(&b, tex_coord, 0), nir_channel(&b, tex_coord, 1),
+                                 nir_channel(&b, tex_coord, 2), nir_undef(&b, 1, 32));
 
    for (uint32_t i = 0; i < samples; i++) {
       nir_image_deref_store(&b, output_img_deref, img_coord, nir_imm_int(&b, i), tex_vals[i], nir_imm_int(&b, 0),
index 8aa6cde..b670834 100644
 #include "sid.h"
 #include "vk_format.h"
 
-static nir_ssa_def *
-radv_meta_build_resolve_srgb_conversion(nir_builder *b, nir_ssa_def *input)
+static nir_def *
+radv_meta_build_resolve_srgb_conversion(nir_builder *b, nir_def *input)
 {
    unsigned i;
-   nir_ssa_def *comp[4];
+   nir_def *comp[4];
    for (i = 0; i < 3; i++)
       comp[i] = nir_format_linear_to_srgb(b, nir_channel(b, input, i));
    comp[3] = nir_channels(b, input, 1 << 3);
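
nir_format_linear_to_srgb encodes each of the three color channels with
the standard sRGB transfer function (alpha passes through as comp[3]);
for reference:

   float linear_to_srgb(float c)
   {
      return c <= 0.0031308f ? 12.92f * c
                             : 1.055f * powf(c, 1.0f / 2.4f) - 0.055f;
   }
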
@@ -62,27 +62,27 @@ build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_s
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 2);
+   nir_def *global_id = get_global_ids(&b, 2);
 
-   nir_ssa_def *src_offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
-   nir_ssa_def *dst_offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 8), .range = 16);
+   nir_def *src_offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
+   nir_def *dst_offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 8), .range = 16);
 
-   nir_ssa_def *src_coord = nir_iadd(&b, global_id, src_offset);
-   nir_ssa_def *dst_coord = nir_iadd(&b, global_id, dst_offset);
+   nir_def *src_coord = nir_iadd(&b, global_id, src_offset);
+   nir_def *dst_coord = nir_iadd(&b, global_id, dst_offset);
 
    nir_variable *color = nir_local_variable_create(b.impl, glsl_vec4_type(), "color");
 
    radv_meta_build_resolve_shader_core(dev, &b, is_integer, samples, input_img, color, src_coord);
 
-   nir_ssa_def *outval = nir_load_var(&b, color);
+   nir_def *outval = nir_load_var(&b, color);
    if (is_srgb)
       outval = radv_meta_build_resolve_srgb_conversion(&b, outval);
 
-   nir_ssa_def *img_coord = nir_vec4(&b, nir_channel(&b, dst_coord, 0), nir_channel(&b, dst_coord, 1),
-                                     nir_ssa_undef(&b, 1, 32), nir_ssa_undef(&b, 1, 32));
+   nir_def *img_coord = nir_vec4(&b, nir_channel(&b, dst_coord, 0), nir_channel(&b, dst_coord, 1), nir_undef(&b, 1, 32),
+                                 nir_undef(&b, 1, 32));
 
-   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_ssa_undef(&b, 1, 32),
-                         outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
+   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), outval,
+                         nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
    return b.shader;
 }
 
@@ -130,21 +130,21 @@ build_depth_stencil_resolve_compute_shader(struct radv_device *dev, int samples,
    output_img->data.descriptor_set = 0;
    output_img->data.binding = 1;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 3);
+   nir_def *global_id = get_global_ids(&b, 3);
 
-   nir_ssa_def *offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
+   nir_def *offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
 
-   nir_ssa_def *resolve_coord = nir_iadd(&b, nir_trim_vector(&b, global_id, 2), offset);
+   nir_def *resolve_coord = nir_iadd(&b, nir_trim_vector(&b, global_id, 2), offset);
 
-   nir_ssa_def *img_coord =
+   nir_def *img_coord =
       nir_vec3(&b, nir_channel(&b, resolve_coord, 0), nir_channel(&b, resolve_coord, 1), nir_channel(&b, global_id, 2));
 
    nir_deref_instr *input_img_deref = nir_build_deref_var(&b, input_img);
-   nir_ssa_def *outval = nir_txf_ms_deref(&b, input_img_deref, img_coord, nir_imm_int(&b, 0));
+   nir_def *outval = nir_txf_ms_deref(&b, input_img_deref, img_coord, nir_imm_int(&b, 0));
 
    if (resolve_mode != VK_RESOLVE_MODE_SAMPLE_ZERO_BIT) {
       for (int i = 1; i < samples; i++) {
-         nir_ssa_def *si = nir_txf_ms_deref(&b, input_img_deref, img_coord, nir_imm_int(&b, i));
+         nir_def *si = nir_txf_ms_deref(&b, input_img_deref, img_coord, nir_imm_int(&b, i));
 
          switch (resolve_mode) {
          case VK_RESOLVE_MODE_AVERAGE_BIT:
@@ -172,9 +172,9 @@ build_depth_stencil_resolve_compute_shader(struct radv_device *dev, int samples,
          outval = nir_fdiv_imm(&b, outval, samples);
    }
 
-   nir_ssa_def *coord = nir_vec4(&b, nir_channel(&b, img_coord, 0), nir_channel(&b, img_coord, 1),
-                                 nir_channel(&b, img_coord, 2), nir_ssa_undef(&b, 1, 32));
-   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_ssa_undef(&b, 1, 32), outval,
+   nir_def *coord = nir_vec4(&b, nir_channel(&b, img_coord, 0), nir_channel(&b, img_coord, 1),
+                             nir_channel(&b, img_coord, 2), nir_undef(&b, 1, 32));
+   nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), outval,
                          nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true);
    return b.shader;
 }
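
For VK_RESOLVE_MODE_AVERAGE_BIT the sample loop accumulates into outval
and the nir_fdiv_imm above divides by the sample count; in scalar terms
(a sketch, with s[] standing for the fetched samples):

   float out = s[0];
   for (int i = 1; i < samples; i++)
      out += s[i];   /* the MIN/MAX modes fold with min()/max() instead */
   out /= samples;
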
index b649d65..3c3f45d 100644
@@ -47,17 +47,17 @@ build_resolve_fragment_shader(struct radv_device *dev, bool is_integer, int samp
    nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_color");
    color_out->data.location = FRAG_RESULT_DATA0;
 
-   nir_ssa_def *pos_in = nir_trim_vector(&b, nir_load_frag_coord(&b), 2);
-   nir_ssa_def *src_offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
+   nir_def *pos_in = nir_trim_vector(&b, nir_load_frag_coord(&b), 2);
+   nir_def *src_offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
 
-   nir_ssa_def *pos_int = nir_f2i32(&b, pos_in);
+   nir_def *pos_int = nir_f2i32(&b, pos_in);
 
-   nir_ssa_def *img_coord = nir_trim_vector(&b, nir_iadd(&b, pos_int, src_offset), 2);
+   nir_def *img_coord = nir_trim_vector(&b, nir_iadd(&b, pos_int, src_offset), 2);
    nir_variable *color = nir_local_variable_create(b.impl, glsl_vec4_type(), "color");
 
    radv_meta_build_resolve_shader_core(dev, &b, is_integer, samples, input_img, color, img_coord);
 
-   nir_ssa_def *outval = nir_load_var(&b, color);
+   nir_def *outval = nir_load_var(&b, color);
    nir_store_var(&b, color_out, outval, 0xf);
    return b.shader;
 }
@@ -260,18 +260,18 @@ build_depth_stencil_resolve_fragment_shader(struct radv_device *dev, int samples
    nir_variable *fs_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_out");
    fs_out->data.location = index == DEPTH_RESOLVE ? FRAG_RESULT_DEPTH : FRAG_RESULT_STENCIL;
 
-   nir_ssa_def *pos_in = nir_trim_vector(&b, nir_load_frag_coord(&b), 2);
+   nir_def *pos_in = nir_trim_vector(&b, nir_load_frag_coord(&b), 2);
 
-   nir_ssa_def *pos_int = nir_f2i32(&b, pos_in);
+   nir_def *pos_int = nir_f2i32(&b, pos_in);
 
-   nir_ssa_def *img_coord = nir_trim_vector(&b, pos_int, 2);
+   nir_def *img_coord = nir_trim_vector(&b, pos_int, 2);
 
    nir_deref_instr *input_img_deref = nir_build_deref_var(&b, input_img);
-   nir_ssa_def *outval = nir_txf_ms_deref(&b, input_img_deref, img_coord, nir_imm_int(&b, 0));
+   nir_def *outval = nir_txf_ms_deref(&b, input_img_deref, img_coord, nir_imm_int(&b, 0));
 
    if (resolve_mode != VK_RESOLVE_MODE_SAMPLE_ZERO_BIT) {
       for (int i = 1; i < samples; i++) {
-         nir_ssa_def *si = nir_txf_ms_deref(&b, input_img_deref, img_coord, nir_imm_int(&b, i));
+         nir_def *si = nir_txf_ms_deref(&b, input_img_deref, img_coord, nir_imm_int(&b, i));
 
          switch (resolve_mode) {
          case VK_RESOLVE_MODE_AVERAGE_BIT:
index a031862..c3f6adc 100644
@@ -41,25 +41,25 @@ typedef struct {
    const struct radv_shader_layout *layout;
 } apply_layout_state;
 
-static nir_ssa_def *
+static nir_def *
 get_scalar_arg(nir_builder *b, unsigned size, struct ac_arg arg)
 {
    assert(arg.used);
    return nir_load_scalar_arg_amd(b, size, .base = arg.arg_index);
 }
 
-static nir_ssa_def *
-convert_pointer_to_64_bit(nir_builder *b, apply_layout_state *state, nir_ssa_def *ptr)
+static nir_def *
+convert_pointer_to_64_bit(nir_builder *b, apply_layout_state *state, nir_def *ptr)
 {
    return nir_pack_64_2x32_split(b, ptr, nir_imm_int(b, state->address32_hi));
 }
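
nir_pack_64_2x32_split places its first operand in the low dword, so
convert_pointer_to_64_bit builds:

   addr64 = ((uint64_t)address32_hi << 32) | ptr32;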
 
-static nir_ssa_def *
+static nir_def *
 load_desc_ptr(nir_builder *b, apply_layout_state *state, unsigned set)
 {
    const struct radv_userdata_locations *user_sgprs_locs = &state->info->user_sgprs_locs;
    if (user_sgprs_locs->shader_data[AC_UD_INDIRECT_DESCRIPTOR_SETS].sgpr_idx != -1) {
-      nir_ssa_def *addr = get_scalar_arg(b, 1, state->args->descriptor_sets[0]);
+      nir_def *addr = get_scalar_arg(b, 1, state->args->descriptor_sets[0]);
       addr = convert_pointer_to_64_bit(b, state, addr);
       return nir_load_smem_amd(b, 1, addr, nir_imm_int(b, set * 4));
    }
@@ -77,7 +77,7 @@ visit_vulkan_resource_index(nir_builder *b, apply_layout_state *state, nir_intri
    unsigned offset = layout->binding[binding].offset;
    unsigned stride;
 
-   nir_ssa_def *set_ptr;
+   nir_def *set_ptr;
    if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
        layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
       unsigned idx = state->layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
@@ -89,7 +89,7 @@ visit_vulkan_resource_index(nir_builder *b, apply_layout_state *state, nir_intri
       stride = layout->binding[binding].size;
    }
 
-   nir_ssa_def *binding_ptr = nir_imul_imm(b, intrin->src[0].ssa, stride);
+   nir_def *binding_ptr = nir_imul_imm(b, intrin->src[0].ssa, stride);
    nir_instr_as_alu(binding_ptr->parent_instr)->no_unsigned_wrap = true;
 
    binding_ptr = nir_iadd_imm(b, binding_ptr, offset);
@@ -97,9 +97,9 @@ visit_vulkan_resource_index(nir_builder *b, apply_layout_state *state, nir_intri
 
    if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
       assert(stride == 16);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
+      nir_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
    } else {
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_vec3(b, set_ptr, binding_ptr, nir_imm_int(b, stride)));
+      nir_def_rewrite_uses(&intrin->dest.ssa, nir_vec3(b, set_ptr, binding_ptr, nir_imm_int(b, stride)));
    }
    nir_instr_remove(&intrin->instr);
 }
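
After this lowering, a vulkan_resource_index result has a fixed shape
that the reindex and load handlers below rely on (a summary of what the
code above builds):

   /* buffers:                 vec3(set_ptr, binding_ptr, stride)
    * acceleration structures: pack_64_2x32_split(set_ptr, binding_ptr),
    *                          with the stride fixed at 16 */
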
@@ -109,27 +109,27 @@ visit_vulkan_resource_reindex(nir_builder *b, apply_layout_state *state, nir_int
 {
    VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
    if (desc_type == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
-      nir_ssa_def *set_ptr = nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa);
-      nir_ssa_def *binding_ptr = nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa);
+      nir_def *set_ptr = nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa);
+      nir_def *binding_ptr = nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa);
 
-      nir_ssa_def *index = nir_imul_imm(b, intrin->src[1].ssa, 16);
+      nir_def *index = nir_imul_imm(b, intrin->src[1].ssa, 16);
       nir_instr_as_alu(index->parent_instr)->no_unsigned_wrap = true;
 
       binding_ptr = nir_iadd_nuw(b, binding_ptr, index);
 
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
+      nir_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
    } else {
       assert(desc_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
 
-      nir_ssa_def *binding_ptr = nir_channel(b, intrin->src[0].ssa, 1);
-      nir_ssa_def *stride = nir_channel(b, intrin->src[0].ssa, 2);
+      nir_def *binding_ptr = nir_channel(b, intrin->src[0].ssa, 1);
+      nir_def *stride = nir_channel(b, intrin->src[0].ssa, 2);
 
-      nir_ssa_def *index = nir_imul(b, intrin->src[1].ssa, stride);
+      nir_def *index = nir_imul(b, intrin->src[1].ssa, stride);
       nir_instr_as_alu(index->parent_instr)->no_unsigned_wrap = true;
 
       binding_ptr = nir_iadd_nuw(b, binding_ptr, index);
 
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, binding_ptr, 1));
+      nir_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, binding_ptr, 1));
    }
    nir_instr_remove(&intrin->instr);
 }
@@ -138,20 +138,20 @@ static void
 visit_load_vulkan_descriptor(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
 {
    if (nir_intrinsic_desc_type(intrin) == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
-      nir_ssa_def *addr = convert_pointer_to_64_bit(b, state,
-                                                    nir_iadd(b, nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa),
-                                                             nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa)));
-      nir_ssa_def *desc = nir_build_load_global(b, 1, 64, addr, .access = ACCESS_NON_WRITEABLE);
+      nir_def *addr = convert_pointer_to_64_bit(b, state,
+                                                nir_iadd(b, nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa),
+                                                         nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa)));
+      nir_def *desc = nir_build_load_global(b, 1, 64, addr, .access = ACCESS_NON_WRITEABLE);
 
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
+      nir_def_rewrite_uses(&intrin->dest.ssa, desc);
    } else {
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, nir_imm_int(b, 0), 2));
+      nir_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, nir_imm_int(b, 0), 2));
    }
    nir_instr_remove(&intrin->instr);
 }
 
-static nir_ssa_def *
-load_inline_buffer_descriptor(nir_builder *b, apply_layout_state *state, nir_ssa_def *rsrc)
+static nir_def *
+load_inline_buffer_descriptor(nir_builder *b, apply_layout_state *state, nir_def *rsrc)
 {
    uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                         S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
@@ -169,8 +169,8 @@ load_inline_buffer_descriptor(nir_builder *b, apply_layout_state *state, nir_ssa
                    nir_imm_int(b, desc_type));
 }
 
-static nir_ssa_def *
-load_buffer_descriptor(nir_builder *b, apply_layout_state *state, nir_ssa_def *rsrc, unsigned access)
+static nir_def *
+load_buffer_descriptor(nir_builder *b, apply_layout_state *state, nir_def *rsrc, unsigned access)
 {
    nir_binding binding = nir_chase_binding(nir_src_for_ssa(rsrc));
 
@@ -188,34 +188,34 @@ load_buffer_descriptor(nir_builder *b, apply_layout_state *state, nir_ssa_def *r
    if (access & ACCESS_NON_UNIFORM)
       return nir_iadd(b, nir_channel(b, rsrc, 0), nir_channel(b, rsrc, 1));
 
-   nir_ssa_def *desc_set = convert_pointer_to_64_bit(b, state, nir_channel(b, rsrc, 0));
+   nir_def *desc_set = convert_pointer_to_64_bit(b, state, nir_channel(b, rsrc, 0));
    return nir_load_smem_amd(b, 4, desc_set, nir_channel(b, rsrc, 1), .align_mul = 16);
 }
 
 static void
 visit_get_ssbo_size(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
 {
-   nir_ssa_def *rsrc = intrin->src[0].ssa;
+   nir_def *rsrc = intrin->src[0].ssa;
 
-   nir_ssa_def *size;
+   nir_def *size;
    if (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM) {
-      nir_ssa_def *ptr = nir_iadd(b, nir_channel(b, rsrc, 0), nir_channel(b, rsrc, 1));
+      nir_def *ptr = nir_iadd(b, nir_channel(b, rsrc, 0), nir_channel(b, rsrc, 1));
       ptr = nir_iadd_imm(b, ptr, 8);
       ptr = convert_pointer_to_64_bit(b, state, ptr);
       size = nir_build_load_global(b, 4, 32, ptr, .access = ACCESS_NON_WRITEABLE | ACCESS_CAN_REORDER, .align_mul = 16,
                                    .align_offset = 4);
    } else {
       /* load the entire descriptor so it can be CSE'd */
-      nir_ssa_def *ptr = convert_pointer_to_64_bit(b, state, nir_channel(b, rsrc, 0));
-      nir_ssa_def *desc = nir_load_smem_amd(b, 4, ptr, nir_channel(b, rsrc, 1), .align_mul = 16);
+      nir_def *ptr = convert_pointer_to_64_bit(b, state, nir_channel(b, rsrc, 0));
+      nir_def *desc = nir_load_smem_amd(b, 4, ptr, nir_channel(b, rsrc, 1), .align_mul = 16);
       size = nir_channel(b, desc, 2);
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, size);
+   nir_def_rewrite_uses(&intrin->dest.ssa, size);
    nir_instr_remove(&intrin->instr);
 }
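
Both paths read the buffer size from the third dword of the 16-byte
descriptor: the non-uniform path adds 8 bytes to the pointer, the uniform
path loads the whole descriptor and takes channel 2. Assumed dword layout
of the AMD buffer descriptor (V#), for orientation:

   /* word 0: base_address[31:0]
    * word 1: base_address[47:32] | stride << 16
    * word 2: num_records (the size)
    * word 3: dst_sel / format bits */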
 
-static nir_ssa_def *
+static nir_def *
 get_sampler_desc(nir_builder *b, apply_layout_state *state, nir_deref_instr *deref, enum ac_descriptor_type desc_type,
                  bool non_uniform, nir_tex_instr *tex, bool write)
 {
@@ -276,13 +276,13 @@ get_sampler_desc(nir_builder *b, apply_layout_state *state, nir_deref_instr *der
       break;
    }
 
-   nir_ssa_def *index = NULL;
+   nir_def *index = NULL;
    while (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
       unsigned array_size = MAX2(glsl_get_aoa_size(deref->type), 1);
       array_size *= binding->size;
 
-      nir_ssa_def *tmp = nir_imul_imm(b, deref->arr.index.ssa, array_size);
+      nir_def *tmp = nir_imul_imm(b, deref->arr.index.ssa, array_size);
       if (tmp != deref->arr.index.ssa)
          nir_instr_as_alu(tmp->parent_instr)->no_unsigned_wrap = true;
 
@@ -296,23 +296,23 @@ get_sampler_desc(nir_builder *b, apply_layout_state *state, nir_deref_instr *der
       deref = nir_deref_instr_parent(deref);
    }
 
-   nir_ssa_def *index_offset = index ? nir_iadd_imm(b, index, offset) : nir_imm_int(b, offset);
+   nir_def *index_offset = index ? nir_iadd_imm(b, index, offset) : nir_imm_int(b, offset);
    if (index && index_offset != index)
       nir_instr_as_alu(index_offset->parent_instr)->no_unsigned_wrap = true;
 
    if (non_uniform)
       return nir_iadd(b, load_desc_ptr(b, state, desc_set), index_offset);
 
-   nir_ssa_def *addr = convert_pointer_to_64_bit(b, state, load_desc_ptr(b, state, desc_set));
-   nir_ssa_def *desc = nir_load_smem_amd(b, size, addr, index_offset, .align_mul = size * 4u);
+   nir_def *addr = convert_pointer_to_64_bit(b, state, load_desc_ptr(b, state, desc_set));
+   nir_def *desc = nir_load_smem_amd(b, size, addr, index_offset, .align_mul = size * 4u);
 
    /* 3-plane formats always have the same size and format for planes 1 and 2,
     * so use the tail from plane 1 so that we can store only the first 16 bytes
     * of the last plane. */
    if (desc_type == AC_DESC_PLANE_2) {
-      nir_ssa_def *desc2 = get_sampler_desc(b, state, deref, AC_DESC_PLANE_1, non_uniform, tex, write);
+      nir_def *desc2 = get_sampler_desc(b, state, deref, AC_DESC_PLANE_1, non_uniform, tex, write);
 
-      nir_ssa_def *comp[8];
+      nir_def *comp[8];
       for (unsigned i = 0; i < 4; i++)
          comp[i] = nir_channel(b, desc, i);
       for (unsigned i = 4; i < 8; i++)
@@ -320,7 +320,7 @@ get_sampler_desc(nir_builder *b, apply_layout_state *state, nir_deref_instr *der
 
       return nir_vec(b, comp, 8);
    } else if (desc_type == AC_DESC_IMAGE && state->has_image_load_dcc_bug && !tex && !write) {
-      nir_ssa_def *comp[8];
+      nir_def *comp[8];
       for (unsigned i = 0; i < 8; i++)
          comp[i] = nir_channel(b, desc, i);
 
@@ -331,7 +331,7 @@ get_sampler_desc(nir_builder *b, apply_layout_state *state, nir_deref_instr *der
 
       return nir_vec(b, comp, 8);
    } else if (desc_type == AC_DESC_SAMPLER && tex->op == nir_texop_tg4 && !state->conformant_trunc_coord) {
-      nir_ssa_def *comp[4];
+      nir_def *comp[4];
       for (unsigned i = 0; i < 4; i++)
          comp[i] = nir_channel(b, desc, i);
 
@@ -354,11 +354,11 @@ update_image_intrinsic(nir_builder *b, apply_layout_state *state, nir_intrinsic_
    bool is_load =
       intrin->intrinsic == nir_intrinsic_image_deref_load || intrin->intrinsic == nir_intrinsic_image_deref_sparse_load;
 
-   nir_ssa_def *desc = get_sampler_desc(b, state, deref, dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE,
-                                        nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM, NULL, !is_load);
+   nir_def *desc = get_sampler_desc(b, state, deref, dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE,
+                                    nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM, NULL, !is_load);
 
    if (intrin->intrinsic == nir_intrinsic_image_deref_descriptor_amd) {
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
+      nir_def_rewrite_uses(&intrin->dest.ssa, desc);
       nir_instr_remove(&intrin->instr);
    } else {
       nir_rewrite_image_intrinsic(intrin, desc, true);
@@ -370,7 +370,7 @@ apply_layout_to_intrin(nir_builder *b, apply_layout_state *state, nir_intrinsic_
 {
    b->cursor = nir_before_instr(&intrin->instr);
 
-   nir_ssa_def *rsrc;
+   nir_def *rsrc;
    switch (intrin->intrinsic) {
    case nir_intrinsic_vulkan_resource_index:
       visit_vulkan_resource_index(b, state, intrin);
@@ -435,8 +435,8 @@ apply_layout_to_tex(nir_builder *b, apply_layout_state *state, nir_tex_instr *te
       }
    }
 
-   nir_ssa_def *image = NULL;
-   nir_ssa_def *sampler = NULL;
+   nir_def *image = NULL;
+   nir_def *sampler = NULL;
    if (plane >= 0) {
       assert(tex->op != nir_texop_txf_ms && tex->op != nir_texop_samples_identical);
       assert(tex->sampler_dim != GLSL_SAMPLER_DIM_BUF);
@@ -467,7 +467,7 @@ apply_layout_to_tex(nir_builder *b, apply_layout_state *state, nir_tex_instr *te
           */
          /* TODO: This is unnecessary for combined image+sampler.
           * We can do this when updating the desc set. */
-         nir_ssa_def *comp[4];
+         nir_def *comp[4];
          for (unsigned i = 0; i < 4; i++)
             comp[i] = nir_channel(b, sampler, i);
          comp[0] = nir_iand(b, comp[0], nir_channel(b, image, 7));
@@ -477,7 +477,7 @@ apply_layout_to_tex(nir_builder *b, apply_layout_state *state, nir_tex_instr *te
    }
 
    if (tex->op == nir_texop_descriptor_amd) {
-      nir_ssa_def_rewrite_uses(&tex->dest.ssa, image);
+      nir_def_rewrite_uses(&tex->dest.ssa, image);
       nir_instr_remove(&tex->instr);
       return;
    }
index 38a58b2..f0cd4de 100644
@@ -39,31 +39,31 @@ typedef struct {
    const struct radv_shader_info *info;
    const struct radv_pipeline_key *pl_key;
    uint32_t address32_hi;
-   nir_ssa_def *gsvs_ring[4];
+   nir_def *gsvs_ring[4];
 } lower_abi_state;
 
-static nir_ssa_def *
+static nir_def *
 load_ring(nir_builder *b, unsigned ring, lower_abi_state *s)
 {
    struct ac_arg arg =
       b->shader->info.stage == MESA_SHADER_TASK ? s->args->task_ring_offsets : s->args->ac.ring_offsets;
 
-   nir_ssa_def *ring_offsets = ac_nir_load_arg(b, &s->args->ac, arg);
+   nir_def *ring_offsets = ac_nir_load_arg(b, &s->args->ac, arg);
    ring_offsets = nir_pack_64_2x32_split(b, nir_channel(b, ring_offsets, 0), nir_channel(b, ring_offsets, 1));
    return nir_load_smem_amd(b, 4, ring_offsets, nir_imm_int(b, ring * 16u), .align_mul = 4u);
 }
 
-static nir_ssa_def *
+static nir_def *
 nggc_bool_setting(nir_builder *b, unsigned mask, lower_abi_state *s)
 {
-   nir_ssa_def *settings = ac_nir_load_arg(b, &s->args->ac, s->args->ngg_culling_settings);
+   nir_def *settings = ac_nir_load_arg(b, &s->args->ac, s->args->ngg_culling_settings);
    return nir_test_mask(b, settings, mask);
 }
 
-static nir_ssa_def *
+static nir_def *
 shader_query_bool_setting(nir_builder *b, unsigned mask, lower_abi_state *s)
 {
-   nir_ssa_def *settings = ac_nir_load_arg(b, &s->args->ac, s->args->shader_query_state);
+   nir_def *settings = ac_nir_load_arg(b, &s->args->ac, s->args->shader_query_state);
    return nir_test_mask(b, settings, mask);
 }
 
@@ -80,7 +80,7 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *replacement = NULL;
+   nir_def *replacement = NULL;
    bool progress = true;
 
    switch (intrin->intrinsic) {
@@ -129,13 +129,13 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
       /* Note, the HW always assumes there is at least 1 per-vertex param. */
       const unsigned total_num_params = MAX2(1, s->info->outinfo.param_exports) + s->info->outinfo.prim_param_exports;
 
-      nir_ssa_def *dword1 = nir_channel(b, replacement, 1);
+      nir_def *dword1 = nir_channel(b, replacement, 1);
       dword1 = nir_ior_imm(b, dword1, S_008F04_STRIDE(16 * total_num_params));
       replacement = nir_vector_insert_imm(b, replacement, dword1, 1);
       break;
 
    case nir_intrinsic_load_ring_attr_offset_amd: {
-      nir_ssa_def *ring_attr_offset = ac_nir_load_arg(b, &s->args->ac, s->args->ac.gs_attr_offset);
+      nir_def *ring_attr_offset = ac_nir_load_arg(b, &s->args->ac, s->args->ac.gs_attr_offset);
       replacement = nir_ishl_imm(b, nir_ubfe_imm(b, ring_attr_offset, 0, 15), 9); /* 512b increments. */
       break;
    }
@@ -148,7 +148,7 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
           * to optimize some multiplications (in address calculations) so that
           * constant additions can be added to the const offset in memory load instructions.
           */
-         nir_ssa_def *arg = ac_nir_load_arg(b, &s->args->ac, s->args->ac.tes_rel_patch_id);
+         nir_def *arg = ac_nir_load_arg(b, &s->args->ac, s->args->ac.tes_rel_patch_id);
 
          if (s->info->tes.tcs_vertices_out) {
             nir_intrinsic_instr *load_arg = nir_instr_as_intrinsic(arg->parent_instr);
@@ -203,7 +203,7 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
       replacement = ac_nir_load_arg(b, &s->args->ac, s->args->ac.merged_wave_info);
       break;
    case nir_intrinsic_load_cull_any_enabled_amd: {
-      nir_ssa_def *gs_tg_info = ac_nir_load_arg(b, &s->args->ac, s->args->ac.gs_tg_info);
+      nir_def *gs_tg_info = ac_nir_load_arg(b, &s->args->ac, s->args->ac.gs_tg_info);
 
       /* Consider a workgroup small if it contains less than 16 triangles.
        *
@@ -211,12 +211,12 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
        * so the below is equivalent to: "ult(ubfe(gs_tg_info, 22, 9), 16)", but
        * ACO can optimize out the comparison to zero (see try_optimize_scc_nocompare).
        */
-      nir_ssa_def *small_workgroup = nir_ieq_imm(b, nir_iand_imm(b, gs_tg_info, BITFIELD_RANGE(22 + 4, 9 - 4)), 0);
+      nir_def *small_workgroup = nir_ieq_imm(b, nir_iand_imm(b, gs_tg_info, BITFIELD_RANGE(22 + 4, 9 - 4)), 0);
 
-      nir_ssa_def *mask =
+      nir_def *mask =
          nir_bcsel(b, small_workgroup, nir_imm_int(b, radv_nggc_none),
                    nir_imm_int(b, radv_nggc_front_face | radv_nggc_back_face | radv_nggc_small_primitives));
-      nir_ssa_def *settings = ac_nir_load_arg(b, &s->args->ac, s->args->ngg_culling_settings);
+      nir_def *settings = ac_nir_load_arg(b, &s->args->ac, s->args->ngg_culling_settings);
       replacement = nir_ine_imm(b, nir_iand(b, settings, mask), 0);
       break;
    }
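
As a worked check of the mask trick above (an editorial sketch, not from the commit): ubfe(gs_tg_info, 22, 9) extracts a 9-bit field, and a 9-bit value is below 16 exactly when its top five bits are zero, i.e. when bits [26:30] of gs_tg_info — BITFIELD_RANGE(22 + 4, 9 - 4) — are all clear:

   #include <assert.h>
   #include <stdint.h>

   int main(void)
   {
      for (uint32_t field = 0; field < 512; field++) {
         uint32_t gs_tg_info = field << 22;
         /* ult(ubfe(gs_tg_info, 22, 9), 16) */
         int lt16 = ((gs_tg_info >> 22) & 0x1ff) < 16;
         /* ieq_imm(iand_imm(gs_tg_info, BITFIELD_RANGE(26, 5)), 0) */
         int top_clear = (gs_tg_info & (0x1fu << 26)) == 0;
         assert(lt16 == top_clear);
      }
      return 0;
   }
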
@@ -238,14 +238,14 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
        * exponent = nggc_settings >> 24
        * precision = 1.0 * 2 ^ exponent
        */
-      nir_ssa_def *settings = ac_nir_load_arg(b, &s->args->ac, s->args->ngg_culling_settings);
-      nir_ssa_def *exponent = nir_ishr_imm(b, settings, 24u);
+      nir_def *settings = ac_nir_load_arg(b, &s->args->ac, s->args->ngg_culling_settings);
+      nir_def *exponent = nir_ishr_imm(b, settings, 24u);
       replacement = nir_ldexp(b, nir_imm_float(b, 1.0f), exponent);
       break;
    }
 
    case nir_intrinsic_load_viewport_xy_scale_and_offset: {
-      nir_ssa_def *comps[] = {
+      nir_def *comps[] = {
          ac_nir_load_arg(b, &s->args->ac, s->args->ngg_viewport_scale[0]),
          ac_nir_load_arg(b, &s->args->ac, s->args->ngg_viewport_scale[1]),
          ac_nir_load_arg(b, &s->args->ac, s->args->ngg_viewport_translate[0]),
@@ -280,7 +280,7 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
          if (s->info->inputs_linked) {
             replacement = nir_imm_int(b, get_tcs_input_vertex_stride(s->info->tcs.num_linked_inputs));
          } else {
-            nir_ssa_def *lshs_vertex_stride =
+            nir_def *lshs_vertex_stride =
                GET_SGPR_FIELD_NIR(s->args->tcs_offchip_layout, TCS_OFFCHIP_LAYOUT_LSHS_VERTEX_STRIDE);
             replacement = nir_ishl_imm(b, lshs_vertex_stride, 2);
          }
@@ -296,7 +296,7 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
       break;
    }
    case nir_intrinsic_load_hs_out_patch_data_offset_amd: {
-      nir_ssa_def *out_vertices_per_patch;
+      nir_def *out_vertices_per_patch;
       unsigned num_tcs_outputs =
          stage == MESA_SHADER_TESS_CTRL ? s->info->tcs.num_linked_outputs : s->info->tes.num_linked_inputs;
 
@@ -310,13 +310,13 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
          }
       }
 
-      nir_ssa_def *per_vertex_output_patch_size = nir_imul_imm(b, out_vertices_per_patch, num_tcs_outputs * 16u);
+      nir_def *per_vertex_output_patch_size = nir_imul_imm(b, out_vertices_per_patch, num_tcs_outputs * 16u);
 
       if (s->info->num_tess_patches) {
          unsigned num_patches = s->info->num_tess_patches;
          replacement = nir_imul_imm(b, per_vertex_output_patch_size, num_patches);
       } else {
-         nir_ssa_def *num_patches;
+         nir_def *num_patches;
 
          if (stage == MESA_SHADER_TESS_CTRL) {
             num_patches = GET_SGPR_FIELD_NIR(s->args->tcs_offchip_layout, TCS_OFFCHIP_LAYOUT_NUM_PATCHES);
@@ -330,10 +330,10 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
    case nir_intrinsic_load_sample_positions_amd: {
       uint32_t sample_pos_offset = (RING_PS_SAMPLE_POSITIONS * 16) - 8;
 
-      nir_ssa_def *ring_offsets = ac_nir_load_arg(b, &s->args->ac, s->args->ac.ring_offsets);
-      nir_ssa_def *addr = nir_pack_64_2x32(b, ring_offsets);
-      nir_ssa_def *sample_id = nir_umin(b, intrin->src[0].ssa, nir_imm_int(b, 7));
-      nir_ssa_def *offset = nir_ishl_imm(b, sample_id, 3); /* 2 floats containing samplepos.xy */
+      nir_def *ring_offsets = ac_nir_load_arg(b, &s->args->ac, s->args->ac.ring_offsets);
+      nir_def *addr = nir_pack_64_2x32(b, ring_offsets);
+      nir_def *sample_id = nir_umin(b, intrin->src[0].ssa, nir_imm_int(b, 7));
+      nir_def *offset = nir_ishl_imm(b, sample_id, 3); /* 2 floats containing samplepos.xy */
 
       nir_const_value *const_num_samples = nir_src_as_const_value(intrin->src[1]);
       if (const_num_samples) {
@@ -400,8 +400,8 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
       replacement = ac_nir_load_arg(b, &s->args->ac, s->args->ac.streamout_write_index);
       break;
    case nir_intrinsic_load_streamout_buffer_amd: {
-      nir_ssa_def *ptr = nir_pack_64_2x32_split(b, ac_nir_load_arg(b, &s->args->ac, s->args->streamout_buffers),
-                                                nir_imm_int(b, s->address32_hi));
+      nir_def *ptr = nir_pack_64_2x32_split(b, ac_nir_load_arg(b, &s->args->ac, s->args->streamout_buffers),
+                                            nir_imm_int(b, s->address32_hi));
       replacement = nir_load_smem_amd(b, 4, ptr, nir_imm_int(b, nir_intrinsic_base(intrin) * 16));
       break;
    }
@@ -461,19 +461,19 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
       replacement = ac_nir_load_arg(b, &s->args->ac, s->args->ac.force_vrs_rates);
       break;
    case nir_intrinsic_load_fully_covered: {
-      nir_ssa_def *sample_coverage = ac_nir_load_arg(b, &s->args->ac, s->args->ac.sample_coverage);
+      nir_def *sample_coverage = ac_nir_load_arg(b, &s->args->ac, s->args->ac.sample_coverage);
       replacement = nir_ine_imm(b, sample_coverage, 0);
       break;
    }
    case nir_intrinsic_load_barycentric_optimize_amd: {
-      nir_ssa_def *prim_mask = ac_nir_load_arg(b, &s->args->ac, s->args->ac.prim_mask);
+      nir_def *prim_mask = ac_nir_load_arg(b, &s->args->ac, s->args->ac.prim_mask);
       /* enabled when bit 31 is set */
       replacement = nir_ilt_imm(b, prim_mask, 0);
       break;
    }
    case nir_intrinsic_load_poly_line_smooth_enabled:
       if (s->pl_key->dynamic_line_rast_mode) {
-         nir_ssa_def *line_rast_mode = GET_SGPR_FIELD_NIR(s->args->ps_state, PS_STATE_LINE_RAST_MODE);
+         nir_def *line_rast_mode = GET_SGPR_FIELD_NIR(s->args->ps_state, PS_STATE_LINE_RAST_MODE);
          replacement = nir_ieq_imm(b, line_rast_mode, VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT);
       } else {
          replacement = nir_imm_bool(b, s->pl_key->ps.line_smooth_enabled);
@@ -499,7 +499,7 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
       return false;
 
    if (replacement)
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, replacement);
+      nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
 
    nir_instr_remove(instr);
    nir_instr_free(instr);
@@ -507,10 +507,10 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
    return true;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_gsvs_ring(nir_builder *b, lower_abi_state *s, unsigned stream_id)
 {
-   nir_ssa_def *ring = load_ring(b, RING_GSVS_GS, s);
+   nir_def *ring = load_ring(b, RING_GSVS_GS, s);
    unsigned stream_offset = 0;
    unsigned stride = 0;
    for (unsigned i = 0; i <= stream_id; i++) {
@@ -523,7 +523,7 @@ load_gsvs_ring(nir_builder *b, lower_abi_state *s, unsigned stream_id)
    assert(stride < (1 << 14));
 
    if (stream_offset) {
-      nir_ssa_def *addr = nir_pack_64_2x32_split(b, nir_channel(b, ring, 0), nir_channel(b, ring, 1));
+      nir_def *addr = nir_pack_64_2x32_split(b, nir_channel(b, ring, 0), nir_channel(b, ring, 1));
       addr = nir_iadd_imm(b, addr, stream_offset);
       ring = nir_vector_insert_imm(b, ring, nir_unpack_64_2x32_split_x(b, addr), 0);
       ring = nir_vector_insert_imm(b, ring, nir_unpack_64_2x32_split_y(b, addr), 1);
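
The descriptor fix-up just above is the recurring 32+32 → 64-bit address dance in this file. A compressed sketch of it as a standalone helper (editorial; assumes, as in the code above, that the first two dwords of the ring descriptor hold the base address):

   static nir_def *
   offset_ring_base(nir_builder *b, nir_def *ring, unsigned byte_offset)
   {
      nir_def *addr = nir_pack_64_2x32_split(b, nir_channel(b, ring, 0),
                                             nir_channel(b, ring, 1));
      addr = nir_iadd_imm(b, addr, byte_offset);
      ring = nir_vector_insert_imm(b, ring, nir_unpack_64_2x32_split_x(b, addr), 0);
      ring = nir_vector_insert_imm(b, ring, nir_unpack_64_2x32_split_y(b, addr), 1);
      return ring;
   }
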
diff --git a/src/amd/vulkan/nir/radv_nir_lower_fs_barycentric.c b/src/amd/vulkan/nir/radv_nir_lower_fs_barycentric.c
index 9560992..36c7304 100644
@@ -32,34 +32,34 @@ typedef struct {
    unsigned rast_prim;
 } lower_fs_barycentric_state;
 
-static nir_ssa_def *
-lower_interp_center_smooth(nir_builder *b, nir_ssa_def *offset)
+static nir_def *
+lower_interp_center_smooth(nir_builder *b, nir_def *offset)
 {
-   nir_ssa_def *pull_model = nir_load_barycentric_model(b, 32);
+   nir_def *pull_model = nir_load_barycentric_model(b, 32);
 
-   nir_ssa_def *deriv_x =
+   nir_def *deriv_x =
       nir_vec3(b, nir_fddx_fine(b, nir_channel(b, pull_model, 0)), nir_fddx_fine(b, nir_channel(b, pull_model, 1)),
                nir_fddx_fine(b, nir_channel(b, pull_model, 2)));
-   nir_ssa_def *deriv_y =
+   nir_def *deriv_y =
       nir_vec3(b, nir_fddy_fine(b, nir_channel(b, pull_model, 0)), nir_fddy_fine(b, nir_channel(b, pull_model, 1)),
                nir_fddy_fine(b, nir_channel(b, pull_model, 2)));
 
-   nir_ssa_def *offset_x = nir_channel(b, offset, 0);
-   nir_ssa_def *offset_y = nir_channel(b, offset, 1);
+   nir_def *offset_x = nir_channel(b, offset, 0);
+   nir_def *offset_y = nir_channel(b, offset, 1);
 
-   nir_ssa_def *adjusted_x = nir_fadd(b, pull_model, nir_fmul(b, deriv_x, offset_x));
-   nir_ssa_def *adjusted = nir_fadd(b, adjusted_x, nir_fmul(b, deriv_y, offset_y));
+   nir_def *adjusted_x = nir_fadd(b, pull_model, nir_fmul(b, deriv_x, offset_x));
+   nir_def *adjusted = nir_fadd(b, adjusted_x, nir_fmul(b, deriv_y, offset_y));
 
-   nir_ssa_def *ij = nir_vec2(b, nir_channel(b, adjusted, 0), nir_channel(b, adjusted, 1));
+   nir_def *ij = nir_vec2(b, nir_channel(b, adjusted, 0), nir_channel(b, adjusted, 1));
 
    /* Get W by using the reciprocal of 1/W. */
-   nir_ssa_def *w = nir_frcp(b, nir_channel(b, adjusted, 2));
+   nir_def *w = nir_frcp(b, nir_channel(b, adjusted, 2));
 
    return nir_fmul(b, ij, w);
 }
 
-static nir_ssa_def *
-lower_barycentric_coord_at_offset(nir_builder *b, nir_ssa_def *src, enum glsl_interp_mode mode)
+static nir_def *
+lower_barycentric_coord_at_offset(nir_builder *b, nir_def *src, enum glsl_interp_mode mode)
 {
    if (mode == INTERP_MODE_SMOOTH)
       return lower_interp_center_smooth(b, src);
@@ -67,15 +67,15 @@ lower_barycentric_coord_at_offset(nir_builder *b, nir_ssa_def *src, enum glsl_in
    return nir_load_barycentric_at_offset(b, 32, src, .interp_mode = mode);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_barycentric_coord_at_sample(nir_builder *b, lower_fs_barycentric_state *state, nir_intrinsic_instr *intrin)
 {
    const enum glsl_interp_mode mode = (enum glsl_interp_mode)nir_intrinsic_interp_mode(intrin);
-   nir_ssa_def *num_samples = nir_load_rasterization_samples_amd(b);
-   nir_ssa_def *new_dest;
+   nir_def *num_samples = nir_load_rasterization_samples_amd(b);
+   nir_def *new_dest;
 
    if (state->dynamic_rasterization_samples) {
-      nir_ssa_def *res1, *res2;
+      nir_def *res1, *res2;
 
       nir_push_if(b, nir_ieq_imm(b, num_samples, 1));
       {
@@ -83,7 +83,7 @@ lower_barycentric_coord_at_sample(nir_builder *b, lower_fs_barycentric_state *st
       }
       nir_push_else(b, NULL);
       {
-         nir_ssa_def *sample_pos = nir_load_sample_positions_amd(b, 32, intrin->src[0].ssa, num_samples);
+         nir_def *sample_pos = nir_load_sample_positions_amd(b, 32, intrin->src[0].ssa, num_samples);
 
          /* sample_pos -= 0.5 */
          sample_pos = nir_fadd_imm(b, sample_pos, -0.5f);
@@ -97,7 +97,7 @@ lower_barycentric_coord_at_sample(nir_builder *b, lower_fs_barycentric_state *st
       if (!state->num_rasterization_samples) {
          new_dest = nir_load_barycentric_pixel(b, 32, .interp_mode = nir_intrinsic_interp_mode(intrin));
       } else {
-         nir_ssa_def *sample_pos = nir_load_sample_positions_amd(b, 32, intrin->src[0].ssa, num_samples);
+         nir_def *sample_pos = nir_load_sample_positions_amd(b, 32, intrin->src[0].ssa, num_samples);
 
          /* sample_pos -= 0.5 */
          sample_pos = nir_fadd_imm(b, sample_pos, -0.5f);
@@ -109,7 +109,7 @@ lower_barycentric_coord_at_sample(nir_builder *b, lower_fs_barycentric_state *st
    return new_dest;
 }
 
-static nir_ssa_def *
+static nir_def *
 get_interp_param(nir_builder *b, lower_fs_barycentric_state *state, nir_intrinsic_instr *intrin)
 {
    const enum glsl_interp_mode mode = (enum glsl_interp_mode)nir_intrinsic_interp_mode(intrin);
@@ -130,10 +130,10 @@ get_interp_param(nir_builder *b, lower_fs_barycentric_state *state, nir_intrinsi
    return NULL;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_point(nir_builder *b)
 {
-   nir_ssa_def *coords[3];
+   nir_def *coords[3];
 
    coords[0] = nir_imm_float(b, 1.0f);
    coords[1] = nir_imm_float(b, 0.0f);
@@ -142,10 +142,10 @@ lower_point(nir_builder *b)
    return nir_vec(b, coords, 3);
 }
 
-static nir_ssa_def *
-lower_line(nir_builder *b, nir_ssa_def *p1, nir_ssa_def *p2)
+static nir_def *
+lower_line(nir_builder *b, nir_def *p1, nir_def *p2)
 {
-   nir_ssa_def *coords[3];
+   nir_def *coords[3];
 
    coords[1] = nir_fadd(b, p1, p2);
    coords[0] = nir_fsub_imm(b, 1.0f, coords[1]);
@@ -154,20 +154,20 @@ lower_line(nir_builder *b, nir_ssa_def *p1, nir_ssa_def *p2)
    return nir_vec(b, coords, 3);
 }
 
-static nir_ssa_def *
-lower_triangle(nir_builder *b, nir_ssa_def *p1, nir_ssa_def *p2)
+static nir_def *
+lower_triangle(nir_builder *b, nir_def *p1, nir_def *p2)
 {
-   nir_ssa_def *v0_bary[3], *v1_bary[3], *v2_bary[3];
-   nir_ssa_def *coords[3];
+   nir_def *v0_bary[3], *v1_bary[3], *v2_bary[3];
+   nir_def *coords[3];
 
    /* Compute the provoking vertex ID:
     *
     * quad_id = thread_id >> 2
     * provoking_vtx_id = (provoking_vtx >> (quad_id << 1)) & 3
     */
-   nir_ssa_def *quad_id = nir_ushr_imm(b, nir_load_subgroup_invocation(b), 2);
-   nir_ssa_def *provoking_vtx = nir_load_provoking_vtx_amd(b);
-   nir_ssa_def *provoking_vtx_id = nir_ubfe(b, provoking_vtx, nir_ishl_imm(b, quad_id, 1), nir_imm_int(b, 2));
+   nir_def *quad_id = nir_ushr_imm(b, nir_load_subgroup_invocation(b), 2);
+   nir_def *provoking_vtx = nir_load_provoking_vtx_amd(b);
+   nir_def *provoking_vtx_id = nir_ubfe(b, provoking_vtx, nir_ishl_imm(b, quad_id, 1), nir_imm_int(b, 2));
 
    /* Compute barycentrics. */
    v0_bary[0] = nir_fsub(b, nir_fsub_imm(b, 1.0f, p2), p1);
@@ -194,30 +194,30 @@ lower_triangle(nir_builder *b, nir_ssa_def *p1, nir_ssa_def *p2)
 static bool
 lower_load_barycentric_coord(nir_builder *b, lower_fs_barycentric_state *state, nir_intrinsic_instr *intrin)
 {
-   nir_ssa_def *interp, *p1, *p2;
-   nir_ssa_def *new_dest;
+   nir_def *interp, *p1, *p2;
+   nir_def *new_dest;
 
    b->cursor = nir_after_instr(&intrin->instr);
 
    /* When the rasterization primitive isn't known at compile time (GPL), load it. */
    if (state->rast_prim == -1) {
-      nir_ssa_def *rast_prim = nir_load_rasterization_primitive_amd(b);
-      nir_ssa_def *res1, *res2;
+      nir_def *rast_prim = nir_load_rasterization_primitive_amd(b);
+      nir_def *res1, *res2;
 
-      nir_ssa_def *is_point = nir_ieq_imm(b, rast_prim, V_028A6C_POINTLIST);
+      nir_def *is_point = nir_ieq_imm(b, rast_prim, V_028A6C_POINTLIST);
       nir_if *if_point = nir_push_if(b, is_point);
       {
          res1 = lower_point(b);
       }
       nir_push_else(b, if_point);
       {
-         nir_ssa_def *res_line, *res_triangle;
+         nir_def *res_line, *res_triangle;
 
          interp = get_interp_param(b, state, intrin);
          p1 = nir_channel(b, interp, 0);
          p2 = nir_channel(b, interp, 1);
 
-         nir_ssa_def *is_line = nir_ieq_imm(b, rast_prim, V_028A6C_LINESTRIP);
+         nir_def *is_line = nir_ieq_imm(b, rast_prim, V_028A6C_LINESTRIP);
          nir_if *if_line = nir_push_if(b, is_line);
          {
             res_line = lower_line(b, p1, p2);
@@ -250,7 +250,7 @@ lower_load_barycentric_coord(nir_builder *b, lower_fs_barycentric_state *state,
       }
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, new_dest);
+   nir_def_rewrite_uses(&intrin->dest.ssa, new_dest);
    nir_instr_remove(&intrin->instr);
 
    return true;
diff --git a/src/amd/vulkan/nir/radv_nir_lower_fs_intrinsics.c b/src/amd/vulkan/nir/radv_nir_lower_fs_intrinsics.c
index efe5838..d992f16 100644
@@ -49,21 +49,21 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs
 
          switch (intrin->intrinsic) {
          case nir_intrinsic_load_sample_mask_in: {
-            nir_ssa_def *sample_coverage = nir_load_vector_arg_amd(&b, 1, .base = args->ac.sample_coverage.arg_index);
+            nir_def *sample_coverage = nir_load_vector_arg_amd(&b, 1, .base = args->ac.sample_coverage.arg_index);
 
-            nir_ssa_def *def = NULL;
+            nir_def *def = NULL;
             if (info->ps.uses_sample_shading || key->ps.sample_shading_enable) {
                /* gl_SampleMaskIn[0] = (SampleCoverage & (PsIterMask << gl_SampleID)). */
-               nir_ssa_def *ps_state = nir_load_scalar_arg_amd(&b, 1, .base = args->ps_state.arg_index);
-               nir_ssa_def *ps_iter_mask =
+               nir_def *ps_state = nir_load_scalar_arg_amd(&b, 1, .base = args->ps_state.arg_index);
+               nir_def *ps_iter_mask =
                   nir_ubfe_imm(&b, ps_state, PS_STATE_PS_ITER_MASK__SHIFT, util_bitcount(PS_STATE_PS_ITER_MASK__MASK));
-               nir_ssa_def *sample_id = nir_load_sample_id(&b);
+               nir_def *sample_id = nir_load_sample_id(&b);
                def = nir_iand(&b, sample_coverage, nir_ishl(&b, ps_iter_mask, sample_id));
             } else {
                def = sample_coverage;
             }
 
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, def);
+            nir_def_rewrite_uses(&intrin->dest.ssa, def);
 
             nir_instr_remove(instr);
             progress = true;
@@ -73,35 +73,35 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs
             if (!key->adjust_frag_coord_z)
                continue;
 
-            if (!(nir_ssa_def_components_read(&intrin->dest.ssa) & (1 << 2)))
+            if (!(nir_def_components_read(&intrin->dest.ssa) & (1 << 2)))
                continue;
 
-            nir_ssa_def *frag_z = nir_channel(&b, &intrin->dest.ssa, 2);
+            nir_def *frag_z = nir_channel(&b, &intrin->dest.ssa, 2);
 
             /* adjusted_frag_z = fddx_fine(frag_z) * 0.0625 + frag_z */
-            nir_ssa_def *adjusted_frag_z = nir_fddx_fine(&b, frag_z);
+            nir_def *adjusted_frag_z = nir_fddx_fine(&b, frag_z);
             adjusted_frag_z = nir_ffma_imm1(&b, adjusted_frag_z, 0.0625f, frag_z);
 
             /* VRS Rate X = Ancillary[2:3] */
-            nir_ssa_def *ancillary = nir_load_vector_arg_amd(&b, 1, .base = args->ac.ancillary.arg_index);
-            nir_ssa_def *x_rate = nir_ubfe_imm(&b, ancillary, 2, 2);
+            nir_def *ancillary = nir_load_vector_arg_amd(&b, 1, .base = args->ac.ancillary.arg_index);
+            nir_def *x_rate = nir_ubfe_imm(&b, ancillary, 2, 2);
 
             /* xRate = xRate == 0x1 ? adjusted_frag_z : frag_z. */
-            nir_ssa_def *cond = nir_ieq_imm(&b, x_rate, 1);
+            nir_def *cond = nir_ieq_imm(&b, x_rate, 1);
             frag_z = nir_bcsel(&b, cond, adjusted_frag_z, frag_z);
 
-            nir_ssa_def *new_dest = nir_vector_insert_imm(&b, &intrin->dest.ssa, frag_z, 2);
-            nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa, new_dest, new_dest->parent_instr);
+            nir_def *new_dest = nir_vector_insert_imm(&b, &intrin->dest.ssa, frag_z, 2);
+            nir_def_rewrite_uses_after(&intrin->dest.ssa, new_dest, new_dest->parent_instr);
 
             progress = true;
             break;
          }
          case nir_intrinsic_load_barycentric_at_sample: {
-            nir_ssa_def *num_samples = nir_load_rasterization_samples_amd(&b);
-            nir_ssa_def *new_dest;
+            nir_def *num_samples = nir_load_rasterization_samples_amd(&b);
+            nir_def *new_dest;
 
             if (key->dynamic_rasterization_samples) {
-               nir_ssa_def *res1, *res2;
+               nir_def *res1, *res2;
 
                nir_push_if(&b, nir_ieq_imm(&b, num_samples, 1));
                {
@@ -109,7 +109,7 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs
                }
                nir_push_else(&b, NULL);
                {
-                  nir_ssa_def *sample_pos = nir_load_sample_positions_amd(&b, 32, intrin->src[0].ssa, num_samples);
+                  nir_def *sample_pos = nir_load_sample_positions_amd(&b, 32, intrin->src[0].ssa, num_samples);
 
                   /* sample_pos -= 0.5 */
                   sample_pos = nir_fadd_imm(&b, sample_pos, -0.5f);
@@ -124,7 +124,7 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs
                if (!key->ps.num_samples) {
                   new_dest = nir_load_barycentric_pixel(&b, 32, .interp_mode = nir_intrinsic_interp_mode(intrin));
                } else {
-                  nir_ssa_def *sample_pos = nir_load_sample_positions_amd(&b, 32, intrin->src[0].ssa, num_samples);
+                  nir_def *sample_pos = nir_load_sample_positions_amd(&b, 32, intrin->src[0].ssa, num_samples);
 
                   /* sample_pos -= 0.5 */
                   sample_pos = nir_fadd_imm(&b, sample_pos, -0.5f);
@@ -134,7 +134,7 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs
                }
             }
 
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, new_dest);
+            nir_def_rewrite_uses(&intrin->dest.ssa, new_dest);
             nir_instr_remove(instr);
 
             progress = true;
diff --git a/src/amd/vulkan/nir/radv_nir_lower_intrinsics_early.c b/src/amd/vulkan/nir/radv_nir_lower_intrinsics_early.c
index df06692..54b6840 100644
@@ -43,7 +43,7 @@ radv_nir_lower_intrinsics_early(nir_shader *nir, const struct radv_pipeline_key
          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
          b.cursor = nir_before_instr(&intrin->instr);
 
-         nir_ssa_def *def = NULL;
+         nir_def *def = NULL;
          switch (intrin->intrinsic) {
          case nir_intrinsic_is_sparse_texels_resident:
             def = nir_ieq_imm(&b, intrin->src[0].ssa, 0);
@@ -60,7 +60,7 @@ radv_nir_lower_intrinsics_early(nir_shader *nir, const struct radv_pipeline_key
             continue;
          }
 
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, def);
+         nir_def_rewrite_uses(&intrin->dest.ssa, def);
 
          nir_instr_remove(instr);
          progress = true;
diff --git a/src/amd/vulkan/nir/radv_nir_lower_primitive_shading_rate.c b/src/amd/vulkan/nir/radv_nir_lower_primitive_shading_rate.c
index 6930480..ceb15a4 100644
@@ -51,17 +51,17 @@ radv_nir_lower_primitive_shading_rate(nir_shader *nir, enum amd_gfx_level gfx_le
 
          b.cursor = nir_before_instr(instr);
 
-         nir_ssa_def *val = nir_ssa_for_src(&b, intr->src[1], 1);
+         nir_def *val = nir_ssa_for_src(&b, intr->src[1], 1);
 
          /* x_rate = (shadingRate & (Horizontal2Pixels | Horizontal4Pixels)) ? 0x1 : 0x0; */
-         nir_ssa_def *x_rate = nir_iand_imm(&b, val, 12);
+         nir_def *x_rate = nir_iand_imm(&b, val, 12);
          x_rate = nir_b2i32(&b, nir_ine_imm(&b, x_rate, 0));
 
          /* y_rate = (shadingRate & (Vertical2Pixels | Vertical4Pixels)) ? 0x1 : 0x0; */
-         nir_ssa_def *y_rate = nir_iand_imm(&b, val, 3);
+         nir_def *y_rate = nir_iand_imm(&b, val, 3);
          y_rate = nir_b2i32(&b, nir_ine_imm(&b, y_rate, 0));
 
-         nir_ssa_def *out = NULL;
+         nir_def *out = NULL;
 
          /* MS:
           * Primitive shading rate is a per-primitive output, it is
diff --git a/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c b/src/amd/vulkan/nir/radv_nir_lower_ray_queries.c
index d336dbc..36a88db 100644
@@ -58,20 +58,20 @@ rq_variable_create(void *ctx, nir_shader *shader, unsigned array_length, const s
    return result;
 }
 
-static nir_ssa_def *
-nir_load_array(nir_builder *b, nir_variable *array, nir_ssa_def *index)
+static nir_def *
+nir_load_array(nir_builder *b, nir_variable *array, nir_def *index)
 {
    return nir_load_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, array), index));
 }
 
 static void
-nir_store_array(nir_builder *b, nir_variable *array, nir_ssa_def *index, nir_ssa_def *value, unsigned writemask)
+nir_store_array(nir_builder *b, nir_variable *array, nir_def *index, nir_def *value, unsigned writemask)
 {
    nir_store_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, array), index), value, writemask);
 }
 
 static nir_deref_instr *
-rq_deref_var(nir_builder *b, nir_ssa_def *index, rq_variable *var)
+rq_deref_var(nir_builder *b, nir_def *index, rq_variable *var)
 {
    if (var->array_length == 1)
       return nir_build_deref_var(b, var->variable);
@@ -79,8 +79,8 @@ rq_deref_var(nir_builder *b, nir_ssa_def *index, rq_variable *var)
    return nir_build_deref_array(b, nir_build_deref_var(b, var->variable), index);
 }
 
-static nir_ssa_def *
-rq_load_var(nir_builder *b, nir_ssa_def *index, rq_variable *var)
+static nir_def *
+rq_load_var(nir_builder *b, nir_def *index, rq_variable *var)
 {
    if (var->array_length == 1)
       return nir_load_var(b, var->variable);
@@ -89,7 +89,7 @@ rq_load_var(nir_builder *b, nir_ssa_def *index, rq_variable *var)
 }
 
 static void
-rq_store_var(nir_builder *b, nir_ssa_def *index, rq_variable *var, nir_ssa_def *value, unsigned writemask)
+rq_store_var(nir_builder *b, nir_def *index, rq_variable *var, nir_def *value, unsigned writemask)
 {
    if (var->array_length == 1) {
       nir_store_var(b, var->variable, value, writemask);
@@ -99,13 +99,13 @@ rq_store_var(nir_builder *b, nir_ssa_def *index, rq_variable *var, nir_ssa_def *
 }
 
 static void
-rq_copy_var(nir_builder *b, nir_ssa_def *index, rq_variable *dst, rq_variable *src, unsigned mask)
+rq_copy_var(nir_builder *b, nir_def *index, rq_variable *dst, rq_variable *src, unsigned mask)
 {
    rq_store_var(b, index, dst, rq_load_var(b, index, src), mask);
 }
 
-static nir_ssa_def *
-rq_load_array(nir_builder *b, nir_ssa_def *index, rq_variable *var, nir_ssa_def *array_index)
+static nir_def *
+rq_load_array(nir_builder *b, nir_def *index, rq_variable *var, nir_def *array_index)
 {
    if (var->array_length == 1)
       return nir_load_array(b, var->variable, array_index);
@@ -115,7 +115,7 @@ rq_load_array(nir_builder *b, nir_ssa_def *index, rq_variable *var, nir_ssa_def
 }
 
 static void
-rq_store_array(nir_builder *b, nir_ssa_def *index, rq_variable *var, nir_ssa_def *array_index, nir_ssa_def *value,
+rq_store_array(nir_builder *b, nir_def *index, rq_variable *var, nir_def *array_index, nir_def *value,
                unsigned writemask)
 {
    if (var->array_length == 1) {
@@ -282,7 +282,7 @@ lower_ray_query(nir_shader *shader, nir_variable *ray_query, struct hash_table *
 }
 
 static void
-copy_candidate_to_closest(nir_builder *b, nir_ssa_def *index, struct ray_query_vars *vars)
+copy_candidate_to_closest(nir_builder *b, nir_def *index, struct ray_query_vars *vars)
 {
    rq_copy_var(b, index, vars->closest.barycentrics, vars->candidate.barycentrics, 0x3);
    rq_copy_var(b, index, vars->closest.geometry_id_and_flags, vars->candidate.geometry_id_and_flags, 0x1);
@@ -296,10 +296,10 @@ copy_candidate_to_closest(nir_builder *b, nir_ssa_def *index, struct ray_query_v
 }
 
 static void
-insert_terminate_on_first_hit(nir_builder *b, nir_ssa_def *index, struct ray_query_vars *vars,
+insert_terminate_on_first_hit(nir_builder *b, nir_def *index, struct ray_query_vars *vars,
                               const struct radv_ray_flags *ray_flags, bool break_on_terminate)
 {
-   nir_ssa_def *terminate_on_first_hit;
+   nir_def *terminate_on_first_hit;
    if (ray_flags)
       terminate_on_first_hit = ray_flags->terminate_on_first_hit;
    else
@@ -315,16 +315,14 @@ insert_terminate_on_first_hit(nir_builder *b, nir_ssa_def *index, struct ray_que
 }
 
 static void
-lower_rq_confirm_intersection(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *instr,
-                              struct ray_query_vars *vars)
+lower_rq_confirm_intersection(nir_builder *b, nir_def *index, nir_intrinsic_instr *instr, struct ray_query_vars *vars)
 {
    copy_candidate_to_closest(b, index, vars);
    insert_terminate_on_first_hit(b, index, vars, NULL, false);
 }
 
 static void
-lower_rq_generate_intersection(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *instr,
-                               struct ray_query_vars *vars)
+lower_rq_generate_intersection(nir_builder *b, nir_def *index, nir_intrinsic_instr *instr, struct ray_query_vars *vars)
 {
    nir_push_if(b, nir_iand(b, nir_fge(b, rq_load_var(b, index, vars->closest.t), instr->src[1].ssa),
                            nir_fge(b, instr->src[1].ssa, rq_load_var(b, index, vars->tmin))));
@@ -339,7 +337,7 @@ lower_rq_generate_intersection(nir_builder *b, nir_ssa_def *index, nir_intrinsic
 enum rq_intersection_type { intersection_type_none, intersection_type_triangle, intersection_type_aabb };
 
 static void
-lower_rq_initialize(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *instr, struct ray_query_vars *vars,
+lower_rq_initialize(nir_builder *b, nir_def *index, nir_intrinsic_instr *instr, struct ray_query_vars *vars,
                     struct radv_instance *instance)
 {
    rq_store_var(b, index, vars->flags, instr->src[2].ssa, 0x1);
@@ -356,12 +354,12 @@ lower_rq_initialize(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *ins
    rq_store_var(b, index, vars->closest.t, instr->src[7].ssa, 0x1);
    rq_store_var(b, index, vars->closest.intersection_type, nir_imm_int(b, intersection_type_none), 0x1);
 
-   nir_ssa_def *accel_struct = instr->src[1].ssa;
+   nir_def *accel_struct = instr->src[1].ssa;
 
-   nir_ssa_def *bvh_offset = nir_build_load_global(
+   nir_def *bvh_offset = nir_build_load_global(
       b, 1, 32, nir_iadd_imm(b, accel_struct, offsetof(struct radv_accel_struct_header, bvh_offset)),
       .access = ACCESS_NON_WRITEABLE);
-   nir_ssa_def *bvh_base = nir_iadd(b, accel_struct, nir_u2u64(b, bvh_offset));
+   nir_def *bvh_base = nir_iadd(b, accel_struct, nir_u2u64(b, bvh_offset));
    bvh_base = build_addr_to_node(b, bvh_base);
 
    rq_store_var(b, index, vars->root_bvh_base, bvh_base, 0x1);
@@ -371,7 +369,7 @@ lower_rq_initialize(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *ins
       rq_store_var(b, index, vars->trav.stack, nir_imm_int(b, 0), 0x1);
       rq_store_var(b, index, vars->trav.stack_low_watermark, nir_imm_int(b, 0), 0x1);
    } else {
-      nir_ssa_def *base_offset = nir_imul_imm(b, nir_load_local_invocation_index(b), sizeof(uint32_t));
+      nir_def *base_offset = nir_imul_imm(b, nir_load_local_invocation_index(b), sizeof(uint32_t));
       base_offset = nir_iadd_imm(b, base_offset, vars->shared_base);
       rq_store_var(b, index, vars->trav.stack, base_offset, 0x1);
       rq_store_var(b, index, vars->trav.stack_low_watermark, base_offset, 0x1);
@@ -387,8 +385,8 @@ lower_rq_initialize(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *ins
    rq_store_var(b, index, vars->incomplete, nir_imm_bool(b, !(instance->debug_flags & RADV_DEBUG_NO_RT)), 0x1);
 }
 
-static nir_ssa_def *
-lower_rq_load(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *instr, struct ray_query_vars *vars)
+static nir_def *
+lower_rq_load(nir_builder *b, nir_def *index, nir_intrinsic_instr *instr, struct ray_query_vars *vars)
 {
    bool committed = nir_intrinsic_committed(instr);
    struct ray_query_intersection_vars *intersection = committed ? &vars->closest : &vars->candidate;
@@ -409,7 +407,7 @@ lower_rq_load(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *instr, st
    case nir_ray_query_value_intersection_geometry_index:
       return nir_iand_imm(b, rq_load_var(b, index, intersection->geometry_id_and_flags), 0xFFFFFF);
    case nir_ray_query_value_intersection_instance_custom_index: {
-      nir_ssa_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
+      nir_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
       return nir_iand_imm(
          b,
          nir_build_load_global(
@@ -418,27 +416,27 @@ lower_rq_load(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *instr, st
          0xFFFFFF);
    }
    case nir_ray_query_value_intersection_instance_id: {
-      nir_ssa_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
+      nir_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
       return nir_build_load_global(
          b, 1, 32, nir_iadd_imm(b, instance_node_addr, offsetof(struct radv_bvh_instance_node, instance_id)));
    }
    case nir_ray_query_value_intersection_instance_sbt_index:
       return nir_iand_imm(b, rq_load_var(b, index, intersection->sbt_offset_and_flags), 0xFFFFFF);
    case nir_ray_query_value_intersection_object_ray_direction: {
-      nir_ssa_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
-      nir_ssa_def *wto_matrix[3];
+      nir_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
+      nir_def *wto_matrix[3];
       nir_build_wto_matrix_load(b, instance_node_addr, wto_matrix);
       return nir_build_vec3_mat_mult(b, rq_load_var(b, index, vars->direction), wto_matrix, false);
    }
    case nir_ray_query_value_intersection_object_ray_origin: {
-      nir_ssa_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
-      nir_ssa_def *wto_matrix[3];
+      nir_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
+      nir_def *wto_matrix[3];
       nir_build_wto_matrix_load(b, instance_node_addr, wto_matrix);
       return nir_build_vec3_mat_mult(b, rq_load_var(b, index, vars->origin), wto_matrix, true);
    }
    case nir_ray_query_value_intersection_object_to_world: {
-      nir_ssa_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
-      nir_ssa_def *rows[3];
+      nir_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
+      nir_def *rows[3];
       for (unsigned r = 0; r < 3; ++r)
          rows[r] = nir_build_load_global(
             b, 4, 32,
@@ -452,19 +450,19 @@ lower_rq_load(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *instr, st
    case nir_ray_query_value_intersection_t:
       return rq_load_var(b, index, intersection->t);
    case nir_ray_query_value_intersection_type: {
-      nir_ssa_def *intersection_type = rq_load_var(b, index, intersection->intersection_type);
+      nir_def *intersection_type = rq_load_var(b, index, intersection->intersection_type);
       if (!committed)
          intersection_type = nir_iadd_imm(b, intersection_type, -1);
 
       return intersection_type;
    }
    case nir_ray_query_value_intersection_world_to_object: {
-      nir_ssa_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
+      nir_def *instance_node_addr = rq_load_var(b, index, intersection->instance_addr);
 
-      nir_ssa_def *wto_matrix[3];
+      nir_def *wto_matrix[3];
       nir_build_wto_matrix_load(b, instance_node_addr, wto_matrix);
 
-      nir_ssa_def *vals[3];
+      nir_def *vals[3];
       for (unsigned i = 0; i < 3; ++i)
          vals[i] = nir_channel(b, wto_matrix[i], column);
 
@@ -485,7 +483,7 @@ lower_rq_load(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *instr, st
 
 struct traversal_data {
    struct ray_query_vars *vars;
-   nir_ssa_def *index;
+   nir_def *index;
 };
 
 static void
@@ -494,7 +492,7 @@ handle_candidate_aabb(nir_builder *b, struct radv_leaf_intersection *intersectio
 {
    struct traversal_data *data = args->data;
    struct ray_query_vars *vars = data->vars;
-   nir_ssa_def *index = data->index;
+   nir_def *index = data->index;
 
    rq_store_var(b, index, vars->candidate.primitive_id, intersection->primitive_id, 1);
    rq_store_var(b, index, vars->candidate.geometry_id_and_flags, intersection->geometry_id_and_flags, 1);
@@ -510,7 +508,7 @@ handle_candidate_triangle(nir_builder *b, struct radv_triangle_intersection *int
 {
    struct traversal_data *data = args->data;
    struct ray_query_vars *vars = data->vars;
-   nir_ssa_def *index = data->index;
+   nir_def *index = data->index;
 
    rq_store_var(b, index, vars->candidate.barycentrics, intersection->barycentrics, 3);
    rq_store_var(b, index, vars->candidate.primitive_id, intersection->base.primitive_id, 1);
@@ -533,7 +531,7 @@ handle_candidate_triangle(nir_builder *b, struct radv_triangle_intersection *int
 }
 
 static void
-store_stack_entry(nir_builder *b, nir_ssa_def *index, nir_ssa_def *value, const struct radv_ray_traversal_args *args)
+store_stack_entry(nir_builder *b, nir_def *index, nir_def *value, const struct radv_ray_traversal_args *args)
 {
    struct traversal_data *data = args->data;
    if (data->vars->stack)
@@ -542,8 +540,8 @@ store_stack_entry(nir_builder *b, nir_ssa_def *index, nir_ssa_def *value, const
       nir_store_shared(b, value, index, .base = 0, .align_mul = 4);
 }
 
-static nir_ssa_def *
-load_stack_entry(nir_builder *b, nir_ssa_def *index, const struct radv_ray_traversal_args *args)
+static nir_def *
+load_stack_entry(nir_builder *b, nir_def *index, const struct radv_ray_traversal_args *args)
 {
    struct traversal_data *data = args->data;
    if (data->vars->stack)
@@ -552,8 +550,8 @@ load_stack_entry(nir_builder *b, nir_ssa_def *index, const struct radv_ray_trave
       return nir_load_shared(b, 1, 32, index, .base = 0, .align_mul = 4);
 }
 
-static nir_ssa_def *
-lower_rq_proceed(nir_builder *b, nir_ssa_def *index, struct ray_query_vars *vars, struct radv_device *device)
+static nir_def *
+lower_rq_proceed(nir_builder *b, nir_def *index, struct ray_query_vars *vars, struct radv_device *device)
 {
    nir_variable *inv_dir = nir_local_variable_create(b->impl, glsl_vector_type(GLSL_TYPE_FLOAT, 3), "inv_dir");
    nir_store_var(b, inv_dir, nir_frcp(b, rq_load_var(b, index, vars->trav.direction)), 0x7);
@@ -608,7 +606,7 @@ lower_rq_proceed(nir_builder *b, nir_ssa_def *index, struct ray_query_vars *vars
 
    nir_push_if(b, rq_load_var(b, index, vars->incomplete));
    {
-      nir_ssa_def *incomplete = radv_build_ray_traversal(device, b, &args);
+      nir_def *incomplete = radv_build_ray_traversal(device, b, &args);
       rq_store_var(b, index, vars->incomplete, nir_iand(b, rq_load_var(b, index, vars->incomplete), incomplete), 1);
    }
    nir_pop_if(b, NULL);
@@ -617,7 +615,7 @@ lower_rq_proceed(nir_builder *b, nir_ssa_def *index, struct ray_query_vars *vars
 }
 
 static void
-lower_rq_terminate(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *instr, struct ray_query_vars *vars)
+lower_rq_terminate(nir_builder *b, nir_def *index, nir_intrinsic_instr *instr, struct ray_query_vars *vars)
 {
    rq_store_var(b, index, vars->incomplete, nir_imm_false(b), 0x1);
 }
@@ -663,7 +661,7 @@ radv_nir_lower_ray_queries(struct nir_shader *shader, struct radv_device *device
                continue;
 
             nir_deref_instr *ray_query_deref = nir_instr_as_deref(intrinsic->src[0].ssa->parent_instr);
-            nir_ssa_def *index = NULL;
+            nir_def *index = NULL;
 
             if (ray_query_deref->deref_type == nir_deref_type_array) {
                index = ray_query_deref->arr.index.ssa;
@@ -677,7 +675,7 @@ radv_nir_lower_ray_queries(struct nir_shader *shader, struct radv_device *device
 
             builder.cursor = nir_before_instr(instr);
 
-            nir_ssa_def *new_dest = NULL;
+            nir_def *new_dest = NULL;
 
             switch (intrinsic->intrinsic) {
             case nir_intrinsic_rq_confirm_intersection:
@@ -703,7 +701,7 @@ radv_nir_lower_ray_queries(struct nir_shader *shader, struct radv_device *device
             }
 
             if (new_dest)
-               nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
+               nir_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
 
             nir_instr_remove(instr);
             nir_instr_free(instr);
diff --git a/src/amd/vulkan/nir/radv_nir_lower_view_index.c b/src/amd/vulkan/nir/radv_nir_lower_view_index.c
index b0ff02a..ce24dd6 100644
@@ -71,8 +71,8 @@ radv_nir_lower_view_index(nir_shader *nir, bool per_primitive)
 
          layer->data.per_primitive = per_primitive;
          b.cursor = nir_before_instr(instr);
-         nir_ssa_def *def = nir_load_var(&b, layer);
-         nir_ssa_def_rewrite_uses(&load->dest.ssa, def);
+         nir_def *def = nir_load_var(&b, layer);
+         nir_def_rewrite_uses(&load->dest.ssa, def);
 
          /* Update inputs_read to reflect that the pass added a new input. */
          nir->info.inputs_read |= VARYING_BIT_LAYER;
diff --git a/src/amd/vulkan/nir/radv_nir_lower_viewport_to_zero.c b/src/amd/vulkan/nir/radv_nir_lower_viewport_to_zero.c
index 3e17f39..3c2d56a 100644
@@ -51,7 +51,7 @@ radv_nir_lower_viewport_to_zero(nir_shader *nir)
 
          b.cursor = nir_before_instr(instr);
 
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_imm_zero(&b, 1, 32));
+         nir_def_rewrite_uses(&intr->dest.ssa, nir_imm_zero(&b, 1, 32));
          progress = true;
          break;
       }
diff --git a/src/amd/vulkan/nir/radv_nir_lower_vs_inputs.c b/src/amd/vulkan/nir/radv_nir_lower_vs_inputs.c
index 2e40644..5e0ff61 100644
@@ -37,7 +37,7 @@ typedef struct {
    const struct radeon_info *rad_info;
 } lower_vs_inputs_state;
 
-static nir_ssa_def *
+static nir_def *
 lower_load_vs_input_from_prolog(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs_state *s)
 {
    nir_src *offset_src = nir_get_io_offset_src(intrin);
@@ -56,7 +56,7 @@ lower_load_vs_input_from_prolog(nir_builder *b, nir_intrinsic_instr *intrin, low
    const unsigned arg_bit_size = MAX2(bit_size, 32);
 
    unsigned num_input_args = 1;
-   nir_ssa_def *input_args[2] = {ac_nir_load_arg(b, &s->args->ac, s->args->vs_inputs[driver_location]), NULL};
+   nir_def *input_args[2] = {ac_nir_load_arg(b, &s->args->ac, s->args->vs_inputs[driver_location]), NULL};
    if (component * 32 + arg_bit_size * num_components > 128) {
       assert(bit_size == 64);
 
@@ -64,8 +64,7 @@ lower_load_vs_input_from_prolog(nir_builder *b, nir_intrinsic_instr *intrin, low
       input_args[1] = ac_nir_load_arg(b, &s->args->ac, s->args->vs_inputs[driver_location + 1]);
    }
 
-   nir_ssa_def *extracted =
-      nir_extract_bits(b, input_args, num_input_args, component * 32, num_components, arg_bit_size);
+   nir_def *extracted = nir_extract_bits(b, input_args, num_input_args, component * 32, num_components, arg_bit_size);
 
    if (bit_size < arg_bit_size) {
       assert(bit_size == 16);
@@ -79,20 +78,20 @@ lower_load_vs_input_from_prolog(nir_builder *b, nir_intrinsic_instr *intrin, low
    return extracted;
 }
 
-static nir_ssa_def *
+static nir_def *
 calc_vs_input_index_instance_rate(nir_builder *b, unsigned location, lower_vs_inputs_state *s)
 {
    const uint32_t divisor = s->pl_key->vs.instance_rate_divisors[location];
-   nir_ssa_def *start_instance = nir_load_base_instance(b);
+   nir_def *start_instance = nir_load_base_instance(b);
 
    if (divisor == 0)
       return start_instance;
 
-   nir_ssa_def *instance_id = nir_udiv_imm(b, nir_load_instance_id(b), divisor);
+   nir_def *instance_id = nir_udiv_imm(b, nir_load_instance_id(b), divisor);
    return nir_iadd(b, start_instance, instance_id);
 }
 
-static nir_ssa_def *
+static nir_def *
 calc_vs_input_index(nir_builder *b, unsigned location, lower_vs_inputs_state *s)
 {
    if (s->pl_key->vs.instance_rate_inputs & BITFIELD_BIT(location))
@@ -112,7 +111,7 @@ can_use_untyped_load(const struct util_format_description *f, const unsigned bit
    return c->size == bit_size && bit_size >= 32;
 }
 
-static nir_ssa_def *
+static nir_def *
 oob_input_load_value(nir_builder *b, const unsigned channel_idx, const unsigned bit_size, const bool is_float)
 {
    /* 22.1.1. Attribute Location and Component Assignment of Vulkan 1.3 specification:
@@ -120,7 +119,7 @@ oob_input_load_value(nir_builder *b, const unsigned channel_idx, const unsigned
     * must not use more components than provided by the attribute.
     */
    if (bit_size == 64)
-      return nir_ssa_undef(b, 1, bit_size);
+      return nir_undef(b, 1, bit_size);
 
    if (channel_idx == 3) {
       if (is_float)
@@ -175,8 +174,8 @@ first_used_swizzled_channel(const struct util_format_description *f, const unsig
    return first_used;
 }
 
-static nir_ssa_def *
-adjust_vertex_fetch_alpha(nir_builder *b, enum ac_vs_input_alpha_adjust alpha_adjust, nir_ssa_def *alpha)
+static nir_def *
+adjust_vertex_fetch_alpha(nir_builder *b, enum ac_vs_input_alpha_adjust alpha_adjust, nir_def *alpha)
 {
    if (alpha_adjust == AC_ALPHA_ADJUST_SSCALED)
       alpha = nir_f2u32(b, alpha);
@@ -201,7 +200,7 @@ adjust_vertex_fetch_alpha(nir_builder *b, enum ac_vs_input_alpha_adjust alpha_ad
    return alpha;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs_state *s)
 {
    nir_src *offset_src = nir_get_io_offset_src(intrin);
@@ -226,13 +225,13 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs
    /* Bitmask of components in bit_size units
     * of the current input load that are actually used.
     */
-   const unsigned dest_use_mask = nir_ssa_def_components_read(&intrin->dest.ssa) << component;
+   const unsigned dest_use_mask = nir_def_components_read(&intrin->dest.ssa) << component;
 
    /* If the input is entirely unused, just replace it with undef.
     * This is just in case we debug this pass without running DCE first.
     */
    if (!dest_use_mask)
-      return nir_ssa_undef(b, dest_num_components, bit_size);
+      return nir_undef(b, dest_num_components, bit_size);
 
    const uint32_t attrib_binding = s->pl_key->vs.vertex_attribute_bindings[location];
    const uint32_t attrib_offset = s->pl_key->vs.vertex_attribute_offsets[location];
@@ -244,12 +243,11 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs
    const unsigned binding_index = s->info->vs.use_per_attribute_vb_descs ? location : attrib_binding;
    const unsigned desc_index = util_bitcount(s->info->vs.vb_desc_usage_mask & u_bit_consecutive(0, binding_index));
 
-   nir_ssa_def *vertex_buffers_arg = ac_nir_load_arg(b, &s->args->ac, s->args->ac.vertex_buffers);
-   nir_ssa_def *vertex_buffers =
-      nir_pack_64_2x32_split(b, vertex_buffers_arg, nir_imm_int(b, s->rad_info->address32_hi));
-   nir_ssa_def *descriptor = nir_load_smem_amd(b, 4, vertex_buffers, nir_imm_int(b, desc_index * 16));
-   nir_ssa_def *base_index = calc_vs_input_index(b, location, s);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *vertex_buffers_arg = ac_nir_load_arg(b, &s->args->ac, s->args->ac.vertex_buffers);
+   nir_def *vertex_buffers = nir_pack_64_2x32_split(b, vertex_buffers_arg, nir_imm_int(b, s->rad_info->address32_hi));
+   nir_def *descriptor = nir_load_smem_amd(b, 4, vertex_buffers, nir_imm_int(b, desc_index * 16));
+   nir_def *base_index = calc_vs_input_index(b, location, s);
+   nir_def *zero = nir_imm_int(b, 0);
 
    /* We currently implement swizzling for all formats in shaders.
     * Note, it is possible to specify swizzling in the DST_SEL fields of descriptors,
@@ -290,13 +288,13 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs
     * This is necessary because the backend can't further roll the const offset
     * into the index source of MUBUF / MTBUF instructions.
     */
-   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS] = {0};
+   nir_def *loads[NIR_MAX_VEC_COMPONENTS] = {0};
    unsigned num_loads = 0;
    for (unsigned x = 0, channels; x < fetch_num_channels; x += channels) {
       channels = fetch_num_channels - x;
       const unsigned start = skipped_start + x;
       enum pipe_format fetch_format = attrib_format;
-      nir_ssa_def *index = base_index;
+      nir_def *index = base_index;
 
       /* Add excess constant offset to the index. */
       unsigned const_off = attrib_offset + count_format_bytes(f, 0, start);
@@ -339,7 +337,7 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs
       }
    }
 
-   nir_ssa_def *load = loads[0];
+   nir_def *load = loads[0];
 
    /* Extract the channels we actually need when we couldn't skip starting
     * components or had to emit more than one load intrinsic.
@@ -357,7 +355,7 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs
     * Apply swizzle and alpha adjust according to the format.
     */
    const nir_alu_type dst_type = nir_alu_type_get_base_type(nir_intrinsic_dest_type(intrin));
-   nir_ssa_def *channels[NIR_MAX_VEC_COMPONENTS] = {0};
+   nir_def *channels[NIR_MAX_VEC_COMPONENTS] = {0};
    for (unsigned i = 0; i < dest_num_components; ++i) {
       const unsigned c = i + component;
 
@@ -400,7 +398,7 @@ lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *replacement = NULL;
+   nir_def *replacement = NULL;
 
    if (s->info->vs.dynamic_inputs) {
       replacement = lower_load_vs_input_from_prolog(b, intrin, s);
@@ -408,7 +406,7 @@ lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state)
       replacement = lower_load_vs_input(b, intrin, s);
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, replacement);
+   nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
    nir_instr_remove(instr);
    nir_instr_free(instr);
 
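One detail of the index math above, restated as a plain C sketch (editorial; mirrors calc_vs_input_index_instance_rate): a zero instance-rate divisor pins every instance to the base element, while a nonzero divisor advances the fetch once every `divisor` instances:

   #include <stdint.h>

   static uint32_t
   instance_rate_index(uint32_t base_instance, uint32_t instance_id,
                       uint32_t divisor)
   {
      return divisor ? base_instance + instance_id / divisor
                     : base_instance;
   }
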
diff --git a/src/amd/vulkan/radv_device_generated_commands.c b/src/amd/vulkan/radv_device_generated_commands.c
index 78dd7bd..bbc7c58 100644
@@ -156,15 +156,15 @@ enum {
 };
 
 struct dgc_cmdbuf {
-   nir_ssa_def *descriptor;
+   nir_def *descriptor;
    nir_variable *offset;
 };
 
 static void
-dgc_emit(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *value)
+dgc_emit(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *value)
 {
    assert(value->bit_size >= 32);
-   nir_ssa_def *offset = nir_load_var(b, cs->offset);
+   nir_def *offset = nir_load_var(b, cs->offset);
    nir_store_ssbo(b, value, cs->descriptor, offset, .access = ACCESS_NON_READABLE);
    nir_store_var(b, cs->offset, nir_iadd_imm(b, offset, value->num_components * value->bit_size / 8), 0x1);
 }
@@ -188,14 +188,14 @@ dgc_emit(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *value)
    nir_pack_64_2x32((b), nir_load_push_constant((b), 2, 32, nir_imm_int((b), 0),                                       \
                                                 .base = offsetof(struct radv_dgc_params, field), .range = 8))
 
-static nir_ssa_def *
-nir_pkt3(nir_builder *b, unsigned op, nir_ssa_def *len)
+static nir_def *
+nir_pkt3(nir_builder *b, unsigned op, nir_def *len)
 {
    len = nir_iand_imm(b, len, 0x3fff);
    return nir_ior_imm(b, nir_ishl_imm(b, len, 16), PKT_TYPE_S(3) | PKT3_IT_OPCODE_S(op));
 }
 
-static nir_ssa_def *
+static nir_def *
 dgc_get_nop_packet(nir_builder *b, const struct radv_device *device)
 {
    if (device->physical_device->rad_info.gfx_ib_pad_with_type2) {
@@ -206,18 +206,18 @@ dgc_get_nop_packet(nir_builder *b, const struct radv_device *device)
 }
 
 static void
-dgc_emit_userdata_vertex(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *vtx_base_sgpr, nir_ssa_def *first_vertex,
-                         nir_ssa_def *first_instance, nir_ssa_def *drawid, const struct radv_device *device)
+dgc_emit_userdata_vertex(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *vtx_base_sgpr, nir_def *first_vertex,
+                         nir_def *first_instance, nir_def *drawid, const struct radv_device *device)
 {
    vtx_base_sgpr = nir_u2u32(b, vtx_base_sgpr);
-   nir_ssa_def *has_drawid = nir_test_mask(b, vtx_base_sgpr, DGC_USES_DRAWID);
-   nir_ssa_def *has_baseinstance = nir_test_mask(b, vtx_base_sgpr, DGC_USES_BASEINSTANCE);
+   nir_def *has_drawid = nir_test_mask(b, vtx_base_sgpr, DGC_USES_DRAWID);
+   nir_def *has_baseinstance = nir_test_mask(b, vtx_base_sgpr, DGC_USES_BASEINSTANCE);
 
-   nir_ssa_def *pkt_cnt = nir_imm_int(b, 1);
+   nir_def *pkt_cnt = nir_imm_int(b, 1);
    pkt_cnt = nir_bcsel(b, has_drawid, nir_iadd_imm(b, pkt_cnt, 1), pkt_cnt);
    pkt_cnt = nir_bcsel(b, has_baseinstance, nir_iadd_imm(b, pkt_cnt, 1), pkt_cnt);
 
-   nir_ssa_def *values[5] = {
+   nir_def *values[5] = {
       nir_pkt3(b, PKT3_SET_SH_REG, pkt_cnt), nir_iand_imm(b, vtx_base_sgpr, 0x3FFF), first_vertex,
       dgc_get_nop_packet(b, device),         dgc_get_nop_packet(b, device),
    };
@@ -230,51 +230,51 @@ dgc_emit_userdata_vertex(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *vtx
 }
 
 static void
-dgc_emit_instance_count(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *instance_count)
+dgc_emit_instance_count(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *instance_count)
 {
-   nir_ssa_def *values[2] = {nir_imm_int(b, PKT3(PKT3_NUM_INSTANCES, 0, false)), instance_count};
+   nir_def *values[2] = {nir_imm_int(b, PKT3(PKT3_NUM_INSTANCES, 0, false)), instance_count};
 
    dgc_emit(b, cs, nir_vec(b, values, 2));
 }
 
 static void
-dgc_emit_draw_index_offset_2(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *index_offset, nir_ssa_def *index_count,
-                             nir_ssa_def *max_index_count)
+dgc_emit_draw_index_offset_2(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *index_offset, nir_def *index_count,
+                             nir_def *max_index_count)
 {
-   nir_ssa_def *values[5] = {nir_imm_int(b, PKT3(PKT3_DRAW_INDEX_OFFSET_2, 3, false)), max_index_count, index_offset,
-                             index_count, nir_imm_int(b, V_0287F0_DI_SRC_SEL_DMA)};
+   nir_def *values[5] = {nir_imm_int(b, PKT3(PKT3_DRAW_INDEX_OFFSET_2, 3, false)), max_index_count, index_offset,
+                         index_count, nir_imm_int(b, V_0287F0_DI_SRC_SEL_DMA)};
 
    dgc_emit(b, cs, nir_vec(b, values, 5));
 }
 
 static void
-dgc_emit_draw_index_auto(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *vertex_count)
+dgc_emit_draw_index_auto(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *vertex_count)
 {
-   nir_ssa_def *values[3] = {nir_imm_int(b, PKT3(PKT3_DRAW_INDEX_AUTO, 1, false)), vertex_count,
-                             nir_imm_int(b, V_0287F0_DI_SRC_SEL_AUTO_INDEX)};
+   nir_def *values[3] = {nir_imm_int(b, PKT3(PKT3_DRAW_INDEX_AUTO, 1, false)), vertex_count,
+                         nir_imm_int(b, V_0287F0_DI_SRC_SEL_AUTO_INDEX)};
 
    dgc_emit(b, cs, nir_vec(b, values, 3));
 }
 
 static void
-build_dgc_buffer_tail(nir_builder *b, nir_ssa_def *sequence_count, const struct radv_device *device)
+build_dgc_buffer_tail(nir_builder *b, nir_def *sequence_count, const struct radv_device *device)
 {
-   nir_ssa_def *global_id = get_global_ids(b, 1);
+   nir_def *global_id = get_global_ids(b, 1);
 
-   nir_ssa_def *cmd_buf_stride = load_param32(b, cmd_buf_stride);
-   nir_ssa_def *cmd_buf_size = load_param32(b, cmd_buf_size);
+   nir_def *cmd_buf_stride = load_param32(b, cmd_buf_stride);
+   nir_def *cmd_buf_size = load_param32(b, cmd_buf_size);
 
    nir_push_if(b, nir_ieq_imm(b, global_id, 0));
    {
-      nir_ssa_def *cmd_buf_tail_start = nir_imul(b, cmd_buf_stride, sequence_count);
+      nir_def *cmd_buf_tail_start = nir_imul(b, cmd_buf_stride, sequence_count);
 
       nir_variable *offset = nir_variable_create(b->shader, nir_var_shader_temp, glsl_uint_type(), "offset");
       nir_store_var(b, offset, cmd_buf_tail_start, 0x1);
 
-      nir_ssa_def *dst_buf = radv_meta_load_descriptor(b, 0, DGC_DESC_PREPARE);
+      nir_def *dst_buf = radv_meta_load_descriptor(b, 0, DGC_DESC_PREPARE);
       nir_push_loop(b);
       {
-         nir_ssa_def *curr_offset = nir_load_var(b, offset);
+         nir_def *curr_offset = nir_load_var(b, offset);
          const unsigned MAX_PACKET_WORDS = 0x3FFC;
 
          nir_push_if(b, nir_ieq(b, curr_offset, cmd_buf_size));
@@ -283,7 +283,7 @@ build_dgc_buffer_tail(nir_builder *b, nir_ssa_def *sequence_count, const struct
          }
          nir_pop_if(b, NULL);
 
-         nir_ssa_def *packet, *packet_size;
+         nir_def *packet, *packet_size;
 
          if (device->physical_device->rad_info.gfx_ib_pad_with_type2) {
             packet_size = nir_imm_int(b, 4);
@@ -292,7 +292,7 @@ build_dgc_buffer_tail(nir_builder *b, nir_ssa_def *sequence_count, const struct
             packet_size = nir_isub(b, cmd_buf_size, curr_offset);
             packet_size = nir_umin(b, packet_size, nir_imm_int(b, MAX_PACKET_WORDS * 4));
 
-            nir_ssa_def *len = nir_ushr_imm(b, packet_size, 2);
+            nir_def *len = nir_ushr_imm(b, packet_size, 2);
             len = nir_iadd_imm(b, len, -2);
             packet = nir_pkt3(b, PKT3_NOP, len);
          }
@@ -309,17 +309,17 @@ build_dgc_buffer_tail(nir_builder *b, nir_ssa_def *sequence_count, const struct
  * Emit VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV.
  */
 static void
-dgc_emit_draw(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *stream_buf, nir_ssa_def *stream_base,
-              nir_ssa_def *draw_params_offset, nir_ssa_def *sequence_id, const struct radv_device *device)
+dgc_emit_draw(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *stream_buf, nir_def *stream_base,
+              nir_def *draw_params_offset, nir_def *sequence_id, const struct radv_device *device)
 {
-   nir_ssa_def *vtx_base_sgpr = load_param16(b, vtx_base_sgpr);
-   nir_ssa_def *stream_offset = nir_iadd(b, draw_params_offset, stream_base);
+   nir_def *vtx_base_sgpr = load_param16(b, vtx_base_sgpr);
+   nir_def *stream_offset = nir_iadd(b, draw_params_offset, stream_base);
 
-   nir_ssa_def *draw_data0 = nir_load_ssbo(b, 4, 32, stream_buf, stream_offset);
-   nir_ssa_def *vertex_count = nir_channel(b, draw_data0, 0);
-   nir_ssa_def *instance_count = nir_channel(b, draw_data0, 1);
-   nir_ssa_def *vertex_offset = nir_channel(b, draw_data0, 2);
-   nir_ssa_def *first_instance = nir_channel(b, draw_data0, 3);
+   nir_def *draw_data0 = nir_load_ssbo(b, 4, 32, stream_buf, stream_offset);
+   nir_def *vertex_count = nir_channel(b, draw_data0, 0);
+   nir_def *instance_count = nir_channel(b, draw_data0, 1);
+   nir_def *vertex_offset = nir_channel(b, draw_data0, 2);
+   nir_def *first_instance = nir_channel(b, draw_data0, 3);
 
    nir_push_if(b, nir_iand(b, nir_ine_imm(b, vertex_count, 0), nir_ine_imm(b, instance_count, 0)));
    {
@@ -334,20 +334,20 @@ dgc_emit_draw(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *stream_buf, ni
  * Emit VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV.
  */
 static void
-dgc_emit_draw_indexed(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *stream_buf, nir_ssa_def *stream_base,
-                      nir_ssa_def *draw_params_offset, nir_ssa_def *sequence_id, nir_ssa_def *max_index_count,
+dgc_emit_draw_indexed(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *stream_buf, nir_def *stream_base,
+                      nir_def *draw_params_offset, nir_def *sequence_id, nir_def *max_index_count,
                       const struct radv_device *device)
 {
-   nir_ssa_def *vtx_base_sgpr = load_param16(b, vtx_base_sgpr);
-   nir_ssa_def *stream_offset = nir_iadd(b, draw_params_offset, stream_base);
+   nir_def *vtx_base_sgpr = load_param16(b, vtx_base_sgpr);
+   nir_def *stream_offset = nir_iadd(b, draw_params_offset, stream_base);
 
-   nir_ssa_def *draw_data0 = nir_load_ssbo(b, 4, 32, stream_buf, stream_offset);
-   nir_ssa_def *draw_data1 = nir_load_ssbo(b, 1, 32, stream_buf, nir_iadd_imm(b, stream_offset, 16));
-   nir_ssa_def *index_count = nir_channel(b, draw_data0, 0);
-   nir_ssa_def *instance_count = nir_channel(b, draw_data0, 1);
-   nir_ssa_def *first_index = nir_channel(b, draw_data0, 2);
-   nir_ssa_def *vertex_offset = nir_channel(b, draw_data0, 3);
-   nir_ssa_def *first_instance = nir_channel(b, draw_data1, 0);
+   nir_def *draw_data0 = nir_load_ssbo(b, 4, 32, stream_buf, stream_offset);
+   nir_def *draw_data1 = nir_load_ssbo(b, 1, 32, stream_buf, nir_iadd_imm(b, stream_offset, 16));
+   nir_def *index_count = nir_channel(b, draw_data0, 0);
+   nir_def *instance_count = nir_channel(b, draw_data0, 1);
+   nir_def *first_index = nir_channel(b, draw_data0, 2);
+   nir_def *vertex_offset = nir_channel(b, draw_data0, 3);
+   nir_def *first_instance = nir_channel(b, draw_data1, 0);
 
    nir_push_if(b, nir_iand(b, nir_ine_imm(b, index_count, 0), nir_ine_imm(b, instance_count, 0)));
    {
@@ -362,25 +362,25 @@ dgc_emit_draw_indexed(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *stream
  * Emit VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV.
  */
 static void
-dgc_emit_index_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *stream_buf, nir_ssa_def *stream_base,
-                      nir_ssa_def *index_buffer_offset, nir_ssa_def *ibo_type_32, nir_ssa_def *ibo_type_8,
+dgc_emit_index_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *stream_buf, nir_def *stream_base,
+                      nir_def *index_buffer_offset, nir_def *ibo_type_32, nir_def *ibo_type_8,
                       nir_variable *index_size_var, nir_variable *max_index_count_var, const struct radv_device *device)
 {
-   nir_ssa_def *index_stream_offset = nir_iadd(b, index_buffer_offset, stream_base);
-   nir_ssa_def *data = nir_load_ssbo(b, 4, 32, stream_buf, index_stream_offset);
+   nir_def *index_stream_offset = nir_iadd(b, index_buffer_offset, stream_base);
+   nir_def *data = nir_load_ssbo(b, 4, 32, stream_buf, index_stream_offset);
 
-   nir_ssa_def *vk_index_type = nir_channel(b, data, 3);
-   nir_ssa_def *index_type = nir_bcsel(b, nir_ieq(b, vk_index_type, ibo_type_32), nir_imm_int(b, V_028A7C_VGT_INDEX_32),
-                                       nir_imm_int(b, V_028A7C_VGT_INDEX_16));
+   nir_def *vk_index_type = nir_channel(b, data, 3);
+   nir_def *index_type = nir_bcsel(b, nir_ieq(b, vk_index_type, ibo_type_32), nir_imm_int(b, V_028A7C_VGT_INDEX_32),
+                                   nir_imm_int(b, V_028A7C_VGT_INDEX_16));
    index_type = nir_bcsel(b, nir_ieq(b, vk_index_type, ibo_type_8), nir_imm_int(b, V_028A7C_VGT_INDEX_8), index_type);
 
-   nir_ssa_def *index_size = nir_iand_imm(b, nir_ushr(b, nir_imm_int(b, 0x142), nir_imul_imm(b, index_type, 4)), 0xf);
+   nir_def *index_size = nir_iand_imm(b, nir_ushr(b, nir_imm_int(b, 0x142), nir_imul_imm(b, index_type, 4)), 0xf);
    nir_store_var(b, index_size_var, index_size, 0x1);
 
-   nir_ssa_def *max_index_count = nir_udiv(b, nir_channel(b, data, 2), index_size);
+   nir_def *max_index_count = nir_udiv(b, nir_channel(b, data, 2), index_size);
    nir_store_var(b, max_index_count_var, max_index_count, 0x1);
 
-   nir_ssa_def *cmd_values[3 + 2 + 3];
+   nir_def *cmd_values[3 + 2 + 3];
 
    if (device->physical_device->rad_info.gfx_level >= GFX9) {
       unsigned opcode = PKT3_SET_UCONFIG_REG_INDEX;
@@ -396,7 +396,7 @@ dgc_emit_index_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *stream
       cmd_values[2] = dgc_get_nop_packet(b, device);
    }
 
-   nir_ssa_def *addr_upper = nir_channel(b, data, 1);
+   nir_def *addr_upper = nir_channel(b, data, 1);
    addr_upper = nir_ishr_imm(b, nir_ishl_imm(b, addr_upper, 16), 16);
 
    cmd_values[3] = nir_imm_int(b, PKT3(PKT3_INDEX_BASE, 1, 0));
@@ -412,26 +412,26 @@ dgc_emit_index_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *stream
  * Emit VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV.
  */
 static void
-dgc_emit_push_constant(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *stream_buf, nir_ssa_def *stream_base,
-                       nir_ssa_def *push_const_mask, nir_variable *upload_offset)
+dgc_emit_push_constant(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *stream_buf, nir_def *stream_base,
+                       nir_def *push_const_mask, nir_variable *upload_offset)
 {
-   nir_ssa_def *vbo_cnt = load_param8(b, vbo_cnt);
-   nir_ssa_def *const_copy = nir_ine_imm(b, load_param8(b, const_copy), 0);
-   nir_ssa_def *const_copy_size = load_param16(b, const_copy_size);
-   nir_ssa_def *const_copy_words = nir_ushr_imm(b, const_copy_size, 2);
+   nir_def *vbo_cnt = load_param8(b, vbo_cnt);
+   nir_def *const_copy = nir_ine_imm(b, load_param8(b, const_copy), 0);
+   nir_def *const_copy_size = load_param16(b, const_copy_size);
+   nir_def *const_copy_words = nir_ushr_imm(b, const_copy_size, 2);
    const_copy_words = nir_bcsel(b, const_copy, const_copy_words, nir_imm_int(b, 0));
 
    nir_variable *idx = nir_variable_create(b->shader, nir_var_shader_temp, glsl_uint_type(), "const_copy_idx");
    nir_store_var(b, idx, nir_imm_int(b, 0), 0x1);
 
-   nir_ssa_def *param_buf = radv_meta_load_descriptor(b, 0, DGC_DESC_PARAMS);
-   nir_ssa_def *param_offset = nir_imul_imm(b, vbo_cnt, 24);
-   nir_ssa_def *param_offset_offset = nir_iadd_imm(b, param_offset, MESA_VULKAN_SHADER_STAGES * 12);
-   nir_ssa_def *param_const_offset =
+   nir_def *param_buf = radv_meta_load_descriptor(b, 0, DGC_DESC_PARAMS);
+   nir_def *param_offset = nir_imul_imm(b, vbo_cnt, 24);
+   nir_def *param_offset_offset = nir_iadd_imm(b, param_offset, MESA_VULKAN_SHADER_STAGES * 12);
+   nir_def *param_const_offset =
       nir_iadd_imm(b, param_offset, MAX_PUSH_CONSTANTS_SIZE + MESA_VULKAN_SHADER_STAGES * 12);
    nir_push_loop(b);
    {
-      nir_ssa_def *cur_idx = nir_load_var(b, idx);
+      nir_def *cur_idx = nir_load_var(b, idx);
       nir_push_if(b, nir_uge(b, cur_idx, const_copy_words));
       {
          nir_jump(b, nir_jump_break);
@@ -440,14 +440,14 @@ dgc_emit_push_constant(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
 
       nir_variable *data = nir_variable_create(b->shader, nir_var_shader_temp, glsl_uint_type(), "copy_data");
 
-      nir_ssa_def *update = nir_iand(b, push_const_mask, nir_ishl(b, nir_imm_int64(b, 1), cur_idx));
+      nir_def *update = nir_iand(b, push_const_mask, nir_ishl(b, nir_imm_int64(b, 1), cur_idx));
       update = nir_bcsel(b, nir_ult_imm(b, cur_idx, 64 /* bits in push_const_mask */), update, nir_imm_int64(b, 0));
 
       nir_push_if(b, nir_ine_imm(b, update, 0));
       {
-         nir_ssa_def *stream_offset =
+         nir_def *stream_offset =
             nir_load_ssbo(b, 1, 32, param_buf, nir_iadd(b, param_offset_offset, nir_ishl_imm(b, cur_idx, 2)));
-         nir_ssa_def *new_data = nir_load_ssbo(b, 1, 32, stream_buf, nir_iadd(b, stream_base, stream_offset));
+         nir_def *new_data = nir_load_ssbo(b, 1, 32, stream_buf, nir_iadd(b, stream_base, stream_offset));
          nir_store_var(b, data, new_data, 0x1);
       }
       nir_push_else(b, NULL);
@@ -468,27 +468,27 @@ dgc_emit_push_constant(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
 
    nir_variable *shader_idx = nir_variable_create(b->shader, nir_var_shader_temp, glsl_uint_type(), "shader_idx");
    nir_store_var(b, shader_idx, nir_imm_int(b, 0), 0x1);
-   nir_ssa_def *shader_cnt = load_param16(b, push_constant_shader_cnt);
+   nir_def *shader_cnt = load_param16(b, push_constant_shader_cnt);
 
    nir_push_loop(b);
    {
-      nir_ssa_def *cur_shader_idx = nir_load_var(b, shader_idx);
+      nir_def *cur_shader_idx = nir_load_var(b, shader_idx);
       nir_push_if(b, nir_uge(b, cur_shader_idx, shader_cnt));
       {
          nir_jump(b, nir_jump_break);
       }
       nir_pop_if(b, NULL);
 
-      nir_ssa_def *reg_info =
+      nir_def *reg_info =
          nir_load_ssbo(b, 3, 32, param_buf, nir_iadd(b, param_offset, nir_imul_imm(b, cur_shader_idx, 12)));
-      nir_ssa_def *upload_sgpr = nir_ubfe_imm(b, nir_channel(b, reg_info, 0), 0, 16);
-      nir_ssa_def *inline_sgpr = nir_ubfe_imm(b, nir_channel(b, reg_info, 0), 16, 16);
-      nir_ssa_def *inline_mask = nir_pack_64_2x32(b, nir_channels(b, reg_info, 0x6));
+      nir_def *upload_sgpr = nir_ubfe_imm(b, nir_channel(b, reg_info, 0), 0, 16);
+      nir_def *inline_sgpr = nir_ubfe_imm(b, nir_channel(b, reg_info, 0), 16, 16);
+      nir_def *inline_mask = nir_pack_64_2x32(b, nir_channels(b, reg_info, 0x6));
 
       nir_push_if(b, nir_ine_imm(b, upload_sgpr, 0));
       {
-         nir_ssa_def *pkt[3] = {nir_imm_int(b, PKT3(PKT3_SET_SH_REG, 1, 0)), upload_sgpr,
-                                nir_iadd(b, load_param32(b, upload_addr), nir_load_var(b, upload_offset))};
+         nir_def *pkt[3] = {nir_imm_int(b, PKT3(PKT3_SET_SH_REG, 1, 0)), upload_sgpr,
+                            nir_iadd(b, load_param32(b, upload_addr), nir_load_var(b, upload_offset))};
 
          dgc_emit(b, cs, nir_vec(b, pkt, 3));
       }
@@ -496,23 +496,23 @@ dgc_emit_push_constant(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
 
       nir_push_if(b, nir_ine_imm(b, inline_sgpr, 0));
       {
-         nir_ssa_def *inline_len = nir_bit_count(b, inline_mask);
+         nir_def *inline_len = nir_bit_count(b, inline_mask);
          nir_store_var(b, idx, nir_imm_int(b, 0), 0x1);
 
-         nir_ssa_def *pkt[2] = {nir_pkt3(b, PKT3_SET_SH_REG, inline_len), inline_sgpr};
+         nir_def *pkt[2] = {nir_pkt3(b, PKT3_SET_SH_REG, inline_len), inline_sgpr};
 
          dgc_emit(b, cs, nir_vec(b, pkt, 2));
 
          nir_push_loop(b);
          {
-            nir_ssa_def *cur_idx = nir_load_var(b, idx);
+            nir_def *cur_idx = nir_load_var(b, idx);
             nir_push_if(b, nir_uge_imm(b, cur_idx, 64 /* bits in inline_mask */));
             {
                nir_jump(b, nir_jump_break);
             }
             nir_pop_if(b, NULL);
 
-            nir_ssa_def *l = nir_ishl(b, nir_imm_int64(b, 1), cur_idx);
+            nir_def *l = nir_ishl(b, nir_imm_int64(b, 1), cur_idx);
             nir_push_if(b, nir_ieq_imm(b, nir_iand(b, l, inline_mask), 0));
             {
                nir_store_var(b, idx, nir_iadd_imm(b, cur_idx, 1), 0x1);
@@ -522,15 +522,15 @@ dgc_emit_push_constant(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
 
             nir_variable *data = nir_variable_create(b->shader, nir_var_shader_temp, glsl_uint_type(), "copy_data");
 
-            nir_ssa_def *update = nir_iand(b, push_const_mask, nir_ishl(b, nir_imm_int64(b, 1), cur_idx));
+            nir_def *update = nir_iand(b, push_const_mask, nir_ishl(b, nir_imm_int64(b, 1), cur_idx));
             update =
                nir_bcsel(b, nir_ult_imm(b, cur_idx, 64 /* bits in push_const_mask */), update, nir_imm_int64(b, 0));
 
             nir_push_if(b, nir_ine_imm(b, update, 0));
             {
-               nir_ssa_def *stream_offset =
+               nir_def *stream_offset =
                   nir_load_ssbo(b, 1, 32, param_buf, nir_iadd(b, param_offset_offset, nir_ishl_imm(b, cur_idx, 2)));
-               nir_ssa_def *new_data = nir_load_ssbo(b, 1, 32, stream_buf, nir_iadd(b, stream_base, stream_offset));
+               nir_def *new_data = nir_load_ssbo(b, 1, 32, stream_buf, nir_iadd(b, stream_base, stream_offset));
                nir_store_var(b, data, new_data, 0x1);
             }
             nir_push_else(b, NULL);
@@ -558,10 +558,10 @@ dgc_emit_push_constant(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
  * For emitting VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV.
  */
 static void
-dgc_emit_vertex_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *stream_buf, nir_ssa_def *stream_base,
-                       nir_ssa_def *vbo_bind_mask, nir_variable *upload_offset, const struct radv_device *device)
+dgc_emit_vertex_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_def *stream_buf, nir_def *stream_base,
+                       nir_def *vbo_bind_mask, nir_variable *upload_offset, const struct radv_device *device)
 {
-   nir_ssa_def *vbo_cnt = load_param8(b, vbo_cnt);
+   nir_def *vbo_cnt = load_param8(b, vbo_cnt);
    nir_variable *vbo_idx = nir_variable_create(b->shader, nir_var_shader_temp, glsl_uint_type(), "vbo_idx");
    nir_store_var(b, vbo_idx, nir_imm_int(b, 0), 0x1);
 
@@ -573,40 +573,39 @@ dgc_emit_vertex_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
       }
       nir_pop_if(b, NULL);
 
-      nir_ssa_def *vbo_offset = nir_imul_imm(b, nir_load_var(b, vbo_idx), 16);
+      nir_def *vbo_offset = nir_imul_imm(b, nir_load_var(b, vbo_idx), 16);
       nir_variable *vbo_data = nir_variable_create(b->shader, nir_var_shader_temp, glsl_uvec4_type(), "vbo_data");
 
-      nir_ssa_def *param_buf = radv_meta_load_descriptor(b, 0, DGC_DESC_PARAMS);
+      nir_def *param_buf = radv_meta_load_descriptor(b, 0, DGC_DESC_PARAMS);
       nir_store_var(b, vbo_data, nir_load_ssbo(b, 4, 32, param_buf, vbo_offset), 0xf);
 
-      nir_ssa_def *vbo_override =
+      nir_def *vbo_override =
          nir_ine_imm(b, nir_iand(b, vbo_bind_mask, nir_ishl(b, nir_imm_int(b, 1), nir_load_var(b, vbo_idx))), 0);
       nir_push_if(b, vbo_override);
       {
-         nir_ssa_def *vbo_offset_offset =
+         nir_def *vbo_offset_offset =
             nir_iadd(b, nir_imul_imm(b, vbo_cnt, 16), nir_imul_imm(b, nir_load_var(b, vbo_idx), 8));
-         nir_ssa_def *vbo_over_data = nir_load_ssbo(b, 2, 32, param_buf, vbo_offset_offset);
-         nir_ssa_def *stream_offset =
-            nir_iadd(b, stream_base, nir_iand_imm(b, nir_channel(b, vbo_over_data, 0), 0x7FFF));
-         nir_ssa_def *stream_data = nir_load_ssbo(b, 4, 32, stream_buf, stream_offset);
+         nir_def *vbo_over_data = nir_load_ssbo(b, 2, 32, param_buf, vbo_offset_offset);
+         nir_def *stream_offset = nir_iadd(b, stream_base, nir_iand_imm(b, nir_channel(b, vbo_over_data, 0), 0x7FFF));
+         nir_def *stream_data = nir_load_ssbo(b, 4, 32, stream_buf, stream_offset);
 
-         nir_ssa_def *va = nir_pack_64_2x32(b, nir_trim_vector(b, stream_data, 2));
-         nir_ssa_def *size = nir_channel(b, stream_data, 2);
-         nir_ssa_def *stride = nir_channel(b, stream_data, 3);
+         nir_def *va = nir_pack_64_2x32(b, nir_trim_vector(b, stream_data, 2));
+         nir_def *size = nir_channel(b, stream_data, 2);
+         nir_def *stride = nir_channel(b, stream_data, 3);
 
-         nir_ssa_def *dyn_stride = nir_test_mask(b, nir_channel(b, vbo_over_data, 0), DGC_DYNAMIC_STRIDE);
-         nir_ssa_def *old_stride = nir_ubfe_imm(b, nir_channel(b, nir_load_var(b, vbo_data), 1), 16, 14);
+         nir_def *dyn_stride = nir_test_mask(b, nir_channel(b, vbo_over_data, 0), DGC_DYNAMIC_STRIDE);
+         nir_def *old_stride = nir_ubfe_imm(b, nir_channel(b, nir_load_var(b, vbo_data), 1), 16, 14);
          stride = nir_bcsel(b, dyn_stride, stride, old_stride);
 
-         nir_ssa_def *use_per_attribute_vb_descs = nir_test_mask(b, nir_channel(b, vbo_over_data, 0), 1u << 31);
+         nir_def *use_per_attribute_vb_descs = nir_test_mask(b, nir_channel(b, vbo_over_data, 0), 1u << 31);
          nir_variable *num_records =
             nir_variable_create(b->shader, nir_var_shader_temp, glsl_uint_type(), "num_records");
          nir_store_var(b, num_records, size, 0x1);
 
          nir_push_if(b, use_per_attribute_vb_descs);
          {
-            nir_ssa_def *attrib_end = nir_ubfe_imm(b, nir_channel(b, vbo_over_data, 1), 16, 16);
-            nir_ssa_def *attrib_index_offset = nir_ubfe_imm(b, nir_channel(b, vbo_over_data, 1), 0, 16);
+            nir_def *attrib_end = nir_ubfe_imm(b, nir_channel(b, vbo_over_data, 1), 16, 16);
+            nir_def *attrib_index_offset = nir_ubfe_imm(b, nir_channel(b, vbo_over_data, 1), 0, 16);
 
             nir_push_if(b, nir_ult(b, nir_load_var(b, num_records), attrib_end));
             {
@@ -619,7 +618,7 @@ dgc_emit_vertex_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
             }
             nir_push_else(b, NULL);
             {
-               nir_ssa_def *r = nir_iadd(
+               nir_def *r = nir_iadd(
                   b, nir_iadd_imm(b, nir_udiv(b, nir_isub(b, nir_load_var(b, num_records), attrib_end), stride), 1),
                   attrib_index_offset);
                nir_store_var(b, num_records, r, 0x1);
@@ -627,13 +626,13 @@ dgc_emit_vertex_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
             nir_pop_if(b, NULL);
             nir_pop_if(b, NULL);
 
-            nir_ssa_def *convert_cond = nir_ine_imm(b, nir_load_var(b, num_records), 0);
+            nir_def *convert_cond = nir_ine_imm(b, nir_load_var(b, num_records), 0);
             if (device->physical_device->rad_info.gfx_level == GFX9)
                convert_cond = nir_imm_false(b);
             else if (device->physical_device->rad_info.gfx_level != GFX8)
                convert_cond = nir_iand(b, convert_cond, nir_ieq_imm(b, stride, 0));
 
-            nir_ssa_def *new_records =
+            nir_def *new_records =
                nir_iadd(b, nir_imul(b, nir_iadd_imm(b, nir_load_var(b, num_records), -1), stride), attrib_end);
             new_records = nir_bcsel(b, convert_cond, new_records, nir_load_var(b, num_records));
             nir_store_var(b, num_records, new_records, 0x1);
@@ -643,7 +642,7 @@ dgc_emit_vertex_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
             if (device->physical_device->rad_info.gfx_level != GFX8) {
                nir_push_if(b, nir_ine_imm(b, stride, 0));
                {
-                  nir_ssa_def *r = nir_iadd(b, nir_load_var(b, num_records), nir_iadd_imm(b, stride, -1));
+                  nir_def *r = nir_iadd(b, nir_load_var(b, num_records), nir_iadd_imm(b, stride, -1));
                   nir_store_var(b, num_records, nir_udiv(b, r, stride), 0x1);
                }
                nir_pop_if(b, NULL);
@@ -651,19 +650,18 @@ dgc_emit_vertex_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
          }
          nir_pop_if(b, NULL);
 
-         nir_ssa_def *rsrc_word3 = nir_channel(b, nir_load_var(b, vbo_data), 3);
+         nir_def *rsrc_word3 = nir_channel(b, nir_load_var(b, vbo_data), 3);
          if (device->physical_device->rad_info.gfx_level >= GFX10) {
-            nir_ssa_def *oob_select = nir_bcsel(b, nir_ieq_imm(b, stride, 0), nir_imm_int(b, V_008F0C_OOB_SELECT_RAW),
-                                                nir_imm_int(b, V_008F0C_OOB_SELECT_STRUCTURED));
+            nir_def *oob_select = nir_bcsel(b, nir_ieq_imm(b, stride, 0), nir_imm_int(b, V_008F0C_OOB_SELECT_RAW),
+                                            nir_imm_int(b, V_008F0C_OOB_SELECT_STRUCTURED));
             rsrc_word3 = nir_iand_imm(b, rsrc_word3, C_008F0C_OOB_SELECT);
             rsrc_word3 = nir_ior(b, rsrc_word3, nir_ishl_imm(b, oob_select, 28));
          }
 
-         nir_ssa_def *va_hi = nir_iand_imm(b, nir_unpack_64_2x32_split_y(b, va), 0xFFFF);
+         nir_def *va_hi = nir_iand_imm(b, nir_unpack_64_2x32_split_y(b, va), 0xFFFF);
          stride = nir_iand_imm(b, stride, 0x3FFF);
-         nir_ssa_def *new_vbo_data[4] = {nir_unpack_64_2x32_split_x(b, va),
-                                         nir_ior(b, nir_ishl_imm(b, stride, 16), va_hi), nir_load_var(b, num_records),
-                                         rsrc_word3};
+         nir_def *new_vbo_data[4] = {nir_unpack_64_2x32_split_x(b, va), nir_ior(b, nir_ishl_imm(b, stride, 16), va_hi),
+                                     nir_load_var(b, num_records), rsrc_word3};
          nir_store_var(b, vbo_data, nir_vec(b, new_vbo_data, 4), 0xf);
       }
       nir_pop_if(b, NULL);
@@ -672,23 +670,23 @@ dgc_emit_vertex_buffer(nir_builder *b, struct dgc_cmdbuf *cs, nir_ssa_def *strea
        * num_records and stride are zero. This doesn't seem necessary on GFX8, GFX10 and
        * GFX10.3 but it doesn't hurt.
        */
-      nir_ssa_def *num_records = nir_channel(b, nir_load_var(b, vbo_data), 2);
-      nir_ssa_def *buf_va =
+      nir_def *num_records = nir_channel(b, nir_load_var(b, vbo_data), 2);
+      nir_def *buf_va =
          nir_iand_imm(b, nir_pack_64_2x32(b, nir_trim_vector(b, nir_load_var(b, vbo_data), 2)), (1ull << 48) - 1ull);
       nir_push_if(b, nir_ior(b, nir_ieq_imm(b, num_records, 0), nir_ieq_imm(b, buf_va, 0)));
       {
-         nir_ssa_def *new_vbo_data[4] = {nir_imm_int(b, 0), nir_imm_int(b, 0), nir_imm_int(b, 0), nir_imm_int(b, 0)};
+         nir_def *new_vbo_data[4] = {nir_imm_int(b, 0), nir_imm_int(b, 0), nir_imm_int(b, 0), nir_imm_int(b, 0)};
          nir_store_var(b, vbo_data, nir_vec(b, new_vbo_data, 4), 0xf);
       }
       nir_pop_if(b, NULL);
 
-      nir_ssa_def *upload_off = nir_iadd(b, nir_load_var(b, upload_offset), vbo_offset);
+      nir_def *upload_off = nir_iadd(b, nir_load_var(b, upload_offset), vbo_offset);
       nir_store_ssbo(b, nir_load_var(b, vbo_data), cs->descriptor, upload_off, .access = ACCESS_NON_READABLE);
       nir_store_var(b, vbo_idx, nir_iadd_imm(b, nir_load_var(b, vbo_idx), 1), 0x1);
    }
    nir_pop_loop(b, NULL);
-   nir_ssa_def *packet[3] = {nir_imm_int(b, PKT3(PKT3_SET_SH_REG, 1, 0)), load_param16(b, vbo_reg),
-                             nir_iadd(b, load_param32(b, upload_addr), nir_load_var(b, upload_offset))};
+   nir_def *packet[3] = {nir_imm_int(b, PKT3(PKT3_SET_SH_REG, 1, 0)), load_param16(b, vbo_reg),
+                         nir_iadd(b, load_param32(b, upload_addr), nir_load_var(b, upload_offset))};
 
    dgc_emit(b, cs, nir_vec(b, packet, 3));
 
@@ -701,15 +699,15 @@ build_dgc_prepare_shader(struct radv_device *dev)
    nir_builder b = radv_meta_init_shader(dev, MESA_SHADER_COMPUTE, "meta_dgc_prepare");
    b.shader->info.workgroup_size[0] = 64;
 
-   nir_ssa_def *global_id = get_global_ids(&b, 1);
+   nir_def *global_id = get_global_ids(&b, 1);
 
-   nir_ssa_def *sequence_id = global_id;
+   nir_def *sequence_id = global_id;
 
-   nir_ssa_def *cmd_buf_stride = load_param32(&b, cmd_buf_stride);
-   nir_ssa_def *sequence_count = load_param32(&b, sequence_count);
-   nir_ssa_def *stream_stride = load_param32(&b, stream_stride);
+   nir_def *cmd_buf_stride = load_param32(&b, cmd_buf_stride);
+   nir_def *sequence_count = load_param32(&b, sequence_count);
+   nir_def *stream_stride = load_param32(&b, stream_stride);
 
-   nir_ssa_def *use_count = nir_iand_imm(&b, sequence_count, 1u << 31);
+   nir_def *use_count = nir_iand_imm(&b, sequence_count, 1u << 31);
    sequence_count = nir_iand_imm(&b, sequence_count, UINT32_MAX >> 1);
 
    /* The effective number of draws is
@@ -720,8 +718,8 @@ build_dgc_prepare_shader(struct radv_device *dev)
 
    nir_push_if(&b, nir_ine_imm(&b, use_count, 0));
    {
-      nir_ssa_def *count_buf = radv_meta_load_descriptor(&b, 0, DGC_DESC_COUNT);
-      nir_ssa_def *cnt = nir_load_ssbo(&b, 1, 32, count_buf, nir_imm_int(&b, 0));
+      nir_def *count_buf = radv_meta_load_descriptor(&b, 0, DGC_DESC_COUNT);
+      nir_def *cnt = nir_load_ssbo(&b, 1, 32, count_buf, nir_imm_int(&b, 0));
       /* Must clamp count against the API count explicitly.
       * The workgroup potentially contains more threads than maxSequencesCount from the API,
        * and we have to ensure these threads write NOP packets to pad out the IB. */
@@ -739,10 +737,10 @@ build_dgc_prepare_shader(struct radv_device *dev)
          .offset = nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint_type(), "cmd_buf_offset"),
       };
       nir_store_var(&b, cmd_buf.offset, nir_imul(&b, global_id, cmd_buf_stride), 1);
-      nir_ssa_def *cmd_buf_end = nir_iadd(&b, nir_load_var(&b, cmd_buf.offset), cmd_buf_stride);
+      nir_def *cmd_buf_end = nir_iadd(&b, nir_load_var(&b, cmd_buf.offset), cmd_buf_stride);
 
-      nir_ssa_def *stream_buf = radv_meta_load_descriptor(&b, 0, DGC_DESC_STREAM);
-      nir_ssa_def *stream_base = nir_imul(&b, sequence_id, stream_stride);
+      nir_def *stream_buf = radv_meta_load_descriptor(&b, 0, DGC_DESC_STREAM);
+      nir_def *stream_base = nir_imul(&b, sequence_id, stream_stride);
 
       nir_variable *upload_offset =
          nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint_type(), "upload_offset");
@@ -750,14 +748,14 @@ build_dgc_prepare_shader(struct radv_device *dev)
          &b, upload_offset,
          nir_iadd(&b, load_param32(&b, cmd_buf_size), nir_imul(&b, load_param32(&b, upload_stride), sequence_id)), 0x1);
 
-      nir_ssa_def *vbo_bind_mask = load_param32(&b, vbo_bind_mask);
+      nir_def *vbo_bind_mask = load_param32(&b, vbo_bind_mask);
       nir_push_if(&b, nir_ine_imm(&b, vbo_bind_mask, 0));
       {
          dgc_emit_vertex_buffer(&b, &cmd_buf, stream_buf, stream_base, vbo_bind_mask, upload_offset, dev);
       }
       nir_pop_if(&b, NULL);
 
-      nir_ssa_def *push_const_mask = load_param64(&b, push_constant_mask);
+      nir_def *push_const_mask = load_param64(&b, push_constant_mask);
       nir_push_if(&b, nir_ine_imm(&b, push_const_mask, 0));
       {
          dgc_emit_push_constant(&b, &cmd_buf, stream_buf, stream_base, push_const_mask, upload_offset);
@@ -777,7 +775,7 @@ build_dgc_prepare_shader(struct radv_device *dev)
             nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint_type(), "max_index_count");
          nir_store_var(&b, max_index_count_var, load_param32(&b, max_index_count), 0x1);
 
-         nir_ssa_def *bind_index_buffer = nir_ieq_imm(&b, nir_load_var(&b, index_size_var), 0);
+         nir_def *bind_index_buffer = nir_ieq_imm(&b, nir_load_var(&b, index_size_var), 0);
          nir_push_if(&b, bind_index_buffer);
          {
             dgc_emit_index_buffer(&b, &cmd_buf, stream_buf, stream_base, load_param16(&b, index_buffer_offset),
@@ -786,8 +784,8 @@ build_dgc_prepare_shader(struct radv_device *dev)
          }
          nir_pop_if(&b, NULL);
 
-         nir_ssa_def *index_size = nir_load_var(&b, index_size_var);
-         nir_ssa_def *max_index_count = nir_load_var(&b, max_index_count_var);
+         nir_def *index_size = nir_load_var(&b, index_size_var);
+         nir_def *max_index_count = nir_load_var(&b, max_index_count_var);
 
          index_size = nir_bcsel(&b, bind_index_buffer, nir_load_var(&b, index_size_var), index_size);
          max_index_count = nir_bcsel(&b, bind_index_buffer, nir_load_var(&b, max_index_count_var), max_index_count);
@@ -803,7 +801,7 @@ build_dgc_prepare_shader(struct radv_device *dev)
          if (dev->physical_device->rad_info.gfx_ib_pad_with_type2) {
             nir_push_loop(&b);
             {
-               nir_ssa_def *curr_offset = nir_load_var(&b, cmd_buf.offset);
+               nir_def *curr_offset = nir_load_var(&b, cmd_buf.offset);
 
                nir_push_if(&b, nir_ieq(&b, curr_offset, cmd_buf_end));
                {
@@ -811,16 +809,16 @@ build_dgc_prepare_shader(struct radv_device *dev)
                }
                nir_pop_if(&b, NULL);
 
-               nir_ssa_def *pkt = nir_imm_int(&b, PKT2_NOP_PAD);
+               nir_def *pkt = nir_imm_int(&b, PKT2_NOP_PAD);
 
                dgc_emit(&b, &cmd_buf, pkt);
             }
             nir_pop_loop(&b, NULL);
          } else {
-            nir_ssa_def *cnt = nir_isub(&b, cmd_buf_end, nir_load_var(&b, cmd_buf.offset));
+            nir_def *cnt = nir_isub(&b, cmd_buf_end, nir_load_var(&b, cmd_buf.offset));
             cnt = nir_ushr_imm(&b, cnt, 2);
             cnt = nir_iadd_imm(&b, cnt, -2);
-            nir_ssa_def *pkt = nir_pkt3(&b, PKT3_NOP, cnt);
+            nir_def *pkt = nir_pkt3(&b, PKT3_NOP, cnt);
 
             dgc_emit(&b, &cmd_buf, pkt);
          }
index 1ae65d0..3f3f2c8 100644
@@ -58,8 +58,7 @@ radv_get_pipelinestat_query_size(struct radv_device *device)
 }
 
 static void
-radv_store_availability(nir_builder *b, nir_ssa_def *flags, nir_ssa_def *dst_buf, nir_ssa_def *offset,
-                        nir_ssa_def *value32)
+radv_store_availability(nir_builder *b, nir_def *flags, nir_def *dst_buf, nir_def *offset, nir_def *value32)
 {
    nir_push_if(b, nir_test_mask(b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
 
@@ -129,23 +128,23 @@ build_occlusion_query_shader(struct radv_device *device)
    uint64_t enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
    unsigned db_count = device->physical_device->rad_info.max_render_backends;
 
-   nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
+   nir_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
 
-   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
-   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
+   nir_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
+   nir_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
 
-   nir_ssa_def *global_id = get_global_ids(&b, 1);
+   nir_def *global_id = get_global_ids(&b, 1);
 
-   nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
-   nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
-   nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 8);
-   nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
+   nir_def *input_stride = nir_imm_int(&b, db_count * 16);
+   nir_def *input_base = nir_imul(&b, input_stride, global_id);
+   nir_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 8);
+   nir_def *output_base = nir_imul(&b, output_stride, global_id);
 
    nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
    nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
    nir_store_var(&b, available, nir_imm_true(&b), 0x1);
 
-   nir_ssa_def *query_result_wait = nir_test_mask(&b, flags, VK_QUERY_RESULT_WAIT_BIT);
+   nir_def *query_result_wait = nir_test_mask(&b, flags, VK_QUERY_RESULT_WAIT_BIT);
    nir_push_if(&b, query_result_wait);
    {
       /* Wait on the upper word of the last DB entry. */
@@ -156,8 +155,8 @@ build_occlusion_query_shader(struct radv_device *device)
          /* Prevent the SSBO load to be moved out of the loop. */
          nir_scoped_memory_barrier(&b, SCOPE_INVOCATION, NIR_MEMORY_ACQUIRE, nir_var_mem_ssbo);
 
-         nir_ssa_def *load_offset = nir_iadd_imm(&b, input_base, rb_avail_offset);
-         nir_ssa_def *load = nir_load_ssbo(&b, 1, 32, src_buf, load_offset, .align_mul = 4, .access = ACCESS_COHERENT);
+         nir_def *load_offset = nir_iadd_imm(&b, input_base, rb_avail_offset);
+         nir_def *load = nir_load_ssbo(&b, 1, 32, src_buf, load_offset, .align_mul = 4, .access = ACCESS_COHERENT);
 
          nir_push_if(&b, nir_ige_imm(&b, load, 0x80000000));
          {
@@ -171,24 +170,23 @@ build_occlusion_query_shader(struct radv_device *device)
 
    nir_push_loop(&b);
 
-   nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
+   nir_def *current_outer_count = nir_load_var(&b, outer_counter);
    radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));
 
-   nir_ssa_def *enabled_cond =
-      nir_iand_imm(&b, nir_ishl(&b, nir_imm_int64(&b, 1), current_outer_count), enabled_rb_mask);
+   nir_def *enabled_cond = nir_iand_imm(&b, nir_ishl(&b, nir_imm_int64(&b, 1), current_outer_count), enabled_rb_mask);
 
    nir_push_if(&b, nir_i2b(&b, enabled_cond));
 
-   nir_ssa_def *load_offset = nir_imul_imm(&b, current_outer_count, 16);
+   nir_def *load_offset = nir_imul_imm(&b, current_outer_count, 16);
    load_offset = nir_iadd(&b, input_base, load_offset);
 
-   nir_ssa_def *load = nir_load_ssbo(&b, 2, 64, src_buf, load_offset, .align_mul = 16);
+   nir_def *load = nir_load_ssbo(&b, 2, 64, src_buf, load_offset, .align_mul = 16);
 
    nir_store_var(&b, start, nir_channel(&b, load, 0), 0x1);
    nir_store_var(&b, end, nir_channel(&b, load, 1), 0x1);
 
-   nir_ssa_def *start_done = nir_ilt_imm(&b, nir_load_var(&b, start), 0);
-   nir_ssa_def *end_done = nir_ilt_imm(&b, nir_load_var(&b, end), 0);
+   nir_def *start_done = nir_ilt_imm(&b, nir_load_var(&b, start), 0);
+   nir_def *end_done = nir_ilt_imm(&b, nir_load_var(&b, end), 0);
 
    nir_push_if(&b, nir_iand(&b, start_done, end_done));
 
@@ -206,8 +204,8 @@ build_occlusion_query_shader(struct radv_device *device)
 
    /* Store the result if complete or if partial results have been requested. */
 
-   nir_ssa_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
-   nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
+   nir_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
+   nir_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
    nir_push_if(&b, nir_ior(&b, nir_test_mask(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
 
    nir_push_if(&b, result_is_64bit);
@@ -280,27 +278,27 @@ build_pipeline_statistics_query_shader(struct radv_device *device)
    nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");
    nir_variable *result = nir_local_variable_create(b.impl, glsl_int64_t_type(), "result");
 
-   nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
-   nir_ssa_def *stats_mask = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 8), .range = 12);
-   nir_ssa_def *avail_offset = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
+   nir_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
+   nir_def *stats_mask = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 8), .range = 12);
+   nir_def *avail_offset = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);
 
-   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
-   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
+   nir_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
+   nir_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
 
-   nir_ssa_def *global_id = get_global_ids(&b, 1);
+   nir_def *global_id = get_global_ids(&b, 1);
 
-   nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
-   nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
-   nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 8);
-   nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
+   nir_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
+   nir_def *input_base = nir_imul(&b, input_stride, global_id);
+   nir_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 8);
+   nir_def *output_base = nir_imul(&b, output_stride, global_id);
 
    avail_offset = nir_iadd(&b, avail_offset, nir_imul_imm(&b, global_id, 4));
 
-   nir_ssa_def *available32 = nir_load_ssbo(&b, 1, 32, src_buf, avail_offset);
+   nir_def *available32 = nir_load_ssbo(&b, 1, 32, src_buf, avail_offset);
 
-   nir_ssa_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
-   nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
-   nir_ssa_def *elem_count = nir_ushr_imm(&b, stats_mask, 16);
+   nir_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
+   nir_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
+   nir_def *elem_count = nir_ushr_imm(&b, stats_mask, 16);
 
    radv_store_availability(&b, flags, dst_buf, nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)),
                            available32);
@@ -311,12 +309,11 @@ build_pipeline_statistics_query_shader(struct radv_device *device)
    for (int i = 0; i < ARRAY_SIZE(pipeline_statistics_indices); ++i) {
       nir_push_if(&b, nir_test_mask(&b, stats_mask, BITFIELD64_BIT(i)));
 
-      nir_ssa_def *start_offset = nir_iadd_imm(&b, input_base, pipeline_statistics_indices[i] * 8);
-      nir_ssa_def *start = nir_load_ssbo(&b, 1, 64, src_buf, start_offset);
+      nir_def *start_offset = nir_iadd_imm(&b, input_base, pipeline_statistics_indices[i] * 8);
+      nir_def *start = nir_load_ssbo(&b, 1, 64, src_buf, start_offset);
 
-      nir_ssa_def *end_offset =
-         nir_iadd_imm(&b, input_base, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size);
-      nir_ssa_def *end = nir_load_ssbo(&b, 1, 64, src_buf, end_offset);
+      nir_def *end_offset = nir_iadd_imm(&b, input_base, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size);
+      nir_def *end = nir_load_ssbo(&b, 1, 64, src_buf, end_offset);
 
       nir_store_var(&b, result, nir_isub(&b, end, start), 0x1);
 
@@ -347,10 +344,10 @@ build_pipeline_statistics_query_shader(struct radv_device *device)
 
    nir_loop *loop = nir_push_loop(&b);
 
-   nir_ssa_def *current_counter = nir_load_var(&b, counter);
+   nir_def *current_counter = nir_load_var(&b, counter);
    radv_break_on_count(&b, counter, elem_count);
 
-   nir_ssa_def *output_elem = nir_iadd(&b, output_base, nir_imul(&b, elem_size, current_counter));
+   nir_def *output_elem = nir_iadd(&b, output_base, nir_imul(&b, elem_size, current_counter));
    nir_push_if(&b, result_is_64bit);
 
    nir_store_ssbo(&b, nir_imm_int64(&b, 0), dst_buf, output_elem);
@@ -415,44 +412,44 @@ build_tfb_query_shader(struct radv_device *device)
    nir_store_var(&b, result, nir_replicate(&b, nir_imm_int64(&b, 0), 2), 0x3);
    nir_store_var(&b, available, nir_imm_false(&b), 0x1);
 
-   nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
+   nir_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
 
    /* Load resources. */
-   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
-   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
+   nir_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
+   nir_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
 
    /* Compute global ID. */
-   nir_ssa_def *global_id = get_global_ids(&b, 1);
+   nir_def *global_id = get_global_ids(&b, 1);
 
    /* Compute src/dst strides. */
-   nir_ssa_def *input_stride = nir_imm_int(&b, 32);
-   nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
-   nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 8);
-   nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
+   nir_def *input_stride = nir_imm_int(&b, 32);
+   nir_def *input_base = nir_imul(&b, input_stride, global_id);
+   nir_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 8);
+   nir_def *output_base = nir_imul(&b, output_stride, global_id);
 
    /* Load data from the query pool. */
-   nir_ssa_def *load1 = nir_load_ssbo(&b, 4, 32, src_buf, input_base, .align_mul = 32);
-   nir_ssa_def *load2 = nir_load_ssbo(&b, 4, 32, src_buf, nir_iadd_imm(&b, input_base, 16), .align_mul = 16);
+   nir_def *load1 = nir_load_ssbo(&b, 4, 32, src_buf, input_base, .align_mul = 32);
+   nir_def *load2 = nir_load_ssbo(&b, 4, 32, src_buf, nir_iadd_imm(&b, input_base, 16), .align_mul = 16);
 
    /* Check if result is available. */
-   nir_ssa_def *avails[2];
+   nir_def *avails[2];
    avails[0] = nir_iand(&b, nir_channel(&b, load1, 1), nir_channel(&b, load1, 3));
    avails[1] = nir_iand(&b, nir_channel(&b, load2, 1), nir_channel(&b, load2, 3));
-   nir_ssa_def *result_is_available = nir_test_mask(&b, nir_iand(&b, avails[0], avails[1]), 0x80000000);
+   nir_def *result_is_available = nir_test_mask(&b, nir_iand(&b, avails[0], avails[1]), 0x80000000);
 
    /* Only compute result if available. */
    nir_push_if(&b, result_is_available);
 
    /* Pack values. */
-   nir_ssa_def *packed64[4];
+   nir_def *packed64[4];
    packed64[0] = nir_pack_64_2x32(&b, nir_trim_vector(&b, load1, 2));
    packed64[1] = nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load1, 2), nir_channel(&b, load1, 3)));
    packed64[2] = nir_pack_64_2x32(&b, nir_trim_vector(&b, load2, 2));
    packed64[3] = nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load2, 2), nir_channel(&b, load2, 3)));
 
    /* Compute result. */
-   nir_ssa_def *num_primitive_written = nir_isub(&b, packed64[3], packed64[1]);
-   nir_ssa_def *primitive_storage_needed = nir_isub(&b, packed64[2], packed64[0]);
+   nir_def *num_primitive_written = nir_isub(&b, packed64[3], packed64[1]);
+   nir_def *primitive_storage_needed = nir_isub(&b, packed64[2], packed64[0]);
 
    nir_store_var(&b, result, nir_vec2(&b, num_primitive_written, primitive_storage_needed), 0x3);
    nir_store_var(&b, available, nir_imm_true(&b), 0x1);
@@ -460,8 +457,8 @@ build_tfb_query_shader(struct radv_device *device)
    nir_pop_if(&b, NULL);
 
    /* Determine if result is 64 or 32 bit. */
-   nir_ssa_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
-   nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16), nir_imm_int(&b, 8));
+   nir_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
+   nir_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16), nir_imm_int(&b, 8));
 
    /* Store the result if complete or partial results have been requested. */
    nir_push_if(&b, nir_ior(&b, nir_test_mask(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
@@ -527,30 +524,30 @@ build_timestamp_query_shader(struct radv_device *device)
    nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
    nir_store_var(&b, available, nir_imm_false(&b), 0x1);
 
-   nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
+   nir_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
 
    /* Load resources. */
-   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
-   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
+   nir_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
+   nir_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
 
    /* Compute global ID. */
-   nir_ssa_def *global_id = get_global_ids(&b, 1);
+   nir_def *global_id = get_global_ids(&b, 1);
 
    /* Compute src/dst strides. */
-   nir_ssa_def *input_stride = nir_imm_int(&b, 8);
-   nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
-   nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 8);
-   nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
+   nir_def *input_stride = nir_imm_int(&b, 8);
+   nir_def *input_base = nir_imul(&b, input_stride, global_id);
+   nir_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 8);
+   nir_def *output_base = nir_imul(&b, output_stride, global_id);
 
    /* Load data from the query pool. */
-   nir_ssa_def *load = nir_load_ssbo(&b, 2, 32, src_buf, input_base, .align_mul = 8);
+   nir_def *load = nir_load_ssbo(&b, 2, 32, src_buf, input_base, .align_mul = 8);
 
    /* Pack the timestamp. */
-   nir_ssa_def *timestamp;
+   nir_def *timestamp;
    timestamp = nir_pack_64_2x32(&b, nir_trim_vector(&b, load, 2));
 
    /* Check if result is available. */
-   nir_ssa_def *result_is_available = nir_i2b(&b, nir_ine_imm(&b, timestamp, TIMESTAMP_NOT_READY));
+   nir_def *result_is_available = nir_i2b(&b, nir_ine_imm(&b, timestamp, TIMESTAMP_NOT_READY));
 
    /* Only store result if available. */
    nir_push_if(&b, result_is_available);
@@ -561,8 +558,8 @@ build_timestamp_query_shader(struct radv_device *device)
    nir_pop_if(&b, NULL);
 
    /* Determine if result is 64 or 32 bit. */
-   nir_ssa_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
-   nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
+   nir_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
+   nir_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
 
    /* Store the result if complete or partial results have been requested. */
    nir_push_if(&b, nir_ior(&b, nir_test_mask(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
@@ -639,58 +636,57 @@ build_pg_query_shader(struct radv_device *device)
    nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
    nir_store_var(&b, available, nir_imm_false(&b), 0x1);
 
-   nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);
+   nir_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);
 
    /* Load resources. */
-   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
-   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
+   nir_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
+   nir_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
 
    /* Compute global ID. */
-   nir_ssa_def *global_id = get_global_ids(&b, 1);
+   nir_def *global_id = get_global_ids(&b, 1);
 
    /* Determine if the query pool uses GDS for NGG. */
-   nir_ssa_def *uses_gds = nir_i2b(&b, nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 16), .range = 20));
+   nir_def *uses_gds = nir_i2b(&b, nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 16), .range = 20));
 
    /* Compute src/dst strides. */
-   nir_ssa_def *input_stride =
+   nir_def *input_stride =
       nir_bcsel(&b, uses_gds, nir_imm_int(&b, RADV_PGQ_STRIDE_GDS), nir_imm_int(&b, RADV_PGQ_STRIDE));
-   nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
-   nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
-   nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);
+   nir_def *input_base = nir_imul(&b, input_stride, global_id);
+   nir_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
+   nir_def *output_base = nir_imul(&b, output_stride, global_id);
 
    /* Load data from the query pool. */
-   nir_ssa_def *load1 = nir_load_ssbo(&b, 2, 32, src_buf, input_base, .align_mul = 32);
-   nir_ssa_def *load2 =
-      nir_load_ssbo(&b, 2, 32, src_buf, nir_iadd(&b, input_base, nir_imm_int(&b, 16)), .align_mul = 16);
+   nir_def *load1 = nir_load_ssbo(&b, 2, 32, src_buf, input_base, .align_mul = 32);
+   nir_def *load2 = nir_load_ssbo(&b, 2, 32, src_buf, nir_iadd(&b, input_base, nir_imm_int(&b, 16)), .align_mul = 16);
 
    /* Check if result is available. */
-   nir_ssa_def *avails[2];
+   nir_def *avails[2];
    avails[0] = nir_channel(&b, load1, 1);
    avails[1] = nir_channel(&b, load2, 1);
-   nir_ssa_def *result_is_available =
+   nir_def *result_is_available =
       nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]), nir_imm_int(&b, 0x80000000)));
 
    /* Only compute result if available. */
    nir_push_if(&b, result_is_available);
 
    /* Pack values. */
-   nir_ssa_def *packed64[2];
+   nir_def *packed64[2];
    packed64[0] = nir_pack_64_2x32(&b, nir_trim_vector(&b, load1, 2));
    packed64[1] = nir_pack_64_2x32(&b, nir_trim_vector(&b, load2, 2));
 
    /* Compute result. */
-   nir_ssa_def *primitive_storage_needed = nir_isub(&b, packed64[1], packed64[0]);
+   nir_def *primitive_storage_needed = nir_isub(&b, packed64[1], packed64[0]);
 
    nir_store_var(&b, result, primitive_storage_needed, 0x1);
 
    nir_push_if(&b, uses_gds);
    {
-      nir_ssa_def *gds_start =
+      nir_def *gds_start =
          nir_load_ssbo(&b, 1, 32, src_buf, nir_iadd(&b, input_base, nir_imm_int(&b, 32)), .align_mul = 4);
-      nir_ssa_def *gds_end =
+      nir_def *gds_end =
          nir_load_ssbo(&b, 1, 32, src_buf, nir_iadd(&b, input_base, nir_imm_int(&b, 36)), .align_mul = 4);
 
-      nir_ssa_def *ngg_gds_result = nir_isub(&b, gds_end, gds_start);
+      nir_def *ngg_gds_result = nir_isub(&b, gds_end, gds_start);
 
       nir_store_var(&b, result, nir_iadd(&b, nir_load_var(&b, result), nir_u2u64(&b, ngg_gds_result)), 0x1);
    }
@@ -701,8 +697,8 @@ build_pg_query_shader(struct radv_device *device)
    nir_pop_if(&b, NULL);
 
    /* Determine if result is 64 or 32 bit. */
-   nir_ssa_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
-   nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
+   nir_def *result_is_64bit = nir_test_mask(&b, flags, VK_QUERY_RESULT_64_BIT);
+   nir_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
 
    /* Store the result if complete or partial results have been requested. */
    nir_push_if(&b, nir_ior(&b, nir_test_mask(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT), nir_load_var(&b, available)));
index 3e94c93..781df7a 100644
@@ -29,8 +29,7 @@
 #include <llvm/Config/llvm-config.h>
 #endif
 
-static nir_ssa_def *build_node_to_addr(struct radv_device *device, nir_builder *b, nir_ssa_def *node,
-                                       bool skip_type_and);
+static nir_def *build_node_to_addr(struct radv_device *device, nir_builder *b, nir_def *node, bool skip_type_and);
 
 bool
 radv_enable_rt(const struct radv_physical_device *pdevice, bool rt_pipelines)
@@ -54,16 +53,14 @@ void
 nir_sort_hit_pair(nir_builder *b, nir_variable *var_distances, nir_variable *var_indices, uint32_t chan_1,
                   uint32_t chan_2)
 {
-   nir_ssa_def *ssa_distances = nir_load_var(b, var_distances);
-   nir_ssa_def *ssa_indices = nir_load_var(b, var_indices);
+   nir_def *ssa_distances = nir_load_var(b, var_distances);
+   nir_def *ssa_indices = nir_load_var(b, var_indices);
    /* if (distances[chan_2] < distances[chan_1]) { */
    nir_push_if(b, nir_flt(b, nir_channel(b, ssa_distances, chan_2), nir_channel(b, ssa_distances, chan_1)));
    {
       /* swap(distances[chan_2], distances[chan_1]); */
-      nir_ssa_def *new_distances[4] = {nir_ssa_undef(b, 1, 32), nir_ssa_undef(b, 1, 32), nir_ssa_undef(b, 1, 32),
-                                       nir_ssa_undef(b, 1, 32)};
-      nir_ssa_def *new_indices[4] = {nir_ssa_undef(b, 1, 32), nir_ssa_undef(b, 1, 32), nir_ssa_undef(b, 1, 32),
-                                     nir_ssa_undef(b, 1, 32)};
+      nir_def *new_distances[4] = {nir_undef(b, 1, 32), nir_undef(b, 1, 32), nir_undef(b, 1, 32), nir_undef(b, 1, 32)};
+      nir_def *new_indices[4] = {nir_undef(b, 1, 32), nir_undef(b, 1, 32), nir_undef(b, 1, 32), nir_undef(b, 1, 32)};
       new_distances[chan_2] = nir_channel(b, ssa_distances, chan_1);
       new_distances[chan_1] = nir_channel(b, ssa_distances, chan_2);
       new_indices[chan_2] = nir_channel(b, ssa_indices, chan_1);
@@ -75,9 +72,9 @@ nir_sort_hit_pair(nir_builder *b, nir_variable *var_distances, nir_variable *var
    nir_pop_if(b, NULL);
 }
 
-nir_ssa_def *
-intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_ssa_def *bvh_node, nir_ssa_def *ray_tmax,
-                               nir_ssa_def *origin, nir_ssa_def *dir, nir_ssa_def *inv_dir)
+nir_def *
+intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_def *bvh_node, nir_def *ray_tmax,
+                               nir_def *origin, nir_def *dir, nir_def *inv_dir)
 {
    const struct glsl_type *vec4_type = glsl_vector_type(GLSL_TYPE_FLOAT, 4);
    const struct glsl_type *uvec4_type = glsl_vector_type(GLSL_TYPE_UINT, 4);
@@ -85,7 +82,7 @@ intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_s
    bool old_exact = b->exact;
    b->exact = true;
 
-   nir_ssa_def *node_addr = build_node_to_addr(device, b, bvh_node, false);
+   nir_def *node_addr = build_node_to_addr(device, b, bvh_node, false);
 
    /* vec4 distances = vec4(INF, INF, INF, INF); */
    nir_variable *distances = nir_variable_create(b->shader, nir_var_shader_temp, vec4_type, "distances");
@@ -108,10 +105,10 @@ intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_s
       };
 
       /* node->children[i] -> uint */
-      nir_ssa_def *child_index = nir_build_load_global(b, 1, 32, nir_iadd_imm(b, node_addr, child_offset),
-                                                       .align_mul = 64, .align_offset = child_offset % 64);
+      nir_def *child_index = nir_build_load_global(b, 1, 32, nir_iadd_imm(b, node_addr, child_offset), .align_mul = 64,
+                                                   .align_offset = child_offset % 64);
       /* node->coords[i][0], node->coords[i][1] -> vec3 */
-      nir_ssa_def *node_coords[2] = {
+      nir_def *node_coords[2] = {
          nir_build_load_global(b, 3, 32, nir_iadd_imm(b, node_addr, coord_offsets[0]), .align_mul = 64,
                                .align_offset = coord_offsets[0] % 64),
          nir_build_load_global(b, 3, 32, nir_iadd_imm(b, node_addr, coord_offsets[1]), .align_mul = 64,
@@ -122,27 +119,27 @@ intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_s
        * We don't need to care about any other components being NaN as that is UB.
        * https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap36.html#VkAabbPositionsKHR
        */
-      nir_ssa_def *min_x = nir_channel(b, node_coords[0], 0);
-      nir_ssa_def *min_x_is_not_nan = nir_inot(b, nir_fneu(b, min_x, min_x)); /* NaN != NaN -> true */
+      nir_def *min_x = nir_channel(b, node_coords[0], 0);
+      nir_def *min_x_is_not_nan = nir_inot(b, nir_fneu(b, min_x, min_x)); /* NaN != NaN -> true */
 
       /* vec3 bound0 = (node->coords[i][0] - origin) * inv_dir; */
-      nir_ssa_def *bound0 = nir_fmul(b, nir_fsub(b, node_coords[0], origin), inv_dir);
+      nir_def *bound0 = nir_fmul(b, nir_fsub(b, node_coords[0], origin), inv_dir);
       /* vec3 bound1 = (node->coords[i][1] - origin) * inv_dir; */
-      nir_ssa_def *bound1 = nir_fmul(b, nir_fsub(b, node_coords[1], origin), inv_dir);
+      nir_def *bound1 = nir_fmul(b, nir_fsub(b, node_coords[1], origin), inv_dir);
 
       /* float tmin = max(max(min(bound0.x, bound1.x), min(bound0.y, bound1.y)), min(bound0.z,
        * bound1.z)); */
-      nir_ssa_def *tmin = nir_fmax(b,
-                                   nir_fmax(b, nir_fmin(b, nir_channel(b, bound0, 0), nir_channel(b, bound1, 0)),
-                                            nir_fmin(b, nir_channel(b, bound0, 1), nir_channel(b, bound1, 1))),
-                                   nir_fmin(b, nir_channel(b, bound0, 2), nir_channel(b, bound1, 2)));
+      nir_def *tmin = nir_fmax(b,
+                               nir_fmax(b, nir_fmin(b, nir_channel(b, bound0, 0), nir_channel(b, bound1, 0)),
+                                        nir_fmin(b, nir_channel(b, bound0, 1), nir_channel(b, bound1, 1))),
+                               nir_fmin(b, nir_channel(b, bound0, 2), nir_channel(b, bound1, 2)));
 
       /* float tmax = min(min(max(bound0.x, bound1.x), max(bound0.y, bound1.y)), max(bound0.z,
        * bound1.z)); */
-      nir_ssa_def *tmax = nir_fmin(b,
-                                   nir_fmin(b, nir_fmax(b, nir_channel(b, bound0, 0), nir_channel(b, bound1, 0)),
-                                            nir_fmax(b, nir_channel(b, bound0, 1), nir_channel(b, bound1, 1))),
-                                   nir_fmax(b, nir_channel(b, bound0, 2), nir_channel(b, bound1, 2)));
+      nir_def *tmax = nir_fmin(b,
+                               nir_fmin(b, nir_fmax(b, nir_channel(b, bound0, 0), nir_channel(b, bound1, 0)),
+                                        nir_fmax(b, nir_channel(b, bound0, 1), nir_channel(b, bound1, 1))),
+                               nir_fmax(b, nir_channel(b, bound0, 2), nir_channel(b, bound1, 2)));
 
       /* if (!isnan(node->coords[i][0].x) && tmax >= max(0.0f, tmin) && tmin < ray_tmax) { */
       nir_push_if(b, nir_iand(b, min_x_is_not_nan,
@@ -150,11 +147,11 @@ intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_s
                                        nir_flt(b, tmin, ray_tmax))));
       {
          /* child_indices[i] = node->children[i]; */
-         nir_ssa_def *new_child_indices[4] = {child_index, child_index, child_index, child_index};
+         nir_def *new_child_indices[4] = {child_index, child_index, child_index, child_index};
          nir_store_var(b, child_indices, nir_vec(b, new_child_indices, 4), 1u << i);
 
          /* distances[i] = tmin; */
-         nir_ssa_def *new_distances[4] = {tmin, tmin, tmin, tmin};
+         nir_def *new_distances[4] = {tmin, tmin, tmin, tmin};
          nir_store_var(b, distances, nir_vec(b, new_distances, 4), 1u << i);
       }
       /* } */
@@ -172,16 +169,16 @@ intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_s
    return nir_load_var(b, child_indices);
 }
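
/* An illustrative aside, not part of this change: the box test above is the
 * classic slab method, vectorized over four children. A minimal scalar
 * sketch of the same acceptance condition (the function name is made up;
 * the real code additionally treats a NaN min.x as an inactive child):
 */
#include <math.h>
#include <stdbool.h>

static bool
slab_test(const float origin[3], const float inv_dir[3], const float lo[3], const float hi[3], float ray_tmax)
{
   float tmin = -INFINITY, tmax = INFINITY;
   for (int i = 0; i < 3; ++i) {
      float b0 = (lo[i] - origin[i]) * inv_dir[i];
      float b1 = (hi[i] - origin[i]) * inv_dir[i];
      tmin = fmaxf(tmin, fminf(b0, b1)); /* latest entry over all slabs */
      tmax = fminf(tmax, fmaxf(b0, b1)); /* earliest exit over all slabs */
   }
   /* Hit iff the interval is non-empty, in front of the origin, and closer
    * than the current tmax -- the same test the NIR above emits. */
   return tmax >= fmaxf(0.0f, tmin) && tmin < ray_tmax;
}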
 
-nir_ssa_def *
-intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_ssa_def *bvh_node, nir_ssa_def *ray_tmax,
-                               nir_ssa_def *origin, nir_ssa_def *dir, nir_ssa_def *inv_dir)
+nir_def *
+intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_def *bvh_node, nir_def *ray_tmax,
+                               nir_def *origin, nir_def *dir, nir_def *inv_dir)
 {
    const struct glsl_type *vec4_type = glsl_vector_type(GLSL_TYPE_FLOAT, 4);
 
    bool old_exact = b->exact;
    b->exact = true;
 
-   nir_ssa_def *node_addr = build_node_to_addr(device, b, bvh_node, false);
+   nir_def *node_addr = build_node_to_addr(device, b, bvh_node, false);
 
    const uint32_t coord_offsets[3] = {
       offsetof(struct radv_bvh_triangle_node, coords[0]),
@@ -190,7 +187,7 @@ intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_s
    };
 
    /* node->coords[0], node->coords[1], node->coords[2] -> vec3 */
-   nir_ssa_def *node_coords[3] = {
+   nir_def *node_coords[3] = {
       nir_build_load_global(b, 3, 32, nir_iadd_imm(b, node_addr, coord_offsets[0]), .align_mul = 64,
                             .align_offset = coord_offsets[0] % 64),
       nir_build_load_global(b, 3, 32, nir_iadd_imm(b, node_addr, coord_offsets[1]), .align_mul = 64,
@@ -206,22 +203,21 @@ intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_s
     * http://jcgt.org/published/0002/01/05/paper.pdf */
 
    /* Calculate the dimension where the ray direction is largest */
-   nir_ssa_def *abs_dir = nir_fabs(b, dir);
+   nir_def *abs_dir = nir_fabs(b, dir);
 
-   nir_ssa_def *abs_dirs[3] = {
+   nir_def *abs_dirs[3] = {
       nir_channel(b, abs_dir, 0),
       nir_channel(b, abs_dir, 1),
       nir_channel(b, abs_dir, 2),
    };
   /* Find the index of the greatest component of abs_dir and use it as kz. */
-   nir_ssa_def *kz =
-      nir_bcsel(b, nir_fge(b, abs_dirs[0], abs_dirs[1]),
-                nir_bcsel(b, nir_fge(b, abs_dirs[0], abs_dirs[2]), nir_imm_int(b, 0), nir_imm_int(b, 2)),
-                nir_bcsel(b, nir_fge(b, abs_dirs[1], abs_dirs[2]), nir_imm_int(b, 1), nir_imm_int(b, 2)));
-   nir_ssa_def *kx = nir_imod_imm(b, nir_iadd_imm(b, kz, 1), 3);
-   nir_ssa_def *ky = nir_imod_imm(b, nir_iadd_imm(b, kx, 1), 3);
-   nir_ssa_def *k_indices[3] = {kx, ky, kz};
-   nir_ssa_def *k = nir_vec(b, k_indices, 3);
+   nir_def *kz = nir_bcsel(b, nir_fge(b, abs_dirs[0], abs_dirs[1]),
+                           nir_bcsel(b, nir_fge(b, abs_dirs[0], abs_dirs[2]), nir_imm_int(b, 0), nir_imm_int(b, 2)),
+                           nir_bcsel(b, nir_fge(b, abs_dirs[1], abs_dirs[2]), nir_imm_int(b, 1), nir_imm_int(b, 2)));
+   nir_def *kx = nir_imod_imm(b, nir_iadd_imm(b, kz, 1), 3);
+   nir_def *ky = nir_imod_imm(b, nir_iadd_imm(b, kx, 1), 3);
+   nir_def *k_indices[3] = {kx, ky, kz};
+   nir_def *k = nir_vec(b, k_indices, 3);
 
    /* Swap kx and ky dimensions to preserve winding order */
    unsigned swap_xy_swizzle[4] = {1, 0, 2, 3};
@@ -232,35 +228,35 @@ intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_s
    kz = nir_channel(b, k, 2);
 
    /* Calculate shear constants */
-   nir_ssa_def *sz = nir_frcp(b, nir_vector_extract(b, dir, kz));
-   nir_ssa_def *sx = nir_fmul(b, nir_vector_extract(b, dir, kx), sz);
-   nir_ssa_def *sy = nir_fmul(b, nir_vector_extract(b, dir, ky), sz);
+   nir_def *sz = nir_frcp(b, nir_vector_extract(b, dir, kz));
+   nir_def *sx = nir_fmul(b, nir_vector_extract(b, dir, kx), sz);
+   nir_def *sy = nir_fmul(b, nir_vector_extract(b, dir, ky), sz);
 
    /* Calculate vertices relative to ray origin */
-   nir_ssa_def *v_a = nir_fsub(b, node_coords[0], origin);
-   nir_ssa_def *v_b = nir_fsub(b, node_coords[1], origin);
-   nir_ssa_def *v_c = nir_fsub(b, node_coords[2], origin);
+   nir_def *v_a = nir_fsub(b, node_coords[0], origin);
+   nir_def *v_b = nir_fsub(b, node_coords[1], origin);
+   nir_def *v_c = nir_fsub(b, node_coords[2], origin);
 
    /* Perform shear and scale */
-   nir_ssa_def *ax = nir_fsub(b, nir_vector_extract(b, v_a, kx), nir_fmul(b, sx, nir_vector_extract(b, v_a, kz)));
-   nir_ssa_def *ay = nir_fsub(b, nir_vector_extract(b, v_a, ky), nir_fmul(b, sy, nir_vector_extract(b, v_a, kz)));
-   nir_ssa_def *bx = nir_fsub(b, nir_vector_extract(b, v_b, kx), nir_fmul(b, sx, nir_vector_extract(b, v_b, kz)));
-   nir_ssa_def *by = nir_fsub(b, nir_vector_extract(b, v_b, ky), nir_fmul(b, sy, nir_vector_extract(b, v_b, kz)));
-   nir_ssa_def *cx = nir_fsub(b, nir_vector_extract(b, v_c, kx), nir_fmul(b, sx, nir_vector_extract(b, v_c, kz)));
-   nir_ssa_def *cy = nir_fsub(b, nir_vector_extract(b, v_c, ky), nir_fmul(b, sy, nir_vector_extract(b, v_c, kz)));
+   nir_def *ax = nir_fsub(b, nir_vector_extract(b, v_a, kx), nir_fmul(b, sx, nir_vector_extract(b, v_a, kz)));
+   nir_def *ay = nir_fsub(b, nir_vector_extract(b, v_a, ky), nir_fmul(b, sy, nir_vector_extract(b, v_a, kz)));
+   nir_def *bx = nir_fsub(b, nir_vector_extract(b, v_b, kx), nir_fmul(b, sx, nir_vector_extract(b, v_b, kz)));
+   nir_def *by = nir_fsub(b, nir_vector_extract(b, v_b, ky), nir_fmul(b, sy, nir_vector_extract(b, v_b, kz)));
+   nir_def *cx = nir_fsub(b, nir_vector_extract(b, v_c, kx), nir_fmul(b, sx, nir_vector_extract(b, v_c, kz)));
+   nir_def *cy = nir_fsub(b, nir_vector_extract(b, v_c, ky), nir_fmul(b, sy, nir_vector_extract(b, v_c, kz)));
 
-   nir_ssa_def *u = nir_fsub(b, nir_fmul(b, cx, by), nir_fmul(b, cy, bx));
-   nir_ssa_def *v = nir_fsub(b, nir_fmul(b, ax, cy), nir_fmul(b, ay, cx));
-   nir_ssa_def *w = nir_fsub(b, nir_fmul(b, bx, ay), nir_fmul(b, by, ax));
+   nir_def *u = nir_fsub(b, nir_fmul(b, cx, by), nir_fmul(b, cy, bx));
+   nir_def *v = nir_fsub(b, nir_fmul(b, ax, cy), nir_fmul(b, ay, cx));
+   nir_def *w = nir_fsub(b, nir_fmul(b, bx, ay), nir_fmul(b, by, ax));
 
    /* Perform edge tests. */
-   nir_ssa_def *cond_back =
+   nir_def *cond_back =
       nir_ior(b, nir_ior(b, nir_flt_imm(b, u, 0.0f), nir_flt_imm(b, v, 0.0f)), nir_flt_imm(b, w, 0.0f));
 
-   nir_ssa_def *cond_front =
+   nir_def *cond_front =
       nir_ior(b, nir_ior(b, nir_fgt_imm(b, u, 0.0f), nir_fgt_imm(b, v, 0.0f)), nir_fgt_imm(b, w, 0.0f));
 
-   nir_ssa_def *cond = nir_inot(b, nir_iand(b, cond_back, cond_front));
+   nir_def *cond = nir_inot(b, nir_iand(b, cond_back, cond_front));
 
    /* If the ray is exactly on the edge where v is 0, consider it a miss.
     * This seems to correspond to what the hardware is doing.
@@ -271,21 +267,21 @@ intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_s
 
    nir_push_if(b, cond);
    {
-      nir_ssa_def *det = nir_fadd(b, u, nir_fadd(b, v, w));
+      nir_def *det = nir_fadd(b, u, nir_fadd(b, v, w));
 
-      nir_ssa_def *az = nir_fmul(b, sz, nir_vector_extract(b, v_a, kz));
-      nir_ssa_def *bz = nir_fmul(b, sz, nir_vector_extract(b, v_b, kz));
-      nir_ssa_def *cz = nir_fmul(b, sz, nir_vector_extract(b, v_c, kz));
+      nir_def *az = nir_fmul(b, sz, nir_vector_extract(b, v_a, kz));
+      nir_def *bz = nir_fmul(b, sz, nir_vector_extract(b, v_b, kz));
+      nir_def *cz = nir_fmul(b, sz, nir_vector_extract(b, v_c, kz));
 
-      nir_ssa_def *t = nir_fadd(b, nir_fadd(b, nir_fmul(b, u, az), nir_fmul(b, v, bz)), nir_fmul(b, w, cz));
+      nir_def *t = nir_fadd(b, nir_fadd(b, nir_fmul(b, u, az), nir_fmul(b, v, bz)), nir_fmul(b, w, cz));
 
-      nir_ssa_def *t_signed = nir_fmul(b, nir_fsign(b, det), t);
+      nir_def *t_signed = nir_fmul(b, nir_fsign(b, det), t);
 
-      nir_ssa_def *det_cond_front = nir_inot(b, nir_flt_imm(b, t_signed, 0.0f));
+      nir_def *det_cond_front = nir_inot(b, nir_flt_imm(b, t_signed, 0.0f));
 
       nir_push_if(b, det_cond_front);
       {
-         nir_ssa_def *indices[4] = {t, det, v, w};
+         nir_def *indices[4] = {t, det, v, w};
          nir_store_var(b, result, nir_vec(b, indices, 4), 0xf);
       }
       nir_pop_if(b, NULL);
@@ -296,35 +292,35 @@ intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_s
    return nir_load_var(b, result);
 }
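
/* An illustrative aside, not part of this change: a scalar transcription of
 * the watertight ray/triangle test built above, following the cited JCGT
 * paper (the function name is made up). Like the NIR, it returns the scaled
 * {t, det, v, w} -- the caller divides by det, cf.
 * insert_traversal_triangle_case() -- and accepts both front- and
 * back-facing hits.
 */
#include <math.h>
#include <stdbool.h>

static bool
watertight_tri(const float org[3], const float dir[3], const float p0[3], const float p1[3], const float p2[3],
               float out[4] /* t, det, v, w */)
{
   /* kz = dimension where |dir| is largest; kx and ky follow cyclically. */
   int kz = fabsf(dir[0]) >= fabsf(dir[1]) ? (fabsf(dir[0]) >= fabsf(dir[2]) ? 0 : 2)
                                           : (fabsf(dir[1]) >= fabsf(dir[2]) ? 1 : 2);
   int kx = (kz + 1) % 3, ky = (kx + 1) % 3;
   /* The swizzle above swaps kx and ky when dir[kz] is negative, preserving
    * the winding order. */
   if (dir[kz] < 0.0f) {
      int tmp = kx;
      kx = ky;
      ky = tmp;
   }

   /* Shear constants and ray-relative vertices. */
   float sz = 1.0f / dir[kz], sx = dir[kx] * sz, sy = dir[ky] * sz;
   float va[3], vb[3], vc[3];
   for (int i = 0; i < 3; ++i) {
      va[i] = p0[i] - org[i];
      vb[i] = p1[i] - org[i];
      vc[i] = p2[i] - org[i];
   }
   float ax = va[kx] - sx * va[kz], ay = va[ky] - sy * va[kz];
   float bx = vb[kx] - sx * vb[kz], by = vb[ky] - sy * vb[kz];
   float cx = vc[kx] - sx * vc[kz], cy = vc[ky] - sy * vc[kz];

   /* Scaled barycentrics; a hit needs u, v, w all >= 0 or all <= 0. */
   float u = cx * by - cy * bx;
   float v = ax * cy - ay * cx;
   float w = bx * ay - by * ax;
   if ((u < 0.0f || v < 0.0f || w < 0.0f) && (u > 0.0f || v > 0.0f || w > 0.0f))
      return false;

   float det = u + v + w;
   float t = u * (sz * va[kz]) + v * (sz * vb[kz]) + w * (sz * vc[kz]);
   /* Reject hits behind the origin: sign(det) * t must not be negative. */
   float sign = (float)((det > 0.0f) - (det < 0.0f));
   if (sign * t < 0.0f)
      return false;

   out[0] = t;
   out[1] = det;
   out[2] = v;
   out[3] = w;
   return true;
}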
 
-nir_ssa_def *
-build_addr_to_node(nir_builder *b, nir_ssa_def *addr)
+nir_def *
+build_addr_to_node(nir_builder *b, nir_def *addr)
 {
    const uint64_t bvh_size = 1ull << 42;
-   nir_ssa_def *node = nir_ushr_imm(b, addr, 3);
+   nir_def *node = nir_ushr_imm(b, addr, 3);
    return nir_iand_imm(b, node, (bvh_size - 1) << 3);
 }
 
-static nir_ssa_def *
-build_node_to_addr(struct radv_device *device, nir_builder *b, nir_ssa_def *node, bool skip_type_and)
+static nir_def *
+build_node_to_addr(struct radv_device *device, nir_builder *b, nir_def *node, bool skip_type_and)
 {
-   nir_ssa_def *addr = skip_type_and ? node : nir_iand_imm(b, node, ~7ull);
+   nir_def *addr = skip_type_and ? node : nir_iand_imm(b, node, ~7ull);
    addr = nir_ishl_imm(b, addr, 3);
    /* Assumes everything is in the top half of address space, which is true in
     * GFX9+ for now. */
    return device->physical_device->rad_info.gfx_level >= GFX9 ? nir_ior_imm(b, addr, 0xffffull << 48) : addr;
 }
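
/* An illustrative aside, not part of this change: BVH nodes are 64-byte
 * aligned, so a node id is the byte address divided by 8 with the low 3
 * bits left free for the node type (extracted as `bvh_node & 7` in
 * radv_build_ray_traversal below). The two helpers above are this round
 * trip, sketched in plain C with made-up names:
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t
addr_to_node(uint64_t addr)
{
   const uint64_t bvh_size = 1ull << 42; /* node ids span a 4 TiB window */
   return (addr >> 3) & ((bvh_size - 1) << 3);
}

static uint64_t
node_to_addr(uint64_t node, bool gfx9_plus)
{
   uint64_t addr = (node & ~7ull) << 3; /* drop the type bits, rescale */
   /* GFX9+ keeps everything in the top half of the address space. */
   return gfx9_plus ? addr | (0xffffull << 48) : addr;
}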
 
-nir_ssa_def *
-nir_build_vec3_mat_mult(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *matrix[], bool translation)
+nir_def *
+nir_build_vec3_mat_mult(nir_builder *b, nir_def *vec, nir_def *matrix[], bool translation)
 {
-   nir_ssa_def *result_components[3] = {
+   nir_def *result_components[3] = {
       nir_channel(b, matrix[0], 3),
       nir_channel(b, matrix[1], 3),
       nir_channel(b, matrix[2], 3),
    };
    for (unsigned i = 0; i < 3; ++i) {
       for (unsigned j = 0; j < 3; ++j) {
-         nir_ssa_def *v = nir_fmul(b, nir_channels(b, vec, 1 << j), nir_channels(b, matrix[i], 1 << j));
+         nir_def *v = nir_fmul(b, nir_channels(b, vec, 1 << j), nir_channels(b, matrix[i], 1 << j));
          result_components[i] = (translation || j) ? nir_fadd(b, result_components[i], v) : v;
       }
    }
@@ -332,7 +328,7 @@ nir_build_vec3_mat_mult(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *matrix[],
 }
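
/* An illustrative aside, not part of this change: each matrix[] row is a
 * vec4 holding one row of a 3x4 transform, so the builder above computes
 * the plain-C equivalent of (name made up):
 */
#include <stdbool.h>

static void
vec3_mat_mult(const float m[3][4], const float v[3], bool translation, float out[3])
{
   for (int i = 0; i < 3; ++i) {
      out[i] = m[i][0] * v[0] + m[i][1] * v[1] + m[i][2] * v[2];
      if (translation)
         out[i] += m[i][3]; /* the .w column carries the translation */
   }
}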
 
 void
-nir_build_wto_matrix_load(nir_builder *b, nir_ssa_def *instance_addr, nir_ssa_def **out)
+nir_build_wto_matrix_load(nir_builder *b, nir_def *instance_addr, nir_def **out)
 {
    unsigned offset = offsetof(struct radv_bvh_instance_node, wto_matrix);
    for (unsigned i = 0; i < 3; ++i) {
@@ -343,18 +339,18 @@ nir_build_wto_matrix_load(nir_builder *b, nir_ssa_def *instance_addr, nir_ssa_de
 
 /* When a hit is opaque, the any_hit shader is skipped for it and the hit
  * is treated as confirmed. */
-static nir_ssa_def *
-hit_is_opaque(nir_builder *b, nir_ssa_def *sbt_offset_and_flags, const struct radv_ray_flags *ray_flags,
-              nir_ssa_def *geometry_id_and_flags)
+static nir_def *
+hit_is_opaque(nir_builder *b, nir_def *sbt_offset_and_flags, const struct radv_ray_flags *ray_flags,
+              nir_def *geometry_id_and_flags)
 {
-   nir_ssa_def *opaque = nir_uge_imm(b, nir_ior(b, geometry_id_and_flags, sbt_offset_and_flags),
-                                     RADV_INSTANCE_FORCE_OPAQUE | RADV_INSTANCE_NO_FORCE_NOT_OPAQUE);
+   nir_def *opaque = nir_uge_imm(b, nir_ior(b, geometry_id_and_flags, sbt_offset_and_flags),
+                                 RADV_INSTANCE_FORCE_OPAQUE | RADV_INSTANCE_NO_FORCE_NOT_OPAQUE);
    opaque = nir_bcsel(b, ray_flags->force_opaque, nir_imm_true(b), opaque);
    opaque = nir_bcsel(b, ray_flags->force_not_opaque, nir_imm_false(b), opaque);
    return opaque;
 }
 
-nir_ssa_def *
+nir_def *
 create_bvh_descriptor(nir_builder *b)
 {
    /* We create a BVH descriptor that covers the entire memory range. That way we can always
@@ -367,25 +363,25 @@ create_bvh_descriptor(nir_builder *b)
 
 static void
 insert_traversal_triangle_case(struct radv_device *device, nir_builder *b, const struct radv_ray_traversal_args *args,
-                               const struct radv_ray_flags *ray_flags, nir_ssa_def *result, nir_ssa_def *bvh_node)
+                               const struct radv_ray_flags *ray_flags, nir_def *result, nir_def *bvh_node)
 {
    if (!args->triangle_cb)
       return;
 
    struct radv_triangle_intersection intersection;
    intersection.t = nir_channel(b, result, 0);
-   nir_ssa_def *div = nir_channel(b, result, 1);
+   nir_def *div = nir_channel(b, result, 1);
    intersection.t = nir_fdiv(b, intersection.t, div);
 
    nir_push_if(b, nir_flt(b, intersection.t, nir_load_deref(b, args->vars.tmax)));
    {
       intersection.frontface = nir_fgt_imm(b, div, 0);
-      nir_ssa_def *switch_ccw =
+      nir_def *switch_ccw =
          nir_test_mask(b, nir_load_deref(b, args->vars.sbt_offset_and_flags), RADV_INSTANCE_TRIANGLE_FLIP_FACING);
       intersection.frontface = nir_ixor(b, intersection.frontface, switch_ccw);
 
-      nir_ssa_def *not_cull = ray_flags->no_skip_triangles;
-      nir_ssa_def *not_facing_cull =
+      nir_def *not_cull = ray_flags->no_skip_triangles;
+      nir_def *not_facing_cull =
          nir_bcsel(b, intersection.frontface, ray_flags->no_cull_front, ray_flags->no_cull_back);
 
       not_cull = nir_iand(b, not_cull,
@@ -398,7 +394,7 @@ insert_traversal_triangle_case(struct radv_device *device, nir_builder *b, const
                               nir_flt(b, args->tmin, intersection.t), not_cull));
       {
          intersection.base.node_addr = build_node_to_addr(device, b, bvh_node, false);
-         nir_ssa_def *triangle_info = nir_build_load_global(
+         nir_def *triangle_info = nir_build_load_global(
             b, 2, 32,
             nir_iadd_imm(b, intersection.base.node_addr, offsetof(struct radv_bvh_triangle_node, triangle_id)));
          intersection.base.primitive_id = nir_channel(b, triangle_info, 0);
@@ -409,7 +405,7 @@ insert_traversal_triangle_case(struct radv_device *device, nir_builder *b, const
          not_cull = nir_bcsel(b, intersection.base.opaque, ray_flags->no_cull_opaque, ray_flags->no_cull_no_opaque);
          nir_push_if(b, not_cull);
          {
-            nir_ssa_def *divs[2] = {div, div};
+            nir_def *divs[2] = {div, div};
             intersection.barycentrics = nir_fdiv(b, nir_channels(b, result, 0xc), nir_vec(b, divs, 2));
 
             args->triangle_cb(b, &intersection, args, ray_flags);
@@ -423,21 +419,21 @@ insert_traversal_triangle_case(struct radv_device *device, nir_builder *b, const
 
 static void
 insert_traversal_aabb_case(struct radv_device *device, nir_builder *b, const struct radv_ray_traversal_args *args,
-                           const struct radv_ray_flags *ray_flags, nir_ssa_def *bvh_node)
+                           const struct radv_ray_flags *ray_flags, nir_def *bvh_node)
 {
    if (!args->aabb_cb)
       return;
 
    struct radv_leaf_intersection intersection;
    intersection.node_addr = build_node_to_addr(device, b, bvh_node, false);
-   nir_ssa_def *triangle_info = nir_build_load_global(
+   nir_def *triangle_info = nir_build_load_global(
       b, 2, 32, nir_iadd_imm(b, intersection.node_addr, offsetof(struct radv_bvh_aabb_node, primitive_id)));
    intersection.primitive_id = nir_channel(b, triangle_info, 0);
    intersection.geometry_id_and_flags = nir_channel(b, triangle_info, 1);
    intersection.opaque = hit_is_opaque(b, nir_load_deref(b, args->vars.sbt_offset_and_flags), ray_flags,
                                        intersection.geometry_id_and_flags);
 
-   nir_ssa_def *not_cull = nir_bcsel(b, intersection.opaque, ray_flags->no_cull_opaque, ray_flags->no_cull_no_opaque);
+   nir_def *not_cull = nir_bcsel(b, intersection.opaque, ray_flags->no_cull_opaque, ray_flags->no_cull_no_opaque);
    not_cull = nir_iand(b, not_cull, ray_flags->no_skip_aabbs);
    nir_push_if(b, not_cull);
    {
@@ -446,22 +442,22 @@ insert_traversal_aabb_case(struct radv_device *device, nir_builder *b, const str
    nir_pop_if(b, NULL);
 }
 
-static nir_ssa_def *
-fetch_parent_node(nir_builder *b, nir_ssa_def *bvh, nir_ssa_def *node)
+static nir_def *
+fetch_parent_node(nir_builder *b, nir_def *bvh, nir_def *node)
 {
-   nir_ssa_def *offset = nir_iadd_imm(b, nir_imul_imm(b, nir_udiv_imm(b, node, 8), 4), 4);
+   nir_def *offset = nir_iadd_imm(b, nir_imul_imm(b, nir_udiv_imm(b, node, 8), 4), 4);
 
    return nir_build_load_global(b, 1, 32, nir_isub(b, bvh, nir_u2u64(b, offset)), .align_mul = 4);
 }
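
/* An illustrative aside, not part of this change: with 64-byte nodes, node
 * ids step by 8, so node / 8 is the node's index. The parent ids form a
 * 32-bit array growing downward from the BVH base; link i lives at
 * base - 4 * (i + 1), which is the address computed above (name made up):
 */
#include <stdint.h>

static uint64_t
parent_link_addr(uint64_t bvh_base, uint32_t node_id)
{
   uint32_t offset = (node_id / 8) * 4 + 4;
   return bvh_base - offset;
}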
 
-nir_ssa_def *
+nir_def *
 radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struct radv_ray_traversal_args *args)
 {
    nir_variable *incomplete = nir_local_variable_create(b->impl, glsl_bool_type(), "incomplete");
    nir_store_var(b, incomplete, nir_imm_true(b), 0x1);
 
-   nir_ssa_def *desc = create_bvh_descriptor(b);
-   nir_ssa_def *vec3ones = nir_imm_vec3(b, 1.0, 1.0, 1.0);
+   nir_def *desc = create_bvh_descriptor(b);
+   nir_def *vec3ones = nir_imm_vec3(b, 1.0, 1.0, 1.0);
 
    struct radv_ray_flags ray_flags = {
       .force_opaque = nir_test_mask(b, args->flags, SpvRayFlagsOpaqueKHRMask),
@@ -487,9 +483,9 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
          }
          nir_pop_if(b, NULL);
 
-         nir_ssa_def *stack_instance_exit =
+         nir_def *stack_instance_exit =
             nir_ige(b, nir_load_deref(b, args->vars.top_stack), nir_load_deref(b, args->vars.stack));
-         nir_ssa_def *root_instance_exit =
+         nir_def *root_instance_exit =
             nir_ieq(b, nir_load_deref(b, args->vars.previous_node), nir_load_deref(b, args->vars.instance_bottom_node));
          nir_if *instance_exit = nir_push_if(b, nir_ior(b, stack_instance_exit, root_instance_exit));
          instance_exit->control = nir_selection_control_dont_flatten;
@@ -508,10 +504,10 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
          nir_push_if(
             b, nir_ige(b, nir_load_deref(b, args->vars.stack_low_watermark), nir_load_deref(b, args->vars.stack)));
          {
-            nir_ssa_def *prev = nir_load_deref(b, args->vars.previous_node);
-            nir_ssa_def *bvh_addr = build_node_to_addr(device, b, nir_load_deref(b, args->vars.bvh_base), true);
+            nir_def *prev = nir_load_deref(b, args->vars.previous_node);
+            nir_def *bvh_addr = build_node_to_addr(device, b, nir_load_deref(b, args->vars.bvh_base), true);
 
-            nir_ssa_def *parent = fetch_parent_node(b, bvh_addr, prev);
+            nir_def *parent = fetch_parent_node(b, bvh_addr, prev);
             nir_push_if(b, nir_ieq_imm(b, parent, RADV_BVH_INVALID_NODE));
             {
                nir_store_var(b, incomplete, nir_imm_false(b), 0x1);
@@ -525,9 +521,9 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
             nir_store_deref(b, args->vars.stack,
                             nir_iadd_imm(b, nir_load_deref(b, args->vars.stack), -args->stack_stride), 1);
 
-            nir_ssa_def *stack_ptr =
+            nir_def *stack_ptr =
                nir_umod_imm(b, nir_load_deref(b, args->vars.stack), args->stack_stride * args->stack_entries);
-            nir_ssa_def *bvh_node = args->stack_load_cb(b, stack_ptr, args);
+            nir_def *bvh_node = args->stack_load_cb(b, stack_ptr, args);
             nir_store_deref(b, args->vars.current_node, bvh_node, 0x1);
             nir_store_deref(b, args->vars.previous_node, nir_imm_int(b, RADV_BVH_INVALID_NODE), 0x1);
          }
@@ -539,15 +535,15 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
       }
       nir_pop_if(b, NULL);
 
-      nir_ssa_def *bvh_node = nir_load_deref(b, args->vars.current_node);
+      nir_def *bvh_node = nir_load_deref(b, args->vars.current_node);
 
-      nir_ssa_def *prev_node = nir_load_deref(b, args->vars.previous_node);
+      nir_def *prev_node = nir_load_deref(b, args->vars.previous_node);
       nir_store_deref(b, args->vars.previous_node, bvh_node, 0x1);
       nir_store_deref(b, args->vars.current_node, nir_imm_int(b, RADV_BVH_INVALID_NODE), 0x1);
 
-      nir_ssa_def *global_bvh_node = nir_iadd(b, nir_load_deref(b, args->vars.bvh_base), nir_u2u64(b, bvh_node));
+      nir_def *global_bvh_node = nir_iadd(b, nir_load_deref(b, args->vars.bvh_base), nir_u2u64(b, bvh_node));
 
-      nir_ssa_def *intrinsic_result = NULL;
+      nir_def *intrinsic_result = NULL;
       if (!radv_emulate_rt(device->physical_device)) {
          intrinsic_result =
             nir_bvh64_intersect_ray_amd(b, 32, desc, nir_unpack_64_2x32(b, global_bvh_node),
@@ -555,7 +551,7 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
                                         nir_load_deref(b, args->vars.dir), nir_load_deref(b, args->vars.inv_dir));
       }
 
-      nir_ssa_def *node_type = nir_iand_imm(b, bvh_node, 7);
+      nir_def *node_type = nir_iand_imm(b, bvh_node, 7);
       nir_push_if(b, nir_uge_imm(b, node_type, radv_bvh_node_box16));
       {
          nir_push_if(b, nir_uge_imm(b, node_type, radv_bvh_node_instance));
@@ -567,18 +563,18 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
             nir_push_else(b, NULL);
             {
                /* instance */
-               nir_ssa_def *instance_node_addr = build_node_to_addr(device, b, global_bvh_node, false);
+               nir_def *instance_node_addr = build_node_to_addr(device, b, global_bvh_node, false);
                nir_store_deref(b, args->vars.instance_addr, instance_node_addr, 1);
 
-               nir_ssa_def *instance_data =
+               nir_def *instance_data =
                   nir_build_load_global(b, 4, 32, instance_node_addr, .align_mul = 64, .align_offset = 0);
 
-               nir_ssa_def *wto_matrix[3];
+               nir_def *wto_matrix[3];
                nir_build_wto_matrix_load(b, instance_node_addr, wto_matrix);
 
                nir_store_deref(b, args->vars.sbt_offset_and_flags, nir_channel(b, instance_data, 3), 1);
 
-               nir_ssa_def *instance_and_mask = nir_channel(b, instance_data, 2);
+               nir_def *instance_and_mask = nir_channel(b, instance_data, 2);
                nir_push_if(b, nir_ult(b, nir_iand(b, instance_and_mask, args->cull_mask), nir_imm_int(b, 1 << 24)));
                {
                   nir_jump(b, nir_jump_continue);
@@ -602,7 +598,7 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
          }
          nir_push_else(b, NULL);
          {
-            nir_ssa_def *result = intrinsic_result;
+            nir_def *result = intrinsic_result;
             if (!result) {
               /* If we didn't run the intrinsic because the hardware didn't support it,
                * emulate ray/box intersection here */
@@ -614,7 +610,7 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
             /* box */
             nir_push_if(b, nir_ieq_imm(b, prev_node, RADV_BVH_INVALID_NODE));
             {
-               nir_ssa_def *new_nodes[4];
+               nir_def *new_nodes[4];
                for (unsigned i = 0; i < 4; ++i)
                   new_nodes[i] = nir_channel(b, result, i);
 
@@ -622,13 +618,13 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
                   nir_push_if(b, nir_ine_imm(b, new_nodes[i], RADV_BVH_INVALID_NODE));
 
                for (unsigned i = 4; i-- > 1;) {
-                  nir_ssa_def *stack = nir_load_deref(b, args->vars.stack);
-                  nir_ssa_def *stack_ptr = nir_umod_imm(b, stack, args->stack_entries * args->stack_stride);
+                  nir_def *stack = nir_load_deref(b, args->vars.stack);
+                  nir_def *stack_ptr = nir_umod_imm(b, stack, args->stack_entries * args->stack_stride);
                   args->stack_store_cb(b, stack_ptr, new_nodes[i], args);
                   nir_store_deref(b, args->vars.stack, nir_iadd_imm(b, stack, args->stack_stride), 1);
 
                   if (i == 1) {
-                     nir_ssa_def *new_watermark =
+                     nir_def *new_watermark =
                         nir_iadd_imm(b, nir_load_deref(b, args->vars.stack), -args->stack_entries * args->stack_stride);
                      new_watermark = nir_imax(b, nir_load_deref(b, args->vars.stack_low_watermark), new_watermark);
                      nir_store_deref(b, args->vars.stack_low_watermark, new_watermark, 0x1);
@@ -640,7 +636,7 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
             }
             nir_push_else(b, NULL);
             {
-               nir_ssa_def *next = nir_imm_int(b, RADV_BVH_INVALID_NODE);
+               nir_def *next = nir_imm_int(b, RADV_BVH_INVALID_NODE);
                for (unsigned i = 0; i < 3; ++i) {
                   next = nir_bcsel(b, nir_ieq(b, prev_node, nir_channel(b, result, i)), nir_channel(b, result, i + 1),
                                    next);
@@ -653,7 +649,7 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, const struc
       }
       nir_push_else(b, NULL);
       {
-         nir_ssa_def *result = intrinsic_result;
+         nir_def *result = intrinsic_result;
          if (!result) {
            /* If we didn't run the intrinsic because the hardware didn't support it,
             * emulate ray/tri intersection here */
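
/* An illustrative aside, not part of this change: the stack manipulation in
 * radv_build_ray_traversal() above implements a "short stack". Entries live
 * in a ring buffer of stack_entries slots, so pushes past capacity silently
 * overwrite the oldest entries and raise a low watermark; a pop at or below
 * the watermark means the entry was already overwritten, and traversal falls
 * back to walking parent links (fetch_parent_node) instead. A scalar sketch
 * with made-up names, using byte offsets as the NIR does:
 */
#include <stdbool.h>
#include <stdint.h>

struct short_stack {
   int32_t stack;         /* next free byte offset; grows by stride per push */
   int32_t low_watermark; /* lowest offset whose entry is still resident */
   int32_t stride, entries;
};

static void
push(struct short_stack *s, uint32_t value, uint32_t *mem)
{
   int32_t cap = s->stride * s->entries;
   mem[((uint32_t)s->stack % (uint32_t)cap) / 4] = value;
   s->stack += s->stride;
   /* Equivalent to the nir_imax() watermark update above. */
   if (s->stack - cap > s->low_watermark)
      s->low_watermark = s->stack - cap;
}

static bool
pop(struct short_stack *s, const uint32_t *mem, uint32_t *out)
{
   if (s->low_watermark >= s->stack)
      return false; /* resident entries exhausted: restart from parent links */
   s->stack -= s->stride;
   int32_t cap = s->stride * s->entries;
   *out = mem[((uint32_t)s->stack % (uint32_t)cap) / 4];
   return true;
}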
index 9e71e5a..949a301 100644
 void nir_sort_hit_pair(nir_builder *b, nir_variable *var_distances, nir_variable *var_indices, uint32_t chan_1,
                        uint32_t chan_2);
 
-nir_ssa_def *intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_ssa_def *bvh_node,
-                                            nir_ssa_def *ray_tmax, nir_ssa_def *origin, nir_ssa_def *dir,
-                                            nir_ssa_def *inv_dir);
+nir_def *intersect_ray_amd_software_box(struct radv_device *device, nir_builder *b, nir_def *bvh_node,
+                                        nir_def *ray_tmax, nir_def *origin, nir_def *dir, nir_def *inv_dir);
 
-nir_ssa_def *intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_ssa_def *bvh_node,
-                                            nir_ssa_def *ray_tmax, nir_ssa_def *origin, nir_ssa_def *dir,
-                                            nir_ssa_def *inv_dir);
+nir_def *intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_def *bvh_node,
+                                        nir_def *ray_tmax, nir_def *origin, nir_def *dir, nir_def *inv_dir);
 
-nir_ssa_def *build_addr_to_node(nir_builder *b, nir_ssa_def *addr);
+nir_def *build_addr_to_node(nir_builder *b, nir_def *addr);
 
-nir_ssa_def *nir_build_vec3_mat_mult(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *matrix[], bool translation);
+nir_def *nir_build_vec3_mat_mult(nir_builder *b, nir_def *vec, nir_def *matrix[], bool translation);
 
-void nir_build_wto_matrix_load(nir_builder *b, nir_ssa_def *instance_addr, nir_ssa_def **out);
+void nir_build_wto_matrix_load(nir_builder *b, nir_def *instance_addr, nir_def **out);
 
-nir_ssa_def *create_bvh_descriptor(nir_builder *b);
+nir_def *create_bvh_descriptor(nir_builder *b);
 
 struct radv_ray_traversal_args;
 
 struct radv_ray_flags {
-   nir_ssa_def *force_opaque;
-   nir_ssa_def *force_not_opaque;
-   nir_ssa_def *terminate_on_first_hit;
-   nir_ssa_def *no_cull_front;
-   nir_ssa_def *no_cull_back;
-   nir_ssa_def *no_cull_opaque;
-   nir_ssa_def *no_cull_no_opaque;
-   nir_ssa_def *no_skip_triangles;
-   nir_ssa_def *no_skip_aabbs;
+   nir_def *force_opaque;
+   nir_def *force_not_opaque;
+   nir_def *terminate_on_first_hit;
+   nir_def *no_cull_front;
+   nir_def *no_cull_back;
+   nir_def *no_cull_opaque;
+   nir_def *no_cull_no_opaque;
+   nir_def *no_skip_triangles;
+   nir_def *no_skip_aabbs;
 };
 
 struct radv_leaf_intersection {
-   nir_ssa_def *node_addr;
-   nir_ssa_def *primitive_id;
-   nir_ssa_def *geometry_id_and_flags;
-   nir_ssa_def *opaque;
+   nir_def *node_addr;
+   nir_def *primitive_id;
+   nir_def *geometry_id_and_flags;
+   nir_def *opaque;
 };
 
 typedef void (*radv_aabb_intersection_cb)(nir_builder *b, struct radv_leaf_intersection *intersection,
@@ -78,20 +76,19 @@ typedef void (*radv_aabb_intersection_cb)(nir_builder *b, struct radv_leaf_inter
 struct radv_triangle_intersection {
    struct radv_leaf_intersection base;
 
-   nir_ssa_def *t;
-   nir_ssa_def *frontface;
-   nir_ssa_def *barycentrics;
+   nir_def *t;
+   nir_def *frontface;
+   nir_def *barycentrics;
 };
 
 typedef void (*radv_triangle_intersection_cb)(nir_builder *b, struct radv_triangle_intersection *intersection,
                                               const struct radv_ray_traversal_args *args,
                                               const struct radv_ray_flags *ray_flags);
 
-typedef void (*radv_rt_stack_store_cb)(nir_builder *b, nir_ssa_def *index, nir_ssa_def *value,
+typedef void (*radv_rt_stack_store_cb)(nir_builder *b, nir_def *index, nir_def *value,
                                        const struct radv_ray_traversal_args *args);
 
-typedef nir_ssa_def *(*radv_rt_stack_load_cb)(nir_builder *b, nir_ssa_def *index,
-                                              const struct radv_ray_traversal_args *args);
+typedef nir_def *(*radv_rt_stack_load_cb)(nir_builder *b, nir_def *index, const struct radv_ray_traversal_args *args);
 
 struct radv_ray_traversal_vars {
    /* For each accepted hit, tmax will be set to the t value. This allows for automatic intersection
@@ -132,12 +129,12 @@ struct radv_ray_traversal_vars {
 };
 
 struct radv_ray_traversal_args {
-   nir_ssa_def *root_bvh_base;
-   nir_ssa_def *flags;
-   nir_ssa_def *cull_mask;
-   nir_ssa_def *origin;
-   nir_ssa_def *tmin;
-   nir_ssa_def *dir;
+   nir_def *root_bvh_base;
+   nir_def *flags;
+   nir_def *cull_mask;
+   nir_def *origin;
+   nir_def *tmin;
+   nir_def *dir;
 
    struct radv_ray_traversal_vars vars;
 
@@ -164,7 +161,7 @@ struct radv_ray_traversal_args {
 * rayQueryProceedEXT. Traversal will only be considered incomplete if one of the specified
  * callbacks breaks out of the traversal loop.
  */
-nir_ssa_def *radv_build_ray_traversal(struct radv_device *device, nir_builder *b,
-                                      const struct radv_ray_traversal_args *args);
+nir_def *radv_build_ray_traversal(struct radv_device *device, nir_builder *b,
+                                  const struct radv_ray_traversal_args *args);
 
 #endif
index 0a87098..7542e32 100644
@@ -44,7 +44,7 @@ lower_rt_derefs(nir_shader *shader)
 
    nir_builder b = nir_builder_at(nir_before_cf_list(&impl->body));
 
-   nir_ssa_def *arg_offset = nir_load_rt_arg_scratch_offset_amd(&b);
+   nir_def *arg_offset = nir_load_rt_arg_scratch_offset_amd(&b);
 
    nir_foreach_block (block, impl) {
       nir_foreach_instr_safe (instr, block) {
@@ -62,7 +62,7 @@ lower_rt_derefs(nir_shader *shader)
             b.cursor = nir_before_instr(&deref->instr);
             nir_deref_instr *replacement =
                nir_build_deref_cast(&b, arg_offset, nir_var_function_temp, deref->var->type, 0);
-            nir_ssa_def_rewrite_uses(&deref->dest.ssa, &replacement->dest.ssa);
+            nir_def_rewrite_uses(&deref->dest.ssa, &replacement->dest.ssa);
             nir_instr_remove(&deref->instr);
          }
       }
@@ -239,25 +239,25 @@ enum sbt_entry {
    SBT_ANY_HIT_IDX = offsetof(struct radv_pipeline_group_handle, any_hit_index),
 };
 
-static nir_ssa_def *
-get_sbt_ptr(nir_builder *b, nir_ssa_def *idx, enum sbt_type binding)
+static nir_def *
+get_sbt_ptr(nir_builder *b, nir_def *idx, enum sbt_type binding)
 {
-   nir_ssa_def *desc_base_addr = nir_load_sbt_base_amd(b);
+   nir_def *desc_base_addr = nir_load_sbt_base_amd(b);
 
-   nir_ssa_def *desc = nir_pack_64_2x32(b, nir_load_smem_amd(b, 2, desc_base_addr, nir_imm_int(b, binding)));
+   nir_def *desc = nir_pack_64_2x32(b, nir_load_smem_amd(b, 2, desc_base_addr, nir_imm_int(b, binding)));
 
-   nir_ssa_def *stride_offset = nir_imm_int(b, binding + (binding == SBT_RAYGEN ? 8 : 16));
-   nir_ssa_def *stride = nir_pack_64_2x32(b, nir_load_smem_amd(b, 2, desc_base_addr, stride_offset));
+   nir_def *stride_offset = nir_imm_int(b, binding + (binding == SBT_RAYGEN ? 8 : 16));
+   nir_def *stride = nir_pack_64_2x32(b, nir_load_smem_amd(b, 2, desc_base_addr, stride_offset));
 
    return nir_iadd(b, desc, nir_imul(b, nir_u2u64(b, idx), stride));
 }
 
 static void
-load_sbt_entry(nir_builder *b, const struct rt_variables *vars, nir_ssa_def *idx, enum sbt_type binding,
+load_sbt_entry(nir_builder *b, const struct rt_variables *vars, nir_def *idx, enum sbt_type binding,
                enum sbt_entry offset)
 {
-   nir_ssa_def *addr = get_sbt_ptr(b, idx, binding);
-   nir_ssa_def *load_addr = nir_iadd_imm(b, addr, offset);
+   nir_def *addr = get_sbt_ptr(b, idx, binding);
+   nir_def *load_addr = nir_iadd_imm(b, addr, offset);
 
    if (offset == SBT_RECURSIVE_PTR) {
       nir_store_var(b, vars->shader_va, nir_build_load_global(b, 1, 64, load_addr), 1);
@@ -265,7 +265,7 @@ load_sbt_entry(nir_builder *b, const struct rt_variables *vars, nir_ssa_def *idx
       nir_store_var(b, vars->idx, nir_build_load_global(b, 1, 32, load_addr), 1);
    }
 
-   nir_ssa_def *record_addr = nir_iadd_imm(b, addr, RADV_RT_HANDLE_SIZE);
+   nir_def *record_addr = nir_iadd_imm(b, addr, RADV_RT_HANDLE_SIZE);
    nir_store_var(b, vars->shader_record_ptr, record_addr, 1);
 }
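
/* An illustrative aside, not part of this change: the SBT addressing that
 * get_sbt_ptr() and load_sbt_entry() emit, in plain C (names made up). Each
 * region is a (base, stride) pair read from the descriptor; an entry starts
 * with the shader handle, and the shader record follows at
 * RADV_RT_HANDLE_SIZE bytes:
 */
#include <stdint.h>

static uint64_t
sbt_entry_addr(uint64_t base, uint64_t stride, uint32_t idx)
{
   return base + (uint64_t)idx * stride;
}

static uint64_t
sbt_record_addr(uint64_t entry_addr, uint64_t handle_size /* RADV_RT_HANDLE_SIZE */)
{
   return entry_addr + handle_size;
}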
 
@@ -282,12 +282,12 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
          case nir_instr_type_intrinsic: {
             b_shader.cursor = nir_before_instr(instr);
             nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-            nir_ssa_def *ret = NULL;
+            nir_def *ret = NULL;
 
             switch (intr->intrinsic) {
             case nir_intrinsic_rt_execute_callable: {
                uint32_t size = align(nir_intrinsic_stack_size(intr), 16);
-               nir_ssa_def *ret_ptr = nir_load_resume_shader_address_amd(&b_shader, nir_intrinsic_call_idx(intr));
+               nir_def *ret_ptr = nir_load_resume_shader_address_amd(&b_shader, nir_intrinsic_call_idx(intr));
                ret_ptr = nir_ior_imm(&b_shader, ret_ptr, radv_get_rt_priority(shader->info.stage));
 
                nir_store_var(&b_shader, vars->stack_ptr,
@@ -305,7 +305,7 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
             }
             case nir_intrinsic_rt_trace_ray: {
                uint32_t size = align(nir_intrinsic_stack_size(intr), 16);
-               nir_ssa_def *ret_ptr = nir_load_resume_shader_address_amd(&b_shader, nir_intrinsic_call_idx(intr));
+               nir_def *ret_ptr = nir_load_resume_shader_address_amd(&b_shader, nir_intrinsic_call_idx(intr));
                ret_ptr = nir_ior_imm(&b_shader, ret_ptr, radv_get_rt_priority(shader->info.stage));
 
                nir_store_var(&b_shader, vars->stack_ptr,
@@ -385,8 +385,8 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
                break;
             }
             case nir_intrinsic_load_ray_instance_custom_index: {
-               nir_ssa_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
-               nir_ssa_def *custom_instance_and_mask = nir_build_load_global(
+               nir_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
+               nir_def *custom_instance_and_mask = nir_build_load_global(
                   &b_shader, 1, 32,
                   nir_iadd_imm(&b_shader, instance_node_addr,
                                offsetof(struct radv_bvh_instance_node, custom_instance_and_mask)));
@@ -403,7 +403,7 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
                break;
             }
             case nir_intrinsic_load_instance_id: {
-               nir_ssa_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
+               nir_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
                ret = nir_build_load_global(
                   &b_shader, 1, 32,
                   nir_iadd_imm(&b_shader, instance_node_addr, offsetof(struct radv_bvh_instance_node, instance_id)));
@@ -419,11 +419,11 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
             }
             case nir_intrinsic_load_ray_world_to_object: {
                unsigned c = nir_intrinsic_column(intr);
-               nir_ssa_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
-               nir_ssa_def *wto_matrix[3];
+               nir_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
+               nir_def *wto_matrix[3];
                nir_build_wto_matrix_load(&b_shader, instance_node_addr, wto_matrix);
 
-               nir_ssa_def *vals[3];
+               nir_def *vals[3];
                for (unsigned i = 0; i < 3; ++i)
                   vals[i] = nir_channel(&b_shader, wto_matrix[i], c);
 
@@ -432,8 +432,8 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
             }
             case nir_intrinsic_load_ray_object_to_world: {
                unsigned c = nir_intrinsic_column(intr);
-               nir_ssa_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
-               nir_ssa_def *rows[3];
+               nir_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
+               nir_def *rows[3];
                for (unsigned r = 0; r < 3; ++r)
                   rows[r] =
                      nir_build_load_global(&b_shader, 4, 32,
@@ -444,15 +444,15 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
                break;
             }
             case nir_intrinsic_load_ray_object_origin: {
-               nir_ssa_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
-               nir_ssa_def *wto_matrix[3];
+               nir_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
+               nir_def *wto_matrix[3];
                nir_build_wto_matrix_load(&b_shader, instance_node_addr, wto_matrix);
                ret = nir_build_vec3_mat_mult(&b_shader, nir_load_var(&b_shader, vars->origin), wto_matrix, true);
                break;
             }
             case nir_intrinsic_load_ray_object_direction: {
-               nir_ssa_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
-               nir_ssa_def *wto_matrix[3];
+               nir_def *instance_node_addr = nir_load_var(&b_shader, vars->instance_addr);
+               nir_def *wto_matrix[3];
                nir_build_wto_matrix_load(&b_shader, instance_node_addr, wto_matrix);
                ret = nir_build_vec3_mat_mult(&b_shader, nir_load_var(&b_shader, vars->direction), wto_matrix, false);
                break;
@@ -521,8 +521,8 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
                nir_store_var(&b_shader, vars->hit_kind, intr->src[5].ssa, 0x1);
                load_sbt_entry(&b_shader, vars, intr->src[0].ssa, SBT_HIT, SBT_RECURSIVE_PTR);
 
-               nir_ssa_def *should_return = nir_test_mask(&b_shader, nir_load_var(&b_shader, vars->cull_mask_and_flags),
-                                                          SpvRayFlagsSkipClosestHitShaderKHRMask);
+               nir_def *should_return = nir_test_mask(&b_shader, nir_load_var(&b_shader, vars->cull_mask_and_flags),
+                                                      SpvRayFlagsSkipClosestHitShaderKHRMask);
 
                if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR)) {
                   should_return = nir_ior(&b_shader, should_return,
@@ -538,12 +538,12 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
             }
             case nir_intrinsic_execute_miss_amd: {
                nir_store_var(&b_shader, vars->tmax, intr->src[0].ssa, 0x1);
-               nir_ssa_def *undef = nir_ssa_undef(&b_shader, 1, 32);
+               nir_def *undef = nir_undef(&b_shader, 1, 32);
                nir_store_var(&b_shader, vars->primitive_id, undef, 0x1);
-               nir_store_var(&b_shader, vars->instance_addr, nir_ssa_undef(&b_shader, 1, 64), 0x1);
+               nir_store_var(&b_shader, vars->instance_addr, nir_undef(&b_shader, 1, 64), 0x1);
                nir_store_var(&b_shader, vars->geometry_id_and_flags, undef, 0x1);
                nir_store_var(&b_shader, vars->hit_kind, undef, 0x1);
-               nir_ssa_def *miss_index = nir_load_var(&b_shader, vars->miss_index);
+               nir_def *miss_index = nir_load_var(&b_shader, vars->miss_index);
                load_sbt_entry(&b_shader, vars, miss_index, SBT_MISS, SBT_RECURSIVE_PTR);
 
                if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR)) {
@@ -560,7 +560,7 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
             }
 
             if (ret)
-               nir_ssa_def_rewrite_uses(&intr->dest.ssa, ret);
+               nir_def_rewrite_uses(&intr->dest.ssa, ret);
             nir_instr_remove(instr);
             break;
          }
@@ -603,7 +603,7 @@ lower_hit_attrib_deref(nir_builder *b, nir_instr *instr, void *data)
       uint32_t num_components = intrin->dest.ssa.num_components;
       uint32_t bit_size = intrin->dest.ssa.bit_size;
 
-      nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS];
+      nir_def *components[NIR_MAX_VEC_COMPONENTS];
 
       for (uint32_t comp = 0; comp < num_components; comp++) {
          uint32_t offset = deref->var->data.driver_location + comp * bit_size / 8;
@@ -626,9 +626,9 @@ lower_hit_attrib_deref(nir_builder *b, nir_instr *instr, void *data)
          }
       }
 
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_vec(b, components, num_components));
+      nir_def_rewrite_uses(&intrin->dest.ssa, nir_vec(b, components, num_components));
    } else {
-      nir_ssa_def *value = intrin->src[1].ssa;
+      nir_def *value = intrin->src[1].ssa;
       uint32_t num_components = value->num_components;
       uint32_t bit_size = value->bit_size;
 
@@ -637,7 +637,7 @@ lower_hit_attrib_deref(nir_builder *b, nir_instr *instr, void *data)
          uint32_t base = offset / 4;
          uint32_t comp_offset = offset % 4;
 
-         nir_ssa_def *component = nir_channel(b, value, comp);
+         nir_def *component = nir_channel(b, value, comp);
 
          if (bit_size == 64) {
             nir_store_hit_attrib_amd(b, nir_unpack_64_2x32_split_x(b, component), .base = base);
@@ -645,14 +645,14 @@ lower_hit_attrib_deref(nir_builder *b, nir_instr *instr, void *data)
          } else if (bit_size == 32) {
             nir_store_hit_attrib_amd(b, component, .base = base);
          } else if (bit_size == 16) {
-            nir_ssa_def *prev = nir_unpack_32_2x16(b, nir_load_hit_attrib_amd(b, .base = base));
-            nir_ssa_def *components[2];
+            nir_def *prev = nir_unpack_32_2x16(b, nir_load_hit_attrib_amd(b, .base = base));
+            nir_def *components[2];
             for (uint32_t word = 0; word < 2; word++)
                components[word] = (word == comp_offset / 2) ? nir_channel(b, value, comp) : nir_channel(b, prev, word);
             nir_store_hit_attrib_amd(b, nir_pack_32_2x16(b, nir_vec(b, components, 2)), .base = base);
          } else if (bit_size == 8) {
-            nir_ssa_def *prev = nir_unpack_bits(b, nir_load_hit_attrib_amd(b, .base = base), 8);
-            nir_ssa_def *components[4];
+            nir_def *prev = nir_unpack_bits(b, nir_load_hit_attrib_amd(b, .base = base), 8);
+            nir_def *components[4];
             for (uint32_t byte = 0; byte < 4; byte++)
                components[byte] = (byte == comp_offset) ? nir_channel(b, value, comp) : nir_channel(b, prev, byte);
             nir_store_hit_attrib_amd(b, nir_pack_32_4x8(b, nir_vec(b, components, 4)), .base = base);
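
/* An illustrative aside, not part of this change: the 16- and 8-bit stores
 * above are read-modify-writes into a 32-bit attribute slot. The 16-bit
 * case in scalar form (comp_offset is the byte offset within the slot, as
 * in the code above; the function name is made up):
 */
#include <stdint.h>

static uint32_t
insert_half(uint32_t slot, uint16_t value, unsigned comp_offset)
{
   unsigned word = comp_offset / 2; /* which 16-bit half to replace */
   uint32_t mask = 0xffffu << (word * 16);
   return (slot & ~mask) | ((uint32_t)value << (word * 16));
}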
@@ -703,19 +703,19 @@ lower_hit_attribs(nir_shader *shader, nir_variable **hit_attribs, uint32_t workg
 
          b.cursor = nir_after_instr(instr);
 
-         nir_ssa_def *offset;
+         nir_def *offset;
          if (!hit_attribs)
             offset = nir_imul_imm(
                &b, nir_iadd_imm(&b, nir_load_local_invocation_index(&b), nir_intrinsic_base(intrin) * workgroup_size),
                sizeof(uint32_t));
 
          if (intrin->intrinsic == nir_intrinsic_load_hit_attrib_amd) {
-            nir_ssa_def *ret;
+            nir_def *ret;
             if (hit_attribs)
                ret = nir_load_var(&b, hit_attribs[nir_intrinsic_base(intrin)]);
             else
                ret = nir_load_shared(&b, 1, 32, offset, .base = 0, .align_mul = 4);
-            nir_ssa_def_rewrite_uses(nir_instr_ssa_def(instr), ret);
+            nir_def_rewrite_uses(nir_instr_ssa_def(instr), ret);
          } else {
             if (hit_attribs)
                nir_store_var(&b, hit_attribs[nir_intrinsic_base(intrin)], intrin->src->ssa, 0x1);
@@ -772,7 +772,7 @@ inline_constants(nir_shader *dst, nir_shader *src)
 }
 
 static void
-insert_rt_case(nir_builder *b, nir_shader *shader, struct rt_variables *vars, nir_ssa_def *idx, uint32_t call_idx_base,
+insert_rt_case(nir_builder *b, nir_shader *shader, struct rt_variables *vars, nir_def *idx, uint32_t call_idx_base,
                uint32_t call_idx, unsigned stage_idx, struct radv_ray_tracing_stage *stages)
 {
    uint32_t workgroup_size =
@@ -880,10 +880,10 @@ lower_any_hit_for_intersection(nir_shader *any_hit)
    nir_builder build = nir_builder_at(nir_before_cf_list(&impl->body));
    nir_builder *b = &build;
 
-   nir_ssa_def *commit_ptr = nir_load_param(b, 0);
-   nir_ssa_def *hit_t = nir_load_param(b, 1);
-   nir_ssa_def *hit_kind = nir_load_param(b, 2);
-   nir_ssa_def *scratch_offset = nir_load_param(b, 3);
+   nir_def *commit_ptr = nir_load_param(b, 0);
+   nir_def *hit_t = nir_load_param(b, 1);
+   nir_def *hit_kind = nir_load_param(b, 2);
+   nir_def *scratch_offset = nir_load_param(b, 3);
 
    nir_deref_instr *commit = nir_build_deref_cast(b, commit_ptr, nir_var_function_temp, glsl_bool_type(), 0);
 
@@ -913,12 +913,12 @@ lower_any_hit_for_intersection(nir_shader *any_hit)
                break;
 
             case nir_intrinsic_load_ray_t_max:
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa, hit_t);
+               nir_def_rewrite_uses(&intrin->dest.ssa, hit_t);
                nir_instr_remove(&intrin->instr);
                break;
 
             case nir_intrinsic_load_ray_hit_kind:
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa, hit_kind);
+               nir_def_rewrite_uses(&intrin->dest.ssa, hit_kind);
                nir_instr_remove(&intrin->instr);
                break;
 
@@ -939,8 +939,8 @@ lower_any_hit_for_intersection(nir_shader *any_hit)
                break;
             case nir_intrinsic_load_rt_arg_scratch_offset_amd:
                b->cursor = nir_after_instr(instr);
-               nir_ssa_def *arg_offset = nir_isub(b, &intrin->dest.ssa, scratch_offset);
-               nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa, arg_offset, arg_offset->parent_instr);
+               nir_def *arg_offset = nir_isub(b, &intrin->dest.ssa, scratch_offset);
+               nir_def_rewrite_uses_after(&intrin->dest.ssa, arg_offset, arg_offset->parent_instr);
                break;
 
             default:
@@ -1012,10 +1012,10 @@ nir_lower_intersection_shader(nir_shader *intersection, nir_shader *any_hit)
             continue;
 
          b->cursor = nir_instr_remove(&intrin->instr);
-         nir_ssa_def *hit_t = nir_ssa_for_src(b, intrin->src[0], 1);
-         nir_ssa_def *hit_kind = nir_ssa_for_src(b, intrin->src[1], 1);
-         nir_ssa_def *min_t = nir_load_ray_t_min(b);
-         nir_ssa_def *max_t = nir_load_ray_t_max(b);
+         nir_def *hit_t = nir_ssa_for_src(b, intrin->src[0], 1);
+         nir_def *hit_kind = nir_ssa_for_src(b, intrin->src[1], 1);
+         nir_def *min_t = nir_load_ray_t_min(b);
+         nir_def *max_t = nir_load_ray_t_max(b);
 
          /* bool commit_tmp = false; */
          nir_variable *commit_tmp = nir_local_variable_create(impl, glsl_bool_type(), "commit_tmp");
@@ -1029,7 +1029,7 @@ nir_lower_intersection_shader(nir_shader *intersection, nir_shader *any_hit)
             if (any_hit_impl != NULL) {
                nir_push_if(b, nir_inot(b, nir_load_intersection_opaque_amd(b)));
                {
-                  nir_ssa_def *params[] = {
+                  nir_def *params[] = {
                      &nir_build_deref_var(b, commit_tmp)->dest.ssa,
                      hit_t,
                      hit_kind,
@@ -1048,8 +1048,8 @@ nir_lower_intersection_shader(nir_shader *intersection, nir_shader *any_hit)
          }
          nir_pop_if(b, NULL);
 
-         nir_ssa_def *accepted = nir_load_var(b, commit_tmp);
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, accepted);
+         nir_def *accepted = nir_load_var(b, commit_tmp);
+         nir_def_rewrite_uses(&intrin->dest.ssa, accepted);
       }
    }
    nir_metadata_preserve(impl, nir_metadata_none);
@@ -1124,7 +1124,7 @@ static void
 visit_any_hit_shaders(struct radv_device *device, nir_builder *b, struct traversal_data *data,
                       struct rt_variables *vars)
 {
-   nir_ssa_def *sbt_idx = nir_load_var(b, vars->idx);
+   nir_def *sbt_idx = nir_load_var(b, vars->idx);
 
    if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR))
       nir_push_if(b, nir_ine_imm(b, sbt_idx, 0));
@@ -1170,16 +1170,16 @@ handle_candidate_triangle(nir_builder *b, struct radv_triangle_intersection *int
 {
    struct traversal_data *data = args->data;
 
-   nir_ssa_def *geometry_id = nir_iand_imm(b, intersection->base.geometry_id_and_flags, 0xfffffff);
-   nir_ssa_def *sbt_idx =
+   nir_def *geometry_id = nir_iand_imm(b, intersection->base.geometry_id_and_flags, 0xfffffff);
+   nir_def *sbt_idx =
       nir_iadd(b,
                nir_iadd(b, nir_load_var(b, data->vars->sbt_offset),
                         nir_iand_imm(b, nir_load_var(b, data->trav_vars->sbt_offset_and_flags), 0xffffff)),
                nir_imul(b, nir_load_var(b, data->vars->sbt_stride), geometry_id));
 
-   nir_ssa_def *hit_kind = nir_bcsel(b, intersection->frontface, nir_imm_int(b, 0xFE), nir_imm_int(b, 0xFF));
+   nir_def *hit_kind = nir_bcsel(b, intersection->frontface, nir_imm_int(b, 0xFE), nir_imm_int(b, 0xFF));
 
-   nir_ssa_def *prev_barycentrics = nir_load_var(b, data->barycentrics);
+   nir_def *prev_barycentrics = nir_load_var(b, data->barycentrics);
    nir_store_var(b, data->barycentrics, intersection->barycentrics, 0x3);
 
    nir_store_var(b, data->vars->ahit_accept, nir_imm_true(b), 0x1);
@@ -1217,7 +1217,7 @@ handle_candidate_triangle(nir_builder *b, struct radv_triangle_intersection *int
    nir_store_var(b, data->vars->idx, sbt_idx, 1);
    nir_store_var(b, data->trav_vars->hit, nir_imm_true(b), 1);
 
-   nir_ssa_def *ray_terminated = nir_load_var(b, data->vars->ahit_terminate);
+   nir_def *ray_terminated = nir_load_var(b, data->vars->ahit_terminate);
    nir_push_if(b, nir_ior(b, ray_flags->terminate_on_first_hit, ray_terminated));
    {
       nir_jump(b, nir_jump_break);
@@ -1231,8 +1231,8 @@ handle_candidate_aabb(nir_builder *b, struct radv_leaf_intersection *intersectio
 {
    struct traversal_data *data = args->data;
 
-   nir_ssa_def *geometry_id = nir_iand_imm(b, intersection->geometry_id_and_flags, 0xfffffff);
-   nir_ssa_def *sbt_idx =
+   nir_def *geometry_id = nir_iand_imm(b, intersection->geometry_id_and_flags, 0xfffffff);
+   nir_def *sbt_idx =
       nir_iadd(b,
                nir_iadd(b, nir_load_var(b, data->vars->sbt_offset),
                         nir_iand_imm(b, nir_load_var(b, data->trav_vars->sbt_offset_and_flags), 0xffffff)),
@@ -1317,8 +1317,8 @@ handle_candidate_aabb(nir_builder *b, struct radv_leaf_intersection *intersectio
       nir_store_var(b, data->vars->idx, sbt_idx, 1);
       nir_store_var(b, data->trav_vars->hit, nir_imm_true(b), 1);
 
-      nir_ssa_def *terminate_on_first_hit = nir_test_mask(b, args->flags, SpvRayFlagsTerminateOnFirstHitKHRMask);
-      nir_ssa_def *ray_terminated = nir_load_var(b, data->vars->ahit_terminate);
+      nir_def *terminate_on_first_hit = nir_test_mask(b, args->flags, SpvRayFlagsTerminateOnFirstHitKHRMask);
+      nir_def *ray_terminated = nir_load_var(b, data->vars->ahit_terminate);
       nir_push_if(b, nir_ior(b, terminate_on_first_hit, ray_terminated));
       {
          nir_jump(b, nir_jump_break);
@@ -1329,13 +1329,13 @@ handle_candidate_aabb(nir_builder *b, struct radv_leaf_intersection *intersectio
 }
 
 static void
-store_stack_entry(nir_builder *b, nir_ssa_def *index, nir_ssa_def *value, const struct radv_ray_traversal_args *args)
+store_stack_entry(nir_builder *b, nir_def *index, nir_def *value, const struct radv_ray_traversal_args *args)
 {
    nir_store_shared(b, value, index, .base = 0, .align_mul = 4);
 }
 
-static nir_ssa_def *
-load_stack_entry(nir_builder *b, nir_ssa_def *index, const struct radv_ray_traversal_args *args)
+static nir_def *
+load_stack_entry(nir_builder *b, nir_def *index, const struct radv_ray_traversal_args *args)
 {
    return nir_load_shared(b, 1, 32, index, .base = 0, .align_mul = 4);
 }
@@ -1366,8 +1366,8 @@ radv_build_traversal_shader(struct radv_device *device, struct radv_ray_tracing_
    barycentrics->data.driver_location = 0;
 
    /* initialize trace_ray arguments */
-   nir_ssa_def *accel_struct = nir_load_accel_struct_amd(&b);
-   nir_ssa_def *cull_mask_and_flags = nir_load_cull_mask_and_flags_amd(&b);
+   nir_def *accel_struct = nir_load_accel_struct_amd(&b);
+   nir_def *cull_mask_and_flags = nir_load_cull_mask_and_flags_amd(&b);
    nir_store_var(&b, vars.cull_mask_and_flags, cull_mask_and_flags, 0x1);
    nir_store_var(&b, vars.sbt_offset, nir_load_sbt_offset_amd(&b), 0x1);
    nir_store_var(&b, vars.sbt_stride, nir_load_sbt_stride_amd(&b), 0x1);
@@ -1382,15 +1382,15 @@ radv_build_traversal_shader(struct radv_device *device, struct radv_ray_tracing_
 
    nir_store_var(&b, trav_vars.hit, nir_imm_false(&b), 1);
 
-   nir_ssa_def *bvh_offset = nir_build_load_global(
+   nir_def *bvh_offset = nir_build_load_global(
       &b, 1, 32, nir_iadd_imm(&b, accel_struct, offsetof(struct radv_accel_struct_header, bvh_offset)),
       .access = ACCESS_NON_WRITEABLE);
-   nir_ssa_def *root_bvh_base = nir_iadd(&b, accel_struct, nir_u2u64(&b, bvh_offset));
+   nir_def *root_bvh_base = nir_iadd(&b, accel_struct, nir_u2u64(&b, bvh_offset));
    root_bvh_base = build_addr_to_node(&b, root_bvh_base);
 
    nir_store_var(&b, trav_vars.bvh_base, root_bvh_base, 1);
 
-   nir_ssa_def *vec3ones = nir_imm_vec3(&b, 1.0, 1.0, 1.0);
+   nir_def *vec3ones = nir_imm_vec3(&b, 1.0, 1.0, 1.0);
 
    nir_store_var(&b, trav_vars.origin, nir_load_var(&b, vars.origin), 7);
    nir_store_var(&b, trav_vars.dir, nir_load_var(&b, vars.direction), 7);
@@ -1504,15 +1504,15 @@ radv_build_traversal_shader(struct radv_device *device, struct radv_ray_tracing_
  * CHit / Miss  :  Callable  >  Chit / Miss  >  Traversal  >  Raygen
  * Callable     :  Callable  >  Chit / Miss  >             >  Raygen
  */
-static nir_ssa_def *
-select_next_shader(nir_builder *b, nir_ssa_def *shader_va, unsigned wave_size)
+static nir_def *
+select_next_shader(nir_builder *b, nir_def *shader_va, unsigned wave_size)
 {
    gl_shader_stage stage = b->shader->info.stage;
-   nir_ssa_def *prio = nir_iand_imm(b, shader_va, radv_rt_priority_mask);
-   nir_ssa_def *ballot = nir_ballot(b, 1, wave_size, nir_imm_bool(b, true));
-   nir_ssa_def *ballot_traversal = nir_ballot(b, 1, wave_size, nir_ieq_imm(b, prio, radv_rt_priority_traversal));
-   nir_ssa_def *ballot_hit_miss = nir_ballot(b, 1, wave_size, nir_ieq_imm(b, prio, radv_rt_priority_hit_miss));
-   nir_ssa_def *ballot_callable = nir_ballot(b, 1, wave_size, nir_ieq_imm(b, prio, radv_rt_priority_callable));
+   nir_def *prio = nir_iand_imm(b, shader_va, radv_rt_priority_mask);
+   nir_def *ballot = nir_ballot(b, 1, wave_size, nir_imm_bool(b, true));
+   nir_def *ballot_traversal = nir_ballot(b, 1, wave_size, nir_ieq_imm(b, prio, radv_rt_priority_traversal));
+   nir_def *ballot_hit_miss = nir_ballot(b, 1, wave_size, nir_ieq_imm(b, prio, radv_rt_priority_hit_miss));
+   nir_def *ballot_callable = nir_ballot(b, 1, wave_size, nir_ieq_imm(b, prio, radv_rt_priority_callable));
 
    if (stage != MESA_SHADER_CALLABLE && stage != MESA_SHADER_INTERSECTION)
       ballot = nir_bcsel(b, nir_ine_imm(b, ballot_traversal, 0), ballot_traversal, ballot);
@@ -1521,8 +1521,8 @@ select_next_shader(nir_builder *b, nir_ssa_def *shader_va, unsigned wave_size)
    if (stage != MESA_SHADER_INTERSECTION)
       ballot = nir_bcsel(b, nir_ine_imm(b, ballot_callable, 0), ballot_callable, ballot);
 
-   nir_ssa_def *lsb = nir_find_lsb(b, ballot);
-   nir_ssa_def *next = nir_read_invocation(b, shader_va, lsb);
+   nir_def *lsb = nir_find_lsb(b, ballot);
+   nir_def *next = nir_read_invocation(b, shader_va, lsb);
    return nir_iand_imm(b, next, ~radv_rt_priority_mask);
 }
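
For intuition, the wave-uniform selection above can be modeled in scalar C. Each lane tags its next shader VA with a priority in the low bits; the wave ballots each priority class, keeps the highest class that has any takers, and broadcasts the first such lane's VA so the whole wave runs one shader next. This sketch uses illustrative priority values, collapses the per-stage guards, and stands in __builtin_ctz for nir_find_lsb:

   #include <assert.h>
   #include <stdint.h>

   enum { PRIO_RAYGEN = 0, PRIO_TRAVERSAL = 1, PRIO_HIT_MISS = 2,
          PRIO_CALLABLE = 3, PRIO_MASK = 3 };

   static uint64_t select_next(const uint64_t *shader_va, unsigned lanes)
   {
      uint32_t ballot = (1u << lanes) - 1; /* fallback: every lane (raygen) */

      /* Later classes override earlier ones, so callable beats hit/miss
       * beats traversal, matching the priority table in the comment.
       */
      for (unsigned prio = PRIO_TRAVERSAL; prio <= PRIO_CALLABLE; prio++) {
         uint32_t class_ballot = 0;
         for (unsigned i = 0; i < lanes; i++)
            if ((shader_va[i] & PRIO_MASK) == prio)
               class_ballot |= 1u << i;
         if (class_ballot)
            ballot = class_ballot;
      }

      unsigned lane = __builtin_ctz(ballot);         /* nir_find_lsb */
      return shader_va[lane] & ~(uint64_t)PRIO_MASK; /* nir_read_invocation */
   }

   int main(void)
   {
      uint64_t vas[4] = {0x1000 | PRIO_RAYGEN, 0x2000 | PRIO_HIT_MISS,
                         0x3000 | PRIO_TRAVERSAL, 0x4000 | PRIO_HIT_MISS};
      assert(select_next(vas, 4) == 0x2000); /* first hit/miss lane wins */
      return 0;
   }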
 
@@ -1552,17 +1552,17 @@ radv_nir_lower_rt_abi(nir_shader *shader, const VkRayTracingPipelineCreateInfoKH
    /* initialize variables */
    nir_builder b = nir_builder_at(nir_before_cf_list(&impl->body));
 
-   nir_ssa_def *traversal_addr = ac_nir_load_arg(&b, &args->ac, args->ac.rt.traversal_shader);
+   nir_def *traversal_addr = ac_nir_load_arg(&b, &args->ac, args->ac.rt.traversal_shader);
    nir_store_var(&b, vars.traversal_addr, nir_pack_64_2x32(&b, traversal_addr), 1);
-   nir_ssa_def *shader_va = ac_nir_load_arg(&b, &args->ac, args->ac.rt.next_shader);
+   nir_def *shader_va = ac_nir_load_arg(&b, &args->ac, args->ac.rt.next_shader);
    shader_va = nir_pack_64_2x32(&b, shader_va);
    nir_store_var(&b, vars.shader_va, shader_va, 1);
    nir_store_var(&b, vars.stack_ptr, ac_nir_load_arg(&b, &args->ac, args->ac.rt.dynamic_callable_stack_base), 1);
-   nir_ssa_def *record_ptr = ac_nir_load_arg(&b, &args->ac, args->ac.rt.shader_record);
+   nir_def *record_ptr = ac_nir_load_arg(&b, &args->ac, args->ac.rt.shader_record);
    nir_store_var(&b, vars.shader_record_ptr, nir_pack_64_2x32(&b, record_ptr), 1);
    nir_store_var(&b, vars.arg, ac_nir_load_arg(&b, &args->ac, args->ac.rt.payload_offset), 1);
 
-   nir_ssa_def *accel_struct = ac_nir_load_arg(&b, &args->ac, args->ac.rt.accel_struct);
+   nir_def *accel_struct = ac_nir_load_arg(&b, &args->ac, args->ac.rt.accel_struct);
    nir_store_var(&b, vars.accel_struct, nir_pack_64_2x32(&b, accel_struct), 1);
    nir_store_var(&b, vars.cull_mask_and_flags, ac_nir_load_arg(&b, &args->ac, args->ac.rt.cull_mask_and_flags), 1);
    nir_store_var(&b, vars.sbt_offset, ac_nir_load_arg(&b, &args->ac, args->ac.rt.sbt_offset), 1);
@@ -1574,7 +1574,7 @@ radv_nir_lower_rt_abi(nir_shader *shader, const VkRayTracingPipelineCreateInfoKH
    nir_store_var(&b, vars.tmax, ac_nir_load_arg(&b, &args->ac, args->ac.rt.ray_tmax), 1);
 
    nir_store_var(&b, vars.primitive_id, ac_nir_load_arg(&b, &args->ac, args->ac.rt.primitive_id), 1);
-   nir_ssa_def *instance_addr = ac_nir_load_arg(&b, &args->ac, args->ac.rt.instance_addr);
+   nir_def *instance_addr = ac_nir_load_arg(&b, &args->ac, args->ac.rt.instance_addr);
    nir_store_var(&b, vars.instance_addr, nir_pack_64_2x32(&b, instance_addr), 1);
    nir_store_var(&b, vars.geometry_id_and_flags, ac_nir_load_arg(&b, &args->ac, args->ac.rt.geometry_id_and_flags), 1);
    nir_store_var(&b, vars.hit_kind, ac_nir_load_arg(&b, &args->ac, args->ac.rt.hit_kind), 1);
@@ -1582,7 +1582,7 @@ radv_nir_lower_rt_abi(nir_shader *shader, const VkRayTracingPipelineCreateInfoKH
    /* guard the shader, so that only the correct invocations execute it */
    nir_if *shader_guard = NULL;
    if (shader->info.stage != MESA_SHADER_RAYGEN || resume_shader) {
-      nir_ssa_def *shader_pc = ac_nir_load_arg(&b, &args->ac, args->ac.rt.shader_pc);
+      nir_def *shader_pc = ac_nir_load_arg(&b, &args->ac, args->ac.rt.shader_pc);
       shader_pc = nir_pack_64_2x32(&b, shader_pc);
       shader_pc = nir_ior_imm(&b, shader_pc, radv_get_rt_priority(shader->info.stage));
 
@@ -1598,7 +1598,7 @@ radv_nir_lower_rt_abi(nir_shader *shader, const VkRayTracingPipelineCreateInfoKH
    /* select next shader */
    b.cursor = nir_after_cf_list(&impl->body);
    shader_va = nir_load_var(&b, vars.shader_va);
-   nir_ssa_def *next = select_next_shader(&b, shader_va, info->wave_size);
+   nir_def *next = select_next_shader(&b, shader_va, info->wave_size);
    ac_nir_store_arg(&b, &args->ac, args->ac.rt.shader_pc, next);
 
    /* store back all variables to registers */
diff --git a/src/amd/vulkan/radv_shader_info.c b/src/amd/vulkan/radv_shader_info.c
index ea7e2ef..8da3400 100644
@@ -40,7 +40,7 @@ gather_intrinsic_load_input_info(const nir_shader *nir, const nir_intrinsic_inst
    case MESA_SHADER_VERTEX: {
       unsigned idx = nir_intrinsic_io_semantics(instr).location;
       unsigned component = nir_intrinsic_component(instr);
-      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
+      unsigned mask = nir_def_components_read(&instr->dest.ssa);
       mask = (instr->dest.ssa.bit_size == 64 ? util_widen_mask(mask, 2) : mask) << component;
 
       info->vs.input_usage_mask[idx] |= mask & 0xf;
@@ -95,11 +95,11 @@ gather_intrinsic_store_output_info(const nir_shader *nir, const nir_intrinsic_in
       unsigned pos_w_chan = 3 - component;
 
       if (write_mask & BITFIELD_BIT(pos_w_chan)) {
-         nir_ssa_scalar pos_w = nir_ssa_scalar_resolved(instr->src[0].ssa, pos_w_chan);
+         nir_scalar pos_w = nir_scalar_resolved(instr->src[0].ssa, pos_w_chan);
          /* Use coarse shading if the value of Pos.W can't be determined or if its value is != 1
           * (typical for non-GUI elements).
           */
-         if (!nir_ssa_scalar_is_const(pos_w) || nir_ssa_scalar_as_uint(pos_w) != 0x3f800000u)
+         if (!nir_scalar_is_const(pos_w) || nir_scalar_as_uint(pos_w) != 0x3f800000u)
             info->force_vrs_per_vertex = true;
       }
    }
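
A note for readers outside the driver: nir_scalar_as_uint() returns the raw constant bits, and 0x3f800000 is the IEEE-754 encoding of 1.0f, so the condition reads "force per-vertex VRS unless Pos.W is provably exactly 1.0". A standalone check of the bit pattern:

   #include <assert.h>
   #include <stdint.h>
   #include <string.h>

   int main(void)
   {
      float one = 1.0f;
      uint32_t bits;
      memcpy(&bits, &one, sizeof(bits)); /* well-defined type pun */
      assert(bits == 0x3f800000u);
      return 0;
   }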
@@ -179,7 +179,7 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr, s
       break;
    case nir_intrinsic_load_local_invocation_id:
    case nir_intrinsic_load_workgroup_id: {
-      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
+      unsigned mask = nir_def_components_read(&instr->dest.ssa);
       while (mask) {
          unsigned i = u_bit_scan(&mask);
 
@@ -191,10 +191,10 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr, s
       break;
    }
    case nir_intrinsic_load_frag_coord:
-      info->ps.reads_frag_coord_mask |= nir_ssa_def_components_read(&instr->dest.ssa);
+      info->ps.reads_frag_coord_mask |= nir_def_components_read(&instr->dest.ssa);
       break;
    case nir_intrinsic_load_sample_pos:
-      info->ps.reads_sample_pos_mask |= nir_ssa_def_components_read(&instr->dest.ssa);
+      info->ps.reads_sample_pos_mask |= nir_def_components_read(&instr->dest.ssa);
       break;
    case nir_intrinsic_load_push_constant:
       gather_push_constant_info(nir, instr, info);
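
The read-mask bookkeeping in this file leans on nir_def_components_read() returning one bit per used channel. The 64-bit case in the first hunk widens that mask because each 64-bit component occupies two 32-bit slots; a compilable model (assuming util_widen_mask duplicates every mask bit into two adjacent bits, which is how it is used here):

   #include <assert.h>

   static unsigned widen_mask(unsigned mask, unsigned factor)
   {
      unsigned out = 0;
      for (unsigned i = 0; mask >> i; i++)
         if (mask & (1u << i))
            out |= ((1u << factor) - 1u) << (i * factor);
      return out;
   }

   int main(void)
   {
      /* dvec2 load with both components read: 0b11 -> 0b1111 */
      assert(widen_mask(0x3, 2) == 0xf);
      return 0;
   }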
diff --git a/src/asahi/compiler/agx_compile.c b/src/asahi/compiler/agx_compile.c
index b1010de..bab7276 100644
@@ -409,7 +409,7 @@ agx_emit_load_vary(agx_builder *b, agx_index dest, nir_intrinsic_instr *instr)
    nir_src *offset = nir_get_io_offset_src(instr);
    assert(nir_src_is_const(*offset) && "no indirects");
 
-   assert(nir_ssa_def_components_read(&instr->dest.ssa) ==
+   assert(nir_def_components_read(&instr->dest.ssa) ==
              nir_component_mask(components) &&
           "iter does not handle write-after-write hazards");
 
@@ -771,10 +771,10 @@ agx_emit_local_store(agx_builder *b, nir_intrinsic_instr *instr)
 static agx_index
 agx_translate_bindless_handle(agx_builder *b, nir_src *handle, agx_index *base)
 {
-   nir_ssa_scalar base_scalar = nir_ssa_scalar_resolved(handle->ssa, 0);
-   assert(nir_ssa_scalar_is_const(base_scalar) && "base must be constant");
+   nir_scalar base_scalar = nir_scalar_resolved(handle->ssa, 0);
+   assert(nir_scalar_is_const(base_scalar) && "base must be constant");
 
-   unsigned base_uint = nir_ssa_scalar_as_uint(base_scalar);
+   unsigned base_uint = nir_scalar_as_uint(base_scalar);
    *base = agx_uniform(base_uint, AGX_SIZE_64);
 
    return agx_emit_extract(b, agx_src_index(handle), 1);
@@ -801,7 +801,7 @@ static unsigned
 agx_expand_tex_to(agx_builder *b, nir_dest *dest, agx_index src, bool masked)
 {
    unsigned nr_channels = nir_dest_num_components(*dest);
-   nir_component_mask_t mask = nir_ssa_def_components_read(&dest->ssa);
+   nir_component_mask_t mask = nir_def_components_read(&dest->ssa);
 
    if (!masked)
       mask = (nir_component_mask_t)BITFIELD_MASK(nr_channels);
@@ -1798,7 +1798,7 @@ agx_emit_phis_deferred(agx_context *ctx)
 }
 
 static void
-agx_emit_undef(agx_builder *b, nir_ssa_undef_instr *instr)
+agx_emit_undef(agx_builder *b, nir_undef_instr *instr)
 {
    /* For now, just lower undefs to zero. This doesn't matter too much, since
     * the lowering happens in NIR and this just allows for late lowering passes
@@ -2095,17 +2095,17 @@ agx_lower_sincos_filter(const nir_instr *instr, UNUSED const void *_)
  * implemented by shifting by one quadrant: cos(x) = sin(x + tau/4).
  */
 
-static nir_ssa_def *
+static nir_def *
 agx_lower_sincos_impl(struct nir_builder *b, nir_instr *instr, UNUSED void *_)
 {
    nir_alu_instr *alu = nir_instr_as_alu(instr);
-   nir_ssa_def *x = nir_mov_alu(b, alu->src[0], 1);
-   nir_ssa_def *turns = nir_fmul_imm(b, x, M_1_PI * 0.5f);
+   nir_def *x = nir_mov_alu(b, alu->src[0], 1);
+   nir_def *turns = nir_fmul_imm(b, x, M_1_PI * 0.5f);
 
    if (alu->op == nir_op_fcos)
       turns = nir_fadd_imm(b, turns, 0.25f);
 
-   nir_ssa_def *quadrants = nir_fmul_imm(b, nir_ffract(b, turns), 4.0);
+   nir_def *quadrants = nir_fmul_imm(b, nir_ffract(b, turns), 4.0);
    return nir_fsin_agx(b, quadrants);
 }
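
The conversion above only makes sense once the unit is named: multiplying the fractional turn count by 4 implies that fsin_agx consumes quarter-turns (quadrants), and cos(x) = sin(x + tau/4) becomes a 0.25-turn phase shift. A scalar model under that assumption:

   #include <assert.h>
   #include <math.h>

   static double sin_agx_model(double quadrants)
   {
      return sin(quadrants * (M_PI / 2.0)); /* stand-in for the hw op */
   }

   static double lowered_sincos(double x, int is_cos)
   {
      double turns = x * (M_1_PI * 0.5);
      if (is_cos)
         turns += 0.25; /* cos(x) = sin(x + tau/4) */
      return sin_agx_model((turns - floor(turns)) * 4.0);
   }

   int main(void)
   {
      assert(fabs(lowered_sincos(1.0, 0) - sin(1.0)) < 1e-9);
      assert(fabs(lowered_sincos(1.0, 1) - cos(1.0)) < 1e-9);
      return 0;
   }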
 
@@ -2126,11 +2126,11 @@ agx_lower_front_face(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
    if (intr->intrinsic != nir_intrinsic_load_front_face)
       return false;
 
-   nir_ssa_def *def = &intr->dest.ssa;
+   nir_def *def = &intr->dest.ssa;
    assert(def->bit_size == 1);
 
    b->cursor = nir_before_instr(&intr->instr);
-   nir_ssa_def_rewrite_uses(def, nir_inot(b, nir_load_back_face_agx(b, 1)));
+   nir_def_rewrite_uses(def, nir_inot(b, nir_load_back_face_agx(b, 1)));
    return true;
 }
 
@@ -2347,8 +2347,8 @@ agx_gather_texcoords(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    nir_src src = tex->src[coord_idx].src;
-   nir_ssa_scalar x = nir_ssa_scalar_resolved(src.ssa, 0);
-   nir_ssa_scalar y = nir_ssa_scalar_resolved(src.ssa, 1);
+   nir_scalar x = nir_scalar_resolved(src.ssa, 0);
+   nir_scalar y = nir_scalar_resolved(src.ssa, 1);
 
    if (x.def != y.def)
       return false;
diff --git a/src/asahi/compiler/agx_compiler.h b/src/asahi/compiler/agx_compiler.h
index 4f8db86..1992d8c 100644
@@ -454,7 +454,7 @@ agx_size_for_bits(unsigned bits)
 }
 
 static inline agx_index
-agx_nir_ssa_index(nir_ssa_def *ssa)
+agx_nir_ssa_index(nir_def *ssa)
 {
    return agx_get_index(ssa->index, agx_size_for_bits(ssa->bit_size));
 }
diff --git a/src/asahi/compiler/agx_nir_lower_address.c b/src/asahi/compiler/agx_nir_lower_address.c
index ab77b64..4a39d08 100644
@@ -8,7 +8,7 @@
 
 /* Results of pattern matching */
 struct match {
-   nir_ssa_scalar base, offset;
+   nir_scalar base, offset;
    bool has_offset;
    bool sign_extend;
 
@@ -25,18 +25,18 @@ struct match {
  * variables. Otherwise, returns false.
  */
 static bool
-match_imul_imm(nir_ssa_scalar scalar, nir_ssa_scalar *variable, uint32_t *imm)
+match_imul_imm(nir_scalar scalar, nir_scalar *variable, uint32_t *imm)
 {
-   if (!nir_ssa_scalar_is_alu(scalar))
+   if (!nir_scalar_is_alu(scalar))
       return false;
 
-   nir_op op = nir_ssa_scalar_alu_op(scalar);
+   nir_op op = nir_scalar_alu_op(scalar);
    if (op != nir_op_imul && op != nir_op_ishl)
       return false;
 
-   nir_ssa_scalar inputs[] = {
-      nir_ssa_scalar_chase_alu_src(scalar, 0),
-      nir_ssa_scalar_chase_alu_src(scalar, 1),
+   nir_scalar inputs[] = {
+      nir_scalar_chase_alu_src(scalar, 0),
+      nir_scalar_chase_alu_src(scalar, 1),
    };
 
    /* For imul check both operands for an immediate, since imul is commutative.
@@ -45,12 +45,12 @@ match_imul_imm(nir_ssa_scalar scalar, nir_ssa_scalar *variable, uint32_t *imm)
    bool commutes = (op == nir_op_imul);
 
    for (unsigned i = commutes ? 0 : 1; i < ARRAY_SIZE(inputs); ++i) {
-      if (!nir_ssa_scalar_is_const(inputs[i]))
+      if (!nir_scalar_is_const(inputs[i]))
          continue;
 
       *variable = inputs[1 - i];
 
-      uint32_t value = nir_ssa_scalar_as_uint(inputs[i]);
+      uint32_t value = nir_scalar_as_uint(inputs[i]);
 
       if (op == nir_op_imul)
          *imm = value;
@@ -75,17 +75,17 @@ match_imul_imm(nir_ssa_scalar scalar, nir_ssa_scalar *variable, uint32_t *imm)
 static bool
 match_soa(nir_builder *b, struct match *match, unsigned format_shift)
 {
-   if (!nir_ssa_scalar_is_alu(match->offset) ||
-       nir_ssa_scalar_alu_op(match->offset) != nir_op_iadd)
+   if (!nir_scalar_is_alu(match->offset) ||
+       nir_scalar_alu_op(match->offset) != nir_op_iadd)
       return false;
 
-   nir_ssa_scalar summands[] = {
-      nir_ssa_scalar_chase_alu_src(match->offset, 0),
-      nir_ssa_scalar_chase_alu_src(match->offset, 1),
+   nir_scalar summands[] = {
+      nir_scalar_chase_alu_src(match->offset, 0),
+      nir_scalar_chase_alu_src(match->offset, 1),
    };
 
    for (unsigned i = 0; i < ARRAY_SIZE(summands); ++i) {
-      if (!nir_ssa_scalar_is_const(summands[i]))
+      if (!nir_scalar_is_const(summands[i]))
          continue;
 
       /* Note: This is treated as signed regardless of the sign of the match.
@@ -104,8 +104,8 @@ match_soa(nir_builder *b, struct match *match, unsigned format_shift)
        * TODO: We need to confirm how the hardware handles 32-bit overflow when
        * applying the format shift, which might need rework here again.
        */
-      int offset = nir_ssa_scalar_as_int(summands[i]);
-      nir_ssa_scalar variable;
+      int offset = nir_scalar_as_int(summands[i]);
+      nir_scalar variable;
       uint32_t multiplier;
 
       /* The other operand must multiply */
@@ -123,9 +123,9 @@ match_soa(nir_builder *b, struct match *match, unsigned format_shift)
          return false;
 
       /* Otherwise, rewrite! */
-      nir_ssa_def *unmultiplied = nir_vec_scalars(b, &variable, 1);
+      nir_def *unmultiplied = nir_vec_scalars(b, &variable, 1);
 
-      nir_ssa_def *rewrite = nir_iadd_imm(
+      nir_def *rewrite = nir_iadd_imm(
          b, nir_imul_imm(b, unmultiplied, multiplier_shifted), offset_shifted);
 
       match->offset = nir_get_ssa_scalar(rewrite, 0);
@@ -138,27 +138,26 @@ match_soa(nir_builder *b, struct match *match, unsigned format_shift)
 
 /* Try to pattern match address calculation */
 static struct match
-match_address(nir_builder *b, nir_ssa_scalar base, int8_t format_shift)
+match_address(nir_builder *b, nir_scalar base, int8_t format_shift)
 {
    struct match match = {.base = base};
 
    /* All address calculations are iadd at the root */
-   if (!nir_ssa_scalar_is_alu(base) ||
-       nir_ssa_scalar_alu_op(base) != nir_op_iadd)
+   if (!nir_scalar_is_alu(base) || nir_scalar_alu_op(base) != nir_op_iadd)
       return match;
 
    /* Only 64+32 addition is supported, look for an extension */
-   nir_ssa_scalar summands[] = {
-      nir_ssa_scalar_chase_alu_src(base, 0),
-      nir_ssa_scalar_chase_alu_src(base, 1),
+   nir_scalar summands[] = {
+      nir_scalar_chase_alu_src(base, 0),
+      nir_scalar_chase_alu_src(base, 1),
    };
 
    for (unsigned i = 0; i < ARRAY_SIZE(summands); ++i) {
       /* We can add a small constant to the 64-bit base for free */
-      if (nir_ssa_scalar_is_const(summands[i]) &&
-          nir_ssa_scalar_as_uint(summands[i]) < (1ull << 32)) {
+      if (nir_scalar_is_const(summands[i]) &&
+          nir_scalar_as_uint(summands[i]) < (1ull << 32)) {
 
-         uint32_t value = nir_ssa_scalar_as_uint(summands[i]);
+         uint32_t value = nir_scalar_as_uint(summands[i]);
 
          return (struct match){
             .base = summands[1 - i],
@@ -169,17 +168,17 @@ match_address(nir_builder *b, nir_ssa_scalar base, int8_t format_shift)
       }
 
       /* Otherwise, we can only add an offset extended from 32-bits */
-      if (!nir_ssa_scalar_is_alu(summands[i]))
+      if (!nir_scalar_is_alu(summands[i]))
          continue;
 
-      nir_op op = nir_ssa_scalar_alu_op(summands[i]);
+      nir_op op = nir_scalar_alu_op(summands[i]);
 
       if (op != nir_op_u2u64 && op != nir_op_i2i64)
          continue;
 
       /* We've found a summand, commit to it */
       match.base = summands[1 - i];
-      match.offset = nir_ssa_scalar_chase_alu_src(summands[i], 0);
+      match.offset = nir_scalar_chase_alu_src(summands[i], 0);
       match.sign_extend = (op == nir_op_i2i64);
 
       /* Undo the implicit shift from using as offset */
@@ -192,7 +191,7 @@ match_address(nir_builder *b, nir_ssa_scalar base, int8_t format_shift)
       return match;
 
    /* But if we did, we can try to fold in a multiply */
-   nir_ssa_scalar multiplied;
+   nir_scalar multiplied;
    uint32_t multiplier;
 
    if (match_imul_imm(match.offset, &multiplied, &multiplier)) {
@@ -211,7 +210,7 @@ match_address(nir_builder *b, nir_ssa_scalar base, int8_t format_shift)
          return match;
       }
 
-      nir_ssa_def *multiplied_ssa = nir_vec_scalars(b, &multiplied, 1);
+      nir_def *multiplied_ssa = nir_vec_scalars(b, &multiplied, 1);
 
       /* Only fold in if we wouldn't overflow the lsl field */
       if (new_shift <= 2) {
@@ -224,7 +223,7 @@ match_address(nir_builder *b, nir_ssa_scalar base, int8_t format_shift)
           */
          assert(new_shift >= 3);
 
-         nir_ssa_def *rewrite =
+         nir_def *rewrite =
             nir_imul_imm(b, multiplied_ssa, multiplier << new_shift);
 
          match.offset = nir_get_ssa_scalar(rewrite, 0);
@@ -276,13 +275,12 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
    unsigned format_shift = util_logbase2(util_format_get_blocksize(format));
 
    nir_src *orig_offset = nir_get_io_offset_src(intr);
-   nir_ssa_scalar base = nir_ssa_scalar_resolved(orig_offset->ssa, 0);
+   nir_scalar base = nir_scalar_resolved(orig_offset->ssa, 0);
    struct match match = match_address(b, base, format_shift);
 
-   nir_ssa_def *offset =
-      match.offset.def != NULL
-         ? nir_channel(b, match.offset.def, match.offset.comp)
-         : nir_imm_int(b, 0);
+   nir_def *offset = match.offset.def != NULL
+                        ? nir_channel(b, match.offset.def, match.offset.comp)
+                        : nir_imm_int(b, 0);
 
    /* If we were unable to fold in the shift, insert a right-shift now to undo
     * the implicit left shift of the instruction.
@@ -309,9 +307,9 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
    }
 
    assert(match.shift >= 0);
-   nir_ssa_def *new_base = nir_channel(b, match.base.def, match.base.comp);
+   nir_def *new_base = nir_channel(b, match.base.def, match.base.comp);
 
-   nir_ssa_def *repl = NULL;
+   nir_def *repl = NULL;
    bool has_dest = (intr->intrinsic != nir_intrinsic_store_global);
    unsigned num_components = has_dest ? nir_dest_num_components(intr->dest) : 0;
    unsigned bit_size = has_dest ? nir_dest_bit_size(intr->dest) : 0;
@@ -346,7 +344,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
    }
 
    if (repl)
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, repl);
+      nir_def_rewrite_uses(&intr->dest.ssa, repl);
 
    nir_instr_remove(instr);
    return true;
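
To see what the pass is matching toward, here is a model of the addressing mode as reconstructed from the comments in this file (implicit scale by the format size plus a small lsl field; this is an assumption, not taken from hardware documentation). The instruction effectively computes base + (offset << (format_shift + lsl)), so a NIR chain like base + u2u64(i * 4) for a 32-bit access folds its multiply into the implicit format scale:

   #include <assert.h>
   #include <stdint.h>

   /* Assumed hardware semantics: 64-bit base plus a 32-bit offset that is
    * implicitly scaled by the format size and an optional lsl of 0..2.
    */
   static uint64_t agx_addr(uint64_t base, uint32_t offset,
                            unsigned format_shift, unsigned lsl)
   {
      return base + ((uint64_t)offset << (format_shift + lsl));
   }

   int main(void)
   {
      uint64_t base = 0x1000;
      uint32_t i = 7;

      /* base + u2u64(i * 4), 32-bit load (format_shift = 2): the
       * multiply disappears into the format scale, leaving
       * offset = i and lsl = 0.
       */
      assert(agx_addr(base, i, 2, 0) == base + (uint64_t)i * 4);
      return 0;
   }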
diff --git a/src/asahi/compiler/agx_nir_lower_discard_zs_emit.c b/src/asahi/compiler/agx_nir_lower_discard_zs_emit.c
index 1241558..71742ec 100644
@@ -33,7 +33,7 @@ lower_zs_emit(nir_block *block)
 
       nir_builder b = nir_builder_at(nir_before_instr(instr));
 
-      nir_ssa_def *value = intr->src[0].ssa;
+      nir_def *value = intr->src[0].ssa;
       bool z = (sem.location == FRAG_RESULT_DEPTH);
 
       unsigned src_idx = z ? 1 : 2;
@@ -51,10 +51,10 @@ lower_zs_emit(nir_block *block)
          /* Multisampling will get lowered later if needed, default to
           * broadcast
           */
-         nir_ssa_def *sample_mask = nir_imm_intN_t(&b, ALL_SAMPLES, 16);
-         zs_emit = nir_store_zs_agx(&b, sample_mask,
-                                    nir_ssa_undef(&b, 1, 32) /* depth */,
-                                    nir_ssa_undef(&b, 1, 16) /* stencil */);
+         nir_def *sample_mask = nir_imm_intN_t(&b, ALL_SAMPLES, 16);
+         zs_emit =
+            nir_store_zs_agx(&b, sample_mask, nir_undef(&b, 1, 32) /* depth */,
+                             nir_undef(&b, 1, 16) /* stencil */);
       }
 
       assert((nir_intrinsic_base(zs_emit) & base) == 0 &&
@@ -83,9 +83,9 @@ lower_discard(nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *all_samples = nir_imm_intN_t(b, ALL_SAMPLES, 16);
-   nir_ssa_def *no_samples = nir_imm_intN_t(b, 0, 16);
-   nir_ssa_def *killed_samples = all_samples;
+   nir_def *all_samples = nir_imm_intN_t(b, ALL_SAMPLES, 16);
+   nir_def *no_samples = nir_imm_intN_t(b, 0, 16);
+   nir_def *killed_samples = all_samples;
 
    if (intr->intrinsic == nir_intrinsic_discard_if)
       killed_samples = nir_bcsel(b, intr->src[0].ssa, all_samples, no_samples);
diff --git a/src/asahi/compiler/agx_nir_lower_frag_sidefx.c b/src/asahi/compiler/agx_nir_lower_frag_sidefx.c
index a786ff6..13ad0fa 100644
@@ -22,7 +22,7 @@
 static void
 insert_z_write(nir_builder *b)
 {
-   nir_ssa_def *z = nir_load_frag_coord_zw(b, .component = 2);
+   nir_def *z = nir_load_frag_coord_zw(b, .component = 2);
 
    nir_store_output(b, z, nir_imm_int(b, 0),
                     .io_semantics.location = FRAG_RESULT_DEPTH,
diff --git a/src/asahi/compiler/agx_nir_lower_interpolation.c b/src/asahi/compiler/agx_nir_lower_interpolation.c
index afc006d..f20ad84 100644
  */
 
 /* XXX: It's not clear what this is for, but seems necessary */
-static nir_ssa_def *
-cf_valid(nir_builder *b, nir_ssa_def *cf)
+static nir_def *
+cf_valid(nir_builder *b, nir_def *cf)
 {
-   nir_ssa_def *bit =
-      nir_ieq_imm(b, nir_iand_imm(b, nir_channel(b, cf, 0), 1), 0);
+   nir_def *bit = nir_ieq_imm(b, nir_iand_imm(b, nir_channel(b, cf, 0), 1), 0);
 
    /* XXX: Apple's compiler actually checks that the significand is nonzero and
     * the exponent is 0 or 1. This is probably a typo -- it doesn't make any
     * logical sense.  Presumably they just meant to check for denorms, so let's
     * do that. Either way the tests pass.
     */
-   nir_ssa_def *cf01 = nir_trim_vector(b, cf, 2);
+   nir_def *cf01 = nir_trim_vector(b, cf, 2);
    return nir_ior(b, bit, nir_fisnormal(b, cf01));
 }
 
-static nir_ssa_def *
-interpolate_at_offset(nir_builder *b, nir_ssa_def *cf, nir_ssa_def *offset,
+static nir_def *
+interpolate_at_offset(nir_builder *b, nir_def *cf, nir_def *offset,
                       bool perspective)
 {
    /* Get the coordinate of the pixel within the tile */
-   nir_ssa_def *pixel_coords = nir_load_pixel_coord(b);
-   nir_ssa_def *tile_offs = nir_umod_imm(b, pixel_coords, 32);
+   nir_def *pixel_coords = nir_load_pixel_coord(b);
+   nir_def *tile_offs = nir_umod_imm(b, pixel_coords, 32);
 
    /* Convert to float, getting the center of the pixel */
-   nir_ssa_def *center = nir_fadd_imm(b, nir_u2f32(b, tile_offs), 0.5);
+   nir_def *center = nir_fadd_imm(b, nir_u2f32(b, tile_offs), 0.5);
 
    /* Calculate the location to interpolate. offset is defined relative to the
     * center of the pixel and is a float.
     */
-   nir_ssa_def *pos = nir_fadd(b, center, nir_f2f32(b, offset));
+   nir_def *pos = nir_fadd(b, center, nir_f2f32(b, offset));
 
    /* Interpolate with the given coefficients */
-   nir_ssa_def *interp = nir_ffma(b, nir_channel(b, pos, 1),
-                                  nir_channel(b, cf, 1), nir_channel(b, cf, 2));
+   nir_def *interp = nir_ffma(b, nir_channel(b, pos, 1), nir_channel(b, cf, 1),
+                              nir_channel(b, cf, 2));
 
    interp = nir_ffma(b, nir_channel(b, pos, 0), nir_channel(b, cf, 0), interp);
 
    /* Divide by RHW. This load will be lowered recursively. */
    if (perspective) {
-      nir_ssa_def *bary = nir_load_barycentric_at_offset(
+      nir_def *bary = nir_load_barycentric_at_offset(
          b, 32, offset, .interp_mode = INTERP_MODE_NOPERSPECTIVE);
 
-      nir_ssa_def *rhw = nir_load_interpolated_input(
+      nir_def *rhw = nir_load_interpolated_input(
          b, 1, 32, bary, nir_imm_int(b, 0), .component = 3,
          .io_semantics = {
             .location = VARYING_SLOT_POS,
@@ -80,8 +79,8 @@ interpolate_at_offset(nir_builder *b, nir_ssa_def *cf, nir_ssa_def *offset,
    return nir_bcsel(b, cf_valid(b, cf), interp, nir_channel(b, cf, 2));
 }
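
The two ffma's above evaluate a plane equation in tile-relative pixel coordinates: for coefficients cf = (A, B, C), the attribute value is A*x + B*y + C, and a flat input (below) is the same plane with zero slopes, i.e. just C. A minimal scalar model:

   #include <assert.h>

   static float interpolate(const float cf[3], float x, float y)
   {
      float v = y * cf[1] + cf[2]; /* first ffma */
      return x * cf[0] + v;        /* second ffma */
   }

   int main(void)
   {
      const float cf[3] = {0.5f, -1.0f, 3.0f};
      assert(interpolate(cf, 2.0f, 1.0f) == 0.5f * 2.0f - 1.0f + 3.0f);
      return 0;
   }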
 
-static nir_ssa_def *
-interpolate_flat(nir_builder *b, nir_ssa_def *coefficients)
+static nir_def *
+interpolate_flat(nir_builder *b, nir_def *coefficients)
 {
    /* Same value anywhere, so just take the constant (affine) component */
    return nir_channel(b, coefficients, 2);
@@ -114,7 +113,7 @@ needs_lower(const nir_instr *instr, UNUSED const void *_)
    return (load->intrinsic == nir_intrinsic_load_input);
 }
 
-static nir_ssa_def *
+static nir_def *
 interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
 {
    nir_io_semantics sem = nir_intrinsic_io_semantics(load);
@@ -123,7 +122,7 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
    sem.location += nir_src_as_uint(*nir_get_io_offset_src(load));
    sem.num_slots = 1;
 
-   nir_ssa_def *coefficients = nir_load_coefficients_agx(
+   nir_def *coefficients = nir_load_coefficients_agx(
       b, .component = nir_intrinsic_component(load) + channel,
       .interp_mode = interp_mode_for_load(load), .io_semantics = sem);
 
@@ -133,7 +132,7 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
    } else {
       nir_intrinsic_instr *bary = nir_src_as_intrinsic(load->src[0]);
 
-      nir_ssa_def *interp = interpolate_at_offset(
+      nir_def *interp = interpolate_at_offset(
          b, coefficients, bary->src[0].ssa,
          nir_intrinsic_interp_mode(bary) != INTERP_MODE_NOPERSPECTIVE);
 
@@ -141,13 +140,13 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower(nir_builder *b, nir_instr *instr, void *data)
 {
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
 
    /* Each component is loaded separately */
-   nir_ssa_def *values[NIR_MAX_VEC_COMPONENTS] = {NULL};
+   nir_def *values[NIR_MAX_VEC_COMPONENTS] = {NULL};
    for (unsigned i = 0; i < nir_dest_num_components(intr->dest); ++i) {
       values[i] = interpolate_channel(b, intr, i);
    }
diff --git a/src/asahi/compiler/agx_nir_lower_load_mask.c b/src/asahi/compiler/agx_nir_lower_load_mask.c
index 11c85a9..6d25294 100644
@@ -21,13 +21,13 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
    if (intr->intrinsic != nir_intrinsic_load_interpolated_input)
       return false;
 
-   unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
+   unsigned mask = nir_def_components_read(&intr->dest.ssa);
    if (mask == 0 || mask == nir_component_mask(intr->num_components))
       return false;
 
    b->cursor = nir_before_instr(instr);
    unsigned bit_size = nir_dest_bit_size(intr->dest);
-   nir_ssa_def *comps[4] = {NULL};
+   nir_def *comps[4] = {NULL};
 
    for (unsigned c = 0; c < intr->num_components; ++c) {
       if (mask & BITFIELD_BIT(c)) {
@@ -44,7 +44,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
 
          /* Shrink the load to count contiguous components */
          nir_ssa_dest_init(clone, &clone_intr->dest, count, bit_size);
-         nir_ssa_def *clone_vec = &clone_intr->dest.ssa;
+         nir_def *clone_vec = &clone_intr->dest.ssa;
          clone_intr->num_components = count;
 
          /* The load starts from component c relative to the original load */
@@ -64,12 +64,12 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
          /* The value of unused components is irrelevant, but use an undef for
           * semantics. It will be eliminated by DCE after copyprop.
           */
-         comps[c] = nir_ssa_undef(b, 1, bit_size);
+         comps[c] = nir_undef(b, 1, bit_size);
       }
    }
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                            nir_vec(b, comps, intr->num_components));
+   nir_def_rewrite_uses(&intr->dest.ssa,
+                        nir_vec(b, comps, intr->num_components));
    return true;
 }
 
diff --git a/src/asahi/compiler/agx_nir_lower_sample_mask.c b/src/asahi/compiler/agx_nir_lower_sample_mask.c
index 3c5ec38..6b6ff35 100644
@@ -84,7 +84,7 @@ lower_sample_mask_to_zs(nir_builder *b, nir_instr *instr, UNUSED void *data)
     */
    if (intr->intrinsic == nir_intrinsic_store_zs_agx && !depth_written) {
       /* Load the current depth at this pixel */
-      nir_ssa_def *z = nir_load_frag_coord_zw(b, .component = 2);
+      nir_def *z = nir_load_frag_coord_zw(b, .component = 2);
 
       /* Write it out from this store_zs */
       nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) | BASE_Z);
@@ -103,7 +103,7 @@ lower_sample_mask_to_zs(nir_builder *b, nir_instr *instr, UNUSED void *data)
    /* Write a NaN depth value for discarded samples */
    nir_store_zs_agx(b, intr->src[0].ssa, nir_imm_float(b, NAN),
                     stencil_written ? nir_imm_intN_t(b, 0, 16)
-                                    : nir_ssa_undef(b, 1, 16) /* stencil */,
+                                    : nir_undef(b, 1, 16) /* stencil */,
                     .base = BASE_Z | (stencil_written ? BASE_S : 0));
 
    nir_instr_remove(instr);
@@ -196,9 +196,9 @@ agx_nir_lower_sample_mask(nir_shader *shader, unsigned nr_samples)
             /* Last discard is executed unconditionally, so fuse tests. */
             b.cursor = nir_before_instr(&intr->instr);
 
-            nir_ssa_def *all_samples = nir_imm_intN_t(&b, ALL_SAMPLES, 16);
-            nir_ssa_def *killed = intr->src[0].ssa;
-            nir_ssa_def *live = nir_ixor(&b, killed, all_samples);
+            nir_def *all_samples = nir_imm_intN_t(&b, ALL_SAMPLES, 16);
+            nir_def *killed = intr->src[0].ssa;
+            nir_def *live = nir_ixor(&b, killed, all_samples);
 
             nir_sample_mask_agx(&b, all_samples, live);
             nir_instr_remove(&intr->instr);
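
The ixor above flips the encoding from "killed" to "live", which is what the hardware sample-mask instruction expects. With an illustrative 4-sample mask (the real ALL_SAMPLES constant is defined elsewhere in this file):

   #include <assert.h>
   #include <stdint.h>

   int main(void)
   {
      uint16_t all_samples = 0xf; /* 4x MSAA, all samples */
      uint16_t killed = 0x5;      /* discard samples 0 and 2 */
      uint16_t live = killed ^ all_samples;
      assert(live == 0xa);        /* samples 1 and 3 survive */
      return 0;
   }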
diff --git a/src/asahi/compiler/agx_nir_lower_texture.c b/src/asahi/compiler/agx_nir_lower_texture.c
index 17d6eaa..25aedbd 100644
 #define AGX_FORMAT_RGB32_EMULATED 0x36
 #define AGX_LAYOUT_LINEAR         0x0
 
-static nir_ssa_def *
-texture_descriptor_ptr_for_handle(nir_builder *b, nir_ssa_def *handle)
+static nir_def *
+texture_descriptor_ptr_for_handle(nir_builder *b, nir_def *handle)
 {
    /* Bindless handles are a vec2, where the first source is the (constant)
     * uniform register number and the second source is the byte offset.
     */
-   nir_ssa_scalar uniform = nir_ssa_scalar_resolved(handle, 0);
-   unsigned uniform_idx = nir_ssa_scalar_as_uint(uniform);
+   nir_scalar uniform = nir_scalar_resolved(handle, 0);
+   unsigned uniform_idx = nir_scalar_as_uint(uniform);
 
-   nir_ssa_def *base = nir_load_preamble(b, 1, 64, uniform_idx);
-   nir_ssa_def *offset = nir_u2u64(b, nir_channel(b, handle, 1));
+   nir_def *base = nir_load_preamble(b, 1, 64, uniform_idx);
+   nir_def *offset = nir_u2u64(b, nir_channel(b, handle, 1));
 
    return nir_iadd(b, base, offset);
 }
 
-static nir_ssa_def *
-texture_descriptor_ptr_for_index(nir_builder *b, nir_ssa_def *index)
+static nir_def *
+texture_descriptor_ptr_for_index(nir_builder *b, nir_def *index)
 {
    return nir_iadd(
       b, nir_load_texture_base_agx(b),
       nir_u2u64(b, nir_imul_imm(b, index, AGX_TEXTURE_DESC_STRIDE)));
 }
 
-static nir_ssa_def *
+static nir_def *
 texture_descriptor_ptr(nir_builder *b, nir_tex_instr *tex)
 {
    int handle_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_handle);
@@ -50,7 +50,7 @@ texture_descriptor_ptr(nir_builder *b, nir_tex_instr *tex)
       return texture_descriptor_ptr_for_handle(b, tex->src[handle_idx].src.ssa);
 
    /* For non-bindless, compute from the texture index */
-   nir_ssa_def *index;
+   nir_def *index;
 
    int offs_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset);
    if (offs_idx >= 0)
@@ -66,40 +66,40 @@ texture_descriptor_ptr(nir_builder *b, nir_tex_instr *tex)
  * original size is irrecoverable. Instead, we stash it in the "Acceleration
  * buffer" field, which is unused for linear images. Fetch just that.
  */
-static nir_ssa_def *
-agx_txs_buffer(nir_builder *b, nir_ssa_def *descriptor)
+static nir_def *
+agx_txs_buffer(nir_builder *b, nir_def *descriptor)
 {
-   nir_ssa_def *size_ptr = nir_iadd_imm(b, descriptor, 16);
+   nir_def *size_ptr = nir_iadd_imm(b, descriptor, 16);
 
    return nir_load_global_constant(b, size_ptr, 8, 1, 32);
 }
 
-static nir_ssa_def *
+static nir_def *
 agx_txs(nir_builder *b, nir_tex_instr *tex)
 {
-   nir_ssa_def *ptr = texture_descriptor_ptr(b, tex);
-   nir_ssa_def *comp[4] = {NULL};
+   nir_def *ptr = texture_descriptor_ptr(b, tex);
+   nir_def *comp[4] = {NULL};
 
    if (tex->sampler_dim == GLSL_SAMPLER_DIM_BUF)
       return agx_txs_buffer(b, ptr);
 
-   nir_ssa_def *desc = nir_load_global_constant(b, ptr, 8, 4, 32);
-   nir_ssa_def *w0 = nir_channel(b, desc, 0);
-   nir_ssa_def *w1 = nir_channel(b, desc, 1);
-   nir_ssa_def *w3 = nir_channel(b, desc, 3);
+   nir_def *desc = nir_load_global_constant(b, ptr, 8, 4, 32);
+   nir_def *w0 = nir_channel(b, desc, 0);
+   nir_def *w1 = nir_channel(b, desc, 1);
+   nir_def *w3 = nir_channel(b, desc, 3);
 
    /* Width minus 1: bits [28, 42) */
-   nir_ssa_def *width_m1 =
+   nir_def *width_m1 =
       nir_extr_agx(b, w0, w1, nir_imm_int(b, 28), nir_imm_int(b, 14));
 
    /* Height minus 1: bits [42, 56) */
-   nir_ssa_def *height_m1 = nir_ubitfield_extract_imm(b, w1, 42 - 32, 14);
+   nir_def *height_m1 = nir_ubitfield_extract_imm(b, w1, 42 - 32, 14);
 
    /* Depth minus 1: bits [110, 124) */
-   nir_ssa_def *depth_m1 = nir_ubitfield_extract_imm(b, w3, 110 - 96, 14);
+   nir_def *depth_m1 = nir_ubitfield_extract_imm(b, w3, 110 - 96, 14);
 
    /* First level: bits [56, 60) */
-   nir_ssa_def *lod = nir_ubitfield_extract_imm(b, w1, 56 - 32, 4);
+   nir_def *lod = nir_ubitfield_extract_imm(b, w1, 56 - 32, 4);
 
    /* Add LOD offset to first level to get the interesting LOD */
    int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
@@ -116,14 +116,13 @@ agx_txs(nir_builder *b, nir_tex_instr *tex)
        * TODO: Optimize this, since linear 2D arrays aren't needed for APIs and
        * this just gets used internally for blits.
        */
-      nir_ssa_def *layout = nir_ubitfield_extract_imm(b, w0, 4, 2);
+      nir_def *layout = nir_ubitfield_extract_imm(b, w0, 4, 2);
 
       /* Get the 2 bytes after the first 128-bit descriptor */
-      nir_ssa_def *extension =
+      nir_def *extension =
          nir_load_global_constant(b, nir_iadd_imm(b, ptr, 16), 8, 1, 16);
 
-      nir_ssa_def *depth_linear_m1 =
-         nir_iand_imm(b, extension, BITFIELD_MASK(11));
+      nir_def *depth_linear_m1 = nir_iand_imm(b, extension, BITFIELD_MASK(11));
 
       depth_linear_m1 = nir_u2uN(b, depth_linear_m1, depth_m1->bit_size);
 
@@ -132,9 +131,9 @@ agx_txs(nir_builder *b, nir_tex_instr *tex)
    }
 
    /* Add 1 to width-1, height-1 to get base dimensions */
-   nir_ssa_def *width = nir_iadd_imm(b, width_m1, 1);
-   nir_ssa_def *height = nir_iadd_imm(b, height_m1, 1);
-   nir_ssa_def *depth = nir_iadd_imm(b, depth_m1, 1);
+   nir_def *width = nir_iadd_imm(b, width_m1, 1);
+   nir_def *height = nir_iadd_imm(b, height_m1, 1);
+   nir_def *depth = nir_iadd_imm(b, depth_m1, 1);
 
    /* 1D Arrays have their second component as the layer count */
    if (tex->sampler_dim == GLSL_SAMPLER_DIM_1D && tex->is_array)
@@ -179,42 +178,42 @@ lower_txs(nir_builder *b, nir_instr *instr, UNUSED void *data)
    if (tex->op != nir_texop_txs)
       return false;
 
-   nir_ssa_def *res = agx_txs(b, tex);
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, res, instr);
+   nir_def *res = agx_txs(b, tex);
+   nir_def_rewrite_uses_after(&tex->dest.ssa, res, instr);
    nir_instr_remove(instr);
    return true;
 }
 
-static nir_ssa_def *
+static nir_def *
 format_is_rgb32(nir_builder *b, nir_tex_instr *tex)
 {
-   nir_ssa_def *ptr = texture_descriptor_ptr(b, tex);
-   nir_ssa_def *desc = nir_load_global_constant(b, ptr, 8, 1, 32);
-   nir_ssa_def *channels = nir_ubitfield_extract_imm(b, desc, 6, 7);
+   nir_def *ptr = texture_descriptor_ptr(b, tex);
+   nir_def *desc = nir_load_global_constant(b, ptr, 8, 1, 32);
+   nir_def *channels = nir_ubitfield_extract_imm(b, desc, 6, 7);
 
    return nir_ieq_imm(b, channels, AGX_FORMAT_RGB32_EMULATED);
 }
 
 /* Load from an RGB32 buffer texture */
-static nir_ssa_def *
-load_rgb32(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coordinate)
+static nir_def *
+load_rgb32(nir_builder *b, nir_tex_instr *tex, nir_def *coordinate)
 {
    /* Base address right-shifted 4: bits [66, 102) */
-   nir_ssa_def *ptr_hi = nir_iadd_imm(b, texture_descriptor_ptr(b, tex), 8);
-   nir_ssa_def *desc_hi_words = nir_load_global_constant(b, ptr_hi, 8, 2, 32);
-   nir_ssa_def *desc_hi = nir_pack_64_2x32(b, desc_hi_words);
-   nir_ssa_def *base_shr4 =
+   nir_def *ptr_hi = nir_iadd_imm(b, texture_descriptor_ptr(b, tex), 8);
+   nir_def *desc_hi_words = nir_load_global_constant(b, ptr_hi, 8, 2, 32);
+   nir_def *desc_hi = nir_pack_64_2x32(b, desc_hi_words);
+   nir_def *base_shr4 =
       nir_iand_imm(b, nir_ushr_imm(b, desc_hi, 2), BITFIELD64_MASK(36));
-   nir_ssa_def *base = nir_ishl_imm(b, base_shr4, 4);
+   nir_def *base = nir_ishl_imm(b, base_shr4, 4);
 
-   nir_ssa_def *raw = nir_load_constant_agx(
-      b, 3, nir_dest_bit_size(tex->dest), base, nir_imul_imm(b, coordinate, 3),
-      .format = AGX_INTERNAL_FORMAT_I32);
+   nir_def *raw = nir_load_constant_agx(b, 3, nir_dest_bit_size(tex->dest),
+                                        base, nir_imul_imm(b, coordinate, 3),
+                                        .format = AGX_INTERNAL_FORMAT_I32);
 
    /* Set alpha to 1 (in the appropriate format) */
    bool is_float = nir_alu_type_get_base_type(tex->dest_type) == nir_type_float;
 
-   nir_ssa_def *swizzled[4] = {
+   nir_def *swizzled[4] = {
       nir_channel(b, raw, 0), nir_channel(b, raw, 1), nir_channel(b, raw, 2),
       is_float ? nir_imm_float(b, 1.0) : nir_imm_int(b, 1)};
 
@@ -225,8 +224,8 @@ load_rgb32(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coordinate)
  * Given a 1D buffer texture coordinate, calculate the 2D coordinate vector that
  * will be used to access the linear 2D texture bound to the buffer.
  */
-static nir_ssa_def *
-coords_for_buffer_texture(nir_builder *b, nir_ssa_def *coord)
+static nir_def *
+coords_for_buffer_texture(nir_builder *b, nir_def *coord)
 {
    return nir_vec2(b, nir_iand_imm(b, coord, BITFIELD_MASK(10)),
                    nir_ushr_imm(b, coord, 10));
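
The mask-and-shift pair pins the backing 2D texture to a width of 1024 texels (2^10), so a 1D buffer index splits losslessly into column and row:

   #include <assert.h>

   int main(void)
   {
      unsigned coord = 5000;
      unsigned x = coord & 0x3ff; /* BITFIELD_MASK(10) */
      unsigned y = coord >> 10;
      assert(x == 904 && y == 4);
      assert(y * 1024 + x == coord); /* round-trips */
      return 0;
   }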
@@ -247,7 +246,7 @@ coords_for_buffer_texture(nir_builder *b, nir_ssa_def *coord)
 static bool
 lower_buffer_texture(nir_builder *b, nir_tex_instr *tex)
 {
-   nir_ssa_def *coord = nir_steal_tex_src(tex, nir_tex_src_coord);
+   nir_def *coord = nir_steal_tex_src(tex, nir_tex_src_coord);
 
    /* The OpenGL ES 3.2 specification says on page 187:
     *
@@ -258,19 +257,19 @@ lower_buffer_texture(nir_builder *b, nir_tex_instr *tex)
     *
     * However, faulting would be undesirable for robustness, so clamp.
     */
-   nir_ssa_def *size = nir_get_texture_size(b, tex);
+   nir_def *size = nir_get_texture_size(b, tex);
    coord = nir_umin(b, coord, nir_iadd_imm(b, size, -1));
 
    /* Lower RGB32 reads if the format requires */
    nir_if *nif = nir_push_if(b, format_is_rgb32(b, tex));
-   nir_ssa_def *rgb32 = load_rgb32(b, tex, coord);
+   nir_def *rgb32 = load_rgb32(b, tex, coord);
    nir_push_else(b, nif);
 
    /* Otherwise, lower the texture instruction to read from 2D */
    assert(coord->num_components == 1 && "buffer textures are 1D");
    tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
 
-   nir_ssa_def *coord2d = coords_for_buffer_texture(b, coord);
+   nir_def *coord2d = coords_for_buffer_texture(b, coord);
    nir_instr_remove(&tex->instr);
    nir_builder_instr_insert(b, &tex->instr);
    nir_tex_instr_add_src(tex, nir_tex_src_backend1, nir_src_for_ssa(coord2d));
@@ -278,8 +277,8 @@ lower_buffer_texture(nir_builder *b, nir_tex_instr *tex)
    nir_pop_if(b, nif);
 
    /* Put it together with a phi */
-   nir_ssa_def *phi = nir_if_phi(b, rgb32, &tex->dest.ssa);
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, phi);
+   nir_def *phi = nir_if_phi(b, rgb32, &tex->dest.ssa);
+   nir_def_rewrite_uses(&tex->dest.ssa, phi);
    nir_phi_instr *phi_instr = nir_instr_as_phi(phi->parent_instr);
    nir_phi_src *else_src = nir_phi_get_src_from_block(phi_instr, else_block);
    nir_instr_rewrite_src_ssa(phi->parent_instr, &else_src->src, &tex->dest.ssa);
@@ -307,8 +306,8 @@ lower_regular_texture(nir_builder *b, nir_instr *instr, UNUSED void *data)
       return lower_buffer_texture(b, tex);
 
    /* Get the coordinates */
-   nir_ssa_def *coord = nir_steal_tex_src(tex, nir_tex_src_coord);
-   nir_ssa_def *ms_idx = nir_steal_tex_src(tex, nir_tex_src_ms_index);
+   nir_def *coord = nir_steal_tex_src(tex, nir_tex_src_coord);
+   nir_def *ms_idx = nir_steal_tex_src(tex, nir_tex_src_ms_index);
 
    /* It's unclear if mipmapped 1D textures work in the hardware. For now, we
     * always lower to 2D.
@@ -333,7 +332,7 @@ lower_regular_texture(nir_builder *b, nir_instr *instr, UNUSED void *data)
       };
 
       for (unsigned i = 0; i < ARRAY_SIZE(other_srcs); ++i) {
-         nir_ssa_def *src = nir_steal_tex_src(tex, other_srcs[i]);
+         nir_def *src = nir_steal_tex_src(tex, other_srcs[i]);
 
          if (!src)
             continue;
@@ -350,11 +349,11 @@ lower_regular_texture(nir_builder *b, nir_instr *instr, UNUSED void *data)
    /* The layer is always the last component of the NIR coordinate, split it off
     * because we'll need to swizzle.
     */
-   nir_ssa_def *layer = NULL;
+   nir_def *layer = NULL;
 
    if (tex->is_array) {
       unsigned lidx = coord->num_components - 1;
-      nir_ssa_def *unclamped_layer = nir_channel(b, coord, lidx);
+      nir_def *unclamped_layer = nir_channel(b, coord, lidx);
       coord = nir_trim_vector(b, coord, lidx);
 
       /* Round layer to nearest even */
@@ -364,9 +363,9 @@ lower_regular_texture(nir_builder *b, nir_instr *instr, UNUSED void *data)
       /* Clamp to max layer = (# of layers - 1) for out-of-bounds handling.
        * Layer must be 16-bits for the hardware, drop top bits after clamping.
        */
-      nir_ssa_def *txs = nir_get_texture_size(b, tex);
-      nir_ssa_def *nr_layers = nir_channel(b, txs, lidx);
-      nir_ssa_def *max_layer = nir_iadd_imm(b, nr_layers, -1);
+      nir_def *txs = nir_get_texture_size(b, tex);
+      nir_def *nr_layers = nir_channel(b, txs, lidx);
+      nir_def *max_layer = nir_iadd_imm(b, nr_layers, -1);
       layer = nir_u2u16(b, nir_umin(b, unclamped_layer, max_layer));
    }
 
@@ -374,11 +373,11 @@ lower_regular_texture(nir_builder *b, nir_instr *instr, UNUSED void *data)
     * vec6 16-bit coordinate tuple, which would be inconvenient in NIR for
     * little benefit (a minor optimization, I guess).
     */
-   nir_ssa_def *sample_array = (ms_idx && layer)
-                                  ? nir_pack_32_2x16_split(b, ms_idx, layer)
-                               : ms_idx ? nir_u2u32(b, ms_idx)
-                               : layer  ? nir_u2u32(b, layer)
-                                        : NULL;
+   nir_def *sample_array = (ms_idx && layer)
+                              ? nir_pack_32_2x16_split(b, ms_idx, layer)
+                           : ms_idx ? nir_u2u32(b, ms_idx)
+                           : layer  ? nir_u2u32(b, layer)
+                                    : NULL;
 
    /* Combine into the final 32-bit tuple */
    if (sample_array != NULL) {
@@ -390,14 +389,14 @@ lower_regular_texture(nir_builder *b, nir_instr *instr, UNUSED void *data)
    nir_tex_instr_add_src(tex, nir_tex_src_backend1, nir_src_for_ssa(coord));
 
    /* Furthermore, if there is an offset vector, it must be packed */
-   nir_ssa_def *offset = nir_steal_tex_src(tex, nir_tex_src_offset);
+   nir_def *offset = nir_steal_tex_src(tex, nir_tex_src_offset);
 
    if (offset != NULL) {
-      nir_ssa_def *packed = NULL;
+      nir_def *packed = NULL;
 
       for (unsigned c = 0; c < offset->num_components; ++c) {
-         nir_ssa_def *nibble = nir_iand_imm(b, nir_channel(b, offset, c), 0xF);
-         nir_ssa_def *shifted = nir_ishl_imm(b, nibble, 4 * c);
+         nir_def *nibble = nir_iand_imm(b, nir_channel(b, offset, c), 0xF);
+         nir_def *shifted = nir_ishl_imm(b, nibble, 4 * c);
 
          if (packed != NULL)
             packed = nir_ior(b, packed, shifted);
@@ -411,7 +410,7 @@ lower_regular_texture(nir_builder *b, nir_instr *instr, UNUSED void *data)
    return true;
 }
 
-static nir_ssa_def *
+static nir_def *
 bias_for_tex(nir_builder *b, nir_tex_instr *tex)
 {
    nir_instr *instr = nir_get_texture_size(b, tex)->parent_instr;
@@ -446,7 +445,7 @@ lower_sampler_bias(nir_builder *b, nir_instr *instr, UNUSED void *data)
       nir_tex_src_type src =
          tex->op == nir_texop_txl ? nir_tex_src_lod : nir_tex_src_bias;
 
-      nir_ssa_def *orig = nir_steal_tex_src(tex, src);
+      nir_def *orig = nir_steal_tex_src(tex, src);
       assert(orig != NULL && "invalid NIR");
 
       if (orig->bit_size != 16)
@@ -463,14 +462,14 @@ lower_sampler_bias(nir_builder *b, nir_instr *instr, UNUSED void *data)
        * derivatives. So scale derivatives by exp2(bias) to
        * get level-of-detail log2(exp2(bias) * rho) = bias + log2(rho).
        */
-      nir_ssa_def *scale = nir_fexp2(b, nir_f2f32(b, bias_for_tex(b, tex)));
+      nir_def *scale = nir_fexp2(b, nir_f2f32(b, bias_for_tex(b, tex)));
       nir_tex_src_type src[] = {nir_tex_src_ddx, nir_tex_src_ddy};
 
       for (unsigned s = 0; s < ARRAY_SIZE(src); ++s) {
-         nir_ssa_def *orig = nir_steal_tex_src(tex, src[s]);
+         nir_def *orig = nir_steal_tex_src(tex, src[s]);
          assert(orig != NULL && "invalid");
 
-         nir_ssa_def *scaled = nir_fmul(b, nir_f2f32(b, orig), scale);
+         nir_def *scaled = nir_fmul(b, nir_f2f32(b, orig), scale);
          nir_tex_instr_add_src(tex, src[s], nir_src_for_ssa(scaled));
       }
 
@@ -520,11 +519,11 @@ legalize_image_lod(nir_builder *b, nir_instr *instr, UNUSED void *data)
       return false;
 
    b->cursor = nir_before_instr(instr);
-   nir_src_rewrite_ssa(src, nir_i2i16(b, src->ssa));
+   nir_src_rewrite(src, nir_i2i16(b, src->ssa));
    return true;
 }
 
-static nir_ssa_def *
+static nir_def *
 txs_for_image(nir_builder *b, nir_intrinsic_instr *intr,
               unsigned num_components, unsigned bit_size)
 {
@@ -554,44 +553,40 @@ txs_for_image(nir_builder *b, nir_intrinsic_instr *intr,
    return &tex->dest.ssa;
 }
 
-static nir_ssa_def *
-nir_bitfield_mask(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+nir_bitfield_mask(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *one = nir_imm_intN_t(b, 1, x->bit_size);
+   nir_def *one = nir_imm_intN_t(b, 1, x->bit_size);
    return nir_iadd_imm(b, nir_ishl(b, one, nir_u2u32(b, x)), -1);
 }
 
-static nir_ssa_def *
-calculate_twiddled_coordinates(nir_builder *b, nir_ssa_def *coord,
-                               nir_ssa_def *tile_w_px_log2,
-                               nir_ssa_def *tile_h_px_log2,
-                               nir_ssa_def *width_tl,
-                               nir_ssa_def *layer_stride_el)
+static nir_def *
+calculate_twiddled_coordinates(nir_builder *b, nir_def *coord,
+                               nir_def *tile_w_px_log2, nir_def *tile_h_px_log2,
+                               nir_def *width_tl, nir_def *layer_stride_el)
 {
    /* SIMD-within-a-register */
-   nir_ssa_def *coord_px = nir_pack_32_2x16(b, nir_u2u16(b, coord));
-   nir_ssa_def *tile_mask =
+   nir_def *coord_px = nir_pack_32_2x16(b, nir_u2u16(b, coord));
+   nir_def *tile_mask =
       nir_pack_32_2x16_split(b, nir_bitfield_mask(b, tile_w_px_log2),
                              nir_bitfield_mask(b, tile_h_px_log2));
 
    /* Modulo by the tile width/height to get the offsets within the tile */
-   nir_ssa_def *offs_xy_px = nir_iand(b, coord_px, tile_mask);
+   nir_def *offs_xy_px = nir_iand(b, coord_px, tile_mask);
 
    /* Get the coordinates of the corner of the tile */
-   nir_ssa_def *tile_xy_px = nir_isub(b, coord_px, offs_xy_px);
+   nir_def *tile_xy_px = nir_isub(b, coord_px, offs_xy_px);
 
    /* Unpack SIMD-within-a-register */
-   nir_ssa_def *offs_x_px = nir_unpack_32_2x16_split_x(b, offs_xy_px);
-   nir_ssa_def *offs_y_px = nir_unpack_32_2x16_split_y(b, offs_xy_px);
-   nir_ssa_def *tile_x_px =
-      nir_u2u32(b, nir_unpack_32_2x16_split_x(b, tile_xy_px));
-   nir_ssa_def *tile_y_px =
-      nir_u2u32(b, nir_unpack_32_2x16_split_y(b, tile_xy_px));
+   nir_def *offs_x_px = nir_unpack_32_2x16_split_x(b, offs_xy_px);
+   nir_def *offs_y_px = nir_unpack_32_2x16_split_y(b, offs_xy_px);
+   nir_def *tile_x_px = nir_u2u32(b, nir_unpack_32_2x16_split_x(b, tile_xy_px));
+   nir_def *tile_y_px = nir_u2u32(b, nir_unpack_32_2x16_split_y(b, tile_xy_px));
 
    /* Get the tile size */
-   nir_ssa_def *one_32 = nir_imm_int(b, 1);
-   nir_ssa_def *tile_w_px = nir_ishl(b, one_32, nir_u2u32(b, tile_w_px_log2));
-   nir_ssa_def *tile_h_px = nir_ishl(b, one_32, nir_u2u32(b, tile_h_px_log2));
+   nir_def *one_32 = nir_imm_int(b, 1);
+   nir_def *tile_w_px = nir_ishl(b, one_32, nir_u2u32(b, tile_w_px_log2));
+   nir_def *tile_h_px = nir_ishl(b, one_32, nir_u2u32(b, tile_h_px_log2));
 
    /* tile row start (px) =
     *   (y // tile height) * (# of tiles/row) * (# of pix/tile) =
@@ -599,7 +594,7 @@ calculate_twiddled_coordinates(nir_builder *b, nir_ssa_def *coord,
     *        tile height =
     *   align_down(y, tile height) * width_tl * tile width
     */
-   nir_ssa_def *tile_row_start_px =
+   nir_def *tile_row_start_px =
       nir_imul(b, nir_u2u32(b, tile_y_px), nir_imul(b, width_tl, tile_w_px));
 
    /* tile column start (px) =
@@ -607,38 +602,37 @@ calculate_twiddled_coordinates(nir_builder *b, nir_ssa_def *coord,
     *   align(x, tile width) / tile width * tile width * tile height =
     *   align(x, tile width) * tile height
     */
-   nir_ssa_def *tile_col_start_px = nir_imul(b, tile_x_px, tile_h_px);
+   nir_def *tile_col_start_px = nir_imul(b, tile_x_px, tile_h_px);
 
    /* The pixel at which the tile starts is thus... */
-   nir_ssa_def *tile_offset_px =
-      nir_iadd(b, tile_row_start_px, tile_col_start_px);
+   nir_def *tile_offset_px = nir_iadd(b, tile_row_start_px, tile_col_start_px);
 
    /* Get the total offset */
-   nir_ssa_def *offs_px = nir_interleave_agx(b, offs_x_px, offs_y_px);
-   nir_ssa_def *total_px = nir_iadd(b, tile_offset_px, nir_u2u32(b, offs_px));
+   nir_def *offs_px = nir_interleave_agx(b, offs_x_px, offs_y_px);
+   nir_def *total_px = nir_iadd(b, tile_offset_px, nir_u2u32(b, offs_px));
 
    if (layer_stride_el) {
-      nir_ssa_def *layer = nir_channel(b, coord, 2);
-      nir_ssa_def *layer_offset_px = nir_imul(b, layer, layer_stride_el);
+      nir_def *layer = nir_channel(b, coord, 2);
+      nir_def *layer_offset_px = nir_imul(b, layer, layer_stride_el);
       total_px = nir_iadd(b, total_px, layer_offset_px);
    }
 
    return total_px;
 }
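
A scalar model of the twiddled layout computed above: tiles are stored row-major and pixels within a tile are Morton-interleaved. The exact bit order of nir_interleave_agx (x in even bits, y in odd bits) is an assumption of this sketch:

   #include <assert.h>
   #include <stdint.h>

   static uint32_t interleave(uint16_t x, uint16_t y)
   {
      uint32_t out = 0;
      for (unsigned i = 0; i < 16; i++)
         out |= ((x >> i) & 1u) << (2 * i) | ((y >> i) & 1u) << (2 * i + 1);
      return out;
   }

   static uint32_t twiddled(uint32_t x, uint32_t y, unsigned tw_log2,
                            unsigned th_log2, uint32_t width_tl)
   {
      uint32_t tw = 1u << tw_log2, th = 1u << th_log2;
      uint32_t tile_row_start = (y & ~(th - 1)) * width_tl * tw;
      uint32_t tile_col_start = (x & ~(tw - 1)) * th;
      return tile_row_start + tile_col_start +
             interleave(x & (tw - 1), y & (th - 1));
   }

   int main(void)
   {
      /* 4x4 tiles, 8 tiles per row: pixel (5, 6) sits in tile (1, 1)
       * at intra-tile offset (1, 2), giving 128 + 16 + interleave(1, 2).
       */
      assert(twiddled(5, 6, 2, 2, 8) == 128 + 16 + interleave(1, 2));
      return 0;
   }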
 
-static nir_ssa_def *
+static nir_def *
 image_texel_address(nir_builder *b, nir_intrinsic_instr *intr,
                     bool return_index)
 {
    /* First, calculate the address of the PBE descriptor */
-   nir_ssa_def *desc_address;
+   nir_def *desc_address;
    if (intr->intrinsic == nir_intrinsic_bindless_image_texel_address ||
        intr->intrinsic == nir_intrinsic_bindless_image_store)
       desc_address = texture_descriptor_ptr_for_handle(b, intr->src[0].ssa);
    else
       desc_address = texture_descriptor_ptr_for_index(b, intr->src[0].ssa);
 
-   nir_ssa_def *coord = intr->src[1].ssa;
+   nir_def *coord = intr->src[1].ssa;
 
    enum glsl_sampler_dim dim = nir_intrinsic_image_dim(intr);
    bool layered = nir_intrinsic_image_array(intr) ||
@@ -649,36 +643,36 @@ image_texel_address(nir_builder *b, nir_intrinsic_instr *intr,
     * software-defined atomic descriptor, or (if array image) a pointer to the
     * descriptor. Grab it.
     */
-   nir_ssa_def *meta_ptr = nir_iadd_imm(b, desc_address, 16);
-   nir_ssa_def *meta = nir_load_global_constant(b, meta_ptr, 8, 1, 64);
-   nir_ssa_def *layer_stride_el = NULL;
+   nir_def *meta_ptr = nir_iadd_imm(b, desc_address, 16);
+   nir_def *meta = nir_load_global_constant(b, meta_ptr, 8, 1, 64);
+   nir_def *layer_stride_el = NULL;
 
    if (layered) {
-      nir_ssa_def *desc = nir_load_global_constant(b, meta, 8, 3, 32);
+      nir_def *desc = nir_load_global_constant(b, meta, 8, 3, 32);
       meta = nir_pack_64_2x32(b, nir_trim_vector(b, desc, 2));
       layer_stride_el = nir_channel(b, desc, 2);
    }
 
-   nir_ssa_def *meta_hi = nir_unpack_64_2x32_split_y(b, meta);
+   nir_def *meta_hi = nir_unpack_64_2x32_split_y(b, meta);
 
    /* See the GenXML definitions of the software-defined atomic descriptors */
-   nir_ssa_def *base;
+   nir_def *base;
 
    if (dim == GLSL_SAMPLER_DIM_BUF)
       base = meta;
    else
       base = nir_ishl_imm(b, nir_iand_imm(b, meta, BITFIELD64_MASK(33)), 7);
 
-   nir_ssa_def *tile_w_px_log2 =
+   nir_def *tile_w_px_log2 =
       nir_u2u16(b, nir_ubitfield_extract_imm(b, meta_hi, 33 - 32, 3));
-   nir_ssa_def *tile_h_px_log2 =
+   nir_def *tile_h_px_log2 =
       nir_u2u16(b, nir_ubitfield_extract_imm(b, meta_hi, 36 - 32, 3));
-   nir_ssa_def *width_tl = nir_ubitfield_extract_imm(b, meta_hi, 39 - 32, 14);
+   nir_def *width_tl = nir_ubitfield_extract_imm(b, meta_hi, 39 - 32, 14);
 
    /* We do not allow atomics on linear 2D or linear 2D arrays, as there are no
     * known use cases. So, we're linear if buffer or 1D, and twiddled otherwise.
     */
-   nir_ssa_def *total_px;
+   nir_def *total_px;
    if (dim == GLSL_SAMPLER_DIM_BUF || dim == GLSL_SAMPLER_DIM_1D) {
       /* 1D linear is indexed directly */
       total_px = nir_channel(b, coord, 0);
@@ -687,12 +681,11 @@ image_texel_address(nir_builder *b, nir_intrinsic_instr *intr,
          b, coord, tile_w_px_log2, tile_h_px_log2, width_tl, layer_stride_el);
    }
 
-   nir_ssa_def *total_sa;
+   nir_def *total_sa;
 
    if (dim == GLSL_SAMPLER_DIM_MS) {
-      nir_ssa_def *sample_idx = intr->src[2].ssa;
-      nir_ssa_def *samples_log2 =
-         nir_ubitfield_extract_imm(b, meta_hi, 54 - 32, 2);
+      nir_def *sample_idx = intr->src[2].ssa;
+      nir_def *samples_log2 = nir_ubitfield_extract_imm(b, meta_hi, 54 - 32, 2);
 
       total_sa = nir_iadd(b, nir_ishl(b, total_px, samples_log2), sample_idx);
    } else {
@@ -709,7 +702,7 @@ image_texel_address(nir_builder *b, nir_intrinsic_instr *intr,
    enum pipe_format format = nir_intrinsic_format(intr);
    unsigned bytes_per_sample_B = util_format_get_blocksize(format);
 
-   nir_ssa_def *total_B = nir_imul_imm(b, total_sa, bytes_per_sample_B);
+   nir_def *total_B = nir_imul_imm(b, total_sa, bytes_per_sample_B);
    return nir_iadd(b, base, nir_u2u64(b, total_B));
 }
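Pieced together from the extracts above, the 64-bit metadata word decodes roughly as follows. This is a sketch of the fields as this pass reads them, not the canonical GenXML definition, and the struct/function names are made up; for buffer images the whole word is the base address instead:

   #include <stdint.h>

   struct atomic_desc_sketch {
      uint64_t base_B;               /* byte address, 128-byte aligned */
      uint32_t tile_w_px, tile_h_px; /* tile dimensions in pixels */
      uint32_t width_tl;             /* image width in tiles */
      uint32_t samples;              /* samples per pixel */
   };

   static struct atomic_desc_sketch
   decode_atomic_desc(uint64_t meta)
   {
      return (struct atomic_desc_sketch){
         .base_B    = (meta & ((1ull << 33) - 1)) << 7,
         .tile_w_px = 1u << ((meta >> 33) & 0x7),      /* bits 33..35, log2 */
         .tile_h_px = 1u << ((meta >> 36) & 0x7),      /* bits 36..38, log2 */
         .width_tl  = (uint32_t)(meta >> 39) & 0x3fff, /* bits 39..52 */
         .samples   = 1u << ((meta >> 54) & 0x3),      /* bits 54..55, log2 */
      };
   }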
 
@@ -719,14 +712,14 @@ lower_buffer_image(nir_builder *b, nir_intrinsic_instr *intr)
    if (nir_intrinsic_image_dim(intr) != GLSL_SAMPLER_DIM_BUF)
       return false;
 
-   nir_ssa_def *coord_vector = intr->src[1].ssa;
-   nir_ssa_def *coord = nir_channel(b, coord_vector, 0);
+   nir_def *coord_vector = intr->src[1].ssa;
+   nir_def *coord = nir_channel(b, coord_vector, 0);
 
    /* Lower the buffer load/store to a 2D image load/store, matching the 2D
     * texture/PBE descriptor the driver supplies for buffer images.
     */
-   nir_ssa_def *coord2d = coords_for_buffer_texture(b, coord);
-   nir_src_rewrite_ssa(&intr->src[1], nir_pad_vector(b, coord2d, 4));
+   nir_def *coord2d = coords_for_buffer_texture(b, coord);
+   nir_src_rewrite(&intr->src[1], nir_pad_vector(b, coord2d, 4));
    nir_intrinsic_set_image_dim(intr, GLSL_SAMPLER_DIM_2D);
    return true;
 }
@@ -749,7 +742,7 @@ lower_images(nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    case nir_intrinsic_image_size:
    case nir_intrinsic_bindless_image_size:
-      nir_ssa_def_rewrite_uses(
+      nir_def_rewrite_uses(
          &intr->dest.ssa,
          txs_for_image(b, intr, nir_dest_num_components(intr->dest),
                        nir_dest_bit_size(intr->dest)));
@@ -757,8 +750,8 @@ lower_images(nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    case nir_intrinsic_image_texel_address:
    case nir_intrinsic_bindless_image_texel_address:
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                               image_texel_address(b, intr, false));
+      nir_def_rewrite_uses(&intr->dest.ssa,
+                           image_texel_address(b, intr, false));
       return true;
 
    default:
@@ -842,10 +835,10 @@ lower_multisampled_store(nir_builder *b, nir_instr *instr, UNUSED void *data)
    if (nir_intrinsic_image_dim(intr) != GLSL_SAMPLER_DIM_MS)
       return false;
 
-   nir_ssa_def *index_px = image_texel_address(b, intr, true);
-   nir_ssa_def *coord2d = coords_for_buffer_texture(b, index_px);
+   nir_def *index_px = image_texel_address(b, intr, true);
+   nir_def *coord2d = coords_for_buffer_texture(b, index_px);
 
-   nir_src_rewrite_ssa(&intr->src[1], nir_pad_vector(b, coord2d, 4));
+   nir_src_rewrite(&intr->src[1], nir_pad_vector(b, coord2d, 4));
    nir_intrinsic_set_image_dim(intr, GLSL_SAMPLER_DIM_2D);
    return true;
 }
index 2da694e..61a72d8 100644
@@ -20,15 +20,15 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *ubo_index = nir_ssa_for_src(b, intr->src[0], 1);
-   nir_ssa_def *offset = nir_ssa_for_src(b, *nir_get_io_offset_src(intr), 1);
-   nir_ssa_def *address =
+   nir_def *ubo_index = nir_ssa_for_src(b, intr->src[0], 1);
+   nir_def *offset = nir_ssa_for_src(b, *nir_get_io_offset_src(intr), 1);
+   nir_def *address =
       nir_iadd(b, nir_load_ubo_base_agx(b, ubo_index), nir_u2u64(b, offset));
-   nir_ssa_def *value = nir_load_global_constant(
+   nir_def *value = nir_load_global_constant(
       b, address, nir_intrinsic_align(intr), intr->num_components,
       nir_dest_bit_size(intr->dest));
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, value);
+   nir_def_rewrite_uses(&intr->dest.ssa, value);
    return true;
 }
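The address computation this pass emits is a plain 64-bit base plus 32-bit byte offset; a minimal sketch, assuming the driver exposes one base pointer per UBO (the array-of-bases form here is illustrative):

   #include <stdint.h>

   /* Equivalent of nir_load_ubo_base_agx(index) + u2u64(offset) */
   static uint64_t
   ubo_global_address(const uint64_t *ubo_base, uint32_t ubo_index,
                      uint32_t offset_B)
   {
      return ubo_base[ubo_index] + (uint64_t)offset_B;
   }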
 
index b927784..5d6f633 100644
@@ -8,7 +8,7 @@
 #include "agx_compiler.h"
 
 static void
-def_size(nir_ssa_def *def, unsigned *size, unsigned *align)
+def_size(nir_def *def, unsigned *size, unsigned *align)
 {
    unsigned bit_size = MAX2(def->bit_size, 16);
 
@@ -50,7 +50,7 @@ instr_cost(nir_instr *instr, const void *data)
 }
 
 static float
-rewrite_cost(nir_ssa_def *def, const void *data)
+rewrite_cost(nir_def *def, const void *data)
 {
    bool mov_needed = false;
    nir_foreach_use(use, def) {
@@ -76,7 +76,7 @@ rewrite_cost(nir_ssa_def *def, const void *data)
 static bool
 avoid_instr(const nir_instr *instr, const void *data)
 {
-   const nir_ssa_def *def = nir_instr_ssa_def((nir_instr *)instr);
+   const nir_def *def = nir_instr_ssa_def((nir_instr *)instr);
 
    /* Do not move bindless handles, since we need those to retain their constant
     * base index.
index eb4fc2c..eb81fdc 100644
@@ -36,7 +36,7 @@ agx_compile_meta_shader(struct agx_meta_cache *cache, nir_shader *shader,
    return res;
 }
 
-static nir_ssa_def *
+static nir_def *
 build_background_op(nir_builder *b, enum agx_meta_op op, unsigned rt,
                     unsigned nr, bool msaa)
 {
index fe6df12..b95e4e4 100644
@@ -10,8 +10,8 @@
 #include "nir_builder.h"
 #include "nir_format_convert.h"
 
-static inline nir_ssa_def *
-nir_sign_extend_if_sint(nir_builder *b, nir_ssa_def *x, enum pipe_format format)
+static inline nir_def *
+nir_sign_extend_if_sint(nir_builder *b, nir_def *x, enum pipe_format format)
 {
    if (!util_format_is_pure_sint(format))
       return x;
index f624c13..8dfd440 100644
@@ -46,7 +46,7 @@ agx_nir_lower_alpha_to_coverage(nir_shader *shader, uint8_t nr_samples)
       return;
 
    /* Similarly, if there are fewer than 4 components, alpha is undefined */
-   nir_ssa_def *rgba = store->src[0].ssa;
+   nir_def *rgba = store->src[0].ssa;
    if (rgba->num_components < 4)
       return;
 
@@ -59,9 +59,9 @@ agx_nir_lower_alpha_to_coverage(nir_shader *shader, uint8_t nr_samples)
     *    # of bits = (unsigned int) (alpha * nr_samples)
     *    mask = (1 << (# of bits)) - 1
     */
-   nir_ssa_def *alpha = nir_channel(b, rgba, 3);
-   nir_ssa_def *bits = nir_f2u32(b, nir_fmul_imm(b, alpha, nr_samples));
-   nir_ssa_def *mask =
+   nir_def *alpha = nir_channel(b, rgba, 3);
+   nir_def *bits = nir_f2u32(b, nir_fmul_imm(b, alpha, nr_samples));
+   nir_def *mask =
       nir_iadd_imm(b, nir_ishl(b, nir_imm_intN_t(b, 1, 16), bits), -1);
 
    /* Discard samples that aren't covered */
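The quoted mapping needs only a truncating convert, a shift, and an add; a scalar reference of the mask computation, matching the comment above:

   #include <stdint.h>

   /* alpha = 1.0 with 4 samples yields 0xf; alpha = 0.5 yields 0x3 */
   static uint16_t
   alpha_to_coverage_mask(float alpha, unsigned nr_samples)
   {
      unsigned bits = (unsigned)(alpha * nr_samples); /* # of bits */
      return (uint16_t)((1u << bits) - 1);            /* mask */
   }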
@@ -100,12 +100,12 @@ agx_nir_lower_alpha_to_one(nir_shader *shader)
       if (sem.location < FRAG_RESULT_DATA0)
          continue;
 
-      nir_ssa_def *rgba = intr->src[0].ssa;
+      nir_def *rgba = intr->src[0].ssa;
       if (rgba->num_components < 4)
          continue;
 
       nir_builder b = nir_builder_at(nir_before_instr(instr));
-      nir_ssa_def *rgb1 = nir_vector_insert_imm(
+      nir_def *rgb1 = nir_vector_insert_imm(
          &b, rgba, nir_imm_floatN_t(&b, 1.0, rgba->bit_size), 3);
 
       nir_instr_rewrite_src_ssa(instr, &intr->src[0], rgb1);
index a4c2334..635e98a 100644
@@ -11,7 +11,7 @@
 static bool
 lower_wrapped(nir_builder *b, nir_instr *instr, void *data)
 {
-   nir_ssa_def *sample_id = data;
+   nir_def *sample_id = data;
    if (instr->type != nir_instr_type_intrinsic)
       return false;
 
@@ -21,7 +21,7 @@ lower_wrapped(nir_builder *b, nir_instr *instr, void *data)
    switch (intr->intrinsic) {
    case nir_intrinsic_load_sample_id: {
       unsigned size = nir_dest_bit_size(intr->dest);
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_u2uN(b, sample_id, size));
+      nir_def_rewrite_uses(&intr->dest.ssa, nir_u2uN(b, sample_id, size));
       nir_instr_remove(instr);
       return true;
    }
@@ -34,10 +34,10 @@ lower_wrapped(nir_builder *b, nir_instr *instr, void *data)
       unsigned mask_index =
          (intr->intrinsic == nir_intrinsic_store_local_pixel_agx) ? 1 : 0;
 
-      nir_ssa_def *mask = intr->src[mask_index].ssa;
-      nir_ssa_def *id_mask = nir_ishl(b, nir_imm_intN_t(b, 1, mask->bit_size),
-                                      nir_u2u32(b, sample_id));
-      nir_src_rewrite_ssa(&intr->src[mask_index], nir_iand(b, mask, id_mask));
+      nir_def *mask = intr->src[mask_index].ssa;
+      nir_def *id_mask = nir_ishl(b, nir_imm_intN_t(b, 1, mask->bit_size),
+                                  nir_u2u32(b, sample_id));
+      nir_src_rewrite(&intr->src[mask_index], nir_iand(b, mask, id_mask));
       return true;
    }
 
@@ -70,7 +70,7 @@ agx_nir_wrap_per_sample_loop(nir_shader *shader, uint8_t nr_samples)
    nir_variable *i =
       nir_local_variable_create(impl, glsl_uintN_t_type(16), NULL);
    nir_store_var(&b, i, nir_imm_intN_t(&b, 0, 16), ~0);
-   nir_ssa_def *index = NULL;
+   nir_def *index = NULL;
 
    /* Create a loop in the wrapped function */
    nir_loop *loop = nir_push_loop(&b);
@@ -151,11 +151,11 @@ lower_sample_mask_read(nir_builder *b, nir_instr *instr, UNUSED void *_)
    if (intr->intrinsic != nir_intrinsic_load_sample_mask_in)
       return false;
 
-   nir_ssa_def *old = &intr->dest.ssa;
-   nir_ssa_def *lowered = nir_iand(
+   nir_def *old = &intr->dest.ssa;
+   nir_def *lowered = nir_iand(
       b, old, nir_u2uN(b, nir_load_api_sample_mask_agx(b), old->bit_size));
 
-   nir_ssa_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
+   nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
    return true;
 }
 
index 430f9b1..34ba027 100644
@@ -7,10 +7,10 @@
 #include "agx_tilebuffer.h"
 #include "nir_builder.h"
 
-static nir_ssa_def *
-mask_by_sample_id(nir_builder *b, nir_ssa_def *mask)
+static nir_def *
+mask_by_sample_id(nir_builder *b, nir_def *mask)
 {
-   nir_ssa_def *id_mask =
+   nir_def *id_mask =
       nir_ishl(b, nir_imm_intN_t(b, 1, mask->bit_size), nir_load_sample_id(b));
    return nir_iand(b, mask, id_mask);
 }
@@ -36,16 +36,16 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
        *       xy[component] = ((float)nibble) / 16.0;
        *    }
        */
-      nir_ssa_def *packed = nir_load_sample_positions_agx(b);
+      nir_def *packed = nir_load_sample_positions_agx(b);
 
       /* The n'th sample is in the n'th byte of the register */
-      nir_ssa_def *shifted = nir_ushr(
+      nir_def *shifted = nir_ushr(
          b, packed, nir_u2u32(b, nir_imul_imm(b, nir_load_sample_id(b), 8)));
 
-      nir_ssa_def *xy[2];
+      nir_def *xy[2];
       for (unsigned i = 0; i < 2; ++i) {
          /* Get the appropriate nibble */
-         nir_ssa_def *nibble =
+         nir_def *nibble =
             nir_iand_imm(b, nir_ushr_imm(b, shifted, i * 4), 0xF);
 
          /* Convert it from fixed point to float */
@@ -56,7 +56,7 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
       }
 
       /* Collect and rewrite */
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_vec2(b, xy[0], xy[1]));
+      nir_def_rewrite_uses(&intr->dest.ssa, nir_vec2(b, xy[0], xy[1]));
       nir_instr_remove(instr);
       return true;
    }
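A CPU-side sketch of the same unpacking, assuming the packed register holds one byte per sample with the 4-bit fixed-point x position in the low nibble and y in the high nibble, in units of 1/16 pixel:

   #include <stdint.h>

   static void
   decode_sample_position(uint32_t packed, unsigned sample_id, float xy[2])
   {
      /* The n'th sample is in the n'th byte of the register */
      uint32_t byte = (packed >> (sample_id * 8)) & 0xff;

      for (unsigned i = 0; i < 2; ++i) {
         uint32_t nibble = (byte >> (i * 4)) & 0xf; /* get the nibble */
         xy[i] = (float)nibble / 16.0f;             /* fixed point to float */
      }
   }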
@@ -67,9 +67,9 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
        * by the sample ID to make that happen.
        */
       b->cursor = nir_after_instr(instr);
-      nir_ssa_def *old = &intr->dest.ssa;
-      nir_ssa_def *lowered = mask_by_sample_id(b, old);
-      nir_ssa_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
+      nir_def *old = &intr->dest.ssa;
+      nir_def *lowered = mask_by_sample_id(b, old);
+      nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
       return true;
    }
 
@@ -78,13 +78,13 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
        * interpolateAtSample() with the sample ID
        */
       b->cursor = nir_after_instr(instr);
-      nir_ssa_def *old = &intr->dest.ssa;
+      nir_def *old = &intr->dest.ssa;
 
-      nir_ssa_def *lowered = nir_load_barycentric_at_sample(
+      nir_def *lowered = nir_load_barycentric_at_sample(
          b, nir_dest_bit_size(intr->dest), nir_load_sample_id(b),
          .interp_mode = nir_intrinsic_interp_mode(intr));
 
-      nir_ssa_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
+      nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
       return true;
    }
 
index 84a1386..c457f41 100644
@@ -43,7 +43,7 @@ tib_filter(const nir_instr *instr, UNUSED const void *_)
 static void
 store_tilebuffer(nir_builder *b, struct agx_tilebuffer_layout *tib,
                  enum pipe_format format, enum pipe_format logical_format,
-                 unsigned rt, nir_ssa_def *value, unsigned write_mask)
+                 unsigned rt, nir_def *value, unsigned write_mask)
 {
    /* The hardware cannot extend for a 32-bit format. Extend ourselves. */
    if (format == PIPE_FORMAT_R32_UINT && value->bit_size == 16) {
@@ -61,7 +61,7 @@ store_tilebuffer(nir_builder *b, struct agx_tilebuffer_layout *tib,
                              .format = format);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_tilebuffer(nir_builder *b, struct agx_tilebuffer_layout *tib,
                 uint8_t load_comps, uint8_t bit_size, unsigned rt,
                 enum pipe_format format, enum pipe_format logical_format)
@@ -74,7 +74,7 @@ load_tilebuffer(nir_builder *b, struct agx_tilebuffer_layout *tib,
       format = PIPE_FORMAT_R16_UINT;
 
    uint8_t offset_B = agx_tilebuffer_offset_B(tib, rt);
-   nir_ssa_def *res = nir_load_local_pixel_agx(
+   nir_def *res = nir_load_local_pixel_agx(
       b, MIN2(load_comps, comps), f16 ? 16 : bit_size,
       nir_imm_intN_t(b, ALL_SAMPLES, 16), .base = offset_B, .format = format);
 
@@ -100,7 +100,7 @@ load_tilebuffer(nir_builder *b, struct agx_tilebuffer_layout *tib,
  * texture/PBE descriptors are alternated for each render target. This is
  * ABI. If we need to make this more flexible for Vulkan later, we can.
  */
-static nir_ssa_def *
+static nir_def *
 handle_for_rt(nir_builder *b, unsigned base, unsigned rt, bool pbe,
               bool *bindless)
 {
@@ -117,7 +117,7 @@ handle_for_rt(nir_builder *b, unsigned base, unsigned rt, bool pbe,
 }
 
 static enum glsl_sampler_dim
-dim_for_rt(nir_builder *b, unsigned nr_samples, nir_ssa_def **sample)
+dim_for_rt(nir_builder *b, unsigned nr_samples, nir_def **sample)
 {
    if (nr_samples == 1) {
       *sample = nir_imm_intN_t(b, 0, 16);
@@ -129,7 +129,7 @@ dim_for_rt(nir_builder *b, unsigned nr_samples, nir_ssa_def **sample)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 image_coords(nir_builder *b)
 {
    return nir_pad_vector(b, nir_u2u32(b, nir_load_pixel_coord(b)), 4);
@@ -137,25 +137,25 @@ image_coords(nir_builder *b)
 
 static void
 store_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
-             enum pipe_format format, unsigned rt, nir_ssa_def *value)
+             enum pipe_format format, unsigned rt, nir_def *value)
 {
    /* Force bindless for multisampled image writes. It avoids the late lowering
     * needing a texture_base_agx sysval.
     */
    bool bindless = (nr_samples > 1);
-   nir_ssa_def *image = handle_for_rt(b, bindless_base, rt, true, &bindless);
-   nir_ssa_def *zero = nir_imm_intN_t(b, 0, 16);
-   nir_ssa_def *lod = zero;
+   nir_def *image = handle_for_rt(b, bindless_base, rt, true, &bindless);
+   nir_def *zero = nir_imm_intN_t(b, 0, 16);
+   nir_def *lod = zero;
 
-   nir_ssa_def *sample;
+   nir_def *sample;
    enum glsl_sampler_dim dim = dim_for_rt(b, nr_samples, &sample);
-   nir_ssa_def *coords = image_coords(b);
+   nir_def *coords = image_coords(b);
 
    nir_begin_invocation_interlock(b);
 
    if (nr_samples > 1) {
-      nir_ssa_def *coverage = nir_load_sample_mask(b);
-      nir_ssa_def *covered = nir_ubitfield_extract(
+      nir_def *coverage = nir_load_sample_mask(b);
+      nir_def *covered = nir_ubitfield_extract(
          b, coverage, nir_u2u32(b, sample), nir_imm_int(b, 1));
 
       nir_push_if(b, nir_ine_imm(b, covered, 0));
@@ -176,19 +176,19 @@ store_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
    b->shader->info.writes_memory = true;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
             uint8_t comps, uint8_t bit_size, unsigned rt,
             enum pipe_format format)
 {
    bool bindless = false;
-   nir_ssa_def *image = handle_for_rt(b, bindless_base, rt, false, &bindless);
-   nir_ssa_def *zero = nir_imm_intN_t(b, 0, 16);
-   nir_ssa_def *lod = zero;
+   nir_def *image = handle_for_rt(b, bindless_base, rt, false, &bindless);
+   nir_def *zero = nir_imm_intN_t(b, 0, 16);
+   nir_def *lod = zero;
 
-   nir_ssa_def *sample;
+   nir_def *sample;
    enum glsl_sampler_dim dim = dim_for_rt(b, nr_samples, &sample);
-   nir_ssa_def *coords = image_coords(b);
+   nir_def *coords = image_coords(b);
 
    /* Ensure pixels below this one have written out their results */
    nir_begin_invocation_interlock(b);
@@ -204,7 +204,7 @@ load_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 tib_impl(nir_builder *b, nir_instr *instr, void *data)
 {
    struct ctx *ctx = data;
@@ -250,7 +250,7 @@ tib_impl(nir_builder *b, nir_instr *instr, void *data)
       if (!write_mask)
          return NIR_LOWER_INSTR_PROGRESS_REPLACE;
 
-      nir_ssa_def *value = intr->src[0].ssa;
+      nir_def *value = intr->src[0].ssa;
 
       /* Trim to format as required by hardware */
       value = nir_trim_vector(b, intr->src[0].ssa, comps);
@@ -272,7 +272,7 @@ tib_impl(nir_builder *b, nir_instr *instr, void *data)
        * possible to encode in the hardware, delete them.
        */
       if (logical_format == PIPE_FORMAT_NONE) {
-         return nir_ssa_undef(b, intr->num_components, bit_size);
+         return nir_undef(b, intr->num_components, bit_size);
       } else if (tib->spilled[rt]) {
          *(ctx->translucent) = true;
 
index 5f6851c..1becc39 100644
@@ -78,8 +78,8 @@ agx_vbo_supports_format(enum pipe_format format)
    return agx_vbo_internal_format(format) != PIPE_FORMAT_NONE;
 }
 
-static nir_ssa_def *
-apply_swizzle_channel(nir_builder *b, nir_ssa_def *vec, unsigned swizzle,
+static nir_def *
+apply_swizzle_channel(nir_builder *b, nir_def *vec, unsigned swizzle,
                       bool is_int)
 {
    switch (swizzle) {
@@ -158,12 +158,11 @@ pass(struct nir_builder *b, nir_instr *instr, void *data)
    /* Calculate the element to fetch the vertex for. Divide the instance ID by
     * the divisor for per-instance data. Divisor=0 specifies per-vertex data.
     */
-   nir_ssa_def *el =
-      (attrib.divisor == 0)
-         ? nir_load_vertex_id(b)
-         : nir_udiv_imm(b, nir_load_instance_id(b), attrib.divisor);
+   nir_def *el = (attrib.divisor == 0)
+                    ? nir_load_vertex_id(b)
+                    : nir_udiv_imm(b, nir_load_instance_id(b), attrib.divisor);
 
-   nir_ssa_def *base = nir_load_vbo_base_agx(b, nir_imm_int(b, attrib.buf));
+   nir_def *base = nir_load_vbo_base_agx(b, nir_imm_int(b, attrib.buf));
 
    assert((stride % interchange_align) == 0 && "must be aligned");
    assert((offset % interchange_align) == 0 && "must be aligned");
@@ -183,11 +182,11 @@ pass(struct nir_builder *b, nir_instr *instr, void *data)
       stride_el = 1;
    }
 
-   nir_ssa_def *stride_offset_el =
+   nir_def *stride_offset_el =
       nir_iadd_imm(b, nir_imul_imm(b, el, stride_el), offset_el);
 
    /* Load the raw vector */
-   nir_ssa_def *memory = nir_load_constant_agx(
+   nir_def *memory = nir_load_constant_agx(
       b, interchange_comps, interchange_register_size, base, stride_offset_el,
       .format = interchange_format, .base = shift);
 
@@ -240,14 +239,14 @@ pass(struct nir_builder *b, nir_instr *instr, void *data)
    /* We now have a properly formatted vector of the components in memory. Apply
     * the format swizzle forwards to trim/pad/reorder as needed.
     */
-   nir_ssa_def *channels[4] = {NULL};
+   nir_def *channels[4] = {NULL};
    assert(nir_intrinsic_component(intr) == 0 && "unimplemented");
 
    for (unsigned i = 0; i < intr->num_components; ++i)
       channels[i] = apply_swizzle_channel(b, memory, desc->swizzle[i], is_int);
 
-   nir_ssa_def *logical = nir_vec(b, channels, intr->num_components);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, logical);
+   nir_def *logical = nir_vec(b, channels, intr->num_components);
+   nir_def_rewrite_uses(&intr->dest.ssa, logical);
    return true;
 }
 
index 323aa1f..aa9bc58 100644
@@ -310,7 +310,7 @@ ntq_add_pending_tmu_flush(struct v3d_compile *c,
 
                 nir_intrinsic_instr *store = nir_store_reg_for_def(&dest->ssa);
                 if (store != NULL) {
-                        nir_ssa_def *reg = store->src[1].ssa;
+                        nir_def *reg = store->src[1].ssa;
                         _mesa_set_add(c->tmu.outstanding_regs, reg);
                 }
         }
@@ -716,7 +716,7 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
 }
 
 static struct qreg *
-ntq_init_ssa_def(struct v3d_compile *c, nir_ssa_def *def)
+ntq_init_ssa_def(struct v3d_compile *c, nir_def *def)
 {
         struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                           def->num_components);
@@ -789,7 +789,7 @@ ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
 
                 qregs[chan] = result;
         } else {
-                nir_ssa_def *reg = store->src[1].ssa;
+                nir_def *reg = store->src[1].ssa;
                 ASSERTED nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
                 assert(nir_intrinsic_base(store) == 0);
                 assert(nir_intrinsic_num_array_elems(decl) == 0);
@@ -858,7 +858,7 @@ ntq_get_src(struct v3d_compile *c, nir_src src, int i)
                         entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                 }
         } else {
-                nir_ssa_def *reg = load->src[0].ssa;
+                nir_def *reg = load->src[0].ssa;
                 ASSERTED nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
                 assert(nir_intrinsic_base(load) == 0);
                 assert(nir_intrinsic_num_array_elems(decl) == 0);
@@ -2471,7 +2471,7 @@ ntq_setup_registers(struct v3d_compile *c, nir_function_impl *impl)
                 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                   array_len * num_components);
 
-                nir_ssa_def *nir_reg = &decl->dest.ssa;
+                nir_def *nir_reg = &decl->dest.ssa;
                 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
 
                 for (int i = 0; i < array_len * num_components; i++)
index b4b10f7..9041602 100644
@@ -135,7 +135,7 @@ v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
          * instruction writes and how many the instruction could produce.
          */
         p1_unpacked.return_words_of_texture_data =
-                nir_ssa_def_components_read(&instr->dest.ssa);
+                nir_def_components_read(&instr->dest.ssa);
 
         uint32_t p0_packed;
         V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_pack(NULL,
index f782493..06adc64 100644
@@ -253,15 +253,15 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
         nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->dest.ssa);
         if (store == NULL) {
                 p0_unpacked.return_words_of_texture_data =
-                        nir_ssa_def_components_read(&instr->dest.ssa);
+                        nir_def_components_read(&instr->dest.ssa);
         } else {
-                nir_ssa_def *reg = store->src[1].ssa;
+                nir_def *reg = store->src[1].ssa;
                 nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
                 unsigned reg_num_components =
                         nir_intrinsic_num_components(decl);
 
                 /* For the non-ssa case we don't have a full equivalent to
-                 * nir_ssa_def_components_read. This is a problem for the 16
+                 * nir_def_components_read. This is a problem for the 16
                  * bit case. nir_lower_tex will not change the destination as
                  * nir_tex_instr_dest_size will still return 4. The driver is
                  * just expected to not store on other channels, so we
index ee114f4..5fbc376 100644
@@ -622,7 +622,7 @@ struct v3d_compile {
         void *debug_output_data;
 
         /**
-         * Mapping from nir_register * or nir_ssa_def * to array of struct
+         * Mapping from nir_register * or nir_def * to array of struct
          * qreg for the values.
          */
         struct hash_table *def_ht;
index b4cbfa7..c0061d5 100644
@@ -60,14 +60,14 @@ v3d_gl_format_is_return_32(enum pipe_format format)
 /* Packs a 32-bit vector of colors in the range [0, (1 << bits[i]) - 1] to a
  * 32-bit SSA value, with as many channels as necessary to store all the bits
  */
-static nir_ssa_def *
-pack_bits(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
+static nir_def *
+pack_bits(nir_builder *b, nir_def *color, const unsigned *bits,
           int num_components, bool mask)
 {
-        nir_ssa_def *results[4];
+        nir_def *results[4];
         int offset = 0;
         for (int i = 0; i < num_components; i++) {
-                nir_ssa_def *chan = nir_channel(b, color, i);
+                nir_def *chan = nir_channel(b, color, i);
 
                 /* Channels being stored shouldn't cross a 32-bit boundary. */
                 assert((offset & ~31) == ((offset + bits[i] - 1) & ~31));
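The loop being built packs channels least-significant-bit first into consecutive 32-bit words; a plain-C equivalent of the accumulation, ignoring the optional masking step and assuming (per the assert) that no channel straddles a word boundary:

   #include <stdint.h>

   static void
   pack_bits_ref(const uint32_t *chans, const unsigned *bits,
                 int num_components, uint32_t *results /* pre-zeroed */)
   {
      int offset = 0;
      for (int i = 0; i < num_components; i++) {
         results[offset / 32] |= chans[i] << (offset % 32);
         offset += bits[i];
      }
   }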
@@ -103,10 +103,10 @@ v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
 
         b->cursor = nir_before_instr(&instr->instr);
 
-        nir_ssa_def *color = nir_trim_vector(b,
+        nir_def *color = nir_trim_vector(b,
                                              nir_ssa_for_src(b, instr->src[3], 4),
                                              num_components);
-        nir_ssa_def *formatted = NULL;
+        nir_def *formatted = NULL;
 
         if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
                 formatted = nir_format_pack_11f11f10f(b, color);
@@ -182,14 +182,14 @@ v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
 
         b->cursor = nir_after_instr(&instr->instr);
 
-        nir_ssa_def *result = &instr->dest.ssa;
+        nir_def *result = &instr->dest.ssa;
         if (util_format_is_pure_uint(format)) {
                 result = nir_format_unpack_uint(b, result, bits16, 4);
         } else if (util_format_is_pure_sint(format)) {
                 result = nir_format_unpack_sint(b, result, bits16, 4);
         } else {
-                nir_ssa_def *rg = nir_channel(b, result, 0);
-                nir_ssa_def *ba = nir_channel(b, result, 1);
+                nir_def *rg = nir_channel(b, result, 0);
+                nir_def *ba = nir_channel(b, result, 1);
                 result = nir_vec4(b,
                                   nir_unpack_half_2x16_split_x(b, rg),
                                   nir_unpack_half_2x16_split_y(b, rg),
@@ -197,7 +197,7 @@ v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
                                   nir_unpack_half_2x16_split_y(b, ba));
         }
 
-        nir_ssa_def_rewrite_uses_after(&instr->dest.ssa, result,
+        nir_def_rewrite_uses_after(&instr->dest.ssa, result,
                                        result->parent_instr);
 
         return true;
index a2c1335..9a90614 100644
@@ -62,7 +62,7 @@ struct v3d_nir_lower_io_state {
 
         BITSET_WORD varyings_stored[BITSET_WORDS(V3D_MAX_ANY_STAGE_INPUTS)];
 
-        nir_ssa_def *pos[4];
+        nir_def *pos[4];
 };
 
 static void
@@ -70,8 +70,8 @@ v3d_nir_emit_ff_vpm_outputs(struct v3d_compile *c, nir_builder *b,
                             struct v3d_nir_lower_io_state *state);
 
 static void
-v3d_nir_store_output(nir_builder *b, int base, nir_ssa_def *offset,
-                     nir_ssa_def *chan)
+v3d_nir_store_output(nir_builder *b, int base, nir_def *offset,
+                     nir_def *chan)
 {
         if (offset) {
                 /* When generating the VIR instruction, the base and the offset
@@ -134,13 +134,13 @@ v3d_nir_lower_vpm_output(struct v3d_compile *c, nir_builder *b,
         /* If this is a geometry shader we need to emit our outputs
          * to the current vertex offset in the VPM.
          */
-        nir_ssa_def *offset_reg =
+        nir_def *offset_reg =
                 c->s->info.stage == MESA_SHADER_GEOMETRY ?
                         nir_load_var(b, state->gs.output_offset_var) : NULL;
 
         int start_comp = nir_intrinsic_component(intr);
         unsigned location = nir_intrinsic_io_semantics(intr).location;
-        nir_ssa_def *src = nir_ssa_for_src(b, intr->src[0],
+        nir_def *src = nir_ssa_for_src(b, intr->src[0],
                                            intr->num_components);
         /* Save off the components of the position for the setup of VPM inputs
          * read by fixed function HW.
@@ -159,7 +159,7 @@ v3d_nir_lower_vpm_output(struct v3d_compile *c, nir_builder *b,
 
         if (location == VARYING_SLOT_LAYER) {
                 assert(c->s->info.stage == MESA_SHADER_GEOMETRY);
-                nir_ssa_def *header = nir_load_var(b, state->gs.header_var);
+                nir_def *header = nir_load_var(b, state->gs.header_var);
                 header = nir_iand_imm(b, header, 0xff00ffff);
 
                 /* From the GLES 3.2 spec:
@@ -180,9 +180,9 @@ v3d_nir_lower_vpm_output(struct v3d_compile *c, nir_builder *b,
                  * to 0 in that case (we always allocate tile state for at
                  * least one layer).
                  */
-                nir_ssa_def *fb_layers = nir_load_fb_layers_v3d(b, 32);
-                nir_ssa_def *cond = nir_ige(b, src, fb_layers);
-                nir_ssa_def *layer_id =
+                nir_def *fb_layers = nir_load_fb_layers_v3d(b, 32);
+                nir_def *cond = nir_ige(b, src, fb_layers);
+                nir_def *layer_id =
                         nir_bcsel(b, cond,
                                   nir_imm_int(b, 0),
                                   nir_ishl_imm(b, src, 16));
@@ -238,9 +238,9 @@ v3d_nir_lower_emit_vertex(struct v3d_compile *c, nir_builder *b,
 {
         b->cursor = nir_before_instr(&instr->instr);
 
-        nir_ssa_def *header = nir_load_var(b, state->gs.header_var);
-        nir_ssa_def *header_offset = nir_load_var(b, state->gs.header_offset_var);
-        nir_ssa_def *output_offset = nir_load_var(b, state->gs.output_offset_var);
+        nir_def *header = nir_load_var(b, state->gs.header_var);
+        nir_def *header_offset = nir_load_var(b, state->gs.header_offset_var);
+        nir_def *output_offset = nir_load_var(b, state->gs.output_offset_var);
 
         /* Emit fixed function outputs */
         v3d_nir_emit_ff_vpm_outputs(c, b, state);
@@ -476,16 +476,16 @@ v3d_nir_emit_ff_vpm_outputs(struct v3d_compile *c, nir_builder *b,
         /* If this is a geometry shader we need to emit our fixed function
          * outputs to the current vertex offset in the VPM.
          */
-        nir_ssa_def *offset_reg =
+        nir_def *offset_reg =
                 c->s->info.stage == MESA_SHADER_GEOMETRY ?
                         nir_load_var(b, state->gs.output_offset_var) : NULL;
 
         for (int i = 0; i < 4; i++) {
                 if (!state->pos[i])
-                        state->pos[i] = nir_ssa_undef(b, 1, 32);
+                        state->pos[i] = nir_undef(b, 1, 32);
         }
 
-        nir_ssa_def *rcp_wc = nir_frcp(b, state->pos[3]);
+        nir_def *rcp_wc = nir_frcp(b, state->pos[3]);
 
         if (state->pos_vpm_offset != -1) {
                 for (int i = 0; i < 4; i++) {
@@ -496,8 +496,8 @@ v3d_nir_emit_ff_vpm_outputs(struct v3d_compile *c, nir_builder *b,
 
         if (state->vp_vpm_offset != -1) {
                 for (int i = 0; i < 2; i++) {
-                        nir_ssa_def *pos;
-                        nir_ssa_def *scale;
+                        nir_def *pos;
+                        nir_def *scale;
                         pos = state->pos[i];
                         if (i == 0)
                                 scale = nir_load_viewport_x_scale(b);
@@ -523,7 +523,7 @@ v3d_nir_emit_ff_vpm_outputs(struct v3d_compile *c, nir_builder *b,
         }
 
         if (state->zs_vpm_offset != -1) {
-                nir_ssa_def *z = state->pos[2];
+                nir_def *z = state->pos[2];
                 z = nir_fmul(b, z, nir_load_viewport_z_scale(b));
                 z = nir_fmul(b, z, rcp_wc);
                 z = nir_fadd(b, z, nir_load_viewport_z_offset(b));
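The fixed-function math emitted here is the usual perspective divide plus viewport scale; a scalar sketch, assuming the x/y path multiplies by the viewport scale and rcp_wc in the same way the depth path shown above does:

   static void
   viewport_transform(const float pos[4], const float xy_scale[2],
                      float z_scale, float z_offset,
                      float out_xy[2], float *out_z)
   {
      float rcp_wc = 1.0f / pos[3]; /* nir_frcp(pos.w) */

      for (int i = 0; i < 2; i++)
         out_xy[i] = pos[i] * xy_scale[i] * rcp_wc;

      *out_z = pos[2] * z_scale * rcp_wc + z_offset;
   }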
@@ -599,11 +599,11 @@ emit_gs_vpm_output_header_prolog(struct v3d_compile *c, nir_builder *b,
          * offset variable by removing the one generic header slot that always
          * goes at the beginning of our header.
          */
-        nir_ssa_def *header_offset =
+        nir_def *header_offset =
                 nir_load_var(b, state->gs.header_offset_var);
-        nir_ssa_def *vertex_count =
+        nir_def *vertex_count =
                 nir_iadd_imm(b, header_offset, -1);
-        nir_ssa_def *header =
+        nir_def *header =
                 nir_ior_imm(b,
                             nir_ishl_imm(b, vertex_count,
                                          VERTEX_COUNT_OFFSET),
index f6f27a1..5795f6d 100644
@@ -42,11 +42,11 @@ lower_line_smooth_intrinsic(struct lower_line_smooth_state *state,
 {
         b->cursor = nir_before_instr(&intr->instr);
 
-        nir_ssa_def *one = nir_imm_float(b, 1.0f);
+        nir_def *one = nir_imm_float(b, 1.0f);
 
-        nir_ssa_def *coverage = nir_load_var(b, state->coverage);
+        nir_def *coverage = nir_load_var(b, state->coverage);
 
-        nir_ssa_def *new_val = nir_fmul(b, nir_vec4(b, one, one, one, coverage),
+        nir_def *new_val = nir_fmul(b, nir_vec4(b, one, one, one, coverage),
                                         intr->src[0].ssa);
 
         nir_instr_rewrite_src(&intr->instr,
@@ -89,21 +89,21 @@ initialise_coverage_var(struct lower_line_smooth_state *state,
 {
         nir_builder b = nir_builder_at(nir_before_block(nir_start_block(impl)));
 
-        nir_ssa_def *line_width = nir_load_line_width(&b);
+        nir_def *line_width = nir_load_line_width(&b);
 
-        nir_ssa_def *real_line_width = nir_load_aa_line_width(&b);
+        nir_def *real_line_width = nir_load_aa_line_width(&b);
 
         /* The line coord varies from 0.0 to 1.0 across the width of the line */
-        nir_ssa_def *line_coord = nir_load_line_coord(&b);
+        nir_def *line_coord = nir_load_line_coord(&b);
 
         /* fabs(line_coord - 0.5) * real_line_width */
-        nir_ssa_def *pixels_from_center =
+        nir_def *pixels_from_center =
                 nir_fmul(&b, real_line_width,
                          nir_fabs(&b, nir_fsub(&b, line_coord,
                                                nir_imm_float(&b, 0.5f))));
 
         /* 0.5 - 1/√2 * (pixels_from_center - line_width * 0.5) */
-        nir_ssa_def *coverage =
+        nir_def *coverage =
                 nir_fsub(&b,
                          nir_imm_float(&b, 0.5f),
                          nir_fmul(&b,
                                   nir_imm_float(&b, M_SQRT1_2),
                                   nir_fsub(&b, pixels_from_center,
                                            nir_fmul_imm(&b, line_width,
                                                         0.5f))));
                                                         0.5f))));
 
         /* Discard fragments that aren’t covered at all by the line */
-        nir_ssa_def *outside = nir_fle_imm(&b, coverage, 0.0f);
+        nir_def *outside = nir_fle_imm(&b, coverage, 0.0f);
 
         nir_discard_if(&b, outside);
 
         /* Clamp to at most 1.0. If it was less than 0.0 then the fragment will
          * be discarded so we don’t need to handle that.
          */
-        nir_ssa_def *clamped = nir_fmin(&b, coverage, nir_imm_float(&b, 1.0f));
+        nir_def *clamped = nir_fmin(&b, coverage, nir_imm_float(&b, 1.0f));
 
         nir_store_var(&b, state->coverage, clamped, 0x1 /* writemask */);
 }
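Putting the commented formulas together, the per-fragment coverage in scalar form (M_SQRT1_2 is 1/√2 from <math.h>):

   #include <math.h>

   static float
   line_coverage(float line_coord, float line_width, float real_line_width)
   {
      /* fabs(line_coord - 0.5) * real_line_width */
      float pixels_from_center = real_line_width * fabsf(line_coord - 0.5f);

      /* 0.5 - 1/√2 * (pixels_from_center - line_width * 0.5) */
      float coverage =
         0.5f - (float)M_SQRT1_2 * (pixels_from_center - line_width * 0.5f);

      /* coverage <= 0.0 is discarded; clamp the rest to at most 1.0 */
      return fminf(coverage, 1.0f);
   }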
index d482fdb..884a9fa 100644
@@ -71,9 +71,9 @@ static nir_intrinsic_instr *
 init_scalar_intrinsic(nir_builder *b,
                       nir_intrinsic_instr *intr,
                       uint32_t component,
-                      nir_ssa_def *offset,
+                      nir_def *offset,
                       uint32_t bit_size,
-                      nir_ssa_def **scalar_offset)
+                      nir_def **scalar_offset)
 {
 
         nir_intrinsic_instr *new_intr =
@@ -129,20 +129,20 @@ lower_load_bitsize(nir_builder *b,
 
         /* For global 2x32 we ignore Y component because it must be zero */
         unsigned offset_idx = offset_src(intr->intrinsic);
-        nir_ssa_def *offset = nir_ssa_for_src(b, intr->src[offset_idx], 1);
+        nir_def *offset = nir_ssa_for_src(b, intr->src[offset_idx], 1);
 
         /* Split vector store to multiple scalar loads */
-        nir_ssa_def *dest_components[4] = { NULL };
+        nir_def *dest_components[4] = { NULL };
         const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
         for (int component = 0; component < num_comp; component++) {
-                nir_ssa_def *scalar_offset;
+                nir_def *scalar_offset;
                 nir_intrinsic_instr *new_intr =
                         init_scalar_intrinsic(b, intr, component, offset,
                                               bit_size, &scalar_offset);
 
                 for (unsigned i = 0; i < info->num_srcs; i++) {
                         if (i == offset_idx) {
-                                nir_ssa_def *final_offset;
+                                nir_def *final_offset;
                                 final_offset = intr->intrinsic != nir_intrinsic_load_global_2x32 ?
                                         scalar_offset :
                                         nir_vec2(b, scalar_offset,
@@ -160,8 +160,8 @@ lower_load_bitsize(nir_builder *b,
                 nir_builder_instr_insert(b, &new_intr->instr);
         }
 
-        nir_ssa_def *new_dst = nir_vec(b, dest_components, num_comp);
-        nir_ssa_def_rewrite_uses(&intr->dest.ssa, new_dst);
+        nir_def *new_dst = nir_vec(b, dest_components, num_comp);
+        nir_def_rewrite_uses(&intr->dest.ssa, new_dst);
 
         nir_instr_remove(&intr->instr);
         return true;
@@ -181,13 +181,13 @@ lower_store_bitsize(nir_builder *b,
         if (nir_src_bit_size(intr->src[value_idx]) == 32)
                 return false;
 
-        nir_ssa_def *value = nir_ssa_for_src(b, intr->src[value_idx], num_comp);
+        nir_def *value = nir_ssa_for_src(b, intr->src[value_idx], num_comp);
 
         b->cursor = nir_before_instr(&intr->instr);
 
         /* For global 2x32 we ignore Y component because it must be zero */
         unsigned offset_idx = offset_src(intr->intrinsic);
-        nir_ssa_def *offset = nir_ssa_for_src(b, intr->src[offset_idx], 1);
+        nir_def *offset = nir_ssa_for_src(b, intr->src[offset_idx], 1);
 
         /* Split vector store to multiple scalar stores */
         const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
@@ -195,7 +195,7 @@ lower_store_bitsize(nir_builder *b,
         while (wrmask) {
                 unsigned component = ffs(wrmask) - 1;
 
-                nir_ssa_def *scalar_offset;
+                nir_def *scalar_offset;
                 nir_intrinsic_instr *new_intr =
                         init_scalar_intrinsic(b, intr, component, offset,
                                               value->bit_size, &scalar_offset);
@@ -204,11 +204,11 @@ lower_store_bitsize(nir_builder *b,
 
                 for (unsigned i = 0; i < info->num_srcs; i++) {
                         if (i == value_idx) {
-                                nir_ssa_def *scalar_value =
+                                nir_def *scalar_value =
                                         nir_channels(b, value, 1 << component);
                                 new_intr->src[i] = nir_src_for_ssa(scalar_value);
                         } else if (i == offset_idx) {
-                                nir_ssa_def *final_offset;
+                                nir_def *final_offset;
                                 final_offset = intr->intrinsic != nir_intrinsic_store_global_2x32 ?
                                         scalar_offset :
                                         nir_vec2(b, scalar_offset,
index 932d94d..09c636d 100644
@@ -36,8 +36,8 @@
 #include "v3d_compiler.h"
 
 
-typedef nir_ssa_def *(*nir_pack_func)(nir_builder *b, nir_ssa_def *c);
-typedef nir_ssa_def *(*nir_unpack_func)(nir_builder *b, nir_ssa_def *c);
+typedef nir_def *(*nir_pack_func)(nir_builder *b, nir_def *c);
+typedef nir_def *(*nir_unpack_func)(nir_builder *b, nir_def *c);
 
 static bool
 logicop_depends_on_dst_color(int logicop_func)
@@ -53,9 +53,9 @@ logicop_depends_on_dst_color(int logicop_func)
         }
 }
 
-static nir_ssa_def *
+static nir_def *
 v3d_logicop(nir_builder *b, int logicop_func,
-            nir_ssa_def *src, nir_ssa_def *dst)
+            nir_def *src, nir_def *dst)
 {
         switch (logicop_func) {
         case PIPE_LOGICOP_CLEAR:
@@ -96,8 +96,8 @@ v3d_logicop(nir_builder *b, int logicop_func,
         }
 }
 
-static nir_ssa_def *
-v3d_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
+static nir_def *
+v3d_nir_get_swizzled_channel(nir_builder *b, nir_def **srcs, int swiz)
 {
         switch (swiz) {
         default:
@@ -116,48 +116,48 @@ v3d_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
         }
 }
 
-static nir_ssa_def *
-v3d_nir_swizzle_and_pack(nir_builder *b, nir_ssa_def **chans,
+static nir_def *
+v3d_nir_swizzle_and_pack(nir_builder *b, nir_def **chans,
                          const uint8_t *swiz, nir_pack_func pack_func)
 {
-        nir_ssa_def *c[4];
+        nir_def *c[4];
         for (int i = 0; i < 4; i++)
                 c[i] = v3d_nir_get_swizzled_channel(b, chans, swiz[i]);
 
         return pack_func(b, nir_vec4(b, c[0], c[1], c[2], c[3]));
 }
 
-static nir_ssa_def *
-v3d_nir_unpack_and_swizzle(nir_builder *b, nir_ssa_def *packed,
+static nir_def *
+v3d_nir_unpack_and_swizzle(nir_builder *b, nir_def *packed,
                            const uint8_t *swiz, nir_unpack_func unpack_func)
 {
-        nir_ssa_def *unpacked = unpack_func(b, packed);
+        nir_def *unpacked = unpack_func(b, packed);
 
-        nir_ssa_def *unpacked_chans[4];
+        nir_def *unpacked_chans[4];
         for (int i = 0; i < 4; i++)
                 unpacked_chans[i] = nir_channel(b, unpacked, i);
 
-        nir_ssa_def *c[4];
+        nir_def *c[4];
         for (int i = 0; i < 4; i++)
                 c[i] = v3d_nir_get_swizzled_channel(b, unpacked_chans, swiz[i]);
 
         return nir_vec4(b, c[0], c[1], c[2], c[3]);
 }
 
-static nir_ssa_def *
-pack_unorm_rgb10a2(nir_builder *b, nir_ssa_def *c)
+static nir_def *
+pack_unorm_rgb10a2(nir_builder *b, nir_def *c)
 {
         static const unsigned bits[4] = { 10, 10, 10, 2 };
-        nir_ssa_def *unorm = nir_format_float_to_unorm(b, c, bits);
+        nir_def *unorm = nir_format_float_to_unorm(b, c, bits);
 
-        nir_ssa_def *chans[4];
+        nir_def *chans[4];
         for (int i = 0; i < 4; i++)
                 chans[i] = nir_channel(b, unorm, i);
 
-        nir_ssa_def *result = nir_mov(b, chans[0]);
+        nir_def *result = nir_mov(b, chans[0]);
         int offset = bits[0];
         for (int i = 1; i < 4; i++) {
-                nir_ssa_def *shifted_chan =
+                nir_def *shifted_chan =
                         nir_ishl_imm(b, chans[i], offset);
                 result = nir_ior(b, result, shifted_chan);
                 offset += bits[i];
@@ -165,8 +165,8 @@ pack_unorm_rgb10a2(nir_builder *b, nir_ssa_def *c)
         return result;
 }
 
-static nir_ssa_def *
-unpack_unorm_rgb10a2(nir_builder *b, nir_ssa_def *c)
+static nir_def *
+unpack_unorm_rgb10a2(nir_builder *b, nir_def *c)
 {
         static const unsigned bits[4] = { 10, 10, 10, 2 };
         const unsigned masks[4] = { BITFIELD_MASK(bits[0]),
@@ -174,9 +174,9 @@ unpack_unorm_rgb10a2(nir_builder *b, nir_ssa_def *c)
                                     BITFIELD_MASK(bits[2]),
                                     BITFIELD_MASK(bits[3]) };
 
-        nir_ssa_def *chans[4];
+        nir_def *chans[4];
         for (int i = 0; i < 4; i++) {
-                nir_ssa_def *unorm = nir_iand_imm(b, c, masks[i]);
+                nir_def *unorm = nir_iand_imm(b, c, masks[i]);
                 chans[i] = nir_format_unorm_to_float(b, unorm, &bits[i]);
                 c = nir_ushr_imm(b, c, bits[i]);
         }
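For reference, a CPU-side pack matching the 10/10/10/2 unorm layout above, red in the low bits; the conversion follows the usual round(clamp(x, 0, 1) * max) unorm rule:

   #include <stdint.h>

   static uint32_t
   pack_unorm_rgb10a2_ref(const float c[4])
   {
      static const unsigned bits[4] = { 10, 10, 10, 2 };
      uint32_t result = 0;
      unsigned offset = 0;

      for (int i = 0; i < 4; i++) {
         float f = c[i] < 0.0f ? 0.0f : (c[i] > 1.0f ? 1.0f : c[i]);
         uint32_t max = (1u << bits[i]) - 1;
         uint32_t chan = (uint32_t)(f * max + 0.5f);

         result |= chan << offset;
         offset += bits[i];
      }
      return result;
   }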
@@ -201,13 +201,13 @@ v3d_get_format_swizzle_for_rt(struct v3d_compile *c, int rt)
         }
 }
 
-static nir_ssa_def *
+static nir_def *
 v3d_nir_get_tlb_color(nir_builder *b, struct v3d_compile *c, int rt, int sample)
 {
         uint32_t num_components =
                 util_format_get_nr_components(c->fs_key->color_fmt[rt].format);
 
-        nir_ssa_def *color[4];
+        nir_def *color[4];
         for (int i = 0; i < 4; i++) {
                 if (i < num_components) {
                         color[i] =
@@ -222,17 +222,17 @@ v3d_nir_get_tlb_color(nir_builder *b, struct v3d_compile *c, int rt, int sample)
         return nir_vec4(b, color[0], color[1], color[2], color[3]);
 }
 
-static nir_ssa_def *
+static nir_def *
 v3d_emit_logic_op_raw(struct v3d_compile *c, nir_builder *b,
-                      nir_ssa_def **src_chans, nir_ssa_def **dst_chans,
+                      nir_def **src_chans, nir_def **dst_chans,
                       int rt, int sample)
 {
         const uint8_t *fmt_swz = v3d_get_format_swizzle_for_rt(c, rt);
 
-        nir_ssa_def *op_res[4];
+        nir_def *op_res[4];
         for (int i = 0; i < 4; i++) {
-                nir_ssa_def *src = src_chans[i];
-                nir_ssa_def *dst =
+                nir_def *src = src_chans[i];
+                nir_def *dst =
                         v3d_nir_get_swizzled_channel(b, dst_chans, fmt_swz[i]);
                 op_res[i] = v3d_logicop(b, c->fs_key->logicop_func, src, dst);
 
@@ -250,40 +250,40 @@ v3d_emit_logic_op_raw(struct v3d_compile *c, nir_builder *b,
                 }
         }
 
-        nir_ssa_def *r[4];
+        nir_def *r[4];
         for (int i = 0; i < 4; i++)
                 r[i] = v3d_nir_get_swizzled_channel(b, op_res, fmt_swz[i]);
 
         return nir_vec4(b, r[0], r[1], r[2], r[3]);
 }
 
-static nir_ssa_def *
+static nir_def *
 v3d_emit_logic_op_unorm(struct v3d_compile *c, nir_builder *b,
-                        nir_ssa_def **src_chans, nir_ssa_def **dst_chans,
+                        nir_def **src_chans, nir_def **dst_chans,
                         int rt, int sample,
                         nir_pack_func pack_func, nir_unpack_func unpack_func)
 {
         static const uint8_t src_swz[4] = { 0, 1, 2, 3 };
-        nir_ssa_def *packed_src =
+        nir_def *packed_src =
                 v3d_nir_swizzle_and_pack(b, src_chans, src_swz, pack_func);
 
         const uint8_t *fmt_swz = v3d_get_format_swizzle_for_rt(c, rt);
-        nir_ssa_def *packed_dst =
+        nir_def *packed_dst =
                 v3d_nir_swizzle_and_pack(b, dst_chans, fmt_swz, pack_func);
 
-        nir_ssa_def *packed_result =
+        nir_def *packed_result =
                 v3d_logicop(b, c->fs_key->logicop_func, packed_src, packed_dst);
 
         return v3d_nir_unpack_and_swizzle(b, packed_result, fmt_swz, unpack_func);
 }
 
-static nir_ssa_def *
+static nir_def *
 v3d_nir_emit_logic_op(struct v3d_compile *c, nir_builder *b,
-                      nir_ssa_def *src, int rt, int sample)
+                      nir_def *src, int rt, int sample)
 {
-        nir_ssa_def *dst = v3d_nir_get_tlb_color(b, c, rt, sample);
+        nir_def *dst = v3d_nir_get_tlb_color(b, c, rt, sample);
 
-        nir_ssa_def *src_chans[4], *dst_chans[4];
+        nir_def *src_chans[4], *dst_chans[4];
         for (unsigned i = 0; i < 4; i++) {
                 src_chans[i] = nir_channel(b, src, i);
                 dst_chans[i] = nir_channel(b, dst, i);
@@ -306,7 +306,7 @@ v3d_nir_emit_logic_op(struct v3d_compile *c, nir_builder *b,
 
 static void
 v3d_emit_ms_output(nir_builder *b,
-                   nir_ssa_def *color, nir_src *offset,
+                   nir_def *color, nir_src *offset,
                    nir_alu_type type, int rt, int sample)
 {
         nir_store_tlb_sample_color_v3d(b, color, nir_imm_int(b, rt), .base = sample, .component = 0, .src_type = type);
@@ -318,7 +318,7 @@ v3d_nir_lower_logic_op_instr(struct v3d_compile *c,
                              nir_intrinsic_instr *intr,
                              int rt)
 {
-        nir_ssa_def *frag_color = intr->src[0].ssa;
+        nir_def *frag_color = intr->src[0].ssa;
 
 
         const int logic_op = c->fs_key->logicop_func;
@@ -328,7 +328,7 @@ v3d_nir_lower_logic_op_instr(struct v3d_compile *c,
                 nir_src *offset = &intr->src[1];
                 nir_alu_type type = nir_intrinsic_src_type(intr);
                 for (int i = 0; i < V3D_MAX_SAMPLES; i++) {
-                        nir_ssa_def *sample =
+                        nir_def *sample =
                                 v3d_nir_emit_logic_op(c, b, frag_color, rt, i);
 
                         v3d_emit_ms_output(b, sample, offset, type, rt, i);
@@ -336,7 +336,7 @@ v3d_nir_lower_logic_op_instr(struct v3d_compile *c,
 
                 nir_instr_remove(&intr->instr);
         } else {
-                nir_ssa_def *result =
+                nir_def *result =
                         v3d_nir_emit_logic_op(c, b, frag_color, rt, 0);
 
                 nir_instr_rewrite_src(&intr->instr, &intr->src[0],
index d2fddcb..4dbc9b3 100644
  * writemasks in the process.
  */
 
-static nir_ssa_def *
+static nir_def *
 v3d_nir_scratch_offset(nir_builder *b, nir_intrinsic_instr *instr)
 {
         bool is_store = instr->intrinsic == nir_intrinsic_store_scratch;
-        nir_ssa_def *offset = nir_ssa_for_src(b, instr->src[is_store ? 1 : 0], 1);
+        nir_def *offset = nir_ssa_for_src(b, instr->src[is_store ? 1 : 0], 1);
 
         assert(nir_intrinsic_align_mul(instr) >= 4);
         assert(nir_intrinsic_align_offset(instr) == 0);
@@ -55,11 +55,11 @@ v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr)
 {
         b->cursor = nir_before_instr(&instr->instr);
 
-        nir_ssa_def *offset = v3d_nir_scratch_offset(b,instr);
+        nir_def *offset = v3d_nir_scratch_offset(b,instr);
 
-        nir_ssa_def *chans[NIR_MAX_VEC_COMPONENTS];
+        nir_def *chans[NIR_MAX_VEC_COMPONENTS];
         for (int i = 0; i < instr->num_components; i++) {
-                nir_ssa_def *chan_offset =
+                nir_def *chan_offset =
                         nir_iadd_imm(b, offset, V3D_CHANNELS * i * 4);
 
                 nir_intrinsic_instr *chan_instr =
@@ -77,8 +77,8 @@ v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr)
                 chans[i] = &chan_instr->dest.ssa;
         }
 
-        nir_ssa_def *result = nir_vec(b, chans, instr->num_components);
-        nir_ssa_def_rewrite_uses(&instr->dest.ssa, result);
+        nir_def *result = nir_vec(b, chans, instr->num_components);
+        nir_def_rewrite_uses(&instr->dest.ssa, result);
         nir_instr_remove(&instr->instr);
 }
 
@@ -87,15 +87,15 @@ v3d_nir_lower_store_scratch(nir_builder *b, nir_intrinsic_instr *instr)
 {
         b->cursor = nir_before_instr(&instr->instr);
 
-        nir_ssa_def *offset = v3d_nir_scratch_offset(b, instr);
-        nir_ssa_def *value = nir_ssa_for_src(b, instr->src[0],
+        nir_def *offset = v3d_nir_scratch_offset(b, instr);
+        nir_def *value = nir_ssa_for_src(b, instr->src[0],
                                              instr->num_components);
 
         for (int i = 0; i < instr->num_components; i++) {
                 if (!(nir_intrinsic_write_mask(instr) & (1 << i)))
                         continue;
 
-                nir_ssa_def *chan_offset =
+                nir_def *chan_offset =
                         nir_iadd_imm(b, offset, V3D_CHANNELS * i * 4);
 
                 nir_intrinsic_instr *chan_instr =
index 19fc4b3..09fa922 100644
  * 2x2 quad.
  */
 
-static nir_ssa_def *
+static nir_def *
 v3d_nir_lower_txf_ms_instr(nir_builder *b, nir_instr *in_instr, void *data)
 {
         nir_tex_instr *instr = nir_instr_as_tex(in_instr);
 
         b->cursor = nir_before_instr(&instr->instr);
 
-        nir_ssa_def *coord = nir_steal_tex_src(instr, nir_tex_src_coord);
-        nir_ssa_def *sample = nir_steal_tex_src(instr, nir_tex_src_ms_index);
+        nir_def *coord = nir_steal_tex_src(instr, nir_tex_src_coord);
+        nir_def *sample = nir_steal_tex_src(instr, nir_tex_src_ms_index);
 
-        nir_ssa_def *one = nir_imm_int(b, 1);
-        nir_ssa_def *x = nir_iadd(b,
+        nir_def *one = nir_imm_int(b, 1);
+        nir_def *x = nir_iadd(b,
                                   nir_ishl(b, nir_channel(b, coord, 0), one),
                                   nir_iand(b, sample, one));
-        nir_ssa_def *y = nir_iadd(b,
+        nir_def *y = nir_iadd(b,
                                   nir_ishl(b, nir_channel(b, coord, 1), one),
                                   nir_iand(b, nir_ushr(b, sample, one), one));
         if (instr->is_array)
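The mapping above places the four samples of each texel in a 2x2 quad of a double-width, double-height texture; in scalar form:

   #include <stdint.h>

   static void
   txf_ms_texel(uint32_t coord_x, uint32_t coord_y, uint32_t sample,
                uint32_t *x, uint32_t *y)
   {
      *x = (coord_x << 1) + (sample & 1);        /* sample bit 0 -> x */
      *y = (coord_y << 1) + ((sample >> 1) & 1); /* sample bit 1 -> y */
   }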
index 36ba386..1599aad 100644
@@ -1481,8 +1481,8 @@ lower_load_num_subgroups(struct v3d_compile *c,
                 DIV_ROUND_UP(c->s->info.workgroup_size[0] *
                              c->s->info.workgroup_size[1] *
                              c->s->info.workgroup_size[2], V3D_CHANNELS);
-        nir_ssa_def *result = nir_imm_int(b, num_subgroups);
-        nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);
+        nir_def *result = nir_imm_int(b, num_subgroups);
+        nir_def_rewrite_uses(&intr->dest.ssa, result);
         nir_instr_remove(&intr->instr);
 }
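As a worked example: an 8x8x1 workgroup on V3D's 16-wide execution (V3D_CHANNELS = 16) gives DIV_ROUND_UP(64, 16) = 4, so the load here folds to nir_imm_int(b, 4).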
 
index 5543888..a3aad37 100644
@@ -33,16 +33,16 @@ get_set_event_cs()
    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options,
                                                   "set event cs");
 
-   nir_ssa_def *buf =
+   nir_def *buf =
       nir_vulkan_resource_index(&b, 2, 32, nir_imm_int(&b, 0),
                                 .desc_set = 0,
                                 .binding = 0,
                                 .desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
 
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 0, .range = 4);
 
-   nir_ssa_def *value =
+   nir_def *value =
       nir_load_push_constant(&b, 1, 8, nir_imm_int(&b, 0), .base = 4, .range = 4);
 
    nir_store_ssbo(&b, value, buf, offset,
@@ -58,19 +58,19 @@ get_wait_event_cs()
    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options,
                                                   "wait event cs");
 
-   nir_ssa_def *buf =
+   nir_def *buf =
       nir_vulkan_resource_index(&b, 2, 32, nir_imm_int(&b, 0),
                                 .desc_set = 0,
                                 .binding = 0,
                                 .desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
 
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 0, .range = 4);
 
    nir_loop *loop = nir_push_loop(&b);
-      nir_ssa_def *load =
+      nir_def *load =
          nir_load_ssbo(&b, 1, 8, buf, offset, .access = 0, .align_mul = 4);
-      nir_ssa_def *value = nir_i2i32(&b, load);
+      nir_def *value = nir_i2i32(&b, load);
 
       nir_if *if_stmt = nir_push_if(&b, nir_ieq_imm(&b, value, 1));
       nir_jump(&b, nir_jump_break);
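
The hunk ends before the loop is closed; a minimal sketch of the complete polling pattern, assuming the usual nir_pop_if()/nir_pop_loop() pairing:

   nir_loop *loop = nir_push_loop(&b);
      nir_def *load =
         nir_load_ssbo(&b, 1, 8, buf, offset, .access = 0, .align_mul = 4);
      nir_def *value = nir_i2i32(&b, load);

      /* Break out of the loop once the event value reads 1... */
      nir_if *if_stmt = nir_push_if(&b, nir_ieq_imm(&b, value, 1));
         nir_jump(&b, nir_jump_break);
      nir_pop_if(&b, if_stmt);
      /* ...otherwise fall through and poll the SSBO again. */
   nir_pop_loop(&b, loop);
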
index a200298..a0d3ec3 100644 (file)
@@ -329,7 +329,7 @@ get_clear_rect_vs()
       nir_variable_create(b.shader, nir_var_shader_out, vec4, "gl_Position");
    vs_out_pos->data.location = VARYING_SLOT_POS;
 
-   nir_ssa_def *pos = nir_gen_rect_vertices(&b, NULL, NULL);
+   nir_def *pos = nir_gen_rect_vertices(&b, NULL, NULL);
    nir_store_var(&b, vs_out_pos, pos, 0xf);
 
    return b.shader;
@@ -386,7 +386,7 @@ get_clear_rect_gs(uint32_t push_constant_layer_base)
       nir_copy_deref(&b, nir_build_deref_var(&b, gs_out_pos), in_pos_i);
 
       /* gl_Layer from push constants */
-      nir_ssa_def *layer =
+      nir_def *layer =
          nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0),
                                 .base = push_constant_layer_base, .range = 4);
       nir_store_var(&b, gs_out_layer, layer, 0x1);
@@ -414,7 +414,7 @@ get_color_clear_rect_fs(uint32_t rt_idx, VkFormat format)
       nir_variable_create(b.shader, nir_var_shader_out, fs_out_type, "out_color");
    fs_out_color->data.location = FRAG_RESULT_DATA0 + rt_idx;
 
-   nir_ssa_def *color_load = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .base = 0, .range = 16);
+   nir_def *color_load = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .base = 0, .range = 16);
    nir_store_var(&b, fs_out_color, color_load, 0xf);
 
    return b.shader;
@@ -432,7 +432,7 @@ get_depth_clear_rect_fs()
                           "out_depth");
    fs_out_depth->data.location = FRAG_RESULT_DEPTH;
 
-   nir_ssa_def *depth_load =
+   nir_def *depth_load =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 0, .range = 4);
 
    nir_store_var(&b, fs_out_depth, depth_load, 0x1);
index 02eb8a7..318b1e4 100644 (file)
@@ -2151,7 +2151,7 @@ get_texel_buffer_copy_vs()
                           glsl_vec4_type(), "gl_Position");
    vs_out_pos->data.location = VARYING_SLOT_POS;
 
-   nir_ssa_def *pos = nir_gen_rect_vertices(&b, NULL, NULL);
+   nir_def *pos = nir_gen_rect_vertices(&b, NULL, NULL);
    nir_store_var(&b, vs_out_pos, pos, 0xf);
 
    return b.shader;
@@ -2208,7 +2208,7 @@ get_texel_buffer_copy_gs()
       nir_copy_deref(&b, nir_build_deref_var(&b, gs_out_pos), in_pos_i);
 
       /* gl_Layer from push constants */
-      nir_ssa_def *layer =
+      nir_def *layer =
          nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0),
                                 .base = TEXEL_BUFFER_COPY_GS_LAYER_PC_OFFSET,
                                 .range = 4);
@@ -2222,7 +2222,7 @@ get_texel_buffer_copy_gs()
    return nir;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_frag_coord(nir_builder *b)
 {
    nir_foreach_shader_in_variable(var, b->shader) {
@@ -2286,24 +2286,24 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format,
    /* Load the box describing the pixel region we want to copy from the
     * texel buffer.
     */
-   nir_ssa_def *box =
+   nir_def *box =
       nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0),
                              .base = TEXEL_BUFFER_COPY_FS_BOX_PC_OFFSET,
                              .range = 16);
 
    /* Load the buffer stride (this comes in texel units) */
-   nir_ssa_def *stride =
+   nir_def *stride =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0),
                              .base = TEXEL_BUFFER_COPY_FS_STRIDE_PC_OFFSET,
                              .range = 4);
 
    /* Load the buffer offset (this comes in texel units) */
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0),
                              .base = TEXEL_BUFFER_COPY_FS_OFFSET_PC_OFFSET,
                              .range = 4);
 
-   nir_ssa_def *coord = nir_f2i32(&b, load_frag_coord(&b));
+   nir_def *coord = nir_f2i32(&b, load_frag_coord(&b));
 
    /* Load pixel data from texel buffer based on the x,y offset of the pixel
     * within the box. Texel buffers are 1D arrays of texels.
@@ -2313,17 +2313,17 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format,
     * texel buffer should always be within its bounds and we don't need
     * to add a check for that here.
     */
-   nir_ssa_def *x_offset =
+   nir_def *x_offset =
       nir_isub(&b, nir_channel(&b, coord, 0),
                    nir_channel(&b, box, 0));
-   nir_ssa_def *y_offset =
+   nir_def *y_offset =
       nir_isub(&b, nir_channel(&b, coord, 1),
                    nir_channel(&b, box, 1));
-   nir_ssa_def *texel_offset =
+   nir_def *texel_offset =
       nir_iadd(&b, nir_iadd(&b, offset, x_offset),
                    nir_imul(&b, y_offset, stride));
 
-   nir_ssa_def *tex_deref = &nir_build_deref_var(&b, sampler)->dest.ssa;
+   nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->dest.ssa;
    nir_tex_instr *tex = nir_tex_instr_create(b.shader, 2);
    tex->sampler_dim = GLSL_SAMPLER_DIM_BUF;
    tex->op = nir_texop_txf;
@@ -2344,7 +2344,7 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format,
       component_swizzle_to_nir_swizzle(VK_COMPONENT_SWIZZLE_B, cswizzle->b);
    swiz[3] =
       component_swizzle_to_nir_swizzle(VK_COMPONENT_SWIZZLE_A, cswizzle->a);
-   nir_ssa_def *s = nir_swizzle(&b, &tex->dest.ssa, swiz, 4);
+   nir_def *s = nir_swizzle(&b, &tex->dest.ssa, swiz, 4);
    nir_store_var(&b, fs_out_color, s, 0xf);
 
    return b.shader;
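
The fragment shader above indexes the 1D texel buffer as offset + (frag.x - box.x) + (frag.y - box.y) * stride. A standalone illustration with hypothetical values (not from the commit):

   #include <stdio.h>

   int main(void)
   {
      int box_x = 16, box_y = 8;      /* copy box origin, in pixels */
      int offset = 100, stride = 256; /* buffer offset/stride, in texels */
      int frag_x = 20, frag_y = 10;   /* truncated gl_FragCoord */

      int texel = offset + (frag_x - box_x) + (frag_y - box_y) * stride;
      printf("texel index: %d\n", texel); /* 100 + 4 + 2 * 256 = 616 */
      return 0;
   }
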
@@ -3543,16 +3543,16 @@ create_blit_render_pass(struct v3dv_device *device,
    return result == VK_SUCCESS;
 }
 
-static nir_ssa_def *
+static nir_def *
 gen_tex_coords(nir_builder *b)
 {
-   nir_ssa_def *tex_box =
+   nir_def *tex_box =
       nir_load_push_constant(b, 4, 32, nir_imm_int(b, 0), .base = 0, .range = 16);
 
-   nir_ssa_def *tex_z =
+   nir_def *tex_z =
       nir_load_push_constant(b, 1, 32, nir_imm_int(b, 0), .base = 16, .range = 4);
 
-   nir_ssa_def *vertex_id = nir_load_vertex_id(b);
+   nir_def *vertex_id = nir_load_vertex_id(b);
 
    /* vertex 0: src0_x, src0_y
     * vertex 1: src0_x, src1_y
@@ -3565,11 +3565,11 @@ gen_tex_coords(nir_builder *b)
     * channel 1 is vertex id & 1 ? src1_y : src0_y
     */
 
-   nir_ssa_def *one = nir_imm_int(b, 1);
-   nir_ssa_def *c0cmp = nir_ilt_imm(b, vertex_id, 2);
-   nir_ssa_def *c1cmp = nir_ieq(b, nir_iand(b, vertex_id, one), one);
+   nir_def *one = nir_imm_int(b, 1);
+   nir_def *c0cmp = nir_ilt_imm(b, vertex_id, 2);
+   nir_def *c1cmp = nir_ieq(b, nir_iand(b, vertex_id, one), one);
 
-   nir_ssa_def *comp[4];
+   nir_def *comp[4];
    comp[0] = nir_bcsel(b, c0cmp,
                        nir_channel(b, tex_box, 0),
                        nir_channel(b, tex_box, 2));
@@ -3582,9 +3582,9 @@ gen_tex_coords(nir_builder *b)
    return nir_vec(b, comp, 4);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_nir_tex_op_read(struct nir_builder *b,
-                      nir_ssa_def *tex_pos,
+                      nir_def *tex_pos,
                       enum glsl_base_type tex_type,
                       enum glsl_sampler_dim dim)
 {
@@ -3597,7 +3597,7 @@ build_nir_tex_op_read(struct nir_builder *b,
    sampler->data.descriptor_set = 0;
    sampler->data.binding = 0;
 
-   nir_ssa_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
+   nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
    nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3);
    tex->sampler_dim = dim;
    tex->op = nir_texop_tex;
@@ -3613,13 +3613,13 @@ build_nir_tex_op_read(struct nir_builder *b,
    return &tex->dest.ssa;
 }
 
-static nir_ssa_def *
+static nir_def *
 build_nir_tex_op_ms_fetch_sample(struct nir_builder *b,
                                  nir_variable *sampler,
-                                 nir_ssa_def *tex_deref,
+                                 nir_def *tex_deref,
                                  enum glsl_base_type tex_type,
-                                 nir_ssa_def *tex_pos,
-                                 nir_ssa_def *sample_idx)
+                                 nir_def *tex_pos,
+                                 nir_def *sample_idx)
 {
    nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3);
    tex->sampler_dim = GLSL_SAMPLER_DIM_MS;
@@ -3637,9 +3637,9 @@ build_nir_tex_op_ms_fetch_sample(struct nir_builder *b,
 }
 
 /* Fetches all samples at the given position and averages them */
-static nir_ssa_def *
+static nir_def *
 build_nir_tex_op_ms_resolve(struct nir_builder *b,
-                            nir_ssa_def *tex_pos,
+                            nir_def *tex_pos,
                             enum glsl_base_type tex_type,
                             VkSampleCountFlagBits src_samples)
 {
@@ -3653,10 +3653,10 @@ build_nir_tex_op_ms_resolve(struct nir_builder *b,
 
    const bool is_int = glsl_base_type_is_integer(tex_type);
 
-   nir_ssa_def *tmp = NULL;
-   nir_ssa_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
+   nir_def *tmp = NULL;
+   nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
    for (uint32_t i = 0; i < src_samples; i++) {
-      nir_ssa_def *s =
+      nir_def *s =
          build_nir_tex_op_ms_fetch_sample(b, sampler, tex_deref,
                                           tex_type, tex_pos,
                                           nir_imm_int(b, i));
@@ -3675,9 +3675,9 @@ build_nir_tex_op_ms_resolve(struct nir_builder *b,
 }
 
 /* Fetches the current sample (gl_SampleID) at the given position */
-static nir_ssa_def *
+static nir_def *
 build_nir_tex_op_ms_read(struct nir_builder *b,
-                         nir_ssa_def *tex_pos,
+                         nir_def *tex_pos,
                          enum glsl_base_type tex_type)
 {
    const struct glsl_type *sampler_type =
@@ -3687,17 +3687,17 @@ build_nir_tex_op_ms_read(struct nir_builder *b,
    sampler->data.descriptor_set = 0;
    sampler->data.binding = 0;
 
-   nir_ssa_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
+   nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
 
    return build_nir_tex_op_ms_fetch_sample(b, sampler, tex_deref,
                                            tex_type, tex_pos,
                                            nir_load_sample_id(b));
 }
 
-static nir_ssa_def *
+static nir_def *
 build_nir_tex_op(struct nir_builder *b,
                  struct v3dv_device *device,
-                 nir_ssa_def *tex_pos,
+                 nir_def *tex_pos,
                  enum glsl_base_type tex_type,
                  VkSampleCountFlagBits dst_samples,
                  VkSampleCountFlagBits src_samples,
@@ -3741,10 +3741,10 @@ get_blit_vs()
    vs_out_tex_coord->data.location = VARYING_SLOT_VAR0;
    vs_out_tex_coord->data.interpolation = INTERP_MODE_SMOOTH;
 
-   nir_ssa_def *pos = nir_gen_rect_vertices(&b, NULL, NULL);
+   nir_def *pos = nir_gen_rect_vertices(&b, NULL, NULL);
    nir_store_var(&b, vs_out_pos, pos, 0xf);
 
-   nir_ssa_def *tex_coord = gen_tex_coords(&b);
+   nir_def *tex_coord = gen_tex_coords(&b);
    nir_store_var(&b, vs_out_tex_coord, tex_coord, 0xf);
 
    return b.shader;
@@ -3795,11 +3795,11 @@ get_color_blit_fs(struct v3dv_device *device,
       nir_variable_create(b.shader, nir_var_shader_out, fs_out_type, "out_color");
    fs_out_color->data.location = FRAG_RESULT_DATA0;
 
-   nir_ssa_def *tex_coord = nir_load_var(&b, fs_in_tex_coord);
+   nir_def *tex_coord = nir_load_var(&b, fs_in_tex_coord);
    const uint32_t channel_mask = get_channel_mask_for_sampler_dim(sampler_dim);
    tex_coord = nir_channels(&b, tex_coord, channel_mask);
 
-   nir_ssa_def *color = build_nir_tex_op(&b, device, tex_coord, src_base_type,
+   nir_def *color = build_nir_tex_op(&b, device, tex_coord, src_base_type,
                                          dst_samples, src_samples, sampler_dim);
 
    /* For integer textures, if the bit-size of the destination is too small to
@@ -3814,7 +3814,7 @@ get_color_blit_fs(struct v3dv_device *device,
       enum pipe_format src_pformat = vk_format_to_pipe_format(src_format);
       enum pipe_format dst_pformat = vk_format_to_pipe_format(dst_format);
 
-      nir_ssa_def *c[4];
+      nir_def *c[4];
       for (uint32_t i = 0; i < 4; i++) {
          c[i] = nir_channel(&b, color, i);
 
@@ -3832,11 +3832,11 @@ get_color_blit_fs(struct v3dv_device *device,
 
          assert(dst_bit_size > 0);
          if (util_format_is_pure_uint(dst_pformat)) {
-            nir_ssa_def *max = nir_imm_int(&b, (1 << dst_bit_size) - 1);
+            nir_def *max = nir_imm_int(&b, (1 << dst_bit_size) - 1);
             c[i] = nir_umin(&b, c[i], max);
          } else {
-            nir_ssa_def *max = nir_imm_int(&b, (1 << (dst_bit_size - 1)) - 1);
-            nir_ssa_def *min = nir_imm_int(&b, -(1 << (dst_bit_size - 1)));
+            nir_def *max = nir_imm_int(&b, (1 << (dst_bit_size - 1)) - 1);
+            nir_def *min = nir_imm_int(&b, -(1 << (dst_bit_size - 1)));
             c[i] = nir_imax(&b, nir_imin(&b, c[i], max), min);
          }
       }
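
For a hypothetical 8-bit integer destination, the clamp bounds computed above work out as follows (illustrative only):

   #include <stdio.h>

   int main(void)
   {
      int dst_bit_size = 8; /* hypothetical destination channel width */
      int umax = (1 << dst_bit_size) - 1;       /* unsigned max: 255 */
      int smax = (1 << (dst_bit_size - 1)) - 1; /* signed max: 127 */
      int smin = -(1 << (dst_bit_size - 1));    /* signed min: -128 */
      printf("unsigned [0, %d], signed [%d, %d]\n", umax, smin, smax);
      return 0;
   }
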
index b7b164a..750f162 100644 (file)
@@ -568,7 +568,7 @@ lower_vulkan_resource_index(nir_builder *b,
     * vulkan_load_descriptor returns a vec2 providing an index and
     * offset. Our backend compiler only cares about the index part.
     */
-   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+   nir_def_rewrite_uses(&instr->dest.ssa,
                             nir_imm_ivec2(b, index, 0));
    nir_instr_remove(&instr->instr);
 }
@@ -594,7 +594,7 @@ lower_tex_src(nir_builder *b,
               unsigned src_idx,
               struct lower_pipeline_layout_state *state)
 {
-   nir_ssa_def *index = NULL;
+   nir_def *index = NULL;
    unsigned base_index = 0;
    unsigned array_elements = 1;
    nir_tex_src *src = &instr->src[src_idx];
@@ -739,7 +739,7 @@ lower_image_deref(nir_builder *b,
                   struct lower_pipeline_layout_state *state)
 {
    nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
-   nir_ssa_def *index = NULL;
+   nir_def *index = NULL;
    unsigned array_elements = 1;
    unsigned base_index = 0;
 
@@ -826,7 +826,7 @@ lower_intrinsic(nir_builder *b,
       /* Loading the descriptor happens as part of load/store instructions,
        * so for us this is a no-op.
        */
-      nir_ssa_def_rewrite_uses(&instr->dest.ssa, instr->src[0].ssa);
+      nir_def_rewrite_uses(&instr->dest.ssa, instr->src[0].ssa);
       nir_instr_remove(&instr->instr);
       return true;
    }
@@ -907,11 +907,11 @@ lower_point_coord_cb(nir_builder *b, nir_instr *instr, void *_state)
       return false;
 
    b->cursor = nir_after_instr(&intr->instr);
-   nir_ssa_def *result = &intr->dest.ssa;
+   nir_def *result = &intr->dest.ssa;
    result =
       nir_vector_insert_imm(b, result,
                             nir_fsub_imm(b, 1.0, nir_channel(b, result, 1)), 1);
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
+   nir_def_rewrite_uses_after(&intr->dest.ssa,
                                   result, result->parent_instr);
    return true;
 }
@@ -2257,7 +2257,7 @@ pipeline_add_multiview_gs(struct v3dv_pipeline *pipeline,
    out_layer->data.location = VARYING_SLOT_LAYER;
 
    /* Get the view index value that we will write to gl_Layer */
-   nir_ssa_def *layer =
+   nir_def *layer =
       nir_load_system_value(&b, nir_intrinsic_load_view_index, 0, 1, 32);
 
    /* Emit all output vertices */
index 43b6efb..6fbae5b 100644 (file)
@@ -1345,23 +1345,23 @@ v3dv_ReleaseProfilingLockKHR(VkDevice device)
 
 static inline void
 nir_set_query_availability(nir_builder *b,
-                           nir_ssa_def *buf,
-                           nir_ssa_def *offset,
-                           nir_ssa_def *query_idx,
-                           nir_ssa_def *avail)
+                           nir_def *buf,
+                           nir_def *offset,
+                           nir_def *query_idx,
+                           nir_def *avail)
 {
    offset = nir_iadd(b, offset, query_idx); /* we use 1B per query */
    nir_store_ssbo(b, avail, buf, offset, .write_mask = 0x1, .align_mul = 1);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_get_query_availability(nir_builder *b,
-                           nir_ssa_def *buf,
-                           nir_ssa_def *offset,
-                           nir_ssa_def *query_idx)
+                           nir_def *buf,
+                           nir_def *offset,
+                           nir_def *query_idx)
 {
    offset = nir_iadd(b, offset, query_idx); /* we use 1B per query */
-   nir_ssa_def *avail = nir_load_ssbo(b, 1, 8, buf, offset, .align_mul = 1);
+   nir_def *avail = nir_load_ssbo(b, 1, 8, buf, offset, .align_mul = 1);
    return nir_i2i32(b, avail);
 }
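
A short usage sketch of the two helpers above (an assumption mirroring how the shaders below call them): availability is one byte per query, written with an 8-bit store and widened back to 32 bits on the way out.

   nir_set_query_availability(&b, buf, avail_offset, query_idx,
                              nir_imm_intN_t(&b, 1, 8));
   nir_def *avail = nir_get_query_availability(&b, buf, avail_offset,
                                               query_idx);
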
 
@@ -1372,7 +1372,7 @@ get_set_query_availability_cs()
    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options,
                                                   "set query availability cs");
 
-   nir_ssa_def *buf =
+   nir_def *buf =
       nir_vulkan_resource_index(&b, 2, 32, nir_imm_int(&b, 0),
                                 .desc_set = 0,
                                 .binding = 0,
@@ -1382,15 +1382,15 @@ get_set_query_availability_cs()
     * ever change any of these parameters we need to update how we compute the
     * query index here.
     */
-   nir_ssa_def *wg_id = nir_channel(&b, nir_load_workgroup_id(&b, 32), 0);
+   nir_def *wg_id = nir_channel(&b, nir_load_workgroup_id(&b, 32), 0);
 
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 0, .range = 4);
 
-   nir_ssa_def *query_idx =
+   nir_def *query_idx =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 4, .range = 4);
 
-   nir_ssa_def *avail =
+   nir_def *avail =
       nir_load_push_constant(&b, 1, 8, nir_imm_int(&b, 0), .base = 8, .range = 1);
 
    query_idx = nir_iadd(&b, query_idx, wg_id);
@@ -1399,12 +1399,12 @@ get_set_query_availability_cs()
    return b.shader;
 }
 
-static inline nir_ssa_def *
-nir_get_occlusion_counter_offset(nir_builder *b, nir_ssa_def *query_idx)
+static inline nir_def *
+nir_get_occlusion_counter_offset(nir_builder *b, nir_def *query_idx)
 {
-   nir_ssa_def *query_group = nir_udiv_imm(b, query_idx, 16);
-   nir_ssa_def *query_group_offset = nir_umod_imm(b, query_idx, 16);
-   nir_ssa_def *offset =
+   nir_def *query_group = nir_udiv_imm(b, query_idx, 16);
+   nir_def *query_group_offset = nir_umod_imm(b, query_idx, 16);
+   nir_def *offset =
       nir_iadd(b, nir_imul_imm(b, query_group, 1024),
                   nir_imul_imm(b, query_group_offset, 4));
    return offset;
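
The offset math above lays occlusion counters out in 1024-byte groups of 16 4-byte slots. A worked illustration for a hypothetical query index:

   #include <stdio.h>

   int main(void)
   {
      unsigned query_idx = 37;         /* hypothetical */
      unsigned group = query_idx / 16; /* 2 */
      unsigned slot = query_idx % 16;  /* 5 */
      unsigned offset = group * 1024 + slot * 4;
      printf("query %u -> byte offset %u\n", query_idx, offset); /* 2068 */
      return 0;
   }
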
@@ -1412,20 +1412,20 @@ nir_get_occlusion_counter_offset(nir_builder *b, nir_ssa_def *query_idx)
 
 static inline void
 nir_reset_occlusion_counter(nir_builder *b,
-                            nir_ssa_def *buf,
-                            nir_ssa_def *query_idx)
+                            nir_def *buf,
+                            nir_def *query_idx)
 {
-   nir_ssa_def *offset = nir_get_occlusion_counter_offset(b, query_idx);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *offset = nir_get_occlusion_counter_offset(b, query_idx);
+   nir_def *zero = nir_imm_int(b, 0);
    nir_store_ssbo(b, zero, buf, offset, .write_mask = 0x1, .align_mul = 4);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_read_occlusion_counter(nir_builder *b,
-                           nir_ssa_def *buf,
-                           nir_ssa_def *query_idx)
+                           nir_def *buf,
+                           nir_def *query_idx)
 {
-   nir_ssa_def *offset = nir_get_occlusion_counter_offset(b, query_idx);
+   nir_def *offset = nir_get_occlusion_counter_offset(b, query_idx);
    return nir_load_ssbo(b, 1, 32, buf, offset, .access = 0, .align_mul = 4);
 }
 
@@ -1436,7 +1436,7 @@ get_reset_occlusion_query_cs()
    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options,
                                                   "reset occlusion query cs");
 
-   nir_ssa_def *buf =
+   nir_def *buf =
       nir_vulkan_resource_index(&b, 2, 32, nir_imm_int(&b, 0),
                                 .desc_set = 0,
                                 .binding = 0,
@@ -1446,15 +1446,15 @@ get_reset_occlusion_query_cs()
     * ever change any of these parameters we need to update how we compute the
     * query index here.
     */
-   nir_ssa_def *wg_id = nir_channel(&b, nir_load_workgroup_id(&b, 32), 0);
+   nir_def *wg_id = nir_channel(&b, nir_load_workgroup_id(&b, 32), 0);
 
-   nir_ssa_def *avail_offset =
+   nir_def *avail_offset =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 0, .range = 4);
 
-   nir_ssa_def *base_query_idx =
+   nir_def *base_query_idx =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 4, .range = 4);
 
-   nir_ssa_def *query_idx = nir_iadd(&b, base_query_idx, wg_id);
+   nir_def *query_idx = nir_iadd(&b, base_query_idx, wg_id);
 
    nir_set_query_availability(&b, buf, avail_offset, query_idx,
                               nir_imm_intN_t(&b, 0, 8));
@@ -1465,16 +1465,16 @@ get_reset_occlusion_query_cs()
 
 static void
 write_query_buffer(nir_builder *b,
-                   nir_ssa_def *buf,
-                   nir_ssa_def **offset,
-                   nir_ssa_def *value,
+                   nir_def *buf,
+                   nir_def **offset,
+                   nir_def *value,
                    bool flag_64bit)
 {
    if (flag_64bit) {
       /* Create a 64-bit value using a vec2 with the .Y component set to 0
        * so we can write a 64-bit value in a single store.
        */
-      nir_ssa_def *value64 = nir_vec2(b, value, nir_imm_int(b, 0));
+      nir_def *value64 = nir_vec2(b, value, nir_imm_int(b, 0));
       nir_store_ssbo(b, value64, buf, *offset, .write_mask = 0x3, .align_mul = 8);
       *offset = nir_iadd_imm(b, *offset, 8);
    } else {
@@ -1494,55 +1494,55 @@ get_copy_query_results_cs(VkQueryResultFlags flags)
    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options,
                                                   "copy query results cs");
 
-   nir_ssa_def *buf =
+   nir_def *buf =
       nir_vulkan_resource_index(&b, 2, 32, nir_imm_int(&b, 0),
                                 .desc_set = 0,
                                 .binding = 0,
                                 .desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
 
-   nir_ssa_def *buf_out =
+   nir_def *buf_out =
       nir_vulkan_resource_index(&b, 2, 32, nir_imm_int(&b, 0),
                                 .desc_set = 1,
                                 .binding = 0,
                                 .desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
 
    /* Read push constants */
-   nir_ssa_def *avail_offset =
+   nir_def *avail_offset =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 0, .range = 4);
 
-   nir_ssa_def *base_query_idx =
+   nir_def *base_query_idx =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 4, .range = 4);
 
-   nir_ssa_def *base_offset_out =
+   nir_def *base_offset_out =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 8, .range = 4);
 
-   nir_ssa_def *stride =
+   nir_def *stride =
       nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 12, .range = 4);
 
    /* This assumes a local size of 1 and a horizontal-only dispatch. If we
     * ever change any of these parameters we need to update how we compute the
     * query index here.
     */
-   nir_ssa_def *wg_id = nir_channel(&b, nir_load_workgroup_id(&b, 32), 0);
-   nir_ssa_def *query_idx = nir_iadd(&b, base_query_idx, wg_id);
+   nir_def *wg_id = nir_channel(&b, nir_load_workgroup_id(&b, 32), 0);
+   nir_def *query_idx = nir_iadd(&b, base_query_idx, wg_id);
 
    /* Read query availability if needed */
-   nir_ssa_def *avail = NULL;
+   nir_def *avail = NULL;
    if (flag_avail || !flag_partial)
       avail = nir_get_query_availability(&b, buf, avail_offset, query_idx);
 
    /* Write occlusion query result... */
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_iadd(&b, base_offset_out, nir_imul(&b, wg_id, stride));
 
    /* ...if partial is requested, we always write */
    if (flag_partial) {
-      nir_ssa_def *query_res = nir_read_occlusion_counter(&b, buf, query_idx);
+      nir_def *query_res = nir_read_occlusion_counter(&b, buf, query_idx);
       write_query_buffer(&b, buf_out, &offset, query_res, flag_64bit);
    } else {
       /* ...otherwise, we only write if the query is available */
       nir_if *if_stmt = nir_push_if(&b, nir_ine_imm(&b, avail, 0));
-         nir_ssa_def *query_res = nir_read_occlusion_counter(&b, buf, query_idx);
+         nir_def *query_res = nir_read_occlusion_counter(&b, buf, query_idx);
          write_query_buffer(&b, buf_out, &offset, query_res, flag_64bit);
       nir_pop_if(&b, if_stmt);
    }
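
The flag_64bit/flag_avail/flag_partial booleans used above would be decoded from the Vulkan flags roughly as follows (a sketch; the actual decode sits outside this hunk):

   bool flag_64bit   = flags & VK_QUERY_RESULT_64_BIT;
   bool flag_avail   = flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT;
   bool flag_partial = flags & VK_QUERY_RESULT_PARTIAL_BIT;
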
index 232e886..2300d84 100644 (file)
@@ -53,7 +53,7 @@ lower_clc_call_instr(nir_instr *instr, nir_builder *b,
       return false;
    }
 
-   nir_ssa_def **params = rzalloc_array(b->shader, nir_ssa_def*, call->num_params);
+   nir_def **params = rzalloc_array(b->shader, nir_def*, call->num_params);
 
    for (unsigned i = 0; i < call->num_params; i++) {
       params[i] = nir_ssa_for_src(b, call->params[i],
index 3ff80c7..f44003b 100644 (file)
@@ -2610,10 +2610,10 @@ replace_unused_interpolate_at_with_undef(nir_builder *b, nir_instr *instr,
          nir_variable *var = nir_intrinsic_get_var(intrin, 0);
          if (var->data.mode == nir_var_shader_temp) {
             /* Create undef and rewrite the interp uses */
-            nir_ssa_def *undef =
-               nir_ssa_undef(b, intrin->dest.ssa.num_components,
+            nir_def *undef =
+               nir_undef(b, intrin->dest.ssa.num_components,
                              intrin->dest.ssa.bit_size);
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, undef);
+            nir_def_rewrite_uses(&intrin->dest.ssa, undef);
 
             nir_instr_remove(&intrin->instr);
             return true;
index 8ea8088..e2e19a9 100644 (file)
@@ -936,7 +936,7 @@ static void
 zero_array_members(nir_builder *b, nir_variable *var)
 {
    nir_deref_instr *deref = nir_build_deref_var(b, var);
-   nir_ssa_def *zero = nir_imm_zero(b, 4, 32);
+   nir_def *zero = nir_imm_zero(b, 4, 32);
    for (int i = 0; i < glsl_array_size(var->type); i++) {
       nir_deref_instr *arr = nir_build_deref_array_imm(b, deref, i);
       uint32_t mask = BITFIELD_MASK(glsl_get_vector_elements(arr->type));
index e39889e..ca06d68 100644 (file)
@@ -118,7 +118,7 @@ lower_deref_instr(nir_builder *b, nir_intrinsic_instr *instr,
    else
       range_base = var->data.offset;
 
-   nir_ssa_def *offset = nir_imm_int(b, offset_value);
+   nir_def *offset = nir_imm_int(b, offset_value);
    for (nir_deref_instr *d = deref; d->deref_type != nir_deref_type_var;
         d = nir_deref_instr_parent(d)) {
       assert(d->deref_type == nir_deref_type_array);
index 2404882..0792603 100644 (file)
@@ -34,8 +34,8 @@
 #define imm1(b, x) nir_imm_float(b, x)
 #define imm3(b, x) nir_imm_vec3(b, x, x, x)
 
-static nir_ssa_def *
-swizzle(nir_builder *b, nir_ssa_def *src, int swizzle, int components)
+static nir_def *
+swizzle(nir_builder *b, nir_def *src, int swizzle, int components)
 {
    unsigned swizzle_arr[4];
    swizzle_arr[0] = GET_SWZ(swizzle, 0);
@@ -46,72 +46,72 @@ swizzle(nir_builder *b, nir_ssa_def *src, int swizzle, int components)
    return nir_swizzle(b, src, swizzle_arr, components);
 }
 
-static nir_ssa_def *
-swizzle_x(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+swizzle_x(nir_builder *b, nir_def *src)
 {
    return nir_channel(b, src, 0);
 }
 
-static nir_ssa_def *
-swizzle_y(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+swizzle_y(nir_builder *b, nir_def *src)
 {
    return nir_channel(b, src, 1);
 }
 
-static nir_ssa_def *
-swizzle_z(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+swizzle_z(nir_builder *b, nir_def *src)
 {
    return nir_channel(b, src, 2);
 }
 
-static nir_ssa_def *
-swizzle_w(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+swizzle_w(nir_builder *b, nir_def *src)
 {
    return nir_channel(b, src, 3);
 }
 
-static nir_ssa_def *
-blend_multiply(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_multiply(nir_builder *b, nir_def *src, nir_def *dst)
 {
    /* f(Cs,Cd) = Cs*Cd */
    return nir_fmul(b, src, dst);
 }
 
-static nir_ssa_def *
-blend_screen(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_screen(nir_builder *b, nir_def *src, nir_def *dst)
 {
    /* f(Cs,Cd) = Cs+Cd-Cs*Cd */
    return nir_fsub(b, nir_fadd(b, src, dst), nir_fmul(b, src, dst));
 }
 
-static nir_ssa_def *
-blend_overlay(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_overlay(nir_builder *b, nir_def *src, nir_def *dst)
 {
    /* f(Cs,Cd) = 2*Cs*Cd, if Cd <= 0.5
     *            1-2*(1-Cs)*(1-Cd), otherwise
     */
-   nir_ssa_def *rule_1 = nir_fmul(b, nir_fmul(b, src, dst), imm3(b, 2.0));
-   nir_ssa_def *rule_2 =
+   nir_def *rule_1 = nir_fmul(b, nir_fmul(b, src, dst), imm3(b, 2.0));
+   nir_def *rule_2 =
       nir_fsub(b, imm3(b, 1.0), nir_fmul(b, nir_fmul(b, nir_fsub(b, imm3(b, 1.0), src), nir_fsub(b, imm3(b, 1.0), dst)), imm3(b, 2.0)));
    return nir_bcsel(b, nir_fge(b, imm3(b, 0.5f), dst), rule_1, rule_2);
 }
 
-static nir_ssa_def *
-blend_darken(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_darken(nir_builder *b, nir_def *src, nir_def *dst)
 {
    /* f(Cs,Cd) = min(Cs,Cd) */
    return nir_fmin(b, src, dst);
 }
 
-static nir_ssa_def *
-blend_lighten(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_lighten(nir_builder *b, nir_def *src, nir_def *dst)
 {
    /* f(Cs,Cd) = max(Cs,Cd) */
    return nir_fmax(b, src, dst);
 }
 
-static nir_ssa_def *
-blend_colordodge(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_colordodge(nir_builder *b, nir_def *src, nir_def *dst)
 {
    /* f(Cs,Cd) =
     *   0, if Cd <= 0
@@ -123,8 +123,8 @@ blend_colordodge(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
                               nir_fmin(b, imm3(b, 1.0), nir_fdiv(b, dst, nir_fsub(b, imm3(b, 1.0), src)))));
 }
 
-static nir_ssa_def *
-blend_colorburn(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_colorburn(nir_builder *b, nir_def *src, nir_def *dst)
 {
    /* f(Cs,Cd) =
     *   1, if Cd >= 1
@@ -136,20 +136,20 @@ blend_colorburn(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
                               nir_fsub(b, imm3(b, 1.0), nir_fmin(b, imm3(b, 1.0), nir_fdiv(b, nir_fsub(b, imm3(b, 1.0), dst), src)))));
 }
 
-static nir_ssa_def *
-blend_hardlight(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_hardlight(nir_builder *b, nir_def *src, nir_def *dst)
 {
    /* f(Cs,Cd) = 2*Cs*Cd, if Cs <= 0.5
     *            1-2*(1-Cs)*(1-Cd), otherwise
     */
-   nir_ssa_def *rule_1 = nir_fmul(b, imm3(b, 2.0), nir_fmul(b, src, dst));
-   nir_ssa_def *rule_2 =
+   nir_def *rule_1 = nir_fmul(b, imm3(b, 2.0), nir_fmul(b, src, dst));
+   nir_def *rule_2 =
       nir_fsub(b, imm3(b, 1.0), nir_fmul(b, imm3(b, 2.0), nir_fmul(b, nir_fsub(b, imm3(b, 1.0), src), nir_fsub(b, imm3(b, 1.0), dst))));
    return nir_bcsel(b, nir_fge(b, imm3(b, 0.5), src), rule_1, rule_2);
 }
 
-static nir_ssa_def *
-blend_softlight(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_softlight(nir_builder *b, nir_def *src, nir_def *dst)
 {
    /* f(Cs,Cd) =
     *   Cd-(1-2*Cs)*Cd*(1-Cd),
@@ -166,49 +166,49 @@ blend_softlight(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
     *            Cd*((16*Cd-12)*Cd+3) if Cs > 0.5 and Cd <= 0.25
     *            sqrt(Cd)-Cd,         otherwise
     */
-   nir_ssa_def *factor_1 = nir_fmul(b, dst, nir_fsub(b, imm3(b, 1.0), dst));
-   nir_ssa_def *factor_2 =
+   nir_def *factor_1 = nir_fmul(b, dst, nir_fsub(b, imm3(b, 1.0), dst));
+   nir_def *factor_2 =
       nir_fmul(b, dst, nir_fadd(b, nir_fmul(b, nir_fsub(b, nir_fmul(b, imm3(b, 16.0), dst), imm3(b, 12.0)), dst), imm3(b, 3.0)));
-   nir_ssa_def *factor_3 = nir_fsub(b, nir_fsqrt(b, dst), dst);
-   nir_ssa_def *factor = nir_bcsel(b, nir_fge(b, imm3(b, 0.5), src), factor_1,
+   nir_def *factor_3 = nir_fsub(b, nir_fsqrt(b, dst), dst);
+   nir_def *factor = nir_bcsel(b, nir_fge(b, imm3(b, 0.5), src), factor_1,
                                    nir_bcsel(b, nir_fge(b, imm3(b, 0.25), dst), factor_2, factor_3));
    return nir_fadd(b, dst, nir_fmul(b, nir_fsub(b, nir_fmul(b, imm3(b, 2.0), src), imm3(b, 1.0)), factor));
 }
 
-static nir_ssa_def *
-blend_difference(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_difference(nir_builder *b, nir_def *src, nir_def *dst)
 {
    return nir_fabs(b, nir_fsub(b, dst, src));
 }
 
-static nir_ssa_def *
-blend_exclusion(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst)
+static nir_def *
+blend_exclusion(nir_builder *b, nir_def *src, nir_def *dst)
 {
    return nir_fadd(b, src, nir_fsub(b, dst, nir_fmul(b, imm3(b, 2.0), nir_fmul(b, src, dst))));
 }
 
 /* Return the minimum of a vec3's components */
-static nir_ssa_def *
-minv3(nir_builder *b, nir_ssa_def *v)
+static nir_def *
+minv3(nir_builder *b, nir_def *v)
 {
    return nir_fmin(b, nir_fmin(b, swizzle_x(b, v), swizzle_y(b, v)), swizzle_z(b, v));
 }
 
 /* Return the maximum of a vec3's components */
-static nir_ssa_def *
-maxv3(nir_builder *b, nir_ssa_def *v)
+static nir_def *
+maxv3(nir_builder *b, nir_def *v)
 {
    return nir_fmax(b, nir_fmax(b, swizzle_x(b, v), swizzle_y(b, v)), swizzle_z(b, v));
 }
 
-static nir_ssa_def *
-lumv3(nir_builder *b, nir_ssa_def *c)
+static nir_def *
+lumv3(nir_builder *b, nir_def *c)
 {
    return nir_fdot(b, c, nir_imm_vec3(b, 0.30, 0.59, 0.11));
 }
 
-static nir_ssa_def *
-satv3(nir_builder *b, nir_ssa_def *c)
+static nir_def *
+satv3(nir_builder *b, nir_def *c)
 {
    return nir_fsub(b, maxv3(b, c), minv3(b, c));
 }
@@ -240,20 +240,20 @@ set_lum(nir_builder *b,
         nir_variable *cbase,
         nir_variable *clum)
 {
-   nir_ssa_def *cbase_def = nir_load_var(b, cbase);
+   nir_def *cbase_def = nir_load_var(b, cbase);
    nir_store_var(b, color, nir_fadd(b, cbase_def, nir_fsub(b, lumv3(b, nir_load_var(b, clum)), lumv3(b, cbase_def))), ~0);
 
    nir_variable *llum = add_temp_var(b, "__blend_lum", glsl_float_type());
    nir_variable *mincol = add_temp_var(b, "__blend_mincol", glsl_float_type());
    nir_variable *maxcol = add_temp_var(b, "__blend_maxcol", glsl_float_type());
 
-   nir_ssa_def *color_def = nir_load_var(b, color);
+   nir_def *color_def = nir_load_var(b, color);
    nir_store_var(b, llum, lumv3(b, color_def), ~0);
    nir_store_var(b, mincol, minv3(b, color_def), ~0);
    nir_store_var(b, maxcol, maxv3(b, color_def), ~0);
 
-   nir_ssa_def *mincol_def = nir_load_var(b, mincol);
-   nir_ssa_def *llum_def = nir_load_var(b, llum);
+   nir_def *mincol_def = nir_load_var(b, mincol);
+   nir_def *llum_def = nir_load_var(b, llum);
    nir_if *nif = nir_push_if(b, nir_flt(b, mincol_def, imm1(b, 0.0)));
 
    /* Add then block */
@@ -261,7 +261,7 @@ set_lum(nir_builder *b,
 
    /* Add else block */
    nir_push_else(b, nif);
-   nir_ssa_def *maxcol_def = nir_load_var(b, maxcol);
+   nir_def *maxcol_def = nir_load_var(b, maxcol);
    nir_if *nif2 = nir_push_if(b, nir_flt(b, imm1(b, 1.0), maxcol_def));
    nir_store_var(b, color, nir_fadd(b, llum_def, nir_fdiv(b, nir_fmul(b, nir_fsub(b, color_def, llum_def), nir_fsub(b, imm3(b, 1.0), llum_def)), nir_fsub(b, maxcol_def, llum_def))), ~0);
    nir_pop_if(b, nif2);
@@ -279,8 +279,8 @@ set_lum_sat(nir_builder *b,
             nir_variable *csat,
             nir_variable *clum)
 {
-   nir_ssa_def *cbase_def = nir_load_var(b, cbase);
-   nir_ssa_def *csat_def = nir_load_var(b, csat);
+   nir_def *cbase_def = nir_load_var(b, cbase);
+   nir_def *csat_def = nir_load_var(b, csat);
 
    nir_variable *sbase = add_temp_var(b, "__blend_sbase", glsl_float_type());
    nir_store_var(b, sbase, satv3(b, cbase_def), ~0);
@@ -290,10 +290,10 @@ set_lum_sat(nir_builder *b,
     * and interpolating the "middle" component based on its
     * original value relative to the smallest/largest.
     */
-   nir_ssa_def *sbase_def = nir_load_var(b, sbase);
+   nir_def *sbase_def = nir_load_var(b, sbase);
    nir_if *nif = nir_push_if(b, nir_flt(b, imm1(b, 0.0), sbase_def));
-   nir_ssa_def *ssat = satv3(b, csat_def);
-   nir_ssa_def *minbase = minv3(b, cbase_def);
+   nir_def *ssat = satv3(b, csat_def);
+   nir_def *minbase = minv3(b, cbase_def);
    nir_store_var(b, color, nir_fdiv(b, nir_fmul(b, nir_fsub(b, cbase_def, minbase), ssat), sbase_def), ~0);
    nir_push_else(b, nif);
    nir_store_var(b, color, imm3(b, 0.0), ~0);
@@ -302,7 +302,7 @@ set_lum_sat(nir_builder *b,
    set_lum(b, color, color, clum);
 }
 
-static nir_ssa_def *
+static nir_def *
 is_mode(nir_builder *b, nir_variable *mode, enum gl_advanced_blend_mode q)
 {
    return nir_ieq_imm(b, nir_load_var(b, mode), (unsigned) q);
@@ -312,7 +312,7 @@ static nir_variable *
 calc_blend_result(nir_builder *b,
                   nir_variable *mode,
                   nir_variable *fb,
-                  nir_ssa_def *blend_src,
+                  nir_def *blend_src,
                   GLbitfield blend_qualifiers)
 {
    nir_variable *result = add_temp_var(b, "__blend_result", glsl_vec4_type());
@@ -337,10 +337,10 @@ calc_blend_result(nir_builder *b,
    nir_variable *dst_rgb = add_temp_var(b, "__blend_dst_rgb", glsl_vec_type(3));
    nir_variable *dst_alpha = add_temp_var(b, "__blend_dst_a", glsl_float_type());
 
-   nir_ssa_def *fb_def = nir_load_var(b, fb);
+   nir_def *fb_def = nir_load_var(b, fb);
    nir_store_var(b, dst_alpha, swizzle_w(b, fb_def), ~0);
 
-   nir_ssa_def *dst_alpha_def = nir_load_var(b, dst_alpha);
+   nir_def *dst_alpha_def = nir_load_var(b, dst_alpha);
    nir_if *nif = nir_push_if(b, nir_feq(b, dst_alpha_def, imm1(b, 0.0)));
    nir_store_var(b, dst_rgb, imm3(b, 0.0), ~0);
    nir_push_else(b, nif);
@@ -348,7 +348,7 @@ calc_blend_result(nir_builder *b,
    nir_pop_if(b, nif);
 
    nir_store_var(b, src_alpha, swizzle_w(b, blend_src), ~0);
-   nir_ssa_def *src_alpha_def = nir_load_var(b, src_alpha);
+   nir_def *src_alpha_def = nir_load_var(b, src_alpha);
    nif = nir_push_if(b, nir_feq(b, src_alpha_def, imm1(b, 0.0)));
    nir_store_var(b, src_rgb, imm3(b, 0.0), ~0);
    nir_push_else(b, nif);
@@ -357,15 +357,15 @@ calc_blend_result(nir_builder *b,
 
    nir_variable *factor = add_temp_var(b, "__blend_factor", glsl_vec_type(3));
 
-   nir_ssa_def *src_rgb_def = nir_load_var(b, src_rgb);
-   nir_ssa_def *dst_rgb_def = nir_load_var(b, dst_rgb);
+   nir_def *src_rgb_def = nir_load_var(b, src_rgb);
+   nir_def *dst_rgb_def = nir_load_var(b, dst_rgb);
 
    unsigned choices = blend_qualifiers;
    while (choices) {
       enum gl_advanced_blend_mode choice = (enum gl_advanced_blend_mode)u_bit_scan(&choices);
 
       nir_if *iff = nir_push_if(b, is_mode(b, mode, choice));
-      nir_ssa_def *val = NULL;
+      nir_def *val = NULL;
 
       switch (choice) {
       case BLEND_MULTIPLY:
@@ -454,7 +454,7 @@ calc_blend_result(nir_builder *b,
    /* WRITEMASK_XYZ */
    nir_store_var(b, result, nir_pad_vec4(b, nir_fadd(b, nir_fadd(b, nir_fmul(b, nir_load_var(b, factor), nir_load_var(b, p0)), nir_fmul(b, src_rgb_def, nir_load_var(b, p1))), nir_fmul(b, dst_rgb_def, nir_load_var(b, p2)))), 0x7);
    /* WRITEMASK_W */
-   nir_ssa_def *val = nir_fadd(b, nir_fadd(b, nir_load_var(b, p0), nir_load_var(b, p1)), nir_load_var(b, p2));
+   nir_def *val = nir_fadd(b, nir_fadd(b, nir_load_var(b, p0), nir_load_var(b, p1)), nir_load_var(b, p2));
    nir_store_var(b, result, nir_vec4(b, val, val, val, val), 0x8);
 
    /* reset cursor to the end of the main function */
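
For reference, the combination performed just above follows the KHR_blend_equation_advanced weighting for the modes where X = Y = Z = 1 (a sketch in the style of this file's comments, not text from the commit):

   /* With p0 = As*Ad, p1 = As*(1-Ad), p2 = Ad*(1-As):
    *
    *   RGB = f(Cs,Cd)*p0 + Cs*p1 + Cd*p2
    *   A   = p0 + p1 + p2
    */
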
@@ -466,10 +466,10 @@ calc_blend_result(nir_builder *b,
 /**
  * Dereference var, or var[0] if it's an array.
  */
-static nir_ssa_def *
+static nir_def *
 load_output(nir_builder *b, nir_variable *var)
 {
-   nir_ssa_def *var_def;
+   nir_def *var_def;
    if (glsl_type_is_array(var->type)) {
       var_def = nir_load_array_var_imm(b, var, 0);
    } else {
@@ -539,12 +539,12 @@ gl_nir_lower_blend_equation_advanced(nir_shader *sh, bool coherent)
    /* Combine values written to outputs into a single RGBA blend source.
     * We assign <0, 0, 0, 1> to any components with no corresponding output.
     */
-   nir_ssa_def *blend_source;
+   nir_def *blend_source;
    if (outputs[0] &&
        glsl_get_vector_elements(glsl_without_array(outputs[0]->type)) == 4) {
       blend_source = load_output(&b, outputs[0]);
    } else {
-      nir_ssa_def *blend_comps[4];
+      nir_def *blend_comps[4];
       for (int i = 0; i < 4; i++) {
          nir_variable *var = outputs[i];
          if (var) {
@@ -570,7 +570,7 @@ gl_nir_lower_blend_equation_advanced(nir_shader *sh, bool coherent)
       if (glsl_type_is_array(outputs[i]->type)) {
          nir_store_array_var_imm(&b, outputs[i], 0, nir_load_var(&b, result_dest), 1 << i);
       } else {
-         nir_ssa_def *val = swizzle(&b, nir_load_var(&b, result_dest), i, 1);
+         nir_def *val = swizzle(&b, nir_load_var(&b, result_dest), i, 1);
          nir_store_var(&b, outputs[i], nir_vec4(&b, val, val, val, val), 1 << i);
       }
    }
index 1a1d623..0f54d75 100644 (file)
@@ -29,7 +29,7 @@
 #include "util/compiler.h"
 #include "main/shader_types.h"
 
-static nir_ssa_def *
+static nir_def *
 get_block_array_index(nir_builder *b, nir_deref_instr *deref,
                       const struct gl_shader_program *shader_program)
 {
@@ -41,7 +41,7 @@ get_block_array_index(nir_builder *b, nir_deref_instr *deref,
     */
    int const_array_offset = 0;
    const char *block_name = "";
-   nir_ssa_def *nonconst_index = NULL;
+   nir_def *nonconst_index = NULL;
    while (deref->deref_type == nir_deref_type_array) {
       nir_deref_instr *parent = nir_deref_instr_parent(deref);
       assert(parent && glsl_type_is_array(parent->type));
@@ -56,9 +56,9 @@ get_block_array_index(nir_builder *b, nir_deref_instr *deref,
 
          const_array_offset += arr_index * array_elements;
       } else {
-         nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+         nir_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
          arr_index = nir_umin(b, arr_index, nir_imm_int(b, arr_size - 1));
-         nir_ssa_def *arr_offset = nir_amul_imm(b, arr_index, array_elements);
+         nir_def *arr_offset = nir_amul_imm(b, arr_index, array_elements);
          if (nonconst_index)
             nonconst_index = nir_iadd(b, nonconst_index, arr_offset);
          else
@@ -202,7 +202,7 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
             b.cursor = nir_before_instr(&deref->instr);
 
             unsigned offset = 0;
-            nir_ssa_def *ptr;
+            nir_def *ptr;
             if (deref->deref_type == nir_deref_type_var &&
                 !glsl_type_is_interface(glsl_without_array(deref->var->type))) {
                /* This variable is contained in an interface block rather than
@@ -219,7 +219,7 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
                 * Everything after this point is a byte offset and will be
                 * handled by nir_lower_explicit_io().
                 */
-               nir_ssa_def *index = get_block_array_index(&b, deref,
+               nir_def *index = get_block_array_index(&b, deref,
                                                           shader_program);
                ptr = nir_vec2(&b, index, nir_imm_int(&b, offset));
             } else {
@@ -237,7 +237,7 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
             cast->cast.align_mul = NIR_ALIGN_MUL_MAX;
             cast->cast.align_offset = offset % NIR_ALIGN_MUL_MAX;
 
-            nir_ssa_def_rewrite_uses(&deref->dest.ssa,
+            nir_def_rewrite_uses(&deref->dest.ssa,
                                      &cast->dest.ssa);
             nir_deref_instr_remove_if_unused(deref);
             break;
@@ -262,8 +262,8 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
                if (glsl_type_is_boolean(deref->type)) {
                   b.cursor = nir_after_instr(&intrin->instr);
                   intrin->dest.ssa.bit_size = 32;
-                  nir_ssa_def *bval = nir_i2b(&b, &intrin->dest.ssa);
-                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
+                  nir_def *bval = nir_i2b(&b, &intrin->dest.ssa);
+                  nir_def_rewrite_uses_after(&intrin->dest.ssa,
                                                  bval,
                                                  bval->parent_instr);
                   progress = true;
@@ -288,7 +288,7 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
                 */
                if (glsl_type_is_boolean(deref->type)) {
                   b.cursor = nir_before_instr(&intrin->instr);
-                  nir_ssa_def *ival = nir_b2i32(&b, intrin->src[1].ssa);
+                  nir_def *ival = nir_b2i32(&b, intrin->src[1].ssa);
                   nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
                                         nir_src_for_ssa(ival));
                   progress = true;
index 4d2dd2b..86ba3d8 100644 (file)
@@ -86,7 +86,7 @@ lower_instr(nir_builder *b, nir_instr *instr, void *cb_data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *src;
+   nir_def *src;
    int range_base = 0;
    if (bindless) {
       src = nir_load_deref(b, deref);
index 0eeedd3..a761940 100644 (file)
@@ -359,7 +359,7 @@ get_packed_varying_deref(struct lower_packed_varyings_state *state,
 struct packing_store_values {
    bool is_64bit;
    unsigned writemasks[2];
-   nir_ssa_def *values[2];
+   nir_def *values[2];
    nir_deref_instr *deref;
 };
 
@@ -374,7 +374,7 @@ bitwise_assign_pack(struct lower_packed_varyings_state *state,
                     nir_deref_instr *packed_deref,
                     nir_deref_instr *unpacked_deref,
                     const struct glsl_type *unpacked_type,
-                    nir_ssa_def *value,
+                    nir_def *value,
                     unsigned writemask)
 
 {
@@ -406,7 +406,7 @@ bitwise_assign_pack(struct lower_packed_varyings_state *state,
 
             unsigned swiz_x = 0;
             unsigned writemask = 0x3;
-            nir_ssa_def *swizzle = nir_swizzle(&state->b, value, &swiz_x, 1);
+            nir_def *swizzle = nir_swizzle(&state->b, value, &swiz_x, 1);
 
             store_state->is_64bit = true;
             store_state->deref = packed_deref;
@@ -450,7 +450,7 @@ bitwise_assign_unpack(struct lower_packed_varyings_state *state,
                       nir_deref_instr *unpacked_deref,
                       nir_deref_instr *packed_deref,
                       const struct glsl_type *unpacked_type,
-                      nir_ssa_def *value, unsigned writemask)
+                      nir_def *value, unsigned writemask)
 {
    nir_variable *packed_var = nir_deref_instr_get_variable(packed_deref);
 
@@ -523,7 +523,7 @@ bitwise_assign_unpack(struct lower_packed_varyings_state *state,
 
 static void
 create_store_deref(struct lower_packed_varyings_state *state,
-                   nir_deref_instr *deref, nir_ssa_def *value,
+                   nir_deref_instr *deref, nir_def *value,
                    unsigned writemask, bool is_64bit)
 {
    /* If dest and value have different number of components pack the srcs
@@ -532,7 +532,7 @@ create_store_deref(struct lower_packed_varyings_state *state,
    const struct glsl_type *type = glsl_without_array(deref->type);
    unsigned comps = glsl_get_vector_elements(type);
    if (value->num_components != comps) {
-      nir_ssa_def *srcs[4];
+      nir_def *srcs[4];
 
       unsigned comp = 0;
       for (unsigned i = 0; i < comps; i++) {
@@ -543,7 +543,7 @@ create_store_deref(struct lower_packed_varyings_state *state,
                srcs[i] = nir_swizzle(&state->b, value, &comp, 1);
             comp++;
          } else {
-            srcs[i] = nir_ssa_undef(&state->b, 1,
+            srcs[i] = nir_undef(&state->b, 1,
                                     glsl_type_is_64bit(type) ? 64 : 32);
          }
       }
@@ -555,7 +555,7 @@ create_store_deref(struct lower_packed_varyings_state *state,
 
 static unsigned
 lower_varying(struct lower_packed_varyings_state *state,
-              nir_ssa_def *rhs_swizzle, unsigned writemask,
+              nir_def *rhs_swizzle, unsigned writemask,
               const struct glsl_type *type, unsigned fine_location,
               nir_variable *unpacked_var, nir_deref_instr *unpacked_var_deref,
               const char *name, bool gs_input_toplevel, unsigned vertex_index);
@@ -576,7 +576,7 @@ lower_varying(struct lower_packed_varyings_state *state,
  */
 static unsigned
 lower_arraylike(struct lower_packed_varyings_state *state,
-                nir_ssa_def *rhs_swizzle, unsigned writemask,
+                nir_def *rhs_swizzle, unsigned writemask,
                 const struct glsl_type *type, unsigned fine_location,
                 nir_variable *unpacked_var, nir_deref_instr *unpacked_var_deref,
                 const char *name, bool gs_input_toplevel, unsigned vertex_index)
@@ -640,7 +640,7 @@ lower_arraylike(struct lower_packed_varyings_state *state,
  */
 static unsigned
 lower_varying(struct lower_packed_varyings_state *state,
-              nir_ssa_def *rhs_swizzle, unsigned writemask,
+              nir_def *rhs_swizzle, unsigned writemask,
               const struct glsl_type *type, unsigned fine_location,
               nir_variable *unpacked_var, nir_deref_instr *unpacked_var_deref,
               const char *name, bool gs_input_toplevel, unsigned vertex_index)
@@ -741,10 +741,10 @@ lower_varying(struct lower_packed_varyings_state *state,
             ralloc_asprintf(state->mem_ctx, "%s.%s", name, left_swizzle_name) :
             NULL;
 
-         nir_ssa_def *left_swizzle = NULL;
+         nir_def *left_swizzle = NULL;
          unsigned left_writemask = ~0u;
          if (state->mode == nir_var_shader_out) {
-            nir_ssa_def *ssa_def = rhs_swizzle ?
+            nir_def *ssa_def = rhs_swizzle ?
                rhs_swizzle : nir_load_deref(&state->b, unpacked_var_deref);
             left_swizzle =
                nir_swizzle(&state->b, ssa_def,
@@ -767,10 +767,10 @@ lower_varying(struct lower_packed_varyings_state *state,
          ralloc_asprintf(state->mem_ctx, "%s.%s", name, right_swizzle_name) :
          NULL;
 
-      nir_ssa_def *right_swizzle = NULL;
+      nir_def *right_swizzle = NULL;
       unsigned right_writemask = ~0u;
       if (state->mode == nir_var_shader_out) {
-        nir_ssa_def *ssa_def = rhs_swizzle ?
+        nir_def *ssa_def = rhs_swizzle ?
            rhs_swizzle : nir_load_deref(&state->b, unpacked_var_deref);
         right_swizzle =
            nir_swizzle(&state->b, ssa_def,
@@ -810,7 +810,7 @@ lower_varying(struct lower_packed_varyings_state *state,
       struct packing_store_values *store_value;
       if (state->mode == nir_var_shader_out) {
          unsigned writemask = ((1 << components) - 1) << location_frac;
-         nir_ssa_def *value = rhs_swizzle ? rhs_swizzle :
+         nir_def *value = rhs_swizzle ? rhs_swizzle :
             nir_load_deref(&state->b, unpacked_var_deref);
 
          store_value =
@@ -822,9 +822,9 @@ lower_varying(struct lower_packed_varyings_state *state,
             swizzle_values[i] = i + location_frac;
          }
 
-         nir_ssa_def *ssa_def = &packed_deref->dest.ssa;
+         nir_def *ssa_def = &packed_deref->dest.ssa;
          ssa_def = nir_load_deref(&state->b, packed_deref);
-         nir_ssa_def *swizzle =
+         nir_def *swizzle =
             nir_swizzle(&state->b, ssa_def, swizzle_values, components);
 
          store_value = bitwise_assign_unpack(state, unpacked_var_deref,
index fc3fd1f..61e4470 100644 (file)
@@ -140,11 +140,11 @@ copy_to_new_var(nir_builder *b, nir_deref_instr *deref,
          nir_deref_instr *new_var_m_deref =
             nir_build_deref_array(b, new_var_deref, &c->def);
 
-         nir_ssa_def *value = nir_load_deref(b, m_deref);
+         nir_def *value = nir_load_deref(b, m_deref);
          nir_store_deref(b, new_var_m_deref, value, writemask);
       }
    } else {
-      nir_ssa_def *value = nir_load_deref(b, deref);
+      nir_def *value = nir_load_deref(b, deref);
       nir_store_deref(b, new_var_deref, value, writemask);
    }
 }
index b527af3..97d39ac 100644 (file)
@@ -298,7 +298,7 @@ rewrite_varying_deref(nir_builder *b, struct replace_varyings_data *rv_data,
       unsigned i = nir_src_as_uint(deref->arr.index);
       nir_deref_instr *new_deref =
          nir_build_deref_var(b, rv_data->new_texcoord[i]);
-      nir_ssa_def_rewrite_uses(&deref->dest.ssa, &new_deref->dest.ssa);
+      nir_def_rewrite_uses(&deref->dest.ssa, &new_deref->dest.ssa);
       return;
    }
 }
index daee07d..0a0705d 100644 (file)
@@ -84,21 +84,21 @@ public:
 
 private:
    void add_instr(nir_instr *instr, unsigned num_components, unsigned bit_size);
-   nir_ssa_def *evaluate_rvalue(ir_rvalue *ir);
+   nir_def *evaluate_rvalue(ir_rvalue *ir);
 
-   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def **srcs);
-   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1);
-   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1,
-                       nir_ssa_def *src2);
-   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1,
-                       nir_ssa_def *src2, nir_ssa_def *src3);
+   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_def **srcs);
+   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_def *src1);
+   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_def *src1,
+                       nir_def *src2);
+   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_def *src1,
+                       nir_def *src2, nir_def *src3);
 
    bool supports_std430;
 
    nir_shader *shader;
    nir_function_impl *impl;
    nir_builder b;
-   nir_ssa_def *result; /* result of the expression tree last visited */
+   nir_def *result; /* result of the expression tree last visited */
 
    nir_deref_instr *evaluate_deref(ir_instruction *ir);
 
@@ -122,7 +122,7 @@ private:
    struct set *sparse_variable_set;
 
    void adjust_sparse_variable(nir_deref_instr *var_deref, const glsl_type *type,
-                               nir_ssa_def *dest);
+                               nir_def *dest);
 
    const struct gl_constants *consts;
 };
@@ -465,7 +465,7 @@ nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
 
 void
 nir_visitor::adjust_sparse_variable(nir_deref_instr *var_deref, const glsl_type *type,
-                                    nir_ssa_def *dest)
+                                    nir_def *dest)
 {
    const glsl_type *texel_type = type->field_type("texel");
    assert(texel_type);
@@ -914,7 +914,7 @@ nir_visitor::visit(ir_return *ir)
          nir_build_deref_cast(&b, nir_load_param(&b, 0),
                               nir_var_function_temp, ir->value->type, 0);
 
-      nir_ssa_def *val = evaluate_rvalue(ir->value);
+      nir_def *val = evaluate_rvalue(ir->value);
       nir_store_deref(&b, ret_deref, val, ~0);
    }
 
@@ -1180,7 +1180,7 @@ nir_visitor::visit(ir_call *ir)
       }
 
       nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
-      nir_ssa_def *ret = &instr->dest.ssa;
+      nir_def *ret = &instr->dest.ssa;
 
       switch (op) {
       case nir_intrinsic_deref_atomic:
@@ -1341,15 +1341,15 @@ nir_visitor::visit(ir_call *ir)
          /* Set the address argument, extending the coordinate vector to four
           * components.
           */
-         nir_ssa_def *src_addr =
+         nir_def *src_addr =
             evaluate_rvalue((ir_dereference *)param);
-         nir_ssa_def *srcs[4];
+         nir_def *srcs[4];
 
          for (int i = 0; i < 4; i++) {
             if (i < type->coordinate_components())
                srcs[i] = nir_channel(&b, src_addr, i);
             else
-               srcs[i] = nir_ssa_undef(&b, 1, 32);
+               srcs[i] = nir_undef(&b, 1, 32);
          }
 
          instr->src[1] = nir_src_for_ssa(nir_vec(&b, srcs, 4));
@@ -1363,7 +1363,7 @@ nir_visitor::visit(ir_call *ir)
                nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
             param = param->get_next();
          } else {
-            instr->src[2] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
+            instr->src[2] = nir_src_for_ssa(nir_undef(&b, 1, 32));
          }
 
          /* Set the intrinsic parameters. */
@@ -1468,7 +1468,7 @@ nir_visitor::visit(ir_call *ir)
          ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
          assert(write_mask);
 
-         nir_ssa_def *nir_val = evaluate_rvalue(val);
+         nir_def *nir_val = evaluate_rvalue(val);
          if (val->type->is_boolean())
             nir_val = nir_b2i32(&b, nir_val);
 
@@ -1521,7 +1521,7 @@ nir_visitor::visit(ir_call *ir)
 
          nir_intrinsic_set_write_mask(instr, write_mask->value.u[0]);
 
-         nir_ssa_def *nir_val = evaluate_rvalue(val);
+         nir_def *nir_val = evaluate_rvalue(val);
          /* The value in shared memory is a 32-bit value */
          if (val->type->is_boolean())
             nir_val = nir_b2b32(&b, nir_val);
@@ -1639,7 +1639,7 @@ nir_visitor::visit(ir_call *ir)
          nir_deref_instr *out_deref = evaluate_deref(param_rvalue);
          call->params[i] = nir_src_for_ssa(&out_deref->dest.ssa);
       } else if (sig_param->data.mode == ir_var_function_in) {
-         nir_ssa_def *val = evaluate_rvalue(param_rvalue);
+         nir_def *val = evaluate_rvalue(param_rvalue);
          nir_src src = nir_src_for_ssa(val);
 
          nir_src_copy(&call->params[i], &src, &call->instr);
@@ -1685,7 +1685,7 @@ nir_visitor::visit(ir_assignment *ir)
 
    ir->lhs->accept(this);
    nir_deref_instr *lhs_deref = this->deref;
-   nir_ssa_def *src = evaluate_rvalue(ir->rhs);
+   nir_def *src = evaluate_rvalue(ir->rhs);
 
    if (is_sparse) {
       adjust_sparse_variable(lhs_deref, tex->type, src);
@@ -1767,7 +1767,7 @@ nir_visitor::add_instr(nir_instr *instr, unsigned num_components,
    }
 }
 
-nir_ssa_def *
+nir_def *
 nir_visitor::evaluate_rvalue(ir_rvalue* ir)
 {
    ir->accept(this);
@@ -1880,7 +1880,7 @@ nir_visitor::visit(ir_expression *ir)
       break;
    }
 
-   nir_ssa_def *srcs[4];
+   nir_def *srcs[4];
    for (unsigned i = 0; i < ir->num_operands; i++)
       srcs[i] = evaluate_rvalue(ir->operands[i]);
 
@@ -2457,7 +2457,7 @@ nir_visitor::visit(ir_texture *ir)
    /* check for bindless handles */
    if (!nir_deref_mode_is(sampler_deref, nir_var_uniform) ||
        nir_deref_instr_get_variable(sampler_deref)->data.bindless) {
-      nir_ssa_def *load = nir_load_deref(&b, sampler_deref);
+      nir_def *load = nir_load_deref(&b, sampler_deref);
       instr->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_handle, load);
       instr->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_handle, load);
    } else {
@@ -2618,10 +2618,10 @@ nir_visitor::visit(ir_dereference_record *ir)
     */
    if (this->deref->deref_type == nir_deref_type_var &&
        _mesa_set_search(this->sparse_variable_set, this->deref->var)) {
-      nir_ssa_def *load = nir_load_deref(&b, this->deref);
+      nir_def *load = nir_load_deref(&b, this->deref);
       assert(load->num_components >= 2);
 
-      nir_ssa_def *ssa;
+      nir_def *ssa;
       const glsl_type *type = ir->record->type;
       if (field_index == type->field_index("code")) {
          /* last channel holds residency code */
@@ -2645,7 +2645,7 @@ nir_visitor::visit(ir_dereference_record *ir)
 void
 nir_visitor::visit(ir_dereference_array *ir)
 {
-   nir_ssa_def *index = evaluate_rvalue(ir->array_index);
+   nir_def *index = evaluate_rvalue(ir->array_index);
 
    ir->array->accept(this);
 
index 4d9bbdd..69700ce 100644
@@ -114,7 +114,7 @@ files_libnir = files(
   'nir_format_convert.h',
   'nir_from_ssa.c',
   'nir_gather_info.c',
-  'nir_gather_ssa_types.c',
+  'nir_gather_types.c',
   'nir_gather_xfb_info.c',
   'nir_group_loads.c',
   'nir_gs_count_vertices.c',
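
As a usage sketch for the renamed pass: the caller below is hypothetical
(mem_ctx and impl are assumed), with the usual Mesa bitset helpers sized by
the impl's SSA index count.

   /* Guess float vs. int types for each SSA def index in an impl. */
   BITSET_WORD *float_types =
      rzalloc_array(mem_ctx, BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   BITSET_WORD *int_types =
      rzalloc_array(mem_ctx, BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   nir_gather_types(impl, float_types, int_types);
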
index 4979527..9aaec3a 100644
@@ -718,7 +718,7 @@ nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
       gc_zalloc_zla(shader->gctx, nir_load_const_instr, nir_const_value, num_components);
    instr_init(&instr->instr, nir_instr_type_load_const);
 
-   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size);
+   nir_def_init(&instr->instr, &instr->def, num_components, bit_size);
 
    return instr;
 }
@@ -870,15 +870,15 @@ nir_parallel_copy_instr_create(nir_shader *shader)
    return instr;
 }
 
-nir_ssa_undef_instr *
-nir_ssa_undef_instr_create(nir_shader *shader,
-                           unsigned num_components,
-                           unsigned bit_size)
+nir_undef_instr *
+nir_undef_instr_create(nir_shader *shader,
+                       unsigned num_components,
+                       unsigned bit_size)
 {
-   nir_ssa_undef_instr *instr = gc_alloc(shader->gctx, nir_ssa_undef_instr, 1);
+   nir_undef_instr *instr = gc_alloc(shader->gctx, nir_undef_instr, 1);
    instr_init(&instr->instr, nir_instr_type_ssa_undef);
 
-   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size);
+   nir_def_init(&instr->instr, &instr->def, num_components, bit_size);
 
    return instr;
 }
@@ -1036,7 +1036,7 @@ add_use_cb(nir_src *src, void *state)
 }
 
 static bool
-add_ssa_def_cb(nir_ssa_def *def, void *state)
+add_ssa_def_cb(nir_def *def, void *state)
 {
    nir_instr *instr = state;
 
@@ -1194,11 +1194,11 @@ nir_instr_free_list(struct exec_list *list)
 }
 
 static bool
-nir_instr_free_and_dce_live_cb(nir_ssa_def *def, void *state)
+nir_instr_free_and_dce_live_cb(nir_def *def, void *state)
 {
    bool *live = state;
 
-   if (!nir_ssa_def_is_unused(def)) {
+   if (!nir_def_is_unused(def)) {
       *live = true;
       return false;
    } else {
@@ -1291,7 +1291,7 @@ struct foreach_ssa_def_state {
 };
 
 static inline bool
-nir_ssa_def_visitor(nir_dest *dest, void *void_state)
+nir_def_visitor(nir_dest *dest, void *void_state)
 {
    struct foreach_ssa_def_state *state = void_state;
 
@@ -1309,7 +1309,7 @@ nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
    case nir_instr_type_phi:
    case nir_instr_type_parallel_copy: {
       struct foreach_ssa_def_state foreach_state = { cb, state };
-      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
+      return nir_foreach_dest(instr, nir_def_visitor, &foreach_state);
    }
 
    case nir_instr_type_load_const:
@@ -1324,7 +1324,7 @@ nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
    }
 }
 
-nir_ssa_def *
+nir_def *
 nir_instr_ssa_def(nir_instr *instr)
 {
    switch (instr->type) {
@@ -1539,9 +1539,9 @@ nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
 }
 
 void
-nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
-                 unsigned num_components,
-                 unsigned bit_size)
+nir_def_init(nir_instr *instr, nir_def *def,
+             unsigned num_components,
+             unsigned bit_size)
 {
    def->parent_instr = instr;
    list_inithead(&def->uses);
@@ -1565,22 +1565,22 @@ void
 nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                   unsigned num_components, unsigned bit_size)
 {
-   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size);
+   nir_def_init(instr, &dest->ssa, num_components, bit_size);
 }
 
 void
-nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_ssa_def *new_ssa)
+nir_def_rewrite_uses(nir_def *def, nir_def *new_ssa)
 {
    assert(def != new_ssa);
    nir_foreach_use_including_if_safe(use_src, def) {
-      nir_src_rewrite_ssa(use_src, new_ssa);
+      nir_src_rewrite(use_src, new_ssa);
    }
 }
 
 void
-nir_ssa_def_rewrite_uses_src(nir_ssa_def *def, nir_src new_src)
+nir_def_rewrite_uses_src(nir_def *def, nir_src new_src)
 {
-   nir_ssa_def_rewrite_uses(def, new_src.ssa);
+   nir_def_rewrite_uses(def, new_src.ssa);
 }
 
 static bool
@@ -1614,8 +1614,8 @@ is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
  * def->parent_instr and that after_me comes after def->parent_instr.
  */
 void
-nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_ssa_def *new_ssa,
-                               nir_instr *after_me)
+nir_def_rewrite_uses_after(nir_def *def, nir_def *new_ssa,
+                           nir_instr *after_me)
 {
    if (def == new_ssa)
       return;
@@ -1632,11 +1632,11 @@ nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_ssa_def *new_ssa,
             continue;
       }
 
-      nir_src_rewrite_ssa(use_src, new_ssa);
+      nir_src_rewrite(use_src, new_ssa);
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 get_store_value(nir_intrinsic_instr *intrin)
 {
    assert(nir_intrinsic_has_write_mask(intrin));
@@ -1672,7 +1672,7 @@ nir_src_components_read(const nir_src *src)
 }
 
 nir_component_mask_t
-nir_ssa_def_components_read(const nir_ssa_def *def)
+nir_def_components_read(const nir_def *def)
 {
    nir_component_mask_t read_mask = 0;
 
@@ -1950,7 +1950,7 @@ nir_index_blocks(nir_function_impl *impl)
 }
 
 static bool
-index_ssa_def_cb(nir_ssa_def *def, void *state)
+index_ssa_def_cb(nir_def *def, void *state)
 {
    unsigned *index = (unsigned *)state;
    def->index = (*index)++;
@@ -2084,14 +2084,14 @@ nir_function_impl_lower_instructions(nir_function_impl *impl,
          continue;
       }
 
-      nir_ssa_def *old_def = nir_instr_ssa_def(instr);
+      nir_def *old_def = nir_instr_ssa_def(instr);
       struct list_head old_uses;
       if (old_def != NULL) {
          /* We're about to ask the callback to generate a replacement for instr.
           * Save off the uses from instr's SSA def so we know what uses to
-          * rewrite later.  If we use nir_ssa_def_rewrite_uses, it fails in the
+          * rewrite later.  If we use nir_def_rewrite_uses, it fails in the
           * case where the generated replacement code uses the result of instr
-          * itself.  If we use nir_ssa_def_rewrite_uses_after (which is the
+          * itself.  If we use nir_def_rewrite_uses_after (which is the
           * normal solution to this problem), it doesn't work well if control-
           * flow is inserted as part of the replacement, doesn't handle cases
           * where the replacement is something consumed by instr, and suffers
@@ -2104,7 +2104,7 @@ nir_function_impl_lower_instructions(nir_function_impl *impl,
       }
 
       b.cursor = nir_after_instr(instr);
-      nir_ssa_def *new_def = lower(&b, instr, cb_data);
+      nir_def *new_def = lower(&b, instr, cb_data);
       if (new_def && new_def != NIR_LOWER_INSTR_PROGRESS &&
           new_def != NIR_LOWER_INSTR_PROGRESS_REPLACE) {
          assert(old_def != NULL);
@@ -2119,7 +2119,7 @@ nir_function_impl_lower_instructions(nir_function_impl *impl,
                nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
          }
 
-         if (nir_ssa_def_is_unused(old_def)) {
+         if (nir_def_is_unused(old_def)) {
             iter = nir_instr_free_and_dce(instr);
          } else {
             iter = nir_after_instr(instr);
@@ -2530,7 +2530,7 @@ nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
 }
 
 void
-nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin, nir_ssa_def *src,
+nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin, nir_def *src,
                             bool bindless)
 {
    enum gl_access_qualifier access = nir_intrinsic_access(intrin);
@@ -2760,10 +2760,10 @@ nir_alu_instr_is_copy(nir_alu_instr *instr)
    return nir_op_is_vec(instr->op);
 }
 
-nir_ssa_scalar
-nir_ssa_scalar_chase_movs(nir_ssa_scalar s)
+nir_scalar
+nir_scalar_chase_movs(nir_scalar s)
 {
-   while (nir_ssa_scalar_is_alu(s)) {
+   while (nir_scalar_is_alu(s)) {
       nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
       if (!nir_alu_instr_is_copy(alu))
          break;
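
A short sketch of the renamed scalar helpers (def is an assumed nir_def *):

   /* Follow bit-exact mov/vec copies back to the original definition,
    * then read the channel as a constant if one is found.
    */
   nir_scalar s = nir_scalar_resolved(def, 0 /* channel */);
   if (nir_scalar_is_const(s)) {
      uint64_t v = nir_scalar_as_uint(s);
      /* ... fold using v ... */
   }
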
index 98a4105..6157fb2 100644
@@ -946,7 +946,7 @@ nir_instr_is_last(const nir_instr *instr)
    return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node));
 }
 
-typedef struct nir_ssa_def {
+typedef struct nir_def {
    /** Instruction which produces this SSA value. */
    nir_instr *parent_instr;
 
@@ -966,7 +966,7 @@ typedef struct nir_ssa_def {
     * invocations of the shader.  This is set by nir_divergence_analysis.
     */
    bool divergent;
-} nir_ssa_def;
+} nir_def;
 
 struct nir_src;
 struct nir_if;
@@ -979,7 +979,7 @@ typedef struct nir_src {
    };
 
    struct list_head use_link;
-   nir_ssa_def *ssa;
+   nir_def *ssa;
 
    bool is_if;
 } nir_src;
@@ -1030,7 +1030,7 @@ nir_src_init(void)
       if (src->is_if)
 
 static inline bool
-nir_ssa_def_used_by_if(const nir_ssa_def *def)
+nir_def_used_by_if(const nir_def *def)
 {
    nir_foreach_if_use(_, def)
       return true;
@@ -1039,7 +1039,7 @@ nir_ssa_def_used_by_if(const nir_ssa_def *def)
 }
 
 typedef struct {
-   nir_ssa_def ssa;
+   nir_def ssa;
 } nir_dest;
 
 static inline nir_dest
@@ -1052,7 +1052,7 @@ nir_dest_init(void)
 #define NIR_DEST_INIT nir_dest_init()
 
 static inline nir_src
-nir_src_for_ssa(nir_ssa_def *def)
+nir_src_for_ssa(nir_def *def)
 {
    nir_src src = NIR_SRC_INIT;
 
@@ -1984,7 +1984,7 @@ nir_image_intrinsic_coord_components(const nir_intrinsic_instr *instr);
 
 /* Converts a image_deref_* intrinsic into a image_* one */
 void nir_rewrite_image_intrinsic(nir_intrinsic_instr *instr,
-                                 nir_ssa_def *handle, bool bindless);
+                                 nir_def *handle, bool bindless);
 
 /* Determine if an intrinsic can be arbitrarily reordered and eliminated. */
 static inline bool
@@ -2412,7 +2412,7 @@ bool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex);
 typedef struct {
    nir_instr instr;
 
-   nir_ssa_def def;
+   nir_def def;
 
    nir_const_value value[];
 } nir_load_const_instr;
@@ -2478,8 +2478,8 @@ typedef struct {
 
 typedef struct {
    nir_instr instr;
-   nir_ssa_def def;
-} nir_ssa_undef_instr;
+   nir_def def;
+} nir_undef_instr;
 
 typedef struct {
    struct exec_node node;
@@ -2554,7 +2554,7 @@ NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
                 type, nir_instr_type_intrinsic)
 NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
                 type, nir_instr_type_load_const)
-NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
+NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_undef_instr, instr,
                 type, nir_instr_type_ssa_undef)
 NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
                 type, nir_instr_type_phi)
@@ -2589,36 +2589,36 @@ NIR_DEFINE_SRC_AS_CONST(double, float)
 #undef NIR_DEFINE_SRC_AS_CONST
 
 typedef struct {
-   nir_ssa_def *def;
+   nir_def *def;
    unsigned comp;
-} nir_ssa_scalar;
+} nir_scalar;
 
 static inline bool
-nir_ssa_scalar_is_const(nir_ssa_scalar s)
+nir_scalar_is_const(nir_scalar s)
 {
    return s.def->parent_instr->type == nir_instr_type_load_const;
 }
 
 static inline bool
-nir_ssa_scalar_is_undef(nir_ssa_scalar s)
+nir_scalar_is_undef(nir_scalar s)
 {
    return s.def->parent_instr->type == nir_instr_type_ssa_undef;
 }
 
 static inline nir_const_value
-nir_ssa_scalar_as_const_value(nir_ssa_scalar s)
+nir_scalar_as_const_value(nir_scalar s)
 {
    assert(s.comp < s.def->num_components);
    nir_load_const_instr *load = nir_instr_as_load_const(s.def->parent_instr);
    return load->value[s.comp];
 }
 
-#define NIR_DEFINE_SCALAR_AS_CONST(type, suffix)             \
-   static inline type                                        \
-      nir_ssa_scalar_as_##suffix(nir_ssa_scalar s)           \
-   {                                                         \
-      return nir_const_value_as_##suffix(                    \
-         nir_ssa_scalar_as_const_value(s), s.def->bit_size); \
+#define NIR_DEFINE_SCALAR_AS_CONST(type, suffix)         \
+   static inline type                                    \
+      nir_scalar_as_##suffix(nir_scalar s)               \
+   {                                                     \
+      return nir_const_value_as_##suffix(                \
+         nir_scalar_as_const_value(s), s.def->bit_size); \
    }
 
 NIR_DEFINE_SCALAR_AS_CONST(int64_t, int)
@@ -2629,21 +2629,21 @@ NIR_DEFINE_SCALAR_AS_CONST(double, float)
 #undef NIR_DEFINE_SCALAR_AS_CONST
 
 static inline bool
-nir_ssa_scalar_is_alu(nir_ssa_scalar s)
+nir_scalar_is_alu(nir_scalar s)
 {
    return s.def->parent_instr->type == nir_instr_type_alu;
 }
 
 static inline nir_op
-nir_ssa_scalar_alu_op(nir_ssa_scalar s)
+nir_scalar_alu_op(nir_scalar s)
 {
    return nir_instr_as_alu(s.def->parent_instr)->op;
 }
 
-static inline nir_ssa_scalar
-nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
+static inline nir_scalar
+nir_scalar_chase_alu_src(nir_scalar s, unsigned alu_src_idx)
 {
-   nir_ssa_scalar out = { NULL, 0 };
+   nir_scalar out = { NULL, 0 };
 
    nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
    assert(alu_src_idx < nir_op_infos[alu->op].num_inputs);
@@ -2671,27 +2671,27 @@ nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
    return out;
 }
 
-nir_ssa_scalar nir_ssa_scalar_chase_movs(nir_ssa_scalar s);
+nir_scalar nir_scalar_chase_movs(nir_scalar s);
 
-static inline nir_ssa_scalar
-nir_get_ssa_scalar(nir_ssa_def *def, unsigned channel)
+static inline nir_scalar
+nir_get_ssa_scalar(nir_def *def, unsigned channel)
 {
-   nir_ssa_scalar s = { def, channel };
+   nir_scalar s = { def, channel };
    return s;
 }
 
-/** Returns a nir_ssa_scalar where we've followed the bit-exact mov/vec use chain to the original definition */
-static inline nir_ssa_scalar
-nir_ssa_scalar_resolved(nir_ssa_def *def, unsigned channel)
+/** Returns a nir_scalar where we've followed the bit-exact mov/vec use chain to the original definition */
+static inline nir_scalar
+nir_scalar_resolved(nir_def *def, unsigned channel)
 {
-   return nir_ssa_scalar_chase_movs(nir_get_ssa_scalar(def, channel));
+   return nir_scalar_chase_movs(nir_get_ssa_scalar(def, channel));
 }
 
 static inline uint64_t
 nir_alu_src_as_uint(nir_alu_src src)
 {
-   nir_ssa_scalar scalar = nir_get_ssa_scalar(src.src.ssa, src.swizzle[0]);
-   return nir_ssa_scalar_as_uint(scalar);
+   nir_scalar scalar = nir_get_ssa_scalar(src.src.ssa, src.swizzle[0]);
+   return nir_scalar_as_uint(scalar);
 }
 
 typedef struct {
@@ -2966,7 +2966,7 @@ typedef struct {
 
 typedef struct {
    /* Induction variable. */
-   nir_ssa_def *def;
+   nir_def *def;
 
    /* Init statement with only uniform. */
    nir_src *init_src;
@@ -4112,9 +4112,9 @@ nir_phi_src *nir_phi_instr_add_src(nir_phi_instr *instr, nir_block *pred, nir_sr
 
 nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);
 
-nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
-                                                unsigned num_components,
-                                                unsigned bit_size);
+nir_undef_instr *nir_undef_instr_create(nir_shader *shader,
+                                        unsigned num_components,
+                                        unsigned bit_size);
 
 nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
 
@@ -4385,9 +4385,9 @@ nir_cursor nir_instr_free_and_dce(nir_instr *instr);
 
 /** @} */
 
-nir_ssa_def *nir_instr_ssa_def(nir_instr *instr);
+nir_def *nir_instr_ssa_def(nir_instr *instr);
 
-typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
+typedef bool (*nir_foreach_ssa_def_cb)(nir_def *def, void *state);
 typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
 typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
 bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
@@ -4419,7 +4419,7 @@ bool nir_srcs_equal(nir_src src1, nir_src src2);
 bool nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2);
 
 static inline void
-nir_src_rewrite_ssa(nir_src *src, nir_ssa_def *new_ssa)
+nir_src_rewrite(nir_src *src, nir_def *new_ssa)
 {
    assert(src->ssa);
    assert(src->is_if ? (src->parent_if != NULL) : (src->parent_instr != NULL));
@@ -4430,11 +4430,11 @@ nir_src_rewrite_ssa(nir_src *src, nir_ssa_def *new_ssa)
 
 static inline void
 nir_instr_rewrite_src_ssa(ASSERTED nir_instr *instr,
-                          nir_src *src, nir_ssa_def *new_ssa)
+                          nir_src *src, nir_def *new_ssa)
 {
    assert(!src->is_if);
    assert(src->parent_instr == instr);
-   nir_src_rewrite_ssa(src, new_ssa);
+   nir_src_rewrite(src, new_ssa);
 }
 
 void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
@@ -4444,8 +4444,8 @@ void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
 
 void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                        unsigned num_components, unsigned bit_size);
-void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
-                      unsigned num_components, unsigned bit_size);
+void nir_def_init(nir_instr *instr, nir_def *def,
+                  unsigned num_components, unsigned bit_size);
 static inline void
 nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
                            const struct glsl_type *type)
@@ -4454,16 +4454,16 @@ nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
    nir_ssa_dest_init(instr, dest, glsl_get_components(type),
                      glsl_get_bit_size(type));
 }
-void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_ssa_def *new_ssa);
-void nir_ssa_def_rewrite_uses_src(nir_ssa_def *def, nir_src new_src);
-void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_ssa_def *new_ssa,
-                                    nir_instr *after_me);
+void nir_def_rewrite_uses(nir_def *def, nir_def *new_ssa);
+void nir_def_rewrite_uses_src(nir_def *def, nir_src new_src);
+void nir_def_rewrite_uses_after(nir_def *def, nir_def *new_ssa,
+                                nir_instr *after_me);
 
 nir_component_mask_t nir_src_components_read(const nir_src *src);
-nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
+nir_component_mask_t nir_def_components_read(const nir_def *def);
 
 static inline bool
-nir_ssa_def_is_unused(nir_ssa_def *ssa)
+nir_def_is_unused(nir_def *ssa)
 {
    return list_is_empty(&ssa->uses);
 }
@@ -4715,17 +4715,17 @@ typedef bool (*nir_instr_writemask_filter_cb)(const nir_instr *,
  * should either return NULL indicating that no lowering needs to be done or
  * emit a sequence of instructions using the provided builder (whose cursor
  * will already be placed after the instruction to be lowered) and return the
- * resulting nir_ssa_def.
+ * resulting nir_def.
  */
-typedef nir_ssa_def *(*nir_lower_instr_cb)(struct nir_builder *,
-                                           nir_instr *, void *);
+typedef nir_def *(*nir_lower_instr_cb)(struct nir_builder *,
+                                       nir_instr *, void *);
 
 /**
  * Special return value for nir_lower_instr_cb when some progress occurred
  * (like changing an input to the instr) that didn't result in a replacement
  * SSA def being generated.
  */
-#define NIR_LOWER_INSTR_PROGRESS ((nir_ssa_def *)(uintptr_t)1)
+#define NIR_LOWER_INSTR_PROGRESS ((nir_def *)(uintptr_t)1)
 
 /**
  * Special return value for nir_lower_instr_cb when some progress occurred
@@ -4733,7 +4733,7 @@ typedef nir_ssa_def *(*nir_lower_instr_cb)(struct nir_builder *,
  * (like a store)
  */
 
-#define NIR_LOWER_INSTR_PROGRESS_REPLACE ((nir_ssa_def *)(uintptr_t)2)
+#define NIR_LOWER_INSTR_PROGRESS_REPLACE ((nir_def *)(uintptr_t)2)
 
 /** Iterate over all the instructions in a nir_function_impl and lower them
  *  using the provided callbacks
@@ -4804,7 +4804,7 @@ bool nir_lower_returns(nir_shader *shader);
 
 void nir_inline_function_impl(struct nir_builder *b,
                               const nir_function_impl *impl,
-                              nir_ssa_def **params,
+                              nir_def **params,
                               struct hash_table *shader_var_remap);
 bool nir_inline_functions(nir_shader *shader);
 
@@ -4864,9 +4864,9 @@ void nir_lower_clip_halfz(nir_shader *shader);
 
 void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
 
-void nir_gather_ssa_types(nir_function_impl *impl,
-                          BITSET_WORD *float_types,
-                          BITSET_WORD *int_types);
+void nir_gather_types(nir_function_impl *impl,
+                      BITSET_WORD *float_types,
+                      BITSET_WORD *int_types);
 
 void nir_assign_var_locations(nir_shader *shader, nir_variable_mode mode,
                               unsigned *size,
@@ -5056,26 +5056,26 @@ nir_address_format_to_glsl_type(nir_address_format addr_format)
 
 const nir_const_value *nir_address_format_null_value(nir_address_format addr_format);
 
-nir_ssa_def *nir_build_addr_iadd(struct nir_builder *b, nir_ssa_def *addr,
+nir_def *nir_build_addr_iadd(struct nir_builder *b, nir_def *addr,
+                             nir_address_format addr_format,
+                             nir_variable_mode modes,
+                             nir_def *offset);
+
+nir_def *nir_build_addr_iadd_imm(struct nir_builder *b, nir_def *addr,
                                  nir_address_format addr_format,
                                  nir_variable_mode modes,
-                                 nir_ssa_def *offset);
+                                 int64_t offset);
 
-nir_ssa_def *nir_build_addr_iadd_imm(struct nir_builder *b, nir_ssa_def *addr,
-                                     nir_address_format addr_format,
-                                     nir_variable_mode modes,
-                                     int64_t offset);
+nir_def *nir_build_addr_ieq(struct nir_builder *b, nir_def *addr0, nir_def *addr1,
+                            nir_address_format addr_format);
 
-nir_ssa_def *nir_build_addr_ieq(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
-                                nir_address_format addr_format);
-
-nir_ssa_def *nir_build_addr_isub(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
-                                 nir_address_format addr_format);
+nir_def *nir_build_addr_isub(struct nir_builder *b, nir_def *addr0, nir_def *addr1,
+                             nir_address_format addr_format);
 
-nir_ssa_def *nir_explicit_io_address_from_deref(struct nir_builder *b,
-                                                nir_deref_instr *deref,
-                                                nir_ssa_def *base_addr,
-                                                nir_address_format addr_format);
+nir_def *nir_explicit_io_address_from_deref(struct nir_builder *b,
+                                            nir_deref_instr *deref,
+                                            nir_def *base_addr,
+                                            nir_address_format addr_format);
 
 bool nir_get_explicit_deref_align(nir_deref_instr *deref,
                                   bool default_to_type_align,
@@ -5084,7 +5084,7 @@ bool nir_get_explicit_deref_align(nir_deref_instr *deref,
 
 void nir_lower_explicit_io_instr(struct nir_builder *b,
                                  nir_intrinsic_instr *io_instr,
-                                 nir_ssa_def *addr,
+                                 nir_def *addr,
                                  nir_address_format addr_format);
 
 bool nir_lower_explicit_io(nir_shader *shader,
@@ -5330,7 +5330,7 @@ bool nir_lower_subgroups(nir_shader *shader,
 
 bool nir_lower_system_values(nir_shader *shader);
 
-nir_ssa_def *
+nir_def *
 nir_build_lowered_load_helper_invocation(struct nir_builder *b);
 
 typedef struct nir_lower_compute_system_values_options {
@@ -5875,7 +5875,7 @@ void nir_loop_analyze_impl(nir_function_impl *impl,
                            nir_variable_mode indirect_mask,
                            bool force_unroll_sampler_indirect);
 
-bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);
+bool nir_defs_interfere(nir_def *a, nir_def *b);
 
 bool nir_repair_ssa_impl(nir_function_impl *impl);
 bool nir_repair_ssa(nir_shader *shader);
@@ -5887,8 +5887,8 @@ bool nir_update_instr_divergence(nir_shader *shader, nir_instr *instr);
 bool nir_has_divergent_loop(nir_shader *shader);
 
 void
-nir_rewrite_uses_to_load_reg(struct nir_builder *b, nir_ssa_def *old,
-                             nir_ssa_def *reg);
+nir_rewrite_uses_to_load_reg(struct nir_builder *b, nir_def *old,
+                             nir_def *reg);
 
 /* If phi_webs_only is true, only convert SSA values involved in phi nodes to
  * registers.  If false, convert all values (even those not involved in a phi
@@ -6096,12 +6096,12 @@ typedef struct nir_unsigned_upper_bound_config {
 
 uint32_t
 nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
-                         nir_ssa_scalar scalar,
+                         nir_scalar scalar,
                          const nir_unsigned_upper_bound_config *config);
 
 bool
 nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
-                            nir_ssa_scalar ssa, unsigned const_val,
+                            nir_scalar ssa, unsigned const_val,
                             const nir_unsigned_upper_bound_config *config);
 
 typedef struct {
@@ -6114,7 +6114,7 @@ typedef struct {
    bool subgroup_size_uniform;
 
    /* size/align for load/store_preamble. */
-   void (*def_size)(nir_ssa_def *def, unsigned *size, unsigned *align);
+   void (*def_size)(nir_def *def, unsigned *size, unsigned *align);
 
    /* Total available size for load/store_preamble storage, in units
     * determined by def_size.
@@ -6132,7 +6132,7 @@ typedef struct {
     * may happen from inserting move instructions, etc. If the benefit doesn't
     * exceed the cost here then we won't rewrite it.
     */
-   float (*rewrite_cost_cb)(nir_ssa_def *def, const void *data);
+   float (*rewrite_cost_cb)(nir_def *def, const void *data);
 
    /* Instructions whose definitions should not be rewritten. These could
     * still be moved to the preamble, but they shouldn't be the root of a
@@ -6154,7 +6154,7 @@ nir_function_impl *nir_shader_get_preamble(nir_shader *shader);
 bool nir_lower_point_smooth(nir_shader *shader);
 bool nir_lower_poly_line_smooth(nir_shader *shader, unsigned num_smooth_aa_sample);
 
-bool nir_mod_analysis(nir_ssa_scalar val, nir_alu_type val_type, unsigned div, unsigned *mod);
+bool nir_mod_analysis(nir_scalar val, nir_alu_type val_type, unsigned div, unsigned *mod);
 
 bool
 nir_remove_tex_shadow(nir_shader *shader, unsigned textures_bitmask);
@@ -6163,7 +6163,7 @@ void
 nir_trivialize_registers(nir_shader *s);
 
 static inline nir_intrinsic_instr *
-nir_reg_get_decl(nir_ssa_def *reg)
+nir_reg_get_decl(nir_def *reg)
 {
    assert(reg->parent_instr->type == nir_instr_type_intrinsic);
    nir_intrinsic_instr *decl = nir_instr_as_intrinsic(reg->parent_instr);
@@ -6231,7 +6231,7 @@ nir_is_store_reg(nir_intrinsic_instr *intr)
       if (nir_is_store_reg(nir_instr_as_intrinsic(store->parent_instr)))
 
 static inline nir_intrinsic_instr *
-nir_load_reg_for_def(const nir_ssa_def *def)
+nir_load_reg_for_def(const nir_def *def)
 {
    if (def->parent_instr->type != nir_instr_type_intrinsic)
       return NULL;
@@ -6244,7 +6244,7 @@ nir_load_reg_for_def(const nir_ssa_def *def)
 }
 
 static inline nir_intrinsic_instr *
-nir_store_reg_for_def(const nir_ssa_def *def)
+nir_store_reg_for_def(const nir_def *def)
 {
    /* Look for the trivial store: single use of our destination by a
     * store_register intrinsic.
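
To illustrate the renamed callback type, a minimal nir_lower_instr_cb sketch;
the fsub-to-fadd rewrite is hypothetical, not taken from this commit.

   static nir_def *
   lower_fsub_cb(nir_builder *b, nir_instr *instr, void *data)
   {
      if (instr->type != nir_instr_type_alu)
         return NULL; /* nothing to lower */

      nir_alu_instr *alu = nir_instr_as_alu(instr);
      if (alu->op != nir_op_fsub)
         return NULL;

      /* The builder cursor is already after the instruction; return the
       * replacement def and the caller rewrites all uses.
       */
      return nir_fadd(b, nir_ssa_for_alu_src(b, alu, 0),
                      nir_fneg(b, nir_ssa_for_alu_src(b, alu, 1)));
   }
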
index 0de722e..b5c1752 100644
@@ -60,7 +60,7 @@ nir_builder MUST_CHECK PRINTFLIKE(3, 4)
    return b;
 }
 
-nir_ssa_def *
+nir_def *
 nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr)
 {
    const nir_op_info *op_info = &nir_op_infos[instr->op];
@@ -121,9 +121,9 @@ nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr
    return &instr->dest.dest.ssa;
 }
 
-nir_ssa_def *
-nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
-              nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3)
+nir_def *
+nir_build_alu(nir_builder *build, nir_op op, nir_def *src0,
+              nir_def *src1, nir_def *src2, nir_def *src3)
 {
    nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
    if (!instr)
@@ -140,8 +140,8 @@ nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
    return nir_builder_alu_instr_finish_and_insert(build, instr);
 }
 
-nir_ssa_def *
-nir_build_alu1(nir_builder *build, nir_op op, nir_ssa_def *src0)
+nir_def *
+nir_build_alu1(nir_builder *build, nir_op op, nir_def *src0)
 {
    nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
    if (!instr)
@@ -152,9 +152,9 @@ nir_build_alu1(nir_builder *build, nir_op op, nir_ssa_def *src0)
    return nir_builder_alu_instr_finish_and_insert(build, instr);
 }
 
-nir_ssa_def *
-nir_build_alu2(nir_builder *build, nir_op op, nir_ssa_def *src0,
-               nir_ssa_def *src1)
+nir_def *
+nir_build_alu2(nir_builder *build, nir_op op, nir_def *src0,
+               nir_def *src1)
 {
    nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
    if (!instr)
@@ -166,9 +166,9 @@ nir_build_alu2(nir_builder *build, nir_op op, nir_ssa_def *src0,
    return nir_builder_alu_instr_finish_and_insert(build, instr);
 }
 
-nir_ssa_def *
-nir_build_alu3(nir_builder *build, nir_op op, nir_ssa_def *src0,
-               nir_ssa_def *src1, nir_ssa_def *src2)
+nir_def *
+nir_build_alu3(nir_builder *build, nir_op op, nir_def *src0,
+               nir_def *src1, nir_def *src2)
 {
    nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
    if (!instr)
@@ -181,9 +181,9 @@ nir_build_alu3(nir_builder *build, nir_op op, nir_ssa_def *src0,
    return nir_builder_alu_instr_finish_and_insert(build, instr);
 }
 
-nir_ssa_def *
-nir_build_alu4(nir_builder *build, nir_op op, nir_ssa_def *src0,
-               nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3)
+nir_def *
+nir_build_alu4(nir_builder *build, nir_op op, nir_def *src0,
+               nir_def *src1, nir_def *src2, nir_def *src3)
 {
    nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
    if (!instr)
@@ -198,8 +198,8 @@ nir_build_alu4(nir_builder *build, nir_op op, nir_ssa_def *src0,
 }
 
 /* for the couple special cases with more than 4 src args: */
-nir_ssa_def *
-nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs)
+nir_def *
+nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_def **srcs)
 {
    const nir_op_info *op_info = &nir_op_infos[op];
    nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
@@ -212,7 +212,7 @@ nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs)
    return nir_builder_alu_instr_finish_and_insert(build, instr);
 }
 
-nir_ssa_def *
+nir_def *
 nir_build_tex_deref_instr(nir_builder *build, nir_texop op,
                           nir_deref_instr *texture,
                           nir_deref_instr *sampler,
@@ -311,8 +311,8 @@ nir_build_tex_deref_instr(nir_builder *build, nir_texop op,
    return &tex->dest.ssa;
 }
 
-nir_ssa_def *
-nir_vec_scalars(nir_builder *build, nir_ssa_scalar *comp, unsigned num_components)
+nir_def *
+nir_vec_scalars(nir_builder *build, nir_scalar *comp, unsigned num_components)
 {
    nir_op op = nir_op_vec(num_components);
    nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
@@ -337,12 +337,12 @@ nir_vec_scalars(nir_builder *build, nir_ssa_scalar *comp, unsigned num_component
 }
 
 /**
- * Turns a nir_src into a nir_ssa_def * so it can be passed to
+ * Turns a nir_src into a nir_def * so it can be passed to
  * nir_build_alu()-based builder calls.
  *
  * See nir_ssa_for_alu_src() for alu instructions.
  */
-nir_ssa_def *
+nir_def *
 nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
 {
    if (src.ssa->num_components == num_components)
@@ -362,7 +362,7 @@ nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
  * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
  * nir_alu_src's swizzle.
  */
-nir_ssa_def *
+nir_def *
 nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
 {
    if (nir_alu_src_is_trivial_ssa(instr, srcn))
@@ -374,7 +374,7 @@ nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
 }
 
 /* Generic builder for system values. */
-nir_ssa_def *
+nir_def *
 nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
                       unsigned num_components, unsigned bit_size)
 {
@@ -430,7 +430,7 @@ nir_push_if_src(nir_builder *build, nir_src condition)
 }
 
 nir_if *
-nir_push_if(nir_builder *build, nir_ssa_def *condition)
+nir_push_if(nir_builder *build, nir_def *condition)
 {
    return nir_push_if_src(build, nir_src_for_ssa(condition));
 }
@@ -460,8 +460,8 @@ nir_pop_if(nir_builder *build, nir_if *nif)
    build->cursor = nir_after_cf_node(&nif->cf_node);
 }
 
-nir_ssa_def *
-nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)
+nir_def *
+nir_if_phi(nir_builder *build, nir_def *then_def, nir_def *else_def)
 {
    nir_block *block = nir_cursor_current_block(build->cursor);
    nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));
@@ -517,9 +517,9 @@ nir_pop_loop(nir_builder *build, nir_loop *loop)
    build->cursor = nir_after_cf_node(&loop->cf_node);
 }
 
-nir_ssa_def *
+nir_def *
 nir_compare_func(nir_builder *b, enum compare_func func,
-                 nir_ssa_def *src0, nir_ssa_def *src1)
+                 nir_def *src0, nir_def *src1)
 {
    switch (func) {
    case COMPARE_FUNC_NEVER:
@@ -542,9 +542,9 @@ nir_compare_func(nir_builder *b, enum compare_func func,
    unreachable("bad compare func");
 }
 
-nir_ssa_def *
+nir_def *
 nir_type_convert(nir_builder *b,
-                 nir_ssa_def *src,
+                 nir_def *src,
                  nir_alu_type src_type,
                  nir_alu_type dest_type,
                  nir_rounding_mode rnd)
@@ -619,15 +619,15 @@ nir_type_convert(nir_builder *b,
    }
 }
 
-nir_ssa_def *
-nir_gen_rect_vertices(nir_builder *b, nir_ssa_def *z, nir_ssa_def *w)
+nir_def *
+nir_gen_rect_vertices(nir_builder *b, nir_def *z, nir_def *w)
 {
    if (!z)
       z = nir_imm_float(b, 0.0);
    if (!w)
       w = nir_imm_float(b, 1.0);
 
-   nir_ssa_def *vertex_id;
+   nir_def *vertex_id;
    if (b->shader->options && b->shader->options->vertex_id_zero_based)
       vertex_id = nir_load_vertex_id_zero_base(b);
    else
@@ -644,10 +644,10 @@ nir_gen_rect_vertices(nir_builder *b, nir_ssa_def *z, nir_ssa_def *w)
     * channel 1 is vertex_id & 1 ?  1.0 : -1.0
     */
 
-   nir_ssa_def *c0cmp = nir_ilt_imm(b, vertex_id, 2);
-   nir_ssa_def *c1cmp = nir_test_mask(b, vertex_id, 1);
+   nir_def *c0cmp = nir_ilt_imm(b, vertex_id, 2);
+   nir_def *c1cmp = nir_test_mask(b, vertex_id, 1);
 
-   nir_ssa_def *comp[4];
+   nir_def *comp[4];
    comp[0] = nir_bcsel(b, c0cmp, nir_imm_float(b, -1.0), nir_imm_float(b, 1.0));
    comp[1] = nir_bcsel(b, c1cmp, nir_imm_float(b, 1.0), nir_imm_float(b, -1.0));
    comp[2] = z;
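
And a sketch of the renamed control-flow builders (b and cond are assumed):

   /* Build "cond ? 1 : 0" with structured control flow and a phi. */
   nir_push_if(b, cond);
   nir_def *then_val = nir_imm_int(b, 1);
   nir_push_else(b, NULL);
   nir_def *else_val = nir_imm_int(b, 0);
   nir_pop_if(b, NULL);
   nir_def *result = nir_if_phi(b, then_val, else_val);
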
index b495de8..4be4562 100644
@@ -126,26 +126,26 @@ nir_builder_last_instr(nir_builder *build)
 }
 
 /* General nir_build_alu() taking a variable arg count with NULLs for the rest. */
-nir_ssa_def *
-nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
-              nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3);
+nir_def *
+nir_build_alu(nir_builder *build, nir_op op, nir_def *src0,
+              nir_def *src1, nir_def *src2, nir_def *src3);
 
 /* Fixed-arg-count variants to reduce size of codegen. */
-nir_ssa_def *
-nir_build_alu1(nir_builder *build, nir_op op, nir_ssa_def *src0);
-nir_ssa_def *
-nir_build_alu2(nir_builder *build, nir_op op, nir_ssa_def *src0,
-               nir_ssa_def *src1);
-nir_ssa_def *
-nir_build_alu3(nir_builder *build, nir_op op, nir_ssa_def *src0,
-               nir_ssa_def *src1, nir_ssa_def *src2);
-nir_ssa_def *
-nir_build_alu4(nir_builder *build, nir_op op, nir_ssa_def *src0,
-               nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3);
-
-nir_ssa_def *nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs);
-
-nir_ssa_def *
+nir_def *
+nir_build_alu1(nir_builder *build, nir_op op, nir_def *src0);
+nir_def *
+nir_build_alu2(nir_builder *build, nir_op op, nir_def *src0,
+               nir_def *src1);
+nir_def *
+nir_build_alu3(nir_builder *build, nir_op op, nir_def *src0,
+               nir_def *src1, nir_def *src2);
+nir_def *
+nir_build_alu4(nir_builder *build, nir_op op, nir_def *src0,
+               nir_def *src1, nir_def *src2, nir_def *src3);
+
+nir_def *nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_def **srcs);
+
+nir_def *
 nir_build_tex_deref_instr(nir_builder *build, nir_texop op,
                           nir_deref_instr *texture,
                           nir_deref_instr *sampler,
@@ -162,15 +162,15 @@ nir_if *
 nir_push_if_src(nir_builder *build, nir_src condition);
 
 nir_if *
-nir_push_if(nir_builder *build, nir_ssa_def *condition);
+nir_push_if(nir_builder *build, nir_def *condition);
 
 nir_if *
 nir_push_else(nir_builder *build, nir_if *nif);
 
 void nir_pop_if(nir_builder *build, nir_if *nif);
 
-nir_ssa_def *
-nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def);
+nir_def *
+nir_if_phi(nir_builder *build, nir_def *then_def, nir_def *else_def);
 
 nir_loop *
 nir_push_loop(nir_builder *build);
@@ -180,11 +180,11 @@ nir_push_continue(nir_builder *build, nir_loop *loop);
 
 void nir_pop_loop(nir_builder *build, nir_loop *loop);
 
-static inline nir_ssa_def *
-nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
+static inline nir_def *
+nir_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
 {
-   nir_ssa_undef_instr *undef =
-      nir_ssa_undef_instr_create(build->shader, num_components, bit_size);
+   nir_undef_instr *undef =
+      nir_undef_instr_create(build->shader, num_components, bit_size);
    if (!undef)
       return NULL;
 
@@ -195,7 +195,7 @@ nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
    return &undef->def;
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_build_imm(nir_builder *build, unsigned num_components,
               unsigned bit_size, const nir_const_value *value)
 {
@@ -211,7 +211,7 @@ nir_build_imm(nir_builder *build, unsigned num_components,
    return &load_const->def;
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
 {
    nir_load_const_instr *load_const =
@@ -224,57 +224,57 @@ nir_imm_zero(nir_builder *build, unsigned num_components, unsigned bit_size)
    return &load_const->def;
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_boolN_t(nir_builder *build, bool x, unsigned bit_size)
 {
    nir_const_value v = nir_const_value_for_bool(x, bit_size);
    return nir_build_imm(build, 1, bit_size, &v);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_bool(nir_builder *build, bool x)
 {
    return nir_imm_boolN_t(build, x, 1);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_true(nir_builder *build)
 {
    return nir_imm_bool(build, true);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_false(nir_builder *build)
 {
    return nir_imm_bool(build, false);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_floatN_t(nir_builder *build, double x, unsigned bit_size)
 {
    nir_const_value v = nir_const_value_for_float(x, bit_size);
    return nir_build_imm(build, 1, bit_size, &v);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_float16(nir_builder *build, float x)
 {
    return nir_imm_floatN_t(build, x, 16);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_float(nir_builder *build, float x)
 {
    return nir_imm_floatN_t(build, x, 32);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_double(nir_builder *build, double x)
 {
    return nir_imm_floatN_t(build, x, 64);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_vec2(nir_builder *build, float x, float y)
 {
    nir_const_value v[2] = {
@@ -284,7 +284,7 @@ nir_imm_vec2(nir_builder *build, float x, float y)
    return nir_build_imm(build, 2, 32, v);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_vec3(nir_builder *build, float x, float y, float z)
 {
    nir_const_value v[3] = {
@@ -295,7 +295,7 @@ nir_imm_vec3(nir_builder *build, float x, float y, float z)
    return nir_build_imm(build, 3, 32, v);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
 {
    nir_const_value v[4] = {
@@ -308,7 +308,7 @@ nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
    return nir_build_imm(build, 4, 32, v);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
 {
    nir_const_value v[4] = {
@@ -321,26 +321,26 @@ nir_imm_vec4_16(nir_builder *build, float x, float y, float z, float w)
    return nir_build_imm(build, 4, 16, v);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
 {
    nir_const_value v = nir_const_value_for_raw_uint(x, bit_size);
    return nir_build_imm(build, 1, bit_size, &v);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_int(nir_builder *build, int x)
 {
    return nir_imm_intN_t(build, x, 32);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_int64(nir_builder *build, int64_t x)
 {
    return nir_imm_intN_t(build, x, 64);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_ivec2(nir_builder *build, int x, int y)
 {
    nir_const_value v[2] = {
@@ -351,7 +351,7 @@ nir_imm_ivec2(nir_builder *build, int x, int y)
    return nir_build_imm(build, 2, 32, v);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_ivec3(nir_builder *build, int x, int y, int z)
 {
    nir_const_value v[3] = {
@@ -363,7 +363,7 @@ nir_imm_ivec3(nir_builder *build, int x, int y, int z)
    return nir_build_imm(build, 3, 32, v);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
 {
    nir_const_value v[4] = {
@@ -376,31 +376,31 @@ nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
    return nir_build_imm(build, 4, 32, v);
 }
 
-nir_ssa_def *
+nir_def *
 nir_builder_alu_instr_finish_and_insert(nir_builder *build, nir_alu_instr *instr);
 
 /* for the couple special cases with more than 4 src args: */
-nir_ssa_def *
-nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_ssa_def **srcs);
+nir_def *
+nir_build_alu_src_arr(nir_builder *build, nir_op op, nir_def **srcs);
 
 /* Generic builder for system values. */
-nir_ssa_def *
+nir_def *
 nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
                       unsigned num_components, unsigned bit_size);
 
 #include "nir_builder_opcodes.h"
 #undef nir_deref_mode_is
 
-nir_ssa_def *
+nir_def *
 nir_type_convert(nir_builder *b,
-                 nir_ssa_def *src,
+                 nir_def *src,
                  nir_alu_type src_type,
                  nir_alu_type dest_type,
                  nir_rounding_mode rnd);
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_convert_to_bit_size(nir_builder *b,
-                        nir_ssa_def *src,
+                        nir_def *src,
                         nir_alu_type type,
                         unsigned bit_size)
 {
@@ -408,94 +408,94 @@ nir_convert_to_bit_size(nir_builder *b,
                            nir_rounding_mode_undef);
 }
 
-static inline nir_ssa_def *
-nir_i2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+static inline nir_def *
+nir_i2iN(nir_builder *b, nir_def *src, unsigned bit_size)
 {
    return nir_convert_to_bit_size(b, src, nir_type_int, bit_size);
 }
 
-static inline nir_ssa_def *
-nir_u2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+static inline nir_def *
+nir_u2uN(nir_builder *b, nir_def *src, unsigned bit_size)
 {
    return nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
 }
 
-static inline nir_ssa_def *
-nir_b2bN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+static inline nir_def *
+nir_b2bN(nir_builder *b, nir_def *src, unsigned bit_size)
 {
    return nir_convert_to_bit_size(b, src, nir_type_bool, bit_size);
 }
 
-static inline nir_ssa_def *
-nir_f2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+static inline nir_def *
+nir_f2fN(nir_builder *b, nir_def *src, unsigned bit_size)
 {
    return nir_convert_to_bit_size(b, src, nir_type_float, bit_size);
 }
 
-static inline nir_ssa_def *
-nir_i2b(nir_builder *b, nir_ssa_def *src)
+static inline nir_def *
+nir_i2b(nir_builder *b, nir_def *src)
 {
    return nir_ine_imm(b, src, 0);
 }
 
-static inline nir_ssa_def *
-nir_b2iN(nir_builder *b, nir_ssa_def *src, uint32_t bit_size)
+static inline nir_def *
+nir_b2iN(nir_builder *b, nir_def *src, uint32_t bit_size)
 {
    return nir_type_convert(b, src, nir_type_bool,
                            (nir_alu_type)(nir_type_int | bit_size),
                            nir_rounding_mode_undef);
 }
 
-static inline nir_ssa_def *
-nir_b2fN(nir_builder *b, nir_ssa_def *src, uint32_t bit_size)
+static inline nir_def *
+nir_b2fN(nir_builder *b, nir_def *src, uint32_t bit_size)
 {
    return nir_type_convert(b, src, nir_type_bool,
                            (nir_alu_type)(nir_type_float | bit_size),
                            nir_rounding_mode_undef);
 }
 
-static inline nir_ssa_def *
-nir_i2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+static inline nir_def *
+nir_i2fN(nir_builder *b, nir_def *src, unsigned bit_size)
 {
    return nir_type_convert(b, src, nir_type_int,
                            (nir_alu_type)(nir_type_float | bit_size),
                            nir_rounding_mode_undef);
 }
 
-static inline nir_ssa_def *
-nir_u2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+static inline nir_def *
+nir_u2fN(nir_builder *b, nir_def *src, unsigned bit_size)
 {
    return nir_type_convert(b, src, nir_type_uint,
                            (nir_alu_type)(nir_type_float | bit_size),
                            nir_rounding_mode_undef);
 }
 
-static inline nir_ssa_def *
-nir_f2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+static inline nir_def *
+nir_f2uN(nir_builder *b, nir_def *src, unsigned bit_size)
 {
    return nir_type_convert(b, src, nir_type_float,
                            (nir_alu_type)(nir_type_uint | bit_size),
                            nir_rounding_mode_undef);
 }
 
-static inline nir_ssa_def *
-nir_f2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+static inline nir_def *
+nir_f2iN(nir_builder *b, nir_def *src, unsigned bit_size)
 {
    return nir_type_convert(b, src, nir_type_float,
                            (nir_alu_type)(nir_type_int | bit_size),
                            nir_rounding_mode_undef);
 }
 
-static inline nir_ssa_def *
-nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
+static inline nir_def *
+nir_vec(nir_builder *build, nir_def **comp, unsigned num_components)
 {
    return nir_build_alu_src_arr(build, nir_op_vec(num_components), comp);
 }
 
-nir_ssa_def *
-nir_vec_scalars(nir_builder *build, nir_ssa_scalar *comp, unsigned num_components);
+nir_def *
+nir_vec_scalars(nir_builder *build, nir_scalar *comp, unsigned num_components);
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
 {
    if (src.src.ssa->num_components == num_components) {
@@ -521,8 +521,8 @@ nir_mov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
 /**
  * Construct a mov that reswizzles the source's components.
  */
-static inline nir_ssa_def *
-nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
+static inline nir_def *
+nir_swizzle(nir_builder *build, nir_def *src, const unsigned *swiz,
             unsigned num_components)
 {
    assert(num_components <= NIR_MAX_VEC_COMPONENTS);
@@ -543,8 +543,8 @@ nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
 }
 
 /* Selects the right fdot given the number of components in each source. */
-static inline nir_ssa_def *
-nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
+static inline nir_def *
+nir_fdot(nir_builder *build, nir_def *src0, nir_def *src1)
 {
    assert(src0->num_components == src1->num_components);
    switch (src0->num_components) {
@@ -569,8 +569,8 @@ nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
    return NULL;
 }
 
-static inline nir_ssa_def *
-nir_ball_iequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
+static inline nir_def *
+nir_ball_iequal(nir_builder *b, nir_def *src0, nir_def *src1)
 {
    switch (src0->num_components) {
    case 1:
@@ -592,14 +592,14 @@ nir_ball_iequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
    }
 }
 
-static inline nir_ssa_def *
-nir_ball(nir_builder *b, nir_ssa_def *src)
+static inline nir_def *
+nir_ball(nir_builder *b, nir_def *src)
 {
    return nir_ball_iequal(b, src, nir_imm_true(b));
 }
 
-static inline nir_ssa_def *
-nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
+static inline nir_def *
+nir_bany_inequal(nir_builder *b, nir_def *src0, nir_def *src1)
 {
    switch (src0->num_components) {
    case 1:
@@ -621,20 +621,20 @@ nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
    }
 }
 
-static inline nir_ssa_def *
-nir_bany(nir_builder *b, nir_ssa_def *src)
+static inline nir_def *
+nir_bany(nir_builder *b, nir_def *src)
 {
    return nir_bany_inequal(b, src, nir_imm_false(b));
 }
 
-static inline nir_ssa_def *
-nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
+static inline nir_def *
+nir_channel(nir_builder *b, nir_def *def, unsigned c)
 {
    return nir_swizzle(b, def, &c, 1);
 }
 
-static inline nir_ssa_def *
-nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
+static inline nir_def *
+nir_channels(nir_builder *b, nir_def *def, nir_component_mask_t mask)
 {
    unsigned num_channels = 0, swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
 
@@ -647,9 +647,9 @@ nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
    return nir_swizzle(b, def, swizzle, num_channels);
 }
 
-static inline nir_ssa_def *
-_nir_select_from_array_helper(nir_builder *b, nir_ssa_def **arr,
-                              nir_ssa_def *idx,
+static inline nir_def *
+_nir_select_from_array_helper(nir_builder *b, nir_def **arr,
+                              nir_def *idx,
                               unsigned start, unsigned end)
 {
    if (start == end - 1) {
@@ -662,15 +662,15 @@ _nir_select_from_array_helper(nir_builder *b, nir_ssa_def **arr,
    }
 }
 
-static inline nir_ssa_def *
-nir_select_from_ssa_def_array(nir_builder *b, nir_ssa_def **arr,
-                              unsigned arr_len, nir_ssa_def *idx)
+static inline nir_def *
+nir_select_from_ssa_def_array(nir_builder *b, nir_def **arr,
+                              unsigned arr_len, nir_def *idx)
 {
    return _nir_select_from_array_helper(b, arr, idx, 0, arr_len);
 }
 
-static inline nir_ssa_def *
-nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
+static inline nir_def *
+nir_vector_extract(nir_builder *b, nir_def *vec, nir_def *c)
 {
    nir_src c_src = nir_src_for_ssa(c);
    if (nir_src_is_const(c_src)) {
@@ -678,9 +678,9 @@ nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
       if (c_const < vec->num_components)
          return nir_channel(b, vec, c_const);
       else
-         return nir_ssa_undef(b, 1, vec->bit_size);
+         return nir_undef(b, 1, vec->bit_size);
    } else {
-      nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+      nir_def *comps[NIR_MAX_VEC_COMPONENTS];
       for (unsigned i = 0; i < vec->num_components; i++)
          comps[i] = nir_channel(b, vec, i);
       return nir_select_from_ssa_def_array(b, comps, vec->num_components, c);
@@ -688,9 +688,9 @@ nir_vector_extract(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *c)
 }
 
 /** Replaces the component of `vec` specified by `c` with `scalar` */
-static inline nir_ssa_def *
-nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
-                      nir_ssa_def *scalar, unsigned c)
+static inline nir_def *
+nir_vector_insert_imm(nir_builder *b, nir_def *vec,
+                      nir_def *scalar, unsigned c)
 {
    assert(scalar->num_components == 1);
    assert(c < vec->num_components);
@@ -712,9 +712,9 @@ nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
 }
 
 /** Replaces the component of `vec` specified by `c` with `scalar` */
-static inline nir_ssa_def *
-nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
-                  nir_ssa_def *c)
+static inline nir_def *
+nir_vector_insert(nir_builder *b, nir_def *vec, nir_def *scalar,
+                  nir_def *c)
 {
    assert(scalar->num_components == 1);
    assert(c->num_components == 1);
@@ -730,7 +730,7 @@ nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
       nir_const_value per_comp_idx_const[NIR_MAX_VEC_COMPONENTS];
       for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
          per_comp_idx_const[i] = nir_const_value_for_int(i, c->bit_size);
-      nir_ssa_def *per_comp_idx =
+      nir_def *per_comp_idx =
          nir_build_imm(b, vec->num_components,
                        c->bit_size, per_comp_idx_const);
 
@@ -742,21 +742,21 @@ nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
    }
 }
 
-static inline nir_ssa_def *
-nir_replicate(nir_builder *b, nir_ssa_def *scalar, unsigned num_components)
+static inline nir_def *
+nir_replicate(nir_builder *b, nir_def *scalar, unsigned num_components)
 {
    assert(scalar->num_components == 1);
    assert(num_components <= NIR_MAX_VEC_COMPONENTS);
 
-   nir_ssa_def *copies[NIR_MAX_VEC_COMPONENTS] = { NULL };
+   nir_def *copies[NIR_MAX_VEC_COMPONENTS] = { NULL };
    for (unsigned i = 0; i < num_components; ++i)
       copies[i] = scalar;
 
    return nir_vec(b, copies, num_components);
 }
 
-static inline nir_ssa_def *
-nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_iadd_imm(nir_builder *build, nir_def *x, uint64_t y)
 {
    assert(x->bit_size <= 64);
    y &= BITFIELD64_MASK(x->bit_size);
@@ -768,44 +768,44 @@ nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
    }
 }
 
-static inline nir_ssa_def *
-nir_iadd_imm_nuw(nir_builder *b, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_iadd_imm_nuw(nir_builder *b, nir_def *x, uint64_t y)
 {
-   nir_ssa_def *d = nir_iadd_imm(b, x, y);
+   nir_def *d = nir_iadd_imm(b, x, y);
    if (d != x && d->parent_instr->type == nir_instr_type_alu)
       nir_instr_as_alu(d->parent_instr)->no_unsigned_wrap = true;
    return d;
 }
 
-static inline nir_ssa_def *
-nir_iadd_nuw(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static inline nir_def *
+nir_iadd_nuw(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *d = nir_iadd(b, x, y);
+   nir_def *d = nir_iadd(b, x, y);
    nir_instr_as_alu(d->parent_instr)->no_unsigned_wrap = true;
    return d;
 }
 
-static inline nir_ssa_def *
-nir_fgt_imm(nir_builder *build, nir_ssa_def *src1, double src2)
+static inline nir_def *
+nir_fgt_imm(nir_builder *build, nir_def *src1, double src2)
 {
    return nir_flt(build, nir_imm_floatN_t(build, src2, src1->bit_size), src1);
 }
 
-static inline nir_ssa_def *
-nir_fle_imm(nir_builder *build, nir_ssa_def *src1, double src2)
+static inline nir_def *
+nir_fle_imm(nir_builder *build, nir_def *src1, double src2)
 {
    return nir_fge(build, nir_imm_floatN_t(build, src2, src1->bit_size), src1);
 }
 
 /* Use nir_iadd(x, -y) for reversing parameter ordering */
-static inline nir_ssa_def *
-nir_isub_imm(nir_builder *build, uint64_t y, nir_ssa_def *x)
+static inline nir_def *
+nir_isub_imm(nir_builder *build, uint64_t y, nir_def *x)
 {
    return nir_isub(build, nir_imm_intN_t(build, y, x->bit_size), x);
 }
 
-static inline nir_ssa_def *
-_nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
+static inline nir_def *
+_nir_mul_imm(nir_builder *build, nir_def *x, uint64_t y, bool amul)
 {
    assert(x->bit_size <= 64);
    y &= BITFIELD64_MASK(x->bit_size);
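
nir_iadd_nuw and nir_iadd_imm_nuw mark the resulting ALU instruction with no_unsigned_wrap, a promise that the unsigned addition cannot overflow, which later passes can exploit when reassociating address math. A minimal usage sketch (base and index are assumed defs from earlier in the shader; the names are hypothetical):

   nir_def *byte_off = nir_imul_imm(b, index, 16);      /* stride of 16 bytes */
   nir_def *addr     = nir_iadd_nuw(b, base, byte_off); /* known not to wrap */
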
@@ -825,44 +825,44 @@ _nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
    }
 }
 
-static inline nir_ssa_def *
-nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_imul_imm(nir_builder *build, nir_def *x, uint64_t y)
 {
    return _nir_mul_imm(build, x, y, false);
 }
 
-static inline nir_ssa_def *
-nir_amul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_amul_imm(nir_builder *build, nir_def *x, uint64_t y)
 {
    return _nir_mul_imm(build, x, y, true);
 }
 
-static inline nir_ssa_def *
-nir_fadd_imm(nir_builder *build, nir_ssa_def *x, double y)
+static inline nir_def *
+nir_fadd_imm(nir_builder *build, nir_def *x, double y)
 {
    return nir_fadd(build, x, nir_imm_floatN_t(build, y, x->bit_size));
 }
 
-static inline nir_ssa_def *
-nir_fsub_imm(nir_builder *build, double x, nir_ssa_def *y)
+static inline nir_def *
+nir_fsub_imm(nir_builder *build, double x, nir_def *y)
 {
    return nir_fsub(build, nir_imm_floatN_t(build, x, y->bit_size), y);
 }
 
-static inline nir_ssa_def *
-nir_fmul_imm(nir_builder *build, nir_ssa_def *x, double y)
+static inline nir_def *
+nir_fmul_imm(nir_builder *build, nir_def *x, double y)
 {
    return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
 }
 
-static inline nir_ssa_def *
-nir_fdiv_imm(nir_builder *build, nir_ssa_def *x, double y)
+static inline nir_def *
+nir_fdiv_imm(nir_builder *build, nir_def *x, double y)
 {
    return nir_fdiv(build, x, nir_imm_floatN_t(build, y, x->bit_size));
 }
 
-static inline nir_ssa_def *
-nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_iand_imm(nir_builder *build, nir_def *x, uint64_t y)
 {
    assert(x->bit_size <= 64);
    y &= BITFIELD64_MASK(x->bit_size);
@@ -876,15 +876,15 @@ nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
    }
 }
 
-static inline nir_ssa_def *
-nir_test_mask(nir_builder *build, nir_ssa_def *x, uint64_t mask)
+static inline nir_def *
+nir_test_mask(nir_builder *build, nir_def *x, uint64_t mask)
 {
    assert(mask <= BITFIELD64_MASK(x->bit_size));
    return nir_ine_imm(build, nir_iand_imm(build, x, mask), 0);
 }
 
-static inline nir_ssa_def *
-nir_ior_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_ior_imm(nir_builder *build, nir_def *x, uint64_t y)
 {
    assert(x->bit_size <= 64);
    y &= BITFIELD64_MASK(x->bit_size);
@@ -897,8 +897,8 @@ nir_ior_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
       return nir_ior(build, x, nir_imm_intN_t(build, y, x->bit_size));
 }
 
-static inline nir_ssa_def *
-nir_ishl_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+static inline nir_def *
+nir_ishl_imm(nir_builder *build, nir_def *x, uint32_t y)
 {
    if (y == 0) {
       return x;
@@ -908,8 +908,8 @@ nir_ishl_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
    }
 }
 
-static inline nir_ssa_def *
-nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+static inline nir_def *
+nir_ishr_imm(nir_builder *build, nir_def *x, uint32_t y)
 {
    if (y == 0) {
       return x;
@@ -918,8 +918,8 @@ nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
    }
 }
 
-static inline nir_ssa_def *
-nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+static inline nir_def *
+nir_ushr_imm(nir_builder *build, nir_def *x, uint32_t y)
 {
    if (y == 0) {
       return x;
@@ -928,14 +928,14 @@ nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
    }
 }
 
-static inline nir_ssa_def *
-nir_imod_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_imod_imm(nir_builder *build, nir_def *x, uint64_t y)
 {
    return nir_imod(build, x, nir_imm_intN_t(build, y, x->bit_size));
 }
 
-static inline nir_ssa_def *
-nir_udiv_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_udiv_imm(nir_builder *build, nir_def *x, uint64_t y)
 {
    assert(x->bit_size <= 64);
    y &= BITFIELD64_MASK(x->bit_size);
@@ -949,8 +949,8 @@ nir_udiv_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
    }
 }
 
-static inline nir_ssa_def *
-nir_umod_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+static inline nir_def *
+nir_umod_imm(nir_builder *build, nir_def *x, uint64_t y)
 {
    assert(y > 0 && y <= u_uintN_max(x->bit_size));
 
@@ -961,47 +961,47 @@ nir_umod_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
    }
 }
 
-static inline nir_ssa_def *
-nir_ibfe_imm(nir_builder *build, nir_ssa_def *x, uint32_t offset, uint32_t size)
+static inline nir_def *
+nir_ibfe_imm(nir_builder *build, nir_def *x, uint32_t offset, uint32_t size)
 {
    return nir_ibfe(build, x, nir_imm_int(build, offset), nir_imm_int(build, size));
 }
 
-static inline nir_ssa_def *
-nir_ubfe_imm(nir_builder *build, nir_ssa_def *x, uint32_t offset, uint32_t size)
+static inline nir_def *
+nir_ubfe_imm(nir_builder *build, nir_def *x, uint32_t offset, uint32_t size)
 {
    return nir_ubfe(build, x, nir_imm_int(build, offset), nir_imm_int(build, size));
 }
 
-static inline nir_ssa_def *
-nir_ubitfield_extract_imm(nir_builder *build, nir_ssa_def *x, uint32_t offset, uint32_t size)
+static inline nir_def *
+nir_ubitfield_extract_imm(nir_builder *build, nir_def *x, uint32_t offset, uint32_t size)
 {
    return nir_ubitfield_extract(build, x, nir_imm_int(build, offset), nir_imm_int(build, size));
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_fclamp(nir_builder *b,
-           nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
+           nir_def *x, nir_def *min_val, nir_def *max_val)
 {
    return nir_fmin(b, nir_fmax(b, x, min_val), max_val);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_iclamp(nir_builder *b,
-           nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
+           nir_def *x, nir_def *min_val, nir_def *max_val)
 {
    return nir_imin(b, nir_imax(b, x, min_val), max_val);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_uclamp(nir_builder *b,
-           nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
+           nir_def *x, nir_def *min_val, nir_def *max_val)
 {
    return nir_umin(b, nir_umax(b, x, min_val), max_val);
 }
 
-static inline nir_ssa_def *
-nir_ffma_imm12(nir_builder *build, nir_ssa_def *src0, double src1, double src2)
+static inline nir_def *
+nir_ffma_imm12(nir_builder *build, nir_def *src0, double src1, double src2)
 {
    if (build->shader->options &&
        build->shader->options->avoid_ternary_with_two_constants)
@@ -1011,27 +1011,27 @@ nir_ffma_imm12(nir_builder *build, nir_ssa_def *src0, double src1, double src2)
                       nir_imm_floatN_t(build, src2, src0->bit_size));
 }
 
-static inline nir_ssa_def *
-nir_ffma_imm1(nir_builder *build, nir_ssa_def *src0, double src1, nir_ssa_def *src2)
+static inline nir_def *
+nir_ffma_imm1(nir_builder *build, nir_def *src0, double src1, nir_def *src2)
 {
    return nir_ffma(build, src0, nir_imm_floatN_t(build, src1, src0->bit_size), src2);
 }
 
-static inline nir_ssa_def *
-nir_ffma_imm2(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1, double src2)
+static inline nir_def *
+nir_ffma_imm2(nir_builder *build, nir_def *src0, nir_def *src1, double src2)
 {
    return nir_ffma(build, src0, src1, nir_imm_floatN_t(build, src2, src0->bit_size));
 }
 
-static inline nir_ssa_def *
-nir_a_minus_bc(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1,
-               nir_ssa_def *src2)
+static inline nir_def *
+nir_a_minus_bc(nir_builder *build, nir_def *src0, nir_def *src1,
+               nir_def *src2)
 {
    return nir_ffma(build, nir_fneg(build, src1), src2, src0);
 }
 
-static inline nir_ssa_def *
-nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
+static inline nir_def *
+nir_pack_bits(nir_builder *b, nir_def *src, unsigned dest_bit_size)
 {
    assert(src->num_components * src->bit_size == dest_bit_size);
 
@@ -1057,17 +1057,17 @@ nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
    }
 
    /* If we got here, we have no dedicated pack opcode. */
-   nir_ssa_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
+   nir_def *dest = nir_imm_intN_t(b, 0, dest_bit_size);
    for (unsigned i = 0; i < src->num_components; i++) {
-      nir_ssa_def *val = nir_u2uN(b, nir_channel(b, src, i), dest_bit_size);
+      nir_def *val = nir_u2uN(b, nir_channel(b, src, i), dest_bit_size);
       val = nir_ishl(b, val, nir_imm_int(b, i * src->bit_size));
       dest = nir_ior(b, dest, val);
    }
    return dest;
 }
 
-static inline nir_ssa_def *
-nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
+static inline nir_def *
+nir_unpack_bits(nir_builder *b, nir_def *src, unsigned dest_bit_size)
 {
    assert(src->num_components == 1);
    assert(src->bit_size > dest_bit_size);
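
The fallback loop at the end of nir_pack_bits above assembles the wide result with shift-and-OR, low channel first. A plain-C analogue of what it computes for a 2x16-bit source packed into 32 bits:

   uint32_t pack_2x16(uint16_t lo, uint16_t hi)
   {
      return (uint32_t)lo | ((uint32_t)hi << 16);
   }
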
@@ -1096,9 +1096,9 @@ nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
    }
 
    /* If we got here, we have no dedicated unpack opcode. */
-   nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
+   nir_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < dest_num_components; i++) {
-      nir_ssa_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
+      nir_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
       dest_comps[i] = nir_u2uN(b, val, dest_bit_size);
    }
    return nir_vec(b, dest_comps, dest_num_components);
@@ -1117,8 +1117,8 @@ nir_unpack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
  * values are involved because that would require pack/unpack to/from a vec8
  * which NIR currently does not support.
  */
-static inline nir_ssa_def *
-nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
+static inline nir_def *
+nir_extract_bits(nir_builder *b, nir_def **srcs, unsigned num_srcs,
                  unsigned first_bit,
                  unsigned dest_num_components, unsigned dest_bit_size)
 {
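
As a hedged usage sketch of the contract described above: given a single 32-bit vec4 source named src (an assumed def), the middle 64 bits, i.e. bits [32, 96), come out as one 64-bit scalar via

   nir_def *mid64 = nir_extract_bits(b, &src, 1, 32, 1, 64);
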
@@ -1134,7 +1134,7 @@ nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
    /* We don't want to have to deal with 1-bit values */
    assert(common_bit_size >= 8);
 
-   nir_ssa_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
+   nir_def *common_comps[NIR_MAX_VEC_COMPONENTS * sizeof(uint64_t)];
    assert(num_bits / common_bit_size <= ARRAY_SIZE(common_comps));
 
    /* First, unpack to the common bit size and select the components from the
@@ -1157,10 +1157,10 @@ nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
       const unsigned rel_bit = bit - src_start_bit;
       const unsigned src_bit_size = srcs[src_idx]->bit_size;
 
-      nir_ssa_def *comp = nir_channel(b, srcs[src_idx],
-                                      rel_bit / src_bit_size);
+      nir_def *comp = nir_channel(b, srcs[src_idx],
+                                  rel_bit / src_bit_size);
       if (srcs[src_idx]->bit_size > common_bit_size) {
-         nir_ssa_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
+         nir_def *unpacked = nir_unpack_bits(b, comp, common_bit_size);
          comp = nir_channel(b, unpacked, (rel_bit % src_bit_size) / common_bit_size);
       }
       common_comps[i] = comp;
@@ -1169,10 +1169,10 @@ nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
    /* Now, re-pack the destination if we have to */
    if (dest_bit_size > common_bit_size) {
       unsigned common_per_dest = dest_bit_size / common_bit_size;
-      nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
+      nir_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
       for (unsigned i = 0; i < dest_num_components; i++) {
-         nir_ssa_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
-                                         common_per_dest);
+         nir_def *unpacked = nir_vec(b, common_comps + i * common_per_dest,
+                                     common_per_dest);
          dest_comps[i] = nir_pack_bits(b, unpacked, dest_bit_size);
       }
       return nir_vec(b, dest_comps, dest_num_components);
@@ -1182,8 +1182,8 @@ nir_extract_bits(nir_builder *b, nir_ssa_def **srcs, unsigned num_srcs,
    }
 }
 
-static inline nir_ssa_def *
-nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
+static inline nir_def *
+nir_bitcast_vector(nir_builder *b, nir_def *src, unsigned dest_bit_size)
 {
    assert((src->bit_size * src->num_components) % dest_bit_size == 0);
    const unsigned dest_num_components =
@@ -1193,8 +1193,8 @@ nir_bitcast_vector(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
    return nir_extract_bits(b, &src, 1, 0, dest_num_components, dest_bit_size);
 }
 
-static inline nir_ssa_def *
-nir_trim_vector(nir_builder *b, nir_ssa_def *src, unsigned num_components)
+static inline nir_def *
+nir_trim_vector(nir_builder *b, nir_def *src, unsigned num_components)
 {
    assert(src->num_components >= num_components);
    if (src->num_components == num_components)
@@ -1207,15 +1207,15 @@ nir_trim_vector(nir_builder *b, nir_ssa_def *src, unsigned num_components)
  * Pad a value to N components with undefs of matching bit size.
  * If the value already contains >= num_components, it is returned without change.
  */
-static inline nir_ssa_def *
-nir_pad_vector(nir_builder *b, nir_ssa_def *src, unsigned num_components)
+static inline nir_def *
+nir_pad_vector(nir_builder *b, nir_def *src, unsigned num_components)
 {
    assert(src->num_components <= num_components);
    if (src->num_components == num_components)
       return src;
 
-   nir_ssa_scalar components[NIR_MAX_VEC_COMPONENTS];
-   nir_ssa_scalar undef = nir_get_ssa_scalar(nir_ssa_undef(b, 1, src->bit_size), 0);
+   nir_scalar components[NIR_MAX_VEC_COMPONENTS];
+   nir_scalar undef = nir_get_ssa_scalar(nir_undef(b, 1, src->bit_size), 0);
    unsigned i = 0;
    for (; i < src->num_components; i++)
       components[i] = nir_get_ssa_scalar(src, i);
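
Typical use is widening a value to the size a consumer expects, e.g. padding an assumed 3-component def named rgb out to the vec4 a backend wants, with an undef in the last lane (a sketch):

   nir_def *rgba = nir_pad_vector(b, rgb, 4);   /* .w is undef */
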
@@ -1230,16 +1230,16 @@ nir_pad_vector(nir_builder *b, nir_ssa_def *src, unsigned num_components)
  * bit size. If the value already contains >= num_components, it is returned
  * without change.
  */
-static inline nir_ssa_def *
-nir_pad_vector_imm_int(nir_builder *b, nir_ssa_def *src, uint64_t imm_val,
+static inline nir_def *
+nir_pad_vector_imm_int(nir_builder *b, nir_def *src, uint64_t imm_val,
                        unsigned num_components)
 {
    assert(src->num_components <= num_components);
    if (src->num_components == num_components)
       return src;
 
-   nir_ssa_scalar components[NIR_MAX_VEC_COMPONENTS];
-   nir_ssa_scalar imm = nir_get_ssa_scalar(nir_imm_intN_t(b, imm_val, src->bit_size), 0);
+   nir_scalar components[NIR_MAX_VEC_COMPONENTS];
+   nir_scalar imm = nir_get_ssa_scalar(nir_imm_intN_t(b, imm_val, src->bit_size), 0);
    unsigned i = 0;
    for (; i < src->num_components; i++)
       components[i] = nir_get_ssa_scalar(src, i);
@@ -1253,8 +1253,8 @@ nir_pad_vector_imm_int(nir_builder *b, nir_ssa_def *src, uint64_t imm_val,
  * Pad a value to 4 components with undefs of matching bit size.
  * If the value already contains >= 4 components, it is returned without change.
  */
-static inline nir_ssa_def *
-nir_pad_vec4(nir_builder *b, nir_ssa_def *src)
+static inline nir_def *
+nir_pad_vec4(nir_builder *b, nir_def *src)
 {
    return nir_pad_vector(b, src, 4);
 }
@@ -1265,8 +1265,8 @@ nir_pad_vec4(nir_builder *b, nir_ssa_def *src)
  * need.  Prefer nir_pad_vector() or nir_trim_vector() instead if you know a
  * priori which direction you're resizing.
  */
-static inline nir_ssa_def *
-nir_resize_vector(nir_builder *b, nir_ssa_def *src, unsigned num_components)
+static inline nir_def *
+nir_resize_vector(nir_builder *b, nir_def *src, unsigned num_components)
 {
    if (src->num_components < num_components)
       return nir_pad_vector(b, src, num_components);
@@ -1274,10 +1274,10 @@ nir_resize_vector(nir_builder *b, nir_ssa_def *src, unsigned num_components)
       return nir_trim_vector(b, src, num_components);
 }
 
-nir_ssa_def *
+nir_def *
 nir_ssa_for_src(nir_builder *build, nir_src src, int num_components);
 
-nir_ssa_def *
+nir_def *
 nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn);
 
 static inline unsigned
@@ -1308,7 +1308,7 @@ nir_build_deref_var(nir_builder *build, nir_variable *var)
 
 static inline nir_deref_instr *
 nir_build_deref_array(nir_builder *build, nir_deref_instr *parent,
-                      nir_ssa_def *index)
+                      nir_def *index)
 {
    assert(glsl_type_is_array(parent->type) ||
           glsl_type_is_matrix(parent->type) ||
@@ -1337,15 +1337,15 @@ static inline nir_deref_instr *
 nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
                           int64_t index)
 {
-   nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
-                                         parent->dest.ssa.bit_size);
+   nir_def *idx_ssa = nir_imm_intN_t(build, index,
+                                     parent->dest.ssa.bit_size);
 
    return nir_build_deref_array(build, parent, idx_ssa);
 }
 
 static inline nir_deref_instr *
 nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,
-                             nir_ssa_def *index)
+                             nir_def *index)
 {
    assert(parent->deref_type == nir_deref_type_array ||
           parent->deref_type == nir_deref_type_ptr_as_array ||
@@ -1416,7 +1416,7 @@ nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
 }
 
 static inline nir_deref_instr *
-nir_build_deref_cast(nir_builder *build, nir_ssa_def *parent,
+nir_build_deref_cast(nir_builder *build, nir_def *parent,
                      nir_variable_mode modes, const struct glsl_type *type,
                      unsigned ptr_stride)
 {
@@ -1490,8 +1490,8 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
              glsl_get_length(leader_parent->type));
 
       if (leader->deref_type == nir_deref_type_array) {
-         nir_ssa_def *index = nir_i2iN(b, leader->arr.index.ssa,
-                                       parent->dest.ssa.bit_size);
+         nir_def *index = nir_i2iN(b, leader->arr.index.ssa,
+                                   parent->dest.ssa.bit_size);
          return nir_build_deref_array(b, parent, index);
       } else {
          return nir_build_deref_array_wildcard(b, parent);
@@ -1509,7 +1509,7 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
    }
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
                            enum gl_access_qualifier access)
 {
@@ -1519,7 +1519,7 @@ nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
 }
 
 #undef nir_load_deref
-static inline nir_ssa_def *
+static inline nir_def *
 nir_load_deref(nir_builder *build, nir_deref_instr *deref)
 {
    return nir_load_deref_with_access(build, deref, (enum gl_access_qualifier)0);
@@ -1527,7 +1527,7 @@ nir_load_deref(nir_builder *build, nir_deref_instr *deref)
 
 static inline void
 nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
-                            nir_ssa_def *value, unsigned writemask,
+                            nir_def *value, unsigned writemask,
                             enum gl_access_qualifier access)
 {
    writemask &= (1u << value->num_components) - 1u;
@@ -1537,7 +1537,7 @@ nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
 #undef nir_store_deref
 static inline void
 nir_store_deref(nir_builder *build, nir_deref_instr *deref,
-                nir_ssa_def *value, unsigned writemask)
+                nir_def *value, unsigned writemask)
 {
    nir_store_deref_with_access(build, deref, value, writemask,
                                (enum gl_access_qualifier)0);
@@ -1563,7 +1563,7 @@ nir_copy_deref(nir_builder *build, nir_deref_instr *dest, nir_deref_instr *src)
 
 static inline void
 nir_memcpy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
-                             nir_deref_instr *src, nir_ssa_def *size,
+                             nir_deref_instr *src, nir_def *size,
                              enum gl_access_qualifier dest_access,
                              enum gl_access_qualifier src_access)
 {
@@ -1574,21 +1574,21 @@ nir_memcpy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
 #undef nir_memcpy_deref
 static inline void
 nir_memcpy_deref(nir_builder *build, nir_deref_instr *dest,
-                 nir_deref_instr *src, nir_ssa_def *size)
+                 nir_deref_instr *src, nir_def *size)
 {
    nir_memcpy_deref_with_access(build, dest, src, size,
                                 (enum gl_access_qualifier)0,
                                 (enum gl_access_qualifier)0);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_load_var(nir_builder *build, nir_variable *var)
 {
    return nir_load_deref(build, nir_build_deref_var(build, var));
 }
 
 static inline void
-nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
+nir_store_var(nir_builder *build, nir_variable *var, nir_def *value,
               unsigned writemask)
 {
    nir_store_deref(build, nir_build_deref_var(build, var), value, writemask);
@@ -1601,15 +1601,15 @@ nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
                   nir_build_deref_var(build, src));
 }
 
-static inline nir_ssa_def *
-nir_load_array_var(nir_builder *build, nir_variable *var, nir_ssa_def *index)
+static inline nir_def *
+nir_load_array_var(nir_builder *build, nir_variable *var, nir_def *index)
 {
    nir_deref_instr *deref =
       nir_build_deref_array(build, nir_build_deref_var(build, var), index);
    return nir_load_deref(build, deref);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_load_array_var_imm(nir_builder *build, nir_variable *var, int64_t index)
 {
    nir_deref_instr *deref =
@@ -1618,8 +1618,8 @@ nir_load_array_var_imm(nir_builder *build, nir_variable *var, int64_t index)
 }
 
 static inline void
-nir_store_array_var(nir_builder *build, nir_variable *var, nir_ssa_def *index,
-                    nir_ssa_def *value, unsigned writemask)
+nir_store_array_var(nir_builder *build, nir_variable *var, nir_def *index,
+                    nir_def *value, unsigned writemask)
 {
    nir_deref_instr *deref =
       nir_build_deref_array(build, nir_build_deref_var(build, var), index);
@@ -1628,7 +1628,7 @@ nir_store_array_var(nir_builder *build, nir_variable *var, nir_ssa_def *index,
 
 static inline void
 nir_store_array_var_imm(nir_builder *build, nir_variable *var, int64_t index,
-                        nir_ssa_def *value, unsigned writemask)
+                        nir_def *value, unsigned writemask)
 {
    nir_deref_instr *deref =
       nir_build_deref_array_imm(build, nir_build_deref_var(build, var), index);
@@ -1636,8 +1636,8 @@ nir_store_array_var_imm(nir_builder *build, nir_variable *var, int64_t index,
 }
 
 #undef nir_load_global
-static inline nir_ssa_def *
-nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
+static inline nir_def *
+nir_load_global(nir_builder *build, nir_def *addr, unsigned align,
                 unsigned num_components, unsigned bit_size)
 {
    nir_intrinsic_instr *load =
@@ -1652,8 +1652,8 @@ nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
 
 #undef nir_store_global
 static inline void
-nir_store_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
-                 nir_ssa_def *value, nir_component_mask_t write_mask)
+nir_store_global(nir_builder *build, nir_def *addr, unsigned align,
+                 nir_def *value, nir_component_mask_t write_mask)
 {
    nir_intrinsic_instr *store =
       nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_global);
@@ -1667,8 +1667,8 @@ nir_store_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
 }
 
 #undef nir_load_global_constant
-static inline nir_ssa_def *
-nir_load_global_constant(nir_builder *build, nir_ssa_def *addr, unsigned align,
+static inline nir_def *
+nir_load_global_constant(nir_builder *build, nir_def *addr, unsigned align,
                          unsigned num_components, unsigned bit_size)
 {
    nir_intrinsic_instr *load =
@@ -1682,7 +1682,7 @@ nir_load_global_constant(nir_builder *build, nir_ssa_def *addr, unsigned align,
 }
 
 #undef nir_load_param
-static inline nir_ssa_def *
+static inline nir_def *
 nir_load_param(nir_builder *build, uint32_t param_idx)
 {
    assert(param_idx < build->impl->function->num_params);
@@ -1691,7 +1691,7 @@ nir_load_param(nir_builder *build, uint32_t param_idx)
 }
 
 #undef nir_decl_reg
-static inline nir_ssa_def *
+static inline nir_def *
 nir_decl_reg(nir_builder *b, unsigned num_components, unsigned bit_size,
              unsigned num_array_elems)
 {
@@ -1709,14 +1709,14 @@ nir_decl_reg(nir_builder *b, unsigned num_components, unsigned bit_size,
 }
 
 #undef nir_load_reg
-static inline nir_ssa_def *
-nir_load_reg(nir_builder *b, nir_ssa_def *reg)
+static inline nir_def *
+nir_load_reg(nir_builder *b, nir_def *reg)
 {
    nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
    unsigned num_components = nir_intrinsic_num_components(decl);
    unsigned bit_size = nir_intrinsic_bit_size(decl);
 
-   nir_ssa_def *res = nir_build_load_reg(b, num_components, bit_size, reg);
+   nir_def *res = nir_build_load_reg(b, num_components, bit_size, reg);
    res->divergent = nir_intrinsic_divergent(decl);
 
    return res;
@@ -1724,7 +1724,7 @@ nir_load_reg(nir_builder *b, nir_ssa_def *reg)
 
 #undef nir_store_reg
 static inline void
-nir_store_reg(nir_builder *b, nir_ssa_def *value, nir_ssa_def *reg)
+nir_store_reg(nir_builder *b, nir_def *value, nir_def *reg)
 {
    ASSERTED nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
    ASSERTED unsigned num_components = nir_intrinsic_num_components(decl);
@@ -1737,7 +1737,7 @@ nir_store_reg(nir_builder *b, nir_ssa_def *value, nir_ssa_def *reg)
 }
 
 static inline nir_tex_src
-nir_tex_src_for_ssa(nir_tex_src_type src_type, nir_ssa_def *def)
+nir_tex_src_for_ssa(nir_tex_src_type src_type, nir_def *def)
 {
    nir_tex_src src;
    src.src = nir_src_for_ssa(def);
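
Taken together, the register helpers above round-trip like this (a minimal sketch):

   nir_def *reg = nir_decl_reg(b, 1, 32, 0);  /* scalar, 32-bit, non-array */
   nir_store_reg(b, nir_imm_int(b, 42), reg);
   nir_def *val = nir_load_reg(b, reg);       /* reads back 42 */
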
@@ -1746,25 +1746,25 @@ nir_tex_src_for_ssa(nir_tex_src_type src_type, nir_ssa_def *def)
 }
 
 /*
- * Find a texture source, remove it, and return its nir_ssa_def. If the texture
+ * Find a texture source, remove it, and return its nir_def. If the texture
  * source does not exist, return NULL. This is useful for texture lowering passes
  * that consume their input sources and produce a new lowered source.
  */
-static inline nir_ssa_def *
+static inline nir_def *
 nir_steal_tex_src(nir_tex_instr *tex, nir_tex_src_type type_)
 {
    int idx = nir_tex_instr_src_index(tex, type_);
    if (idx < 0)
       return NULL;
 
-   nir_ssa_def *ssa = tex->src[idx].src.ssa;
+   nir_def *ssa = tex->src[idx].src.ssa;
    nir_tex_instr_remove_src(tex, idx);
    return ssa;
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_tex_deref(nir_builder *b, nir_deref_instr *t, nir_deref_instr *s,
-              nir_ssa_def *coord)
+              nir_def *coord)
 {
    nir_tex_src srcs[] = { nir_tex_src_for_ssa(nir_tex_src_coord, coord) };
 
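
The consume-and-relower pattern nir_steal_tex_src enables, sketched for a hypothetical pass that folds an offset source into an integer fetch coordinate (assumes the builder cursor is set and that the coordinate and offset have matching bit sizes and component counts):

   nir_def *off = nir_steal_tex_src(tex, nir_tex_src_offset);
   if (off) {
      int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
      nir_src_rewrite(&tex->src[idx].src,
                      nir_iadd(b, tex->src[idx].src.ssa, off));
   }
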
@@ -1772,9 +1772,9 @@ nir_tex_deref(nir_builder *b, nir_deref_instr *t, nir_deref_instr *s,
                                     ARRAY_SIZE(srcs), srcs);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_txl_deref(nir_builder *b, nir_deref_instr *t, nir_deref_instr *s,
-              nir_ssa_def *coord, nir_ssa_def *lod)
+              nir_def *coord, nir_def *lod)
 {
    nir_tex_src srcs[] = {
       nir_tex_src_for_ssa(nir_tex_src_coord, coord),
@@ -1785,9 +1785,9 @@ nir_txl_deref(nir_builder *b, nir_deref_instr *t, nir_deref_instr *s,
                                     ARRAY_SIZE(srcs), srcs);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_txl_zero_deref(nir_builder *b, nir_deref_instr *t, nir_deref_instr *s,
-                   nir_ssa_def *coord)
+                   nir_def *coord)
 {
    return nir_txl_deref(b, t, s, coord, nir_imm_float(b, 0));
 }
@@ -1806,9 +1806,9 @@ nir_tex_type_has_lod(const struct glsl_type *tex_type)
    }
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_txf_deref(nir_builder *b, nir_deref_instr *t,
-              nir_ssa_def *coord, nir_ssa_def *lod)
+              nir_def *coord, nir_def *lod)
 {
    nir_tex_src srcs[2];
    unsigned num_srcs = 0;
@@ -1825,9 +1825,9 @@ nir_txf_deref(nir_builder *b, nir_deref_instr *t,
                                     num_srcs, srcs);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_txf_ms_deref(nir_builder *b, nir_deref_instr *t,
-                 nir_ssa_def *coord, nir_ssa_def *ms_index)
+                 nir_def *coord, nir_def *ms_index)
 {
    nir_tex_src srcs[] = {
       nir_tex_src_for_ssa(nir_tex_src_coord, coord),
@@ -1838,8 +1838,8 @@ nir_txf_ms_deref(nir_builder *b, nir_deref_instr *t,
                                     ARRAY_SIZE(srcs), srcs);
 }
 
-static inline nir_ssa_def *
-nir_txs_deref(nir_builder *b, nir_deref_instr *t, nir_ssa_def *lod)
+static inline nir_def *
+nir_txs_deref(nir_builder *b, nir_deref_instr *t, nir_def *lod)
 {
    nir_tex_src srcs[1];
    unsigned num_srcs = 0;
@@ -1854,9 +1854,9 @@ nir_txs_deref(nir_builder *b, nir_deref_instr *t, nir_ssa_def *lod)
                                     num_srcs, srcs);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_samples_identical_deref(nir_builder *b, nir_deref_instr *t,
-                            nir_ssa_def *coord)
+                            nir_def *coord)
 {
    nir_tex_src srcs[] = { nir_tex_src_for_ssa(nir_tex_src_coord, coord) };
 
@@ -1865,14 +1865,14 @@ nir_samples_identical_deref(nir_builder *b, nir_deref_instr *t,
 }
 
 /* calculate a `(1 << value) - 1` in ssa without overflows */
-static inline nir_ssa_def *
-nir_mask(nir_builder *b, nir_ssa_def *bits, unsigned dst_bit_size)
+static inline nir_def *
+nir_mask(nir_builder *b, nir_def *bits, unsigned dst_bit_size)
 {
    return nir_ushr(b, nir_imm_intN_t(b, -1, dst_bit_size),
                    nir_isub_imm(b, dst_bit_size, nir_u2u32(b, bits)));
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
                      unsigned interp_mode)
 {
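
The comment on nir_mask above is the whole trick: computing (1 << bits) - 1 directly is undefined once bits equals the destination width, so the helper shifts an all-ones constant right instead. A plain-C analogue for 32 bits, valid for bits in [1, 32], where the naive form breaks at exactly 32:

   uint32_t mask32(uint32_t bits)
   {
      return 0xffffffffu >> (32 - bits);   /* no overflowing left shift */
   }
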
@@ -1913,9 +1913,9 @@ nir_goto_if(nir_builder *build, struct nir_block *target, nir_src cond,
    nir_builder_instr_insert(build, &jump->instr);
 }
 
-nir_ssa_def *
+nir_def *
 nir_compare_func(nir_builder *b, enum compare_func func,
-                 nir_ssa_def *src0, nir_ssa_def *src1);
+                 nir_def *src0, nir_def *src1);
 
 static inline void
 nir_scoped_memory_barrier(nir_builder *b,
@@ -1926,8 +1926,8 @@ nir_scoped_memory_barrier(nir_builder *b,
    nir_barrier(b, SCOPE_NONE, scope, semantics, modes);
 }
 
-nir_ssa_def *
-nir_gen_rect_vertices(nir_builder *b, nir_ssa_def *z, nir_ssa_def *w);
+nir_def *
+nir_gen_rect_vertices(nir_builder *b, nir_def *z, nir_def *w);
 
 #ifdef __cplusplus
 } /* extern "C" */
index 8aba905..243178d 100644
@@ -26,7 +26,7 @@ template = """\
 
 <%
 def src_decl_list(num_srcs):
-   return ', '.join('nir_ssa_def *src' + str(i) for i in range(num_srcs))
+   return ', '.join('nir_def *src' + str(i) for i in range(num_srcs))
 
 def src_list(num_srcs):
    return ', '.join('src' + str(i) for i in range(num_srcs))
@@ -43,7 +43,7 @@ def intrinsic_prefix(name):
 
 % for name, opcode in sorted(opcodes.items()):
 % if not needs_num_components(opcode):
-static inline nir_ssa_def *
+static inline nir_def *
 nir_${name}(nir_builder *build, ${src_decl_list(opcode.num_inputs)})
 {
 % if opcode.is_conversion and \
@@ -54,7 +54,7 @@ nir_${name}(nir_builder *build, ${src_decl_list(opcode.num_inputs)})
 % if opcode.num_inputs <= 4:
    return nir_build_alu${opcode.num_inputs}(build, nir_op_${name}, ${src_list(opcode.num_inputs)});
 % else:
-   nir_ssa_def *srcs[${opcode.num_inputs}] = {${src_list(opcode.num_inputs)}};
+   nir_def *srcs[${opcode.num_inputs}] = {${src_list(opcode.num_inputs)}};
    return nir_build_alu_src_arr(build, nir_op_${name}, srcs);
 % endif
 }
@@ -83,7 +83,7 @@ def intrinsic_decl_list(opcode):
     if opcode.has_dest and len(opcode.bit_sizes) != 1 and opcode.bit_size_src == -1:
         res += ', unsigned bit_size'
     for i in range(opcode.num_srcs):
-        res += ', nir_ssa_def *src' + str(i)
+        res += ', nir_def *src' + str(i)
     if opcode.indices:
         res += ', struct _nir_' + opcode.name + '_indices indices'
     return res
@@ -112,7 +112,7 @@ def get_intrinsic_bitsize(opcode):
 
 % for name, opcode in sorted(INTR_OPCODES.items()):
 % if opcode.has_dest:
-static inline nir_ssa_def *
+static inline nir_def *
 % else:
 static inline nir_intrinsic_instr *
 % endif
@@ -178,30 +178,30 @@ _nir_build_${name}(build${intrinsic_macro_list(opcode)}, (struct _nir_${name}_in
 % endfor
 
 % for name in ['flt', 'fge', 'feq', 'fneu']:
-static inline nir_ssa_def *
-nir_${name}_imm(nir_builder *build, nir_ssa_def *src1, double src2)
+static inline nir_def *
+nir_${name}_imm(nir_builder *build, nir_def *src1, double src2)
 {
    return nir_${name}(build, src1, nir_imm_floatN_t(build, src2, src1->bit_size));
 }
 % endfor
 
 % for name in ['ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
-static inline nir_ssa_def *
-nir_${name}_imm(nir_builder *build, nir_ssa_def *src1, uint64_t src2)
+static inline nir_def *
+nir_${name}_imm(nir_builder *build, nir_def *src1, uint64_t src2)
 {
    return nir_${name}(build, src1, nir_imm_intN_t(build, src2, src1->bit_size));
 }
 % endfor
 
 % for prefix in ['i', 'u']:
-static inline nir_ssa_def *
-nir_${prefix}gt_imm(nir_builder *build, nir_ssa_def *src1, uint64_t src2)
+static inline nir_def *
+nir_${prefix}gt_imm(nir_builder *build, nir_def *src1, uint64_t src2)
 {
    return nir_${prefix}lt(build, nir_imm_intN_t(build, src2, src1->bit_size), src1);
 }
 
-static inline nir_ssa_def *
-nir_${prefix}le_imm(nir_builder *build, nir_ssa_def *src1, uint64_t src2)
+static inline nir_def *
+nir_${prefix}le_imm(nir_builder *build, nir_def *src1, uint64_t src2)
 {
    return nir_${prefix}ge(build, nir_imm_intN_t(build, src2, src1->bit_size), src1);
 }
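
For concreteness, instantiating the first comparison loop above with name='flt' generates:

   static inline nir_def *
   nir_flt_imm(nir_builder *build, nir_def *src1, double src2)
   {
      return nir_flt(build, src1, nir_imm_floatN_t(build, src2, src1->bit_size));
   }
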
index 9ba8a5c..a8465ec 100644
@@ -27,8 +27,8 @@
 #include "nir.h"
 #include "nir_builtin_builder.h"
 
-nir_ssa_def *
-nir_cross3(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+nir_def *
+nir_cross3(nir_builder *b, nir_def *x, nir_def *y)
 {
    unsigned yzx[3] = { 1, 2, 0 };
    unsigned zxy[3] = { 2, 0, 1 };
@@ -39,10 +39,10 @@ nir_cross3(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
                                         nir_swizzle(b, y, yzx, 3))));
 }
 
-nir_ssa_def *
-nir_cross4(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+nir_def *
+nir_cross4(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *cross = nir_cross3(b, x, y);
+   nir_def *cross = nir_cross3(b, x, y);
 
    return nir_vec4(b,
                    nir_channel(b, cross, 0),
@@ -51,21 +51,21 @@ nir_cross4(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
                    nir_imm_intN_t(b, 0, cross->bit_size));
 }
 
-nir_ssa_def *
-nir_fast_length(nir_builder *b, nir_ssa_def *vec)
+nir_def *
+nir_fast_length(nir_builder *b, nir_def *vec)
 {
    return nir_fsqrt(b, nir_fdot(b, vec, vec));
 }
 
-nir_ssa_def *
-nir_nextafter(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+nir_def *
+nir_nextafter(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *zero = nir_imm_intN_t(b, 0, x->bit_size);
-   nir_ssa_def *one = nir_imm_intN_t(b, 1, x->bit_size);
+   nir_def *zero = nir_imm_intN_t(b, 0, x->bit_size);
+   nir_def *one = nir_imm_intN_t(b, 1, x->bit_size);
 
-   nir_ssa_def *condeq = nir_feq(b, x, y);
-   nir_ssa_def *conddir = nir_flt(b, x, y);
-   nir_ssa_def *condzero = nir_feq(b, x, zero);
+   nir_def *condeq = nir_feq(b, x, y);
+   nir_def *conddir = nir_flt(b, x, y);
+   nir_def *condzero = nir_feq(b, x, zero);
 
    uint64_t sign_mask = 1ull << (x->bit_size - 1);
    uint64_t min_abs = 1;
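
The body that follows steps the integer image of x by one. A plain-C analogue of that core trick, restricted to positive, finite, nonzero float x (the NIR version additionally handles zeros, direction, and NaN):

   #include <stdint.h>
   #include <string.h>

   static float next_up(float x)
   {
      uint32_t u;
      memcpy(&u, &x, sizeof u);
      u += 1;                    /* one ULP toward +infinity */
      memcpy(&x, &u, sizeof u);
      return x;
   }
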
@@ -88,54 +88,54 @@ nir_nextafter(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    }
 
    /* beware of: +/-0.0 - 1 == NaN */
-   nir_ssa_def *xn =
+   nir_def *xn =
       nir_bcsel(b,
                 condzero,
                 nir_imm_intN_t(b, sign_mask | min_abs, x->bit_size),
                 nir_isub(b, x, one));
 
    /* beware of -0.0 + 1 == -0x1p-149 */
-   nir_ssa_def *xp = nir_bcsel(b, condzero,
-                               nir_imm_intN_t(b, min_abs, x->bit_size),
-                               nir_iadd(b, x, one));
+   nir_def *xp = nir_bcsel(b, condzero,
+                           nir_imm_intN_t(b, min_abs, x->bit_size),
+                           nir_iadd(b, x, one));
 
    /* nextafter can be implemented by just +/- 1 on the int value */
-   nir_ssa_def *res =
+   nir_def *res =
       nir_bcsel(b, nir_ixor(b, conddir, nir_flt(b, x, zero)), xp, xn);
 
    return nir_nan_check2(b, x, y, nir_bcsel(b, condeq, x, res));
 }
 
-nir_ssa_def *
-nir_normalize(nir_builder *b, nir_ssa_def *vec)
+nir_def *
+nir_normalize(nir_builder *b, nir_def *vec)
 {
    if (vec->num_components == 1)
       return nir_fsign(b, vec);
 
-   nir_ssa_def *f0 = nir_imm_floatN_t(b, 0.0, vec->bit_size);
-   nir_ssa_def *f1 = nir_imm_floatN_t(b, 1.0, vec->bit_size);
-   nir_ssa_def *finf = nir_imm_floatN_t(b, INFINITY, vec->bit_size);
+   nir_def *f0 = nir_imm_floatN_t(b, 0.0, vec->bit_size);
+   nir_def *f1 = nir_imm_floatN_t(b, 1.0, vec->bit_size);
+   nir_def *finf = nir_imm_floatN_t(b, INFINITY, vec->bit_size);
 
    /* scale the input to increase precision */
-   nir_ssa_def *maxc = nir_fmax_abs_vec_comp(b, vec);
-   nir_ssa_def *svec = nir_fdiv(b, vec, maxc);
+   nir_def *maxc = nir_fmax_abs_vec_comp(b, vec);
+   nir_def *svec = nir_fdiv(b, vec, maxc);
    /* for inf */
-   nir_ssa_def *finfvec = nir_copysign(b, nir_bcsel(b, nir_feq(b, vec, finf), f1, f0), f1);
+   nir_def *finfvec = nir_copysign(b, nir_bcsel(b, nir_feq(b, vec, finf), f1, f0), f1);
 
-   nir_ssa_def *temp = nir_bcsel(b, nir_feq(b, maxc, finf), finfvec, svec);
-   nir_ssa_def *res = nir_fmul(b, temp, nir_frsq(b, nir_fdot(b, temp, temp)));
+   nir_def *temp = nir_bcsel(b, nir_feq(b, maxc, finf), finfvec, svec);
+   nir_def *res = nir_fmul(b, temp, nir_frsq(b, nir_fdot(b, temp, temp)));
 
    return nir_bcsel(b, nir_feq(b, maxc, f0), vec, res);
 }
 
-nir_ssa_def *
-nir_smoothstep(nir_builder *b, nir_ssa_def *edge0, nir_ssa_def *edge1, nir_ssa_def *x)
+nir_def *
+nir_smoothstep(nir_builder *b, nir_def *edge0, nir_def *edge1, nir_def *x)
 {
-   nir_ssa_def *f2 = nir_imm_floatN_t(b, 2.0, x->bit_size);
-   nir_ssa_def *f3 = nir_imm_floatN_t(b, 3.0, x->bit_size);
+   nir_def *f2 = nir_imm_floatN_t(b, 2.0, x->bit_size);
+   nir_def *f3 = nir_imm_floatN_t(b, 3.0, x->bit_size);
 
    /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
-   nir_ssa_def *t =
+   nir_def *t =
       nir_fsat(b, nir_fdiv(b, nir_fsub(b, x, edge0),
                            nir_fsub(b, edge1, edge0)));
 
@@ -143,15 +143,15 @@ nir_smoothstep(nir_builder *b, nir_ssa_def *edge0, nir_ssa_def *edge1, nir_ssa_d
    return nir_fmul(b, t, nir_fmul(b, t, nir_a_minus_bc(b, f3, f2, t)));
 }
 
-nir_ssa_def *
-nir_upsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo)
+nir_def *
+nir_upsample(nir_builder *b, nir_def *hi, nir_def *lo)
 {
    assert(lo->num_components == hi->num_components);
    assert(lo->bit_size == hi->bit_size);
 
-   nir_ssa_def *res[NIR_MAX_VEC_COMPONENTS];
+   nir_def *res[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < lo->num_components; ++i) {
-      nir_ssa_def *vec = nir_vec2(b, nir_channel(b, lo, i), nir_channel(b, hi, i));
+      nir_def *vec = nir_vec2(b, nir_channel(b, lo, i), nir_channel(b, hi, i));
       res[i] = nir_pack_bits(b, vec, vec->bit_size * 2);
    }
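
nir_upsample concatenates each hi:lo lane pair into a lane of twice the width; with 8-bit inputs, hi = 0x12 and lo = 0x34 yield the 16-bit lane 0x1234.
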
 
@@ -161,10 +161,10 @@ nir_upsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo)
 /**
  * Compute xs[0] + xs[1] + xs[2] + ... using fadd.
  */
-static nir_ssa_def *
-build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
+static nir_def *
+build_fsum(nir_builder *b, nir_def **xs, int terms)
 {
-   nir_ssa_def *accum = xs[0];
+   nir_def *accum = xs[0];
 
    for (int i = 1; i < terms; i++)
       accum = nir_fadd(b, accum, xs[i]);
@@ -172,13 +172,13 @@ build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
    return accum;
 }
 
-nir_ssa_def *
-nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
+nir_def *
+nir_atan(nir_builder *b, nir_def *y_over_x)
 {
    const uint32_t bit_size = y_over_x->bit_size;
 
-   nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
-   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0f, bit_size);
+   nir_def *abs_y_over_x = nir_fabs(b, y_over_x);
+   nir_def *one = nir_imm_floatN_t(b, 1.0f, bit_size);
 
    /*
     * range-reduction, first step:
@@ -187,8 +187,8 @@ nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
     * x = <
     *      \ 1.0 / y_over_x   otherwise
     */
-   nir_ssa_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
-                             nir_fmax(b, abs_y_over_x, one));
+   nir_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
+                         nir_fmax(b, abs_y_over_x, one));
 
    /*
     * approximate atan by evaluating polynomial:
@@ -197,14 +197,14 @@ nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
     * x^5 * 0.1938924977115610 - x^7  * 0.1173503194786851 +
     * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
     */
-   nir_ssa_def *x_2 = nir_fmul(b, x, x);
-   nir_ssa_def *x_3 = nir_fmul(b, x_2, x);
-   nir_ssa_def *x_5 = nir_fmul(b, x_3, x_2);
-   nir_ssa_def *x_7 = nir_fmul(b, x_5, x_2);
-   nir_ssa_def *x_9 = nir_fmul(b, x_7, x_2);
-   nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);
-
-   nir_ssa_def *polynomial_terms[] = {
+   nir_def *x_2 = nir_fmul(b, x, x);
+   nir_def *x_3 = nir_fmul(b, x_2, x);
+   nir_def *x_5 = nir_fmul(b, x_3, x_2);
+   nir_def *x_7 = nir_fmul(b, x_5, x_2);
+   nir_def *x_9 = nir_fmul(b, x_7, x_2);
+   nir_def *x_11 = nir_fmul(b, x_9, x_2);
+
+   nir_def *polynomial_terms[] = {
       nir_fmul_imm(b, x, 0.9999793128310355f),
       nir_fmul_imm(b, x_3, -0.3326756418091246f),
       nir_fmul_imm(b, x_5, 0.1938924977115610f),
@@ -213,7 +213,7 @@ nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
       nir_fmul_imm(b, x_11, -0.0121323213173444f),
    };
 
-   nir_ssa_def *tmp =
+   nir_def *tmp =
       build_fsum(b, polynomial_terms, ARRAY_SIZE(polynomial_terms));
 
    /* range-reduction fixup */
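
The fixup applies the standard reciprocal identity, used here when |y_over_x| > 1, so the polynomial is only ever evaluated on arguments in [0, 1]:

   atan(u) = π/2 - atan(1/u),   u > 0
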
@@ -223,7 +223,7 @@ nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
                   tmp);
 
    /* sign fixup */
-   nir_ssa_def *result = nir_fmul(b, tmp, nir_fsign(b, y_over_x));
+   nir_def *result = nir_fmul(b, tmp, nir_fsign(b, y_over_x));
 
    /* The fmin and fmax above will filter out NaN values.  This leads to
     * non-NaN results for NaN inputs.  Work around this by doing
@@ -235,7 +235,7 @@ nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
       const bool exact = b->exact;
 
       b->exact = true;
-      nir_ssa_def *is_not_nan = nir_feq(b, y_over_x, y_over_x);
+      nir_def *is_not_nan = nir_feq(b, y_over_x, y_over_x);
       b->exact = exact;
 
       /* The extra 1.0*y_over_x ensures that subnormal results are flushed to
@@ -247,14 +247,14 @@ nir_atan(nir_builder *b, nir_ssa_def *y_over_x)
    return result;
 }
 
-nir_ssa_def *
-nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
+nir_def *
+nir_atan2(nir_builder *b, nir_def *y, nir_def *x)
 {
    assert(y->bit_size == x->bit_size);
    const uint32_t bit_size = x->bit_size;
 
-   nir_ssa_def *zero = nir_imm_floatN_t(b, 0, bit_size);
-   nir_ssa_def *one = nir_imm_floatN_t(b, 1, bit_size);
+   nir_def *zero = nir_imm_floatN_t(b, 0, bit_size);
+   nir_def *one = nir_imm_floatN_t(b, 1, bit_size);
 
    /* If we're on the left half-plane rotate the coordinates π/2 clock-wise
     * for the y=0 discontinuity to end up aligned with the vertical
@@ -262,9 +262,9 @@ nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
     * don't attempt to divide by zero along the vertical line, which may give
     * unspecified results on non-GLSL 4.1-capable hardware.
     */
-   nir_ssa_def *flip = nir_fge(b, zero, x);
-   nir_ssa_def *s = nir_bcsel(b, flip, nir_fabs(b, x), y);
-   nir_ssa_def *t = nir_bcsel(b, flip, y, nir_fabs(b, x));
+   nir_def *flip = nir_fge(b, zero, x);
+   nir_def *s = nir_bcsel(b, flip, nir_fabs(b, x), y);
+   nir_def *t = nir_bcsel(b, flip, y, nir_fabs(b, x));
 
    /* If the magnitude of the denominator exceeds some huge value, scale down
     * the arguments in order to prevent the reciprocal operation from flushing
@@ -285,10 +285,10 @@ nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
     * 24-bit representation.
     */
    const double huge_val = bit_size >= 32 ? 1e18 : 16384;
-   nir_ssa_def *scale = nir_bcsel(b, nir_fge_imm(b, nir_fabs(b, t), huge_val),
-                                  nir_imm_floatN_t(b, 0.25, bit_size), one);
-   nir_ssa_def *rcp_scaled_t = nir_frcp(b, nir_fmul(b, t, scale));
-   nir_ssa_def *s_over_t = nir_fmul(b, nir_fmul(b, s, scale), rcp_scaled_t);
+   nir_def *scale = nir_bcsel(b, nir_fge_imm(b, nir_fabs(b, t), huge_val),
+                              nir_imm_floatN_t(b, 0.25, bit_size), one);
+   nir_def *rcp_scaled_t = nir_frcp(b, nir_fmul(b, t, scale));
+   nir_def *s_over_t = nir_fmul(b, nir_fmul(b, s, scale), rcp_scaled_t);
 
    /* For |x| = |y| assume tan = 1 even if infinite (i.e. pretend momentarily
     * that ∞/∞ = 1) in order to comply with the rather artificial rules
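
To make the scaling above concrete for the 16-bit case (huge_val = 16384): with t = 2^15, frcp would produce 2^-15, which is subnormal in fp16 (minimum normal is 2^-14) and may flush to zero, destroying s/t entirely. Scaling both operands by 0.25 instead gives frcp(2^13) = 2^-13, a normal value, while the product s*scale*rcp_scaled_t still equals s/t.
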
@@ -307,13 +307,13 @@ nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
     * at (0,0), so we take that license (i.e. pretend that 0/0 = 1 here as
     * well).
     */
-   nir_ssa_def *tan = nir_bcsel(b, nir_feq(b, nir_fabs(b, x), nir_fabs(b, y)),
-                                one, nir_fabs(b, s_over_t));
+   nir_def *tan = nir_bcsel(b, nir_feq(b, nir_fabs(b, x), nir_fabs(b, y)),
+                            one, nir_fabs(b, s_over_t));
 
    /* Calculate the arctangent and fix up the result if we had flipped the
     * coordinate system.
     */
-   nir_ssa_def *arc =
+   nir_def *arc =
       nir_ffma_imm1(b, nir_b2fN(b, flip, bit_size), M_PI_2, nir_atan(b, tan));
 
    /* Rather convoluted calculation of the sign of the result.  When x < 0 we
@@ -329,7 +329,7 @@ nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
                     nir_fneg(b, arc), arc);
 }
 
-nir_ssa_def *
+nir_def *
 nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
 {
    b->cursor = nir_before_instr(&tex->instr);
@@ -380,7 +380,7 @@ nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
    return &txs->dest.ssa;
 }
 
-nir_ssa_def *
+nir_def *
 nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
 {
    b->cursor = nir_before_instr(&tex->instr);
index ff263ee..1acfb3f 100644
@@ -36,199 +36,199 @@ extern "C" {
  * Definitions for functions in the C file come first.
  */
 
-nir_ssa_def *nir_cross3(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
-nir_ssa_def *nir_cross4(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
-nir_ssa_def *nir_fast_length(nir_builder *b, nir_ssa_def *vec);
-nir_ssa_def *nir_nextafter(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y);
-nir_ssa_def *nir_normalize(nir_builder *b, nir_ssa_def *vec);
-nir_ssa_def *nir_smoothstep(nir_builder *b, nir_ssa_def *edge0,
-                            nir_ssa_def *edge1, nir_ssa_def *x);
-nir_ssa_def *nir_upsample(nir_builder *b, nir_ssa_def *hi, nir_ssa_def *lo);
-nir_ssa_def *nir_atan(nir_builder *b, nir_ssa_def *y_over_x);
-nir_ssa_def *nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x);
-
-nir_ssa_def *
+nir_def *nir_cross3(nir_builder *b, nir_def *x, nir_def *y);
+nir_def *nir_cross4(nir_builder *b, nir_def *x, nir_def *y);
+nir_def *nir_fast_length(nir_builder *b, nir_def *vec);
+nir_def *nir_nextafter(nir_builder *b, nir_def *x, nir_def *y);
+nir_def *nir_normalize(nir_builder *b, nir_def *vec);
+nir_def *nir_smoothstep(nir_builder *b, nir_def *edge0,
+                        nir_def *edge1, nir_def *x);
+nir_def *nir_upsample(nir_builder *b, nir_def *hi, nir_def *lo);
+nir_def *nir_atan(nir_builder *b, nir_def *y_over_x);
+nir_def *nir_atan2(nir_builder *b, nir_def *y, nir_def *x);
+
+nir_def *
 nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex);
 
-nir_ssa_def *
+nir_def *
 nir_get_texture_size(nir_builder *b, nir_tex_instr *tex);
 
-static inline nir_ssa_def *
-nir_fisnan(nir_builder *b, nir_ssa_def *x)
+static inline nir_def *
+nir_fisnan(nir_builder *b, nir_def *x)
 {
    bool old_exact = b->exact;
    b->exact = true;
-   nir_ssa_def *res = nir_fneu(b, x, x);
+   nir_def *res = nir_fneu(b, x, x);
    b->exact = old_exact;
    return res;
 }
 
-static inline nir_ssa_def *
-nir_nan_check2(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *res)
+static inline nir_def *
+nir_nan_check2(nir_builder *b, nir_def *x, nir_def *y, nir_def *res)
 {
    return nir_bcsel(b, nir_fisnan(b, x), x, nir_bcsel(b, nir_fisnan(b, y), y, res));
 }
 
-static inline nir_ssa_def *
-nir_fmax_abs_vec_comp(nir_builder *b, nir_ssa_def *vec)
+static inline nir_def *
+nir_fmax_abs_vec_comp(nir_builder *b, nir_def *vec)
 {
-   nir_ssa_def *abs = nir_fabs(b, vec);
-   nir_ssa_def *res = nir_channel(b, abs, 0);
+   nir_def *abs = nir_fabs(b, vec);
+   nir_def *res = nir_channel(b, abs, 0);
    for (unsigned i = 1; i < vec->num_components; ++i)
       res = nir_fmax(b, res, nir_channel(b, abs, i));
    return res;
 }
 
-static inline nir_ssa_def *
-nir_iabs_diff(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static inline nir_def *
+nir_iabs_diff(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *cond = nir_ige(b, x, y);
-   nir_ssa_def *res0 = nir_isub(b, x, y);
-   nir_ssa_def *res1 = nir_isub(b, y, x);
+   nir_def *cond = nir_ige(b, x, y);
+   nir_def *res0 = nir_isub(b, x, y);
+   nir_def *res1 = nir_isub(b, y, x);
    return nir_bcsel(b, cond, res0, res1);
 }
 
-static inline nir_ssa_def *
-nir_uabs_diff(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static inline nir_def *
+nir_uabs_diff(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *cond = nir_uge(b, x, y);
-   nir_ssa_def *res0 = nir_isub(b, x, y);
-   nir_ssa_def *res1 = nir_isub(b, y, x);
+   nir_def *cond = nir_uge(b, x, y);
+   nir_def *res0 = nir_isub(b, x, y);
+   nir_def *res1 = nir_isub(b, y, x);
    return nir_bcsel(b, cond, res0, res1);
 }
 
-static inline nir_ssa_def *
-nir_fexp(nir_builder *b, nir_ssa_def *x)
+static inline nir_def *
+nir_fexp(nir_builder *b, nir_def *x)
 {
    return nir_fexp2(b, nir_fmul_imm(b, x, M_LOG2E));
 }
 
-static inline nir_ssa_def *
-nir_flog(nir_builder *b, nir_ssa_def *x)
+static inline nir_def *
+nir_flog(nir_builder *b, nir_def *x)
 {
    return nir_fmul_imm(b, nir_flog2(b, x), 1.0 / M_LOG2E);
 }
 
-static inline nir_ssa_def *
-nir_imad24(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z)
+static inline nir_def *
+nir_imad24(nir_builder *b, nir_def *x, nir_def *y, nir_def *z)
 {
-   nir_ssa_def *temp = nir_imul24(b, x, y);
+   nir_def *temp = nir_imul24(b, x, y);
    return nir_iadd(b, temp, z);
 }
 
-static inline nir_ssa_def *
-nir_imad_hi(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z)
+static inline nir_def *
+nir_imad_hi(nir_builder *b, nir_def *x, nir_def *y, nir_def *z)
 {
-   nir_ssa_def *temp = nir_imul_high(b, x, y);
+   nir_def *temp = nir_imul_high(b, x, y);
    return nir_iadd(b, temp, z);
 }
 
-static inline nir_ssa_def *
-nir_umad_hi(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z)
+static inline nir_def *
+nir_umad_hi(nir_builder *b, nir_def *x, nir_def *y, nir_def *z)
 {
-   nir_ssa_def *temp = nir_umul_high(b, x, y);
+   nir_def *temp = nir_umul_high(b, x, y);
    return nir_iadd(b, temp, z);
 }
 
-static inline nir_ssa_def *
-nir_bitselect(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *s)
+static inline nir_def *
+nir_bitselect(nir_builder *b, nir_def *x, nir_def *y, nir_def *s)
 {
    return nir_ior(b, nir_iand(b, nir_inot(b, s), x), nir_iand(b, s, y));
 }
 
-static inline nir_ssa_def *
-nir_copysign(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static inline nir_def *
+nir_copysign(nir_builder *b, nir_def *x, nir_def *y)
 {
    uint64_t masks = 1ull << (x->bit_size - 1);
    uint64_t maskv = ~masks;
 
-   nir_ssa_def *s = nir_imm_intN_t(b, masks, x->bit_size);
-   nir_ssa_def *v = nir_imm_intN_t(b, maskv, x->bit_size);
+   nir_def *s = nir_imm_intN_t(b, masks, x->bit_size);
+   nir_def *v = nir_imm_intN_t(b, maskv, x->bit_size);
 
    return nir_ior(b, nir_iand(b, x, v), nir_iand(b, y, s));
 }
 
-static inline nir_ssa_def *
-nir_degrees(nir_builder *b, nir_ssa_def *val)
+static inline nir_def *
+nir_degrees(nir_builder *b, nir_def *val)
 {
    return nir_fmul_imm(b, val, 180.0 / M_PI);
 }
 
-static inline nir_ssa_def *
-nir_fdim(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static inline nir_def *
+nir_fdim(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *cond = nir_flt(b, y, x);
-   nir_ssa_def *res = nir_fsub(b, x, y);
-   nir_ssa_def *zero = nir_imm_floatN_t(b, 0.0, x->bit_size);
+   nir_def *cond = nir_flt(b, y, x);
+   nir_def *res = nir_fsub(b, x, y);
+   nir_def *zero = nir_imm_floatN_t(b, 0.0, x->bit_size);
 
    // return NaN if either x or y is NaN, else x-y if x>y, else +0.0
    return nir_nan_check2(b, x, y, nir_bcsel(b, cond, res, zero));
 }
 
-static inline nir_ssa_def *
-nir_fast_distance(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static inline nir_def *
+nir_fast_distance(nir_builder *b, nir_def *x, nir_def *y)
 {
    return nir_fast_length(b, nir_fsub(b, x, y));
 }
 
-static inline nir_ssa_def *
-nir_fast_normalize(nir_builder *b, nir_ssa_def *vec)
+static inline nir_def *
+nir_fast_normalize(nir_builder *b, nir_def *vec)
 {
    return nir_fdiv(b, vec, nir_fast_length(b, vec));
 }
 
-static inline nir_ssa_def *
-nir_fmad(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *z)
+static inline nir_def *
+nir_fmad(nir_builder *b, nir_def *x, nir_def *y, nir_def *z)
 {
    return nir_fadd(b, nir_fmul(b, x, y), z);
 }
 
-static inline nir_ssa_def *
-nir_maxmag(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static inline nir_def *
+nir_maxmag(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *xabs = nir_fabs(b, x);
-   nir_ssa_def *yabs = nir_fabs(b, y);
+   nir_def *xabs = nir_fabs(b, x);
+   nir_def *yabs = nir_fabs(b, y);
 
-   nir_ssa_def *condy = nir_flt(b, xabs, yabs);
-   nir_ssa_def *condx = nir_flt(b, yabs, xabs);
+   nir_def *condy = nir_flt(b, xabs, yabs);
+   nir_def *condx = nir_flt(b, yabs, xabs);
 
    return nir_bcsel(b, condy, y, nir_bcsel(b, condx, x, nir_fmax(b, x, y)));
 }
 
-static inline nir_ssa_def *
-nir_minmag(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static inline nir_def *
+nir_minmag(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *xabs = nir_fabs(b, x);
-   nir_ssa_def *yabs = nir_fabs(b, y);
+   nir_def *xabs = nir_fabs(b, x);
+   nir_def *yabs = nir_fabs(b, y);
 
-   nir_ssa_def *condx = nir_flt(b, xabs, yabs);
-   nir_ssa_def *condy = nir_flt(b, yabs, xabs);
+   nir_def *condx = nir_flt(b, xabs, yabs);
+   nir_def *condy = nir_flt(b, yabs, xabs);
 
    return nir_bcsel(b, condy, y, nir_bcsel(b, condx, x, nir_fmin(b, x, y)));
 }
 
-static inline nir_ssa_def *
-nir_nan(nir_builder *b, nir_ssa_def *x)
+static inline nir_def *
+nir_nan(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *nan = nir_imm_floatN_t(b, NAN, x->bit_size);
+   nir_def *nan = nir_imm_floatN_t(b, NAN, x->bit_size);
    if (x->num_components == 1)
       return nan;
 
-   nir_ssa_def *nans[NIR_MAX_VEC_COMPONENTS];
+   nir_def *nans[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < x->num_components; ++i)
       nans[i] = nan;
 
    return nir_vec(b, nans, x->num_components);
 }
 
-static inline nir_ssa_def *
-nir_radians(nir_builder *b, nir_ssa_def *val)
+static inline nir_def *
+nir_radians(nir_builder *b, nir_def *val)
 {
    return nir_fmul_imm(b, val, M_PI / 180.0);
 }
 
-static inline nir_ssa_def *
-nir_select(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *s)
+static inline nir_def *
+nir_select(nir_builder *b, nir_def *x, nir_def *y, nir_def *s)
 {
    if (s->num_components != 1) {
       uint64_t mask = 1ull << (s->bit_size - 1);
@@ -237,26 +237,26 @@ nir_select(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *s)
    return nir_bcsel(b, nir_ieq_imm(b, s, 0), x, y);
 }
 
-static inline nir_ssa_def *
-nir_ftan(nir_builder *b, nir_ssa_def *x)
+static inline nir_def *
+nir_ftan(nir_builder *b, nir_def *x)
 {
    return nir_fdiv(b, nir_fsin(b, x), nir_fcos(b, x));
 }
 
-static inline nir_ssa_def *
-nir_clz_u(nir_builder *b, nir_ssa_def *a)
+static inline nir_def *
+nir_clz_u(nir_builder *b, nir_def *a)
 {
-   nir_ssa_def *val;
+   nir_def *val;
    val = nir_isub_imm(b, a->bit_size - 1,
                       nir_ufind_msb(b, nir_u2uN(b, a,
                                                 MAX2(a->bit_size, 32))));
    return nir_u2uN(b, val, a->bit_size);
 }
 
-static inline nir_ssa_def *
-nir_ctz_u(nir_builder *b, nir_ssa_def *a)
+static inline nir_def *
+nir_ctz_u(nir_builder *b, nir_def *a)
 {
-   nir_ssa_def *cond = nir_ieq_imm(b, a, 0);
+   nir_def *cond = nir_ieq_imm(b, a, 0);
 
    return nir_bcsel(b, cond,
                     nir_imm_intN_t(b, a->bit_size, a->bit_size),
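
Editorial aside, not part of this commit: after the rename, caller-side helpers in this header read the same way with `nir_def` throughout. A minimal sketch in the same style (the name `build_manhattan_length` is hypothetical):

   static inline nir_def *
   build_manhattan_length(nir_builder *b, nir_def *vec)
   {
      /* sum |vec[i]| over all components, mirroring the max-abs
       * reduction at the top of this file */
      nir_def *sum = nir_fabs(b, nir_channel(b, vec, 0));
      for (unsigned i = 1; i < vec->num_components; ++i)
         sum = nir_fadd(b, sum, nir_fabs(b, nir_channel(b, vec, i)));
      return sum;
   }
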
index b10aba5..e4a0be9 100644 (file)
@@ -322,12 +322,12 @@ clone_load_const(clone_state *state, const nir_load_const_instr *lc)
    return nlc;
 }
 
-static nir_ssa_undef_instr *
-clone_ssa_undef(clone_state *state, const nir_ssa_undef_instr *sa)
+static nir_undef_instr *
+clone_ssa_undef(clone_state *state, const nir_undef_instr *sa)
 {
-   nir_ssa_undef_instr *nsa =
-      nir_ssa_undef_instr_create(state->ns, sa->def.num_components,
-                                 sa->def.bit_size);
+   nir_undef_instr *nsa =
+      nir_undef_instr_create(state->ns, sa->def.num_components,
+                             sa->def.bit_size);
 
    add_remap(state, &nsa->def, &sa->def);
 
index 6483eb0..781620c 100644 (file)
@@ -223,10 +223,10 @@ nir_insert_phi_undef(nir_block *block, nir_block *pred)
 {
    nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
    nir_foreach_phi(phi, block) {
-      nir_ssa_undef_instr *undef =
-         nir_ssa_undef_instr_create(impl->function->shader,
-                                    phi->dest.ssa.num_components,
-                                    phi->dest.ssa.bit_size);
+      nir_undef_instr *undef =
+         nir_undef_instr_create(impl->function->shader,
+                                phi->dest.ssa.num_components,
+                                phi->dest.ssa.bit_size);
       nir_instr_insert_before_cf_list(&impl->body, &undef->instr);
       nir_phi_src *src = nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(&undef->def));
       list_addtail(&src->src.use_link, &undef->def.uses);
@@ -641,16 +641,16 @@ nir_cf_node_insert(nir_cursor cursor, nir_cf_node *node)
 }
 
 static bool
-replace_ssa_def_uses(nir_ssa_def *def, void *void_impl)
+replace_ssa_def_uses(nir_def *def, void *void_impl)
 {
    nir_function_impl *impl = void_impl;
 
-   nir_ssa_undef_instr *undef =
-      nir_ssa_undef_instr_create(impl->function->shader,
-                                 def->num_components,
-                                 def->bit_size);
+   nir_undef_instr *undef =
+      nir_undef_instr_create(impl->function->shader,
+                             def->num_components,
+                             def->bit_size);
    nir_instr_insert_before_cf_list(&impl->body, &undef->instr);
-   nir_ssa_def_rewrite_uses(def, &undef->def);
+   nir_def_rewrite_uses(def, &undef->def);
    return true;
 }
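
Both hunks above use the same detach-to-undef recipe under the new names; condensed, with `impl` and `def` assumed in scope:

   nir_undef_instr *undef =
      nir_undef_instr_create(impl->function->shader,
                             def->num_components, def->bit_size);
   nir_instr_insert_before_cf_list(&impl->body, &undef->instr);
   nir_def_rewrite_uses(def, &undef->def);
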
 
index a96d65a..1600cb1 100644 (file)
@@ -32,8 +32,8 @@
 extern "C" {
 #endif
 
-static inline nir_ssa_def *
-nir_round_float_to_int(nir_builder *b, nir_ssa_def *src,
+static inline nir_def *
+nir_round_float_to_int(nir_builder *b, nir_def *src,
                        nir_rounding_mode round)
 {
    switch (round) {
@@ -53,8 +53,8 @@ nir_round_float_to_int(nir_builder *b, nir_ssa_def *src,
    unreachable("unexpected rounding mode");
 }
 
-static inline nir_ssa_def *
-nir_round_float_to_float(nir_builder *b, nir_ssa_def *src,
+static inline nir_def *
+nir_round_float_to_float(nir_builder *b, nir_def *src,
                          unsigned dest_bit_size,
                          nir_rounding_mode round)
 {
@@ -73,23 +73,23 @@ nir_round_float_to_float(nir_builder *b, nir_ssa_def *src,
    case nir_rounding_mode_ru: {
       /* If lower-precision conversion results in a lower value, push it
        * up one ULP. */
-      nir_ssa_def *lower_prec =
+      nir_def *lower_prec =
          nir_build_alu(b, low_conv, src, NULL, NULL, NULL);
-      nir_ssa_def *roundtrip =
+      nir_def *roundtrip =
          nir_build_alu(b, high_conv, lower_prec, NULL, NULL, NULL);
-      nir_ssa_def *cmp = nir_flt(b, roundtrip, src);
-      nir_ssa_def *inf = nir_imm_floatN_t(b, INFINITY, dest_bit_size);
+      nir_def *cmp = nir_flt(b, roundtrip, src);
+      nir_def *inf = nir_imm_floatN_t(b, INFINITY, dest_bit_size);
       return nir_bcsel(b, cmp, nir_nextafter(b, lower_prec, inf), lower_prec);
    }
    case nir_rounding_mode_rd: {
       /* If lower-precision conversion results in a higher value, push it
        * down one ULP. */
-      nir_ssa_def *lower_prec =
+      nir_def *lower_prec =
          nir_build_alu(b, low_conv, src, NULL, NULL, NULL);
-      nir_ssa_def *roundtrip =
+      nir_def *roundtrip =
          nir_build_alu(b, high_conv, lower_prec, NULL, NULL, NULL);
-      nir_ssa_def *cmp = nir_flt(b, src, roundtrip);
-      nir_ssa_def *neg_inf = nir_imm_floatN_t(b, -INFINITY, dest_bit_size);
+      nir_def *cmp = nir_flt(b, src, roundtrip);
+      nir_def *neg_inf = nir_imm_floatN_t(b, -INFINITY, dest_bit_size);
       return nir_bcsel(b, cmp, nir_nextafter(b, lower_prec, neg_inf), lower_prec);
    }
    case nir_rounding_mode_rtz:
@@ -105,8 +105,8 @@ nir_round_float_to_float(nir_builder *b, nir_ssa_def *src,
    unreachable("unexpected rounding mode");
 }
 
-static inline nir_ssa_def *
-nir_round_int_to_float(nir_builder *b, nir_ssa_def *src,
+static inline nir_def *
+nir_round_int_to_float(nir_builder *b, nir_def *src,
                        nir_alu_type src_type,
                        unsigned dest_bit_size,
                        nir_rounding_mode round)
@@ -133,12 +133,12 @@ nir_round_int_to_float(nir_builder *b, nir_ssa_def *src,
       return src;
 
    if (src_type == nir_type_int) {
-      nir_ssa_def *sign =
+      nir_def *sign =
          nir_i2b(b, nir_ishr(b, src, nir_imm_int(b, src->bit_size - 1)));
-      nir_ssa_def *abs = nir_iabs(b, src);
-      nir_ssa_def *positive_rounded =
+      nir_def *abs = nir_iabs(b, src);
+      nir_def *positive_rounded =
          nir_round_int_to_float(b, abs, nir_type_uint, dest_bit_size, round);
-      nir_ssa_def *max_positive =
+      nir_def *max_positive =
          nir_imm_intN_t(b, (1ull << (src->bit_size - 1)) - 1, src->bit_size);
       switch (round) {
       case nir_rounding_mode_rtz:
@@ -162,13 +162,13 @@ nir_round_int_to_float(nir_builder *b, nir_ssa_def *src,
       }
       unreachable("unexpected rounding mode");
    } else {
-      nir_ssa_def *mantissa_bit_size = nir_imm_int(b, mantissa_bits);
-      nir_ssa_def *msb = nir_imax(b, nir_ufind_msb(b, src), mantissa_bit_size);
-      nir_ssa_def *bits_to_lose = nir_isub(b, msb, mantissa_bit_size);
-      nir_ssa_def *one = nir_imm_intN_t(b, 1, src->bit_size);
-      nir_ssa_def *adjust = nir_ishl(b, one, bits_to_lose);
-      nir_ssa_def *mask = nir_inot(b, nir_isub(b, adjust, one));
-      nir_ssa_def *truncated = nir_iand(b, src, mask);
+      nir_def *mantissa_bit_size = nir_imm_int(b, mantissa_bits);
+      nir_def *msb = nir_imax(b, nir_ufind_msb(b, src), mantissa_bit_size);
+      nir_def *bits_to_lose = nir_isub(b, msb, mantissa_bit_size);
+      nir_def *one = nir_imm_intN_t(b, 1, src->bit_size);
+      nir_def *adjust = nir_ishl(b, one, bits_to_lose);
+      nir_def *mask = nir_inot(b, nir_isub(b, adjust, one));
+      nir_def *truncated = nir_iand(b, src, mask);
       switch (round) {
       case nir_rounding_mode_rtz:
       case nir_rounding_mode_rd:
@@ -230,7 +230,7 @@ static inline void
 nir_get_clamp_limits(nir_builder *b,
                      nir_alu_type src_type,
                      nir_alu_type dest_type,
-                     nir_ssa_def **low, nir_ssa_def **high)
+                     nir_def **low, nir_def **high)
 {
    /* Split types from bit sizes */
    nir_alu_type src_base_type = nir_alu_type_get_base_type(src_type);
@@ -342,10 +342,10 @@ nir_get_clamp_limits(nir_builder *b,
  * src/src_type: The variables used for comparison
  * dest_type: The type which determines the range used for comparison
  */
-static inline nir_ssa_def *
+static inline nir_def *
 nir_clamp_to_type_range(nir_builder *b,
-                        nir_ssa_def *val, nir_alu_type val_type,
-                        nir_ssa_def *src, nir_alu_type src_type,
+                        nir_def *val, nir_alu_type val_type,
+                        nir_def *src, nir_alu_type src_type,
                         nir_alu_type dest_type)
 {
    assert(nir_alu_type_get_type_size(src_type) == 0 ||
@@ -355,10 +355,10 @@ nir_clamp_to_type_range(nir_builder *b,
       return val;
 
    /* limits of the destination type, expressed in the source type */
-   nir_ssa_def *low = NULL, *high = NULL;
+   nir_def *low = NULL, *high = NULL;
    nir_get_clamp_limits(b, src_type, dest_type, &low, &high);
 
-   nir_ssa_def *low_cond = NULL, *high_cond = NULL;
+   nir_def *low_cond = NULL, *high_cond = NULL;
    switch (nir_alu_type_get_base_type(src_type)) {
    case nir_type_int:
       low_cond = low ? nir_ilt(b, src, low) : NULL;
@@ -376,12 +376,12 @@ nir_clamp_to_type_range(nir_builder *b,
       unreachable("clamping from unknown type");
    }
 
-   nir_ssa_def *val_low = low, *val_high = high;
+   nir_def *val_low = low, *val_high = high;
    if (val_type != src_type) {
       nir_get_clamp_limits(b, val_type, dest_type, &val_low, &val_high);
    }
 
-   nir_ssa_def *res = val;
+   nir_def *res = val;
    if (low_cond && val_low)
       res = nir_bcsel(b, low_cond, val_low, res);
    if (high_cond && val_high)
@@ -430,9 +430,9 @@ nir_simplify_conversion_rounding(nir_alu_type src_type,
    return rounding;
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_convert_with_rounding(nir_builder *b,
-                          nir_ssa_def *src, nir_alu_type src_type,
+                          nir_def *src, nir_alu_type src_type,
                           nir_alu_type dest_type,
                           nir_rounding_mode round,
                           bool clamp)
@@ -483,7 +483,7 @@ nir_convert_with_rounding(nir_builder *b,
    if (trivial_convert)
       return nir_type_convert(b, src, src_type, dest_type, round);
 
-   nir_ssa_def *dest = src;
+   nir_def *dest = src;
 
    /* clamp the result into range */
    if (clamp && !clamp_after_conversion)
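
As a hedged usage sketch of the entry point renamed here (builder `b` and a 32-bit float `src` assumed), conversion with explicit rounding and clamping looks like:

   nir_def *f16 = nir_convert_with_rounding(b, src,
                                            nir_type_float32, nir_type_float16,
                                            nir_rounding_mode_rd,
                                            true /* clamp to fp16 range */);
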
index 92933bb..d7462f5 100644 (file)
@@ -109,7 +109,7 @@ nir_deref_instr_remove_if_unused(nir_deref_instr *instr)
 
    for (nir_deref_instr *d = instr; d; d = nir_deref_instr_parent(d)) {
       /* If anyone is using this deref, leave it alone */
-      if (!nir_ssa_def_is_unused(&d->dest.ssa))
+      if (!nir_def_is_unused(&d->dest.ssa))
          break;
 
       nir_instr_remove(&d->instr);
@@ -339,19 +339,19 @@ nir_deref_instr_get_const_offset(nir_deref_instr *deref,
    return offset;
 }
 
-nir_ssa_def *
+nir_def *
 nir_build_deref_offset(nir_builder *b, nir_deref_instr *deref,
                        glsl_type_size_align_func size_align)
 {
    nir_deref_path path;
    nir_deref_path_init(&path, deref, NULL);
 
-   nir_ssa_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size);
+   nir_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size);
    for (nir_deref_instr **p = &path.path[1]; *p; p++) {
       switch ((*p)->deref_type) {
       case nir_deref_type_array:
       case nir_deref_type_ptr_as_array: {
-         nir_ssa_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+         nir_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
          int stride = type_get_array_stride((*p)->type, size_align);
          offset = nir_iadd(b, offset, nir_amul_imm(b, index, stride));
          break;
@@ -1121,8 +1121,8 @@ opt_remove_sampler_cast(nir_deref_instr *cast)
    /* We're a cast from a more detailed sampler type to a bare sampler or a
     * texture type with the same dimensionality.
     */
-   nir_ssa_def_rewrite_uses(&cast->dest.ssa,
-                            &parent->dest.ssa);
+   nir_def_rewrite_uses(&cast->dest.ssa,
+                        &parent->dest.ssa);
    nir_instr_remove(&cast->instr);
 
    /* Recursively crawl the deref tree and clean up types */
@@ -1169,7 +1169,7 @@ opt_replace_struct_wrapper_cast(nir_builder *b, nir_deref_instr *cast)
       return false;
 
    nir_deref_instr *replace = nir_build_deref_struct(b, parent, 0);
-   nir_ssa_def_rewrite_uses(&cast->dest.ssa, &replace->dest.ssa);
+   nir_def_rewrite_uses(&cast->dest.ssa, &replace->dest.ssa);
    nir_deref_instr_remove_if_unused(cast);
    return true;
 }
@@ -1242,8 +1242,8 @@ opt_deref_ptr_as_array(nir_builder *b, nir_deref_instr *deref)
           parent->cast.align_mul == 0 &&
           is_trivial_deref_cast(parent))
          parent = nir_deref_instr_parent(parent);
-      nir_ssa_def_rewrite_uses(&deref->dest.ssa,
-                               &parent->dest.ssa);
+      nir_def_rewrite_uses(&deref->dest.ssa,
+                           &parent->dest.ssa);
       nir_instr_remove(&deref->instr);
       return true;
    }
@@ -1254,8 +1254,8 @@ opt_deref_ptr_as_array(nir_builder *b, nir_deref_instr *deref)
 
    deref->arr.in_bounds &= parent->arr.in_bounds;
 
-   nir_ssa_def *new_idx = nir_iadd(b, parent->arr.index.ssa,
-                                   deref->arr.index.ssa);
+   nir_def *new_idx = nir_iadd(b, parent->arr.index.ssa,
+                               deref->arr.index.ssa);
 
    deref->deref_type = parent->deref_type;
    nir_instr_rewrite_src(&deref->instr, &deref->parent, parent->parent);
@@ -1311,8 +1311,8 @@ is_vector_bitcast_deref(nir_deref_instr *cast,
    return true;
 }
 
-static nir_ssa_def *
-resize_vector(nir_builder *b, nir_ssa_def *data, unsigned num_components)
+static nir_def *
+resize_vector(nir_builder *b, nir_def *data, unsigned num_components)
 {
    if (num_components == data->num_components)
       return data;
@@ -1331,7 +1331,7 @@ opt_load_vec_deref(nir_builder *b, nir_intrinsic_instr *load)
 {
    nir_deref_instr *deref = nir_src_as_deref(load->src[0]);
    nir_component_mask_t read_mask =
-      nir_ssa_def_components_read(&load->dest.ssa);
+      nir_def_components_read(&load->dest.ssa);
 
    /* LLVM loves to take advantage of the fact that vec3s in OpenCL are
     * vec4-aligned and so it can just read/write them as vec4s.  This
@@ -1353,13 +1353,13 @@ opt_load_vec_deref(nir_builder *b, nir_intrinsic_instr *load)
       load->num_components = new_num_comps;
 
       b->cursor = nir_after_instr(&load->instr);
-      nir_ssa_def *data = &load->dest.ssa;
+      nir_def *data = &load->dest.ssa;
       if (old_bit_size != new_bit_size)
          data = nir_bitcast_vector(b, &load->dest.ssa, old_bit_size);
       data = resize_vector(b, data, old_num_comps);
 
-      nir_ssa_def_rewrite_uses_after(&load->dest.ssa, data,
-                                     data->parent_instr);
+      nir_def_rewrite_uses_after(&load->dest.ssa, data,
+                                 data->parent_instr);
       return true;
    }
 
@@ -1377,7 +1377,7 @@ opt_store_vec_deref(nir_builder *b, nir_intrinsic_instr *store)
     * results in a LOT of vec4->vec3 casts on loads and stores.
     */
    if (is_vector_bitcast_deref(deref, write_mask, true)) {
-      nir_ssa_def *data = store->src[1].ssa;
+      nir_def *data = store->src[1].ssa;
 
       const unsigned old_bit_size = data->bit_size;
 
@@ -1415,7 +1415,7 @@ opt_known_deref_mode_is(nir_builder *b, nir_intrinsic_instr *intrin)
    if (deref == NULL)
       return false;
 
-   nir_ssa_def *deref_is = NULL;
+   nir_def *deref_is = NULL;
 
    if (nir_deref_mode_must_be(deref, modes))
       deref_is = nir_imm_true(b);
@@ -1426,7 +1426,7 @@ opt_known_deref_mode_is(nir_builder *b, nir_intrinsic_instr *intrin)
    if (deref_is == NULL)
       return false;
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, deref_is);
+   nir_def_rewrite_uses(&intrin->dest.ssa, deref_is);
    nir_instr_remove(&intrin->instr);
    return true;
 }
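
The fold above is the standard replace-and-remove idiom spelled with the new names; schematically, for an intrinsic `intrin` whose result is statically known:

   b->cursor = nir_before_instr(&intrin->instr);
   nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_true(b));
   nir_instr_remove(&intrin->instr);
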
index 087146f..e62a7ce 100644 (file)
@@ -56,8 +56,8 @@ void nir_deref_path_finish(nir_deref_path *path);
 unsigned nir_deref_instr_get_const_offset(nir_deref_instr *deref,
                                           glsl_type_size_align_func size_align);
 
-nir_ssa_def *nir_build_deref_offset(nir_builder *b, nir_deref_instr *deref,
-                                    glsl_type_size_align_func size_align);
+nir_def *nir_build_deref_offset(nir_builder *b, nir_deref_instr *deref,
+                                glsl_type_size_align_func size_align);
 
 nir_deref_path *nir_get_deref_path(void *mem_ctx, nir_deref_and_path *deref);
 
index 871aa30..265a2e6 100644 (file)
@@ -663,7 +663,7 @@ visit_load_const(nir_load_const_instr *instr)
 }
 
 static bool
-visit_ssa_undef(nir_ssa_undef_instr *instr)
+visit_ssa_undef(nir_undef_instr *instr)
 {
    return false;
 }
@@ -773,7 +773,7 @@ visit_jump(nir_jump_instr *jump, struct divergence_state *state)
 }
 
 static bool
-set_ssa_def_not_divergent(nir_ssa_def *def, UNUSED void *_state)
+set_ssa_def_not_divergent(nir_def *def, UNUSED void *_state)
 {
    def->divergent = false;
    return true;
@@ -870,7 +870,7 @@ visit_loop_header_phi(nir_phi_instr *phi, nir_block *preheader, bool divergent_c
    if (phi->dest.ssa.divergent)
       return false;
 
-   nir_ssa_def *same = NULL;
+   nir_def *same = NULL;
    nir_foreach_phi_src(src, phi) {
       /* if any source value is divergent, the resulting value is divergent */
       if (src->src.ssa->divergent) {
index 9b11e9b..3ebc637 100644 (file)
@@ -25,8 +25,8 @@
 
 #include "util/format_rgb9e5.h"
 
-static inline nir_ssa_def *
-nir_shift_imm(nir_builder *b, nir_ssa_def *value, int left_shift)
+static inline nir_def *
+nir_shift_imm(nir_builder *b, nir_def *value, int left_shift)
 {
    if (left_shift > 0)
       return nir_ishl_imm(b, value, left_shift);
@@ -36,8 +36,8 @@ nir_shift_imm(nir_builder *b, nir_ssa_def *value, int left_shift)
       return value;
 }
 
-static inline nir_ssa_def *
-nir_shift(nir_builder *b, nir_ssa_def *value, nir_ssa_def *left_shift)
+static inline nir_def *
+nir_shift(nir_builder *b, nir_def *value, nir_def *left_shift)
 {
    return nir_bcsel(b,
                     nir_ige_imm(b, left_shift, 0),
@@ -45,22 +45,22 @@ nir_shift(nir_builder *b, nir_ssa_def *value, nir_ssa_def *left_shift)
                     nir_ushr(b, value, nir_ineg(b, left_shift)));
 }
 
-static inline nir_ssa_def *
-nir_mask_shift(struct nir_builder *b, nir_ssa_def *src,
+static inline nir_def *
+nir_mask_shift(struct nir_builder *b, nir_def *src,
                uint32_t mask, int left_shift)
 {
    return nir_shift_imm(b, nir_iand_imm(b, src, mask), left_shift);
 }
 
-static inline nir_ssa_def *
-nir_mask_shift_or(struct nir_builder *b, nir_ssa_def *dst, nir_ssa_def *src,
+static inline nir_def *
+nir_mask_shift_or(struct nir_builder *b, nir_def *dst, nir_def *src,
                   uint32_t src_mask, int src_left_shift)
 {
    return nir_ior(b, nir_mask_shift(b, src, src_mask, src_left_shift), dst);
 }
 
-static inline nir_ssa_def *
-nir_format_mask_uvec(nir_builder *b, nir_ssa_def *src, const unsigned *bits)
+static inline nir_def *
+nir_format_mask_uvec(nir_builder *b, nir_def *src, const unsigned *bits)
 {
    nir_const_value mask[NIR_MAX_VEC_COMPONENTS];
    memset(mask, 0, sizeof(mask));
@@ -71,12 +71,12 @@ nir_format_mask_uvec(nir_builder *b, nir_ssa_def *src, const unsigned *bits)
    return nir_iand(b, src, nir_build_imm(b, src->num_components, 32, mask));
 }
 
-static inline nir_ssa_def *
-nir_format_sign_extend_ivec(nir_builder *b, nir_ssa_def *src,
+static inline nir_def *
+nir_format_sign_extend_ivec(nir_builder *b, nir_def *src,
                             const unsigned *bits)
 {
    assert(src->num_components <= 4);
-   nir_ssa_def *comps[4];
+   nir_def *comps[4];
    for (unsigned i = 0; i < src->num_components; i++) {
       unsigned shift = src->bit_size - bits[i];
       comps[i] = nir_ishr_imm(b, nir_ishl_imm(b, nir_channel(b, src, i), shift),
@@ -85,14 +85,14 @@ nir_format_sign_extend_ivec(nir_builder *b, nir_ssa_def *src,
    return nir_vec(b, comps, src->num_components);
 }
 
-static inline nir_ssa_def *
-nir_format_unpack_int(nir_builder *b, nir_ssa_def *packed,
+static inline nir_def *
+nir_format_unpack_int(nir_builder *b, nir_def *packed,
                       const unsigned *bits, unsigned num_components,
                       bool sign_extend)
 {
    assert(num_components >= 1 && num_components <= 4);
    const unsigned bit_size = packed->bit_size;
-   nir_ssa_def *comps[4];
+   nir_def *comps[4];
 
    if (bits[0] >= bit_size) {
       assert(bits[0] == bit_size);
@@ -105,7 +105,7 @@ nir_format_unpack_int(nir_builder *b, nir_ssa_def *packed,
    for (unsigned i = 0; i < num_components; i++) {
       assert(bits[i] < bit_size);
       assert(offset + bits[i] <= bit_size);
-      nir_ssa_def *chan = nir_channel(b, packed, next_chan);
+      nir_def *chan = nir_channel(b, packed, next_chan);
       unsigned lshift = bit_size - (offset + bits[i]);
       unsigned rshift = bit_size - bits[i];
       if (sign_extend)
@@ -122,26 +122,26 @@ nir_format_unpack_int(nir_builder *b, nir_ssa_def *packed,
    return nir_vec(b, comps, num_components);
 }
 
-static inline nir_ssa_def *
-nir_format_unpack_uint(nir_builder *b, nir_ssa_def *packed,
+static inline nir_def *
+nir_format_unpack_uint(nir_builder *b, nir_def *packed,
                        const unsigned *bits, unsigned num_components)
 {
    return nir_format_unpack_int(b, packed, bits, num_components, false);
 }
 
-static inline nir_ssa_def *
-nir_format_unpack_sint(nir_builder *b, nir_ssa_def *packed,
+static inline nir_def *
+nir_format_unpack_sint(nir_builder *b, nir_def *packed,
                        const unsigned *bits, unsigned num_components)
 {
    return nir_format_unpack_int(b, packed, bits, num_components, true);
 }
 
-static inline nir_ssa_def *
-nir_format_pack_uint_unmasked(nir_builder *b, nir_ssa_def *color,
+static inline nir_def *
+nir_format_pack_uint_unmasked(nir_builder *b, nir_def *color,
                               const unsigned *bits, unsigned num_components)
 {
    assert(num_components >= 1 && num_components <= 4);
-   nir_ssa_def *packed = nir_imm_int(b, 0);
+   nir_def *packed = nir_imm_int(b, 0);
    unsigned offset = 0;
    for (unsigned i = 0; i < num_components; i++) {
       packed = nir_ior(b, packed, nir_shift_imm(b, nir_channel(b, color, i), offset));
@@ -152,12 +152,12 @@ nir_format_pack_uint_unmasked(nir_builder *b, nir_ssa_def *color,
    return packed;
 }
 
-static inline nir_ssa_def *
-nir_format_pack_uint_unmasked_ssa(nir_builder *b, nir_ssa_def *color,
-                                  nir_ssa_def *bits)
+static inline nir_def *
+nir_format_pack_uint_unmasked_ssa(nir_builder *b, nir_def *color,
+                                  nir_def *bits)
 {
-   nir_ssa_def *packed = nir_imm_int(b, 0);
-   nir_ssa_def *offset = nir_imm_int(b, 0);
+   nir_def *packed = nir_imm_int(b, 0);
+   nir_def *offset = nir_imm_int(b, 0);
    for (unsigned i = 0; i < bits->num_components; i++) {
       packed = nir_ior(b, packed, nir_ishl(b, nir_channel(b, color, i), offset));
       offset = nir_iadd(b, offset, nir_channel(b, bits, i));
@@ -165,16 +165,16 @@ nir_format_pack_uint_unmasked_ssa(nir_builder *b, nir_ssa_def *color,
    return packed;
 }
 
-static inline nir_ssa_def *
-nir_format_pack_uint(nir_builder *b, nir_ssa_def *color,
+static inline nir_def *
+nir_format_pack_uint(nir_builder *b, nir_def *color,
                      const unsigned *bits, unsigned num_components)
 {
    return nir_format_pack_uint_unmasked(b, nir_format_mask_uvec(b, color, bits),
                                         bits, num_components);
 }
 
-static inline nir_ssa_def *
-nir_format_bitcast_uvec_unmasked(nir_builder *b, nir_ssa_def *src,
+static inline nir_def *
+nir_format_bitcast_uvec_unmasked(nir_builder *b, nir_def *src,
                                  unsigned src_bits, unsigned dst_bits)
 {
    assert(src->bit_size >= src_bits && src->bit_size >= dst_bits);
@@ -188,13 +188,13 @@ nir_format_bitcast_uvec_unmasked(nir_builder *b, nir_ssa_def *src,
       DIV_ROUND_UP(src->num_components * src_bits, dst_bits);
    assert(dst_components <= 4);
 
-   nir_ssa_def *dst_chan[4] = { 0 };
+   nir_def *dst_chan[4] = { 0 };
    if (dst_bits > src_bits) {
       unsigned shift = 0;
       unsigned dst_idx = 0;
       for (unsigned i = 0; i < src->num_components; i++) {
-         nir_ssa_def *shifted = nir_ishl_imm(b, nir_channel(b, src, i),
-                                             shift);
+         nir_def *shifted = nir_ishl_imm(b, nir_channel(b, src, i),
+                                         shift);
          if (shift == 0) {
             dst_chan[dst_idx] = shifted;
          } else {
@@ -229,7 +229,7 @@ nir_format_bitcast_uvec_unmasked(nir_builder *b, nir_ssa_def *src,
    return nir_vec(b, dst_chan, dst_components);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 _nir_format_norm_factor(nir_builder *b, const unsigned *bits,
                         unsigned num_components,
                         bool is_signed)
@@ -243,29 +243,29 @@ _nir_format_norm_factor(nir_builder *b, const unsigned *bits,
    return nir_build_imm(b, num_components, 32, factor);
 }
 
-static inline nir_ssa_def *
-nir_format_unorm_to_float(nir_builder *b, nir_ssa_def *u, const unsigned *bits)
+static inline nir_def *
+nir_format_unorm_to_float(nir_builder *b, nir_def *u, const unsigned *bits)
 {
-   nir_ssa_def *factor =
+   nir_def *factor =
       _nir_format_norm_factor(b, bits, u->num_components, false);
 
    return nir_fdiv(b, nir_u2f32(b, u), factor);
 }
 
-static inline nir_ssa_def *
-nir_format_snorm_to_float(nir_builder *b, nir_ssa_def *s, const unsigned *bits)
+static inline nir_def *
+nir_format_snorm_to_float(nir_builder *b, nir_def *s, const unsigned *bits)
 {
-   nir_ssa_def *factor =
+   nir_def *factor =
       _nir_format_norm_factor(b, bits, s->num_components, true);
 
    return nir_fmax(b, nir_fdiv(b, nir_i2f32(b, s), factor),
                    nir_imm_float(b, -1.0f));
 }
 
-static inline nir_ssa_def *
-nir_format_float_to_unorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
+static inline nir_def *
+nir_format_float_to_unorm(nir_builder *b, nir_def *f, const unsigned *bits)
 {
-   nir_ssa_def *factor =
+   nir_def *factor =
       _nir_format_norm_factor(b, bits, f->num_components, false);
 
    /* Clamp to the range [0, 1] */
@@ -274,10 +274,10 @@ nir_format_float_to_unorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
    return nir_f2u32(b, nir_fround_even(b, nir_fmul(b, f, factor)));
 }
 
-static inline nir_ssa_def *
-nir_format_float_to_snorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
+static inline nir_def *
+nir_format_float_to_snorm(nir_builder *b, nir_def *f, const unsigned *bits)
 {
-   nir_ssa_def *factor =
+   nir_def *factor =
       _nir_format_norm_factor(b, bits, f->num_components, true);
 
    /* Clamp to the range [-1, 1] */
@@ -289,21 +289,21 @@ nir_format_float_to_snorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
 /* Converts a vector of floats to a vector of half-floats packed in the low 16
  * bits.
  */
-static inline nir_ssa_def *
-nir_format_float_to_half(nir_builder *b, nir_ssa_def *f)
+static inline nir_def *
+nir_format_float_to_half(nir_builder *b, nir_def *f)
 {
-   nir_ssa_def *zero = nir_imm_float(b, 0);
-   nir_ssa_def *f16comps[4];
+   nir_def *zero = nir_imm_float(b, 0);
+   nir_def *f16comps[4];
    for (unsigned i = 0; i < f->num_components; i++)
       f16comps[i] = nir_pack_half_2x16_split(b, nir_channel(b, f, i), zero);
    return nir_vec(b, f16comps, f->num_components);
 }
 
-static inline nir_ssa_def *
-nir_format_linear_to_srgb(nir_builder *b, nir_ssa_def *c)
+static inline nir_def *
+nir_format_linear_to_srgb(nir_builder *b, nir_def *c)
 {
-   nir_ssa_def *linear = nir_fmul_imm(b, c, 12.92f);
-   nir_ssa_def *curved =
+   nir_def *linear = nir_fmul_imm(b, c, 12.92f);
+   nir_def *curved =
       nir_fadd_imm(b, nir_fmul_imm(b, nir_fpow(b, c, nir_imm_float(b, 1.0 / 2.4)), 1.055f),
                    -0.055f);
 
@@ -311,11 +311,11 @@ nir_format_linear_to_srgb(nir_builder *b, nir_ssa_def *c)
                                 linear, curved));
 }
 
-static inline nir_ssa_def *
-nir_format_srgb_to_linear(nir_builder *b, nir_ssa_def *c)
+static inline nir_def *
+nir_format_srgb_to_linear(nir_builder *b, nir_def *c)
 {
-   nir_ssa_def *linear = nir_fdiv_imm(b, c, 12.92f);
-   nir_ssa_def *curved =
+   nir_def *linear = nir_fdiv_imm(b, c, 12.92f);
+   nir_def *curved =
       nir_fpow(b, nir_fmul_imm(b, nir_fadd_imm(b, c, 0.055f), 1.0 / 1.055f),
                nir_imm_float(b, 2.4f));
 
@@ -326,8 +326,8 @@ nir_format_srgb_to_linear(nir_builder *b, nir_ssa_def *c)
 /* Clamps a vector of uints so they don't extend beyond the given number of
  * bits per channel.
  */
-static inline nir_ssa_def *
-nir_format_clamp_uint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
+static inline nir_def *
+nir_format_clamp_uint(nir_builder *b, nir_def *f, const unsigned *bits)
 {
    if (bits[0] == 32)
       return f;
@@ -344,8 +344,8 @@ nir_format_clamp_uint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
 /* Clamps a vector of sints so they don't extend beyond the given number of
  * bits per channel.
  */
-static inline nir_ssa_def *
-nir_format_clamp_sint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
+static inline nir_def *
+nir_format_clamp_sint(nir_builder *b, nir_def *f, const unsigned *bits)
 {
    if (bits[0] == 32)
       return f;
@@ -364,10 +364,10 @@ nir_format_clamp_sint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
    return f;
 }
 
-static inline nir_ssa_def *
-nir_format_unpack_11f11f10f(nir_builder *b, nir_ssa_def *packed)
+static inline nir_def *
+nir_format_unpack_11f11f10f(nir_builder *b, nir_def *packed)
 {
-   nir_ssa_def *chans[3];
+   nir_def *chans[3];
    chans[0] = nir_mask_shift(b, packed, 0x000007ff, 4);
    chans[1] = nir_mask_shift(b, packed, 0x003ff800, -7);
    chans[2] = nir_mask_shift(b, packed, 0xffc00000, -17);
@@ -378,23 +378,23 @@ nir_format_unpack_11f11f10f(nir_builder *b, nir_ssa_def *packed)
    return nir_vec(b, chans, 3);
 }
 
-static inline nir_ssa_def *
-nir_format_pack_11f11f10f(nir_builder *b, nir_ssa_def *color)
+static inline nir_def *
+nir_format_pack_11f11f10f(nir_builder *b, nir_def *color)
 {
    /* 10 and 11-bit floats are unsigned.  Clamp to non-negative */
-   nir_ssa_def *clamped = nir_fmax(b, color, nir_imm_float(b, 0));
+   nir_def *clamped = nir_fmax(b, color, nir_imm_float(b, 0));
 
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, color->bit_size);
-   nir_ssa_def *p1 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 0),
-                                              nir_channel(b, clamped, 1));
-   nir_ssa_def *p2 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 2),
-                                              undef);
+   nir_def *undef = nir_undef(b, 1, color->bit_size);
+   nir_def *p1 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 0),
+                                          nir_channel(b, clamped, 1));
+   nir_def *p2 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 2),
+                                          undef);
 
    /* A 10 or 11-bit float has the same exponent as a 16-bit float but with
     * fewer mantissa bits and no sign bit.  All we have to do is throw away
     * the sign bit and the bottom mantissa bits and shift it into place.
     */
-   nir_ssa_def *packed = nir_imm_int(b, 0);
+   nir_def *packed = nir_imm_int(b, 0);
    packed = nir_mask_shift_or(b, packed, p1, 0x00007ff0, -4);
    packed = nir_mask_shift_or(b, packed, p1, 0x7ff00000, -9);
    packed = nir_mask_shift_or(b, packed, p2, 0x00007fe0, 17);
@@ -402,22 +402,22 @@ nir_format_pack_11f11f10f(nir_builder *b, nir_ssa_def *color)
    return packed;
 }
 
-static inline nir_ssa_def *
-nir_format_pack_r9g9b9e5(nir_builder *b, nir_ssa_def *color)
+static inline nir_def *
+nir_format_pack_r9g9b9e5(nir_builder *b, nir_def *color)
 {
    /* See also float3_to_rgb9e5 */
 
    /* First, we need to clamp it to range. */
-   nir_ssa_def *clamped = nir_fmin(b, color, nir_imm_float(b, MAX_RGB9E5));
+   nir_def *clamped = nir_fmin(b, color, nir_imm_float(b, MAX_RGB9E5));
 
    /* Get rid of negatives and NaN */
    clamped = nir_bcsel(b, nir_ugt_imm(b, color, 0x7f800000),
                        nir_imm_float(b, 0), clamped);
 
    /* maxrgb.u = MAX3(rc.u, gc.u, bc.u); */
-   nir_ssa_def *maxu = nir_umax(b, nir_channel(b, clamped, 0),
-                                nir_umax(b, nir_channel(b, clamped, 1),
-                                         nir_channel(b, clamped, 2)));
+   nir_def *maxu = nir_umax(b, nir_channel(b, clamped, 0),
+                            nir_umax(b, nir_channel(b, clamped, 1),
+                                     nir_channel(b, clamped, 2)));
 
    /* maxrgb.u += maxrgb.u & (1 << (23-9)); */
    maxu = nir_iadd(b, maxu, nir_iand_imm(b, maxu, 1 << 14));
@@ -425,26 +425,26 @@ nir_format_pack_r9g9b9e5(nir_builder *b, nir_ssa_def *color)
    /* exp_shared = MAX2((maxrgb.u >> 23), -RGB9E5_EXP_BIAS - 1 + 127) +
     *              1 + RGB9E5_EXP_BIAS - 127;
     */
-   nir_ssa_def *exp_shared =
+   nir_def *exp_shared =
       nir_iadd_imm(b, nir_umax(b, nir_ushr_imm(b, maxu, 23), nir_imm_int(b, -RGB9E5_EXP_BIAS - 1 + 127)),
                    1 + RGB9E5_EXP_BIAS - 127);
 
    /* revdenom_biasedexp = 127 - (exp_shared - RGB9E5_EXP_BIAS -
     *                             RGB9E5_MANTISSA_BITS) + 1;
     */
-   nir_ssa_def *revdenom_biasedexp =
+   nir_def *revdenom_biasedexp =
       nir_isub_imm(b, 127 + RGB9E5_EXP_BIAS + RGB9E5_MANTISSA_BITS + 1,
                    exp_shared);
 
    /* revdenom.u = revdenom_biasedexp << 23; */
-   nir_ssa_def *revdenom =
+   nir_def *revdenom =
       nir_ishl_imm(b, revdenom_biasedexp, 23);
 
    /* rm = (int) (rc.f * revdenom.f);
     * gm = (int) (gc.f * revdenom.f);
     * bm = (int) (bc.f * revdenom.f);
     */
-   nir_ssa_def *mantissa =
+   nir_def *mantissa =
       nir_f2i32(b, nir_fmul(b, clamped, revdenom));
 
    /* rm = (rm & 1) + (rm >> 1);
@@ -454,7 +454,7 @@ nir_format_pack_r9g9b9e5(nir_builder *b, nir_ssa_def *color)
    mantissa = nir_iadd(b, nir_iand_imm(b, mantissa, 1),
                        nir_ushr_imm(b, mantissa, 1));
 
-   nir_ssa_def *packed = nir_channel(b, mantissa, 0);
+   nir_def *packed = nir_channel(b, mantissa, 0);
    packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 1), ~0, 9);
    packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 2), ~0, 18);
    packed = nir_mask_shift_or(b, packed, exp_shared, ~0, 27);
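
These helpers share a per-channel bit-count convention; a small sketch assuming an RGB565 layout and a builder `b` (names illustrative):

   static const unsigned rgb565_bits[3] = { 5, 6, 5 };
   nir_def *rgb = nir_format_unpack_uint(b, packed, rgb565_bits, 3);
   nir_def *repacked = nir_format_pack_uint(b, rgb, rgb565_bits, 3);
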
index 1012d79..7ce1903 100644 (file)
@@ -56,7 +56,7 @@ struct from_ssa_state {
  * We treat SSA undefs as always coming before other instruction types.
  */
 static bool
-def_after(nir_ssa_def *a, nir_ssa_def *b)
+def_after(nir_def *a, nir_def *b)
 {
    if (a->parent_instr->type == nir_instr_type_ssa_undef)
       return false;
@@ -77,7 +77,7 @@ def_after(nir_ssa_def *a, nir_ssa_def *b)
 
 /* Returns true if a dominates b */
 static bool
-ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
+ssa_def_dominates(nir_def *a, nir_def *b)
 {
    if (a->parent_instr->type == nir_instr_type_ssa_undef) {
       /* SSA undefs always dominate */
@@ -115,21 +115,21 @@ struct merge_set;
 typedef struct {
    struct exec_node node;
    struct merge_set *set;
-   nir_ssa_def *def;
+   nir_def *def;
 } merge_node;
 
 typedef struct merge_set {
    struct exec_list nodes;
    unsigned size;
    bool divergent;
-   nir_ssa_def *reg_decl;
+   nir_def *reg_decl;
 } merge_set;
 
 #if 0
 static void
 merge_set_dump(merge_set *set, FILE *fp)
 {
-   NIR_VLA(nir_ssa_def *, dom, set->size);
+   NIR_VLA(nir_def *, dom, set->size);
    int dom_idx = -1;
 
    foreach_list_typed(merge_node, node, node, &set->nodes) {
@@ -147,7 +147,7 @@ merge_set_dump(merge_set *set, FILE *fp)
 #endif
 
 static merge_node *
-get_merge_node(nir_ssa_def *def, struct from_ssa_state *state)
+get_merge_node(nir_def *def, struct from_ssa_state *state)
 {
    struct hash_entry *entry =
       _mesa_hash_table_search(state->merge_node_table, def);
@@ -179,7 +179,7 @@ merge_nodes_interfere(merge_node *a, merge_node *b)
    if (a->set == b->set)
       return false;
 
-   return nir_ssa_defs_interfere(a->def, b->def);
+   return nir_defs_interfere(a->def, b->def);
 }
 
 /* Merges b into a
@@ -422,8 +422,8 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
       entry->dest.dest.ssa.divergent = phi->dest.ssa.divergent;
       exec_list_push_tail(&block_pcopy->entries, &entry->node);
 
-      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
-                               &entry->dest.dest.ssa);
+      nir_def_rewrite_uses(&phi->dest.ssa,
+                           &entry->dest.dest.ssa);
 
       nir_instr_rewrite_src(&block_pcopy->instr, &entry->src,
                             nir_src_for_ssa(&phi->dest.ssa));
@@ -511,22 +511,22 @@ aggressive_coalesce_block(nir_block *block, struct from_ssa_state *state)
    return true;
 }
 
-static nir_ssa_def *
-decl_reg_for_ssa_def(nir_builder *b, nir_ssa_def *def)
+static nir_def *
+decl_reg_for_ssa_def(nir_builder *b, nir_def *def)
 {
    return nir_decl_reg(b, def->num_components, def->bit_size, 0);
 }
 
 static void
-set_reg_divergent(nir_ssa_def *reg, bool divergent)
+set_reg_divergent(nir_def *reg, bool divergent)
 {
    nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
    nir_intrinsic_set_divergent(decl, divergent);
 }
 
 void
-nir_rewrite_uses_to_load_reg(nir_builder *b, nir_ssa_def *old,
-                             nir_ssa_def *reg)
+nir_rewrite_uses_to_load_reg(nir_builder *b, nir_def *old,
+                             nir_def *reg)
 {
    nir_foreach_use_including_if_safe(use, old) {
       b->cursor = nir_before_src(use);
@@ -540,7 +540,7 @@ nir_rewrite_uses_to_load_reg(nir_builder *b, nir_ssa_def *old,
 
          assert(!copy_entry->src_is_reg);
          copy_entry->src_is_reg = true;
-         nir_src_rewrite_ssa(&copy_entry->src, reg);
+         nir_src_rewrite(&copy_entry->src, reg);
          continue;
       }
 
@@ -549,7 +549,7 @@ nir_rewrite_uses_to_load_reg(nir_builder *b, nir_ssa_def *old,
        * a register is referenced in multiple sources in the same instruction,
        * which otherwise would turn into piles of unnecessary moves.
        */
-      nir_ssa_def *load = NULL;
+      nir_def *load = NULL;
       if (b->cursor.option == nir_cursor_before_instr) {
          nir_instr *prev = nir_instr_prev(b->cursor.instr);
 
@@ -565,7 +565,7 @@ nir_rewrite_uses_to_load_reg(nir_builder *b, nir_ssa_def *old,
       if (load == NULL)
          load = nir_load_reg(b, reg);
 
-      nir_src_rewrite_ssa(use, load);
+      nir_src_rewrite(use, load);
    }
 }
 
@@ -574,7 +574,7 @@ dest_replace_ssa_with_reg(nir_dest *dest, nir_function_impl *impl)
 {
    nir_builder b = nir_builder_create(impl);
 
-   nir_ssa_def *reg = decl_reg_for_ssa_def(&b, &dest->ssa);
+   nir_def *reg = decl_reg_for_ssa_def(&b, &dest->ssa);
    nir_rewrite_uses_to_load_reg(&b, &dest->ssa, reg);
 
    b.cursor = nir_after_instr(dest->ssa.parent_instr);
@@ -582,8 +582,8 @@ dest_replace_ssa_with_reg(nir_dest *dest, nir_function_impl *impl)
    return true;
 }
 
-static nir_ssa_def *
-reg_for_ssa_def(nir_ssa_def *def, struct from_ssa_state *state)
+static nir_def *
+reg_for_ssa_def(nir_def *def, struct from_ssa_state *state)
 {
    struct hash_entry *entry =
       _mesa_hash_table_search(state->merge_node_table, def);
@@ -633,15 +633,15 @@ remove_no_op_phi(nir_instr *instr, struct from_ssa_state *state)
 }
 
 static bool
-rewrite_ssa_def(nir_ssa_def *def, void *void_state)
+rewrite_ssa_def(nir_def *def, void *void_state)
 {
    struct from_ssa_state *state = void_state;
 
-   nir_ssa_def *reg = reg_for_ssa_def(def, state);
+   nir_def *reg = reg_for_ssa_def(def, state);
    if (reg == NULL)
       return true;
 
-   assert(nir_ssa_def_is_unused(def));
+   assert(nir_def_is_unused(def));
 
    /* At this point we know a priori that this SSA def is part of a
     * nir_dest.  We can use exec_node_data to get the dest pointer.
@@ -658,11 +658,11 @@ rewrite_src(nir_src *src, void *void_state)
 {
    struct from_ssa_state *state = void_state;
 
-   nir_ssa_def *reg = reg_for_ssa_def(src->ssa, state);
+   nir_def *reg = reg_for_ssa_def(src->ssa, state);
    if (reg == NULL)
       return true;
 
-   nir_src_rewrite_ssa(src, nir_load_reg(&state->builder, reg));
+   nir_src_rewrite(src, nir_load_reg(&state->builder, reg));
 
    state->progress = true;
    return true;
@@ -709,10 +709,10 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
 
             nir_foreach_parallel_copy_entry(entry, pcopy) {
                assert(!entry->dest_is_reg);
-               assert(nir_ssa_def_is_unused(&entry->dest.dest.ssa));
+               assert(nir_def_is_unused(&entry->dest.dest.ssa));
 
                /* Parallel copy destinations will always be registers */
-               nir_ssa_def *reg = reg_for_ssa_def(&entry->dest.dest.ssa, state);
+               nir_def *reg = reg_for_ssa_def(&entry->dest.dest.ssa, state);
                assert(reg != NULL);
 
                entry->dest_is_reg = true;
@@ -723,7 +723,7 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
 
             nir_foreach_parallel_copy_entry(entry, pcopy) {
                assert(!entry->src_is_reg);
-               nir_ssa_def *reg = reg_for_ssa_def(entry->src.ssa, state);
+               nir_def *reg = reg_for_ssa_def(entry->src.ssa, state);
                if (reg == NULL)
                   continue;
 
@@ -768,7 +768,7 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
  */
 struct copy_value {
    bool is_reg;
-   nir_ssa_def *ssa;
+   nir_def *ssa;
 };
 
 static bool
@@ -790,7 +790,7 @@ copy_value_is_divergent(struct copy_value v)
 static void
 copy_values(nir_builder *b, struct copy_value dest, struct copy_value src)
 {
-   nir_ssa_def *val = src.is_reg ? nir_load_reg(b, src.ssa) : src.ssa;
+   nir_def *val = src.is_reg ? nir_load_reg(b, src.ssa) : src.ssa;
 
    assert(!copy_value_is_divergent(src) || copy_value_is_divergent(dest));
 
@@ -957,7 +957,7 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
        * temporary (which is trivial).
        */
       assert(num_vals < num_copies * 2);
-      nir_ssa_def *reg;
+      nir_def *reg;
       if (values[b].is_reg) {
          nir_intrinsic_instr *decl = nir_reg_get_decl(values[b].ssa);
          uint8_t num_components = nir_intrinsic_num_components(decl);
@@ -1078,8 +1078,8 @@ nir_convert_from_ssa(nir_shader *shader,
 }
 
 static void
-place_phi_read(nir_builder *b, nir_ssa_def *reg,
-               nir_ssa_def *def, nir_block *block, struct set *visited_blocks)
+place_phi_read(nir_builder *b, nir_def *reg,
+               nir_def *def, nir_block *block, struct set *visited_blocks)
 {
    /* Search already visited blocks to avoid back edges in tree */
    if (_mesa_set_search(visited_blocks, block) == NULL) {
@@ -1150,10 +1150,10 @@ nir_lower_phis_to_regs_block(nir_block *block)
 
    bool progress = false;
    nir_foreach_phi_safe(phi, block) {
-      nir_ssa_def *reg = decl_reg_for_ssa_def(&b, &phi->dest.ssa);
+      nir_def *reg = decl_reg_for_ssa_def(&b, &phi->dest.ssa);
 
       b.cursor = nir_after_instr(&phi->instr);
-      nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_load_reg(&b, reg));
+      nir_def_rewrite_uses(&phi->dest.ssa, nir_load_reg(&b, reg));
 
       nir_foreach_phi_src(src, phi) {
 
@@ -1186,7 +1186,7 @@ dest_replace_ssa_with_reg_state(nir_dest *dest, void *void_state)
 }
 
 static bool
-ssa_def_is_local_to_block(nir_ssa_def *def, UNUSED void *state)
+ssa_def_is_local_to_block(nir_def *def, UNUSED void *state)
 {
    nir_block *block = def->parent_instr->block;
    nir_foreach_use_including_if(use_src, def) {
@@ -1210,7 +1210,7 @@ instr_is_load_new_reg(nir_instr *instr, unsigned old_num_ssa)
    if (load->intrinsic != nir_intrinsic_load_reg)
       return false;
 
-   nir_ssa_def *reg = load->src[0].ssa;
+   nir_def *reg = load->src[0].ssa;
 
    return reg->index >= old_num_ssa;
 }
@@ -1241,12 +1241,12 @@ nir_lower_ssa_defs_to_regs_block(nir_block *block)
    nir_foreach_instr_safe(instr, block) {
       if (instr->type == nir_instr_type_ssa_undef) {
          /* Undefs are just a read of something never written. */
-         nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);
-         nir_ssa_def *reg = decl_reg_for_ssa_def(&b, &undef->def);
+         nir_undef_instr *undef = nir_instr_as_ssa_undef(instr);
+         nir_def *reg = decl_reg_for_ssa_def(&b, &undef->def);
          nir_rewrite_uses_to_load_reg(&b, &undef->def, reg);
       } else if (instr->type == nir_instr_type_load_const) {
          nir_load_const_instr *load = nir_instr_as_load_const(instr);
-         nir_ssa_def *reg = decl_reg_for_ssa_def(&b, &load->def);
+         nir_def *reg = decl_reg_for_ssa_def(&b, &load->def);
          nir_rewrite_uses_to_load_reg(&b, &load->def, reg);
 
          b.cursor = nir_after_instr(instr);
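
Both branches above demote an SSA def to a register with the same two calls, sketched here with builder `b` and a `nir_def *def` assumed:

   nir_def *reg = nir_decl_reg(&b, def->num_components, def->bit_size, 0);
   nir_rewrite_uses_to_load_reg(&b, def, reg);
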
index e302d7a..b07ff01 100644 (file)
@@ -30,7 +30,7 @@
 static bool
 src_is_invocation_id(const nir_src *src)
 {
-   nir_ssa_scalar s = nir_ssa_scalar_resolved(src->ssa, 0);
+   nir_scalar s = nir_scalar_resolved(src->ssa, 0);
    return s.def->parent_instr->type == nir_instr_type_intrinsic &&
           nir_instr_as_intrinsic(s.def->parent_instr)->intrinsic ==
              nir_intrinsic_load_invocation_id;
@@ -39,7 +39,7 @@ src_is_invocation_id(const nir_src *src)
 static bool
 src_is_local_invocation_index(const nir_src *src)
 {
-   nir_ssa_scalar s = nir_ssa_scalar_resolved(src->ssa, 0);
+   nir_scalar s = nir_scalar_resolved(src->ssa, 0);
    return s.def->parent_instr->type == nir_instr_type_intrinsic &&
           nir_instr_as_intrinsic(s.def->parent_instr)->intrinsic ==
              nir_intrinsic_load_local_invocation_index;
@@ -776,10 +776,10 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
    case nir_intrinsic_launch_mesh_workgroups:
    case nir_intrinsic_launch_mesh_workgroups_with_payload_deref: {
       for (unsigned i = 0; i < 3; ++i) {
-         nir_ssa_scalar dim = nir_ssa_scalar_resolved(instr->src[0].ssa, i);
-         if (nir_ssa_scalar_is_const(dim))
+         nir_scalar dim = nir_scalar_resolved(instr->src[0].ssa, i);
+         if (nir_scalar_is_const(dim))
             shader->info.mesh.ts_mesh_dispatch_dimensions[i] =
-               nir_ssa_scalar_as_uint(dim);
+               nir_scalar_as_uint(dim);
       }
       break;
    }
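
The nir_ssa_scalar to nir_scalar rename keeps the resolve-then-inspect idiom intact; schematically (`src` and component `i` assumed):

   nir_scalar dim = nir_scalar_resolved(src->ssa, i);
   if (nir_scalar_is_const(dim)) {
      uint32_t value = nir_scalar_as_uint(dim);
      /* value is the compile-time constant for component i */
   }
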
similarity index 98%
rename from src/compiler/nir/nir_gather_ssa_types.c
rename to src/compiler/nir/nir_gather_types.c
index f5e9550..edf1b81 100644 (file)
@@ -95,9 +95,9 @@ copy_types(nir_src src, nir_dest *dest, BITSET_WORD *float_types,
  * recorded for that type.
  */
 void
-nir_gather_ssa_types(nir_function_impl *impl,
-                     BITSET_WORD *float_types,
-                     BITSET_WORD *int_types)
+nir_gather_types(nir_function_impl *impl,
+                 BITSET_WORD *float_types,
+                 BITSET_WORD *int_types)
 {
    bool progress;
    do {
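
A hedged sketch of calling the renamed pass; sizing the bitsets from `impl->ssa_alloc` and allocating with ralloc are assumptions based on common NIR practice, not shown in this diff:

   BITSET_WORD *floats = rzalloc_array(mem_ctx, BITSET_WORD,
                                       BITSET_WORDS(impl->ssa_alloc));
   BITSET_WORD *ints = rzalloc_array(mem_ctx, BITSET_WORD,
                                     BITSET_WORDS(impl->ssa_alloc));
   nir_gather_types(impl, floats, ints);
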
index 96f5af3..3d8abc7 100644 (file)
@@ -193,7 +193,7 @@ group_loads(nir_instr *first, nir_instr *last)
       if (!can_move(instr, first->pass_flags))
          continue;
 
-      nir_ssa_def *def = nir_instr_ssa_def(instr);
+      nir_def *def = nir_instr_ssa_def(instr);
       if (def) {
          bool all_uses_after_last = true;
 
index 5286166..ac4ae09 100644 (file)
@@ -36,7 +36,7 @@ function_ends_in_jump(nir_function_impl *impl)
 void
 nir_inline_function_impl(struct nir_builder *b,
                          const nir_function_impl *impl,
-                         nir_ssa_def **params,
+                         nir_def **params,
                          struct hash_table *shader_var_remap)
 {
    nir_function_impl *copy = nir_function_impl_clone(b->shader, impl);
@@ -84,8 +84,8 @@ nir_inline_function_impl(struct nir_builder *b,
 
             unsigned param_idx = nir_intrinsic_param_idx(load);
             assert(param_idx < impl->function->num_params);
-            nir_ssa_def_rewrite_uses(&load->dest.ssa,
-                                     params[param_idx]);
+            nir_def_rewrite_uses(&load->dest.ssa,
+                                 params[param_idx]);
 
             /* Remove any left-over load_param intrinsics because they're soon
              * to be in another function and therefore no longer valid.
@@ -159,7 +159,7 @@ inline_functions_block(nir_block *block, nir_builder *b,
        * to an SSA value first.
        */
       const unsigned num_params = call->num_params;
-      NIR_VLA(nir_ssa_def *, params, num_params);
+      NIR_VLA(nir_def *, params, num_params);
       for (unsigned i = 0; i < num_params; i++) {
          params[i] = nir_ssa_for_src(b, call->params[i],
                                      call->callee->params[i].num_components);
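
Condensing the call-site setup above (`b` and `call` assumed; passing NULL means no shader-variable remap table is needed):

   NIR_VLA(nir_def *, params, call->num_params);
   for (unsigned i = 0; i < call->num_params; i++)
      params[i] = nir_ssa_for_src(b, call->params[i],
                                  call->callee->params[i].num_components);
   nir_inline_function_impl(b, call->callee->impl, params, NULL);
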
index c20537c..ddcd279 100644 (file)
@@ -228,14 +228,14 @@ nir_add_inlinable_uniforms(const nir_src *cond, nir_loop_info *info,
 
    /* Allow an induction variable, which implies a loop terminator. */
    if (info) {
-      nir_ssa_scalar cond_scalar = { cond->ssa, 0 };
+      nir_scalar cond_scalar = { cond->ssa, 0 };
 
       /* Limit the terminator condition to what loop unrolling supports: a simple
        * comparison (i.e. "i < count" is supported, but "i + 1 < count" is not).
        */
       if (nir_is_supported_terminator_condition(cond_scalar)) {
-         if (nir_ssa_scalar_alu_op(cond_scalar) == nir_op_inot)
-            cond_scalar = nir_ssa_scalar_chase_alu_src(cond_scalar, 0);
+         if (nir_scalar_alu_op(cond_scalar) == nir_op_inot)
+            cond_scalar = nir_scalar_chase_alu_src(cond_scalar, 0);
 
          nir_alu_instr *alu = nir_instr_as_alu(cond_scalar.def->parent_instr);
 
@@ -408,8 +408,8 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
                   for (unsigned i = 0; i < num_uniforms; i++) {
                      if (offset == uniform_dw_offsets[i]) {
                         b.cursor = nir_before_instr(&intr->instr);
-                        nir_ssa_def *def = nir_imm_int(&b, uniform_values[i]);
-                        nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
+                        nir_def *def = nir_imm_int(&b, uniform_values[i]);
+                        nir_def_rewrite_uses(&intr->dest.ssa, def);
                         nir_instr_remove(&intr->instr);
                         break;
                      }
@@ -419,7 +419,7 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
                    * found component load with constant load.
                    */
                   uint32_t max_offset = offset + num_components;
-                  nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS] = { 0 };
+                  nir_def *components[NIR_MAX_VEC_COMPONENTS] = { 0 };
                   bool found = false;
 
                   b.cursor = nir_before_instr(&intr->instr);
@@ -453,8 +453,8 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
                   }
 
                   /* Replace the original uniform load. */
-                  nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                                           nir_vec(&b, components, num_components));
+                  nir_def_rewrite_uses(&intr->dest.ssa,
+                                       nir_vec(&b, components, num_components));
                   nir_instr_remove(&intr->instr);
                }
             }
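
The vector path above ends in the same rewrite; an all-immediate variant (indices illustrative, real code mixes loaded and constant components) looks like:

   nir_def *comps[NIR_MAX_VEC_COMPONENTS] = { 0 };
   for (unsigned i = 0; i < num_components; i++)
      comps[i] = nir_imm_int(&b, uniform_values[i]);
   nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(&b, comps, num_components));
   nir_instr_remove(&intr->instr);
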
index cad4a59..6b93b83 100644 (file)
@@ -712,7 +712,7 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
    unreachable("All cases in the above switch should return");
 }
 
-static nir_ssa_def *
+static nir_def *
 nir_instr_get_dest_ssa_def(nir_instr *instr)
 {
    switch (instr->type) {
@@ -766,8 +766,8 @@ nir_instr_set_add_or_rewrite(struct set *instr_set, nir_instr *instr,
 
    if (!cond_function || cond_function(match, instr)) {
       /* rewrite instruction if condition is matched */
-      nir_ssa_def *def = nir_instr_get_dest_ssa_def(instr);
-      nir_ssa_def *new_def = nir_instr_get_dest_ssa_def(match);
+      nir_def *def = nir_instr_get_dest_ssa_def(instr);
+      nir_def *new_def = nir_instr_get_dest_ssa_def(match);
 
       /* It's safe to replace an exact instruction with an inexact one as
        * long as we make it exact.  If we got here, the two instructions are
@@ -777,7 +777,7 @@ nir_instr_set_add_or_rewrite(struct set *instr_set, nir_instr *instr,
       if (instr->type == nir_instr_type_alu && nir_instr_as_alu(instr)->exact)
          nir_instr_as_alu(match)->exact = true;
 
-      nir_ssa_def_rewrite_uses(def, new_def);
+      nir_def_rewrite_uses(def, new_def);
 
       nir_instr_remove(instr);
 
index 650cf27..137d42d 100644
@@ -65,7 +65,7 @@ chase_alu_src_helper(const nir_src *src)
 }
 
 static inline bool
-chase_source_mod(nir_ssa_def **ssa, nir_op op, uint8_t *swizzle)
+chase_source_mod(nir_def **ssa, nir_op op, uint8_t *swizzle)
 {
    if ((*ssa)->parent_instr->type != nir_instr_type_alu)
       return false;
@@ -151,7 +151,7 @@ bool
 nir_legacy_fsat_folds(nir_alu_instr *fsat)
 {
    assert(fsat->op == nir_op_fsat);
-   nir_ssa_def *def = fsat->src[0].src.ssa;
+   nir_def *def = fsat->src[0].src.ssa;
 
    /* No legacy user supports fp64 modifiers */
    if (def->bit_size == 64)
@@ -194,7 +194,7 @@ nir_legacy_fsat_folds(nir_alu_instr *fsat)
 }
 
 static inline bool
-chase_fsat(nir_ssa_def **def)
+chase_fsat(nir_def **def)
 {
    /* No legacy user supports fp64 modifiers */
    if ((*def)->bit_size == 64)
@@ -220,7 +220,7 @@ chase_fsat(nir_ssa_def **def)
 nir_legacy_alu_dest
 nir_legacy_chase_alu_dest(nir_dest *dest)
 {
-   nir_ssa_def *def = &dest->ssa;
+   nir_def *def = &dest->ssa;
 
    /* Try SSA fsat. No users support 64-bit modifiers. */
    if (chase_fsat(&def)) {
@@ -296,7 +296,7 @@ fuse_mods_with_registers(nir_builder *b, nir_instr *instr, void *fuse_fabs_)
             assert(!use->is_if);
             assert(use->parent_instr->type == nir_instr_type_alu);
             nir_alu_src *alu_use = list_entry(use, nir_alu_src, src);
-            nir_src_rewrite_ssa(&alu_use->src, &load->dest.ssa);
+            nir_src_rewrite(&alu_use->src, &load->dest.ssa);
             for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
                alu_use->swizzle[i] = alu->src[0].swizzle[alu_use->swizzle[i]];
          }
@@ -316,7 +316,7 @@ fuse_mods_with_registers(nir_builder *b, nir_instr *instr, void *fuse_fabs_)
 
       if (store) {
          nir_intrinsic_set_legacy_fsat(store, true);
-         nir_src_rewrite_ssa(&store->src[0], &alu->dest.dest.ssa);
+         nir_src_rewrite(&store->src[0], &alu->dest.dest.ssa);
          return true;
       }
    }
index d42cd1b..3190d8e 100644
 #include "nir.h"
 
 typedef struct {
-   nir_ssa_def *handle;
-   nir_ssa_def *indirect; /** < NULL for no indirect offset */
+   nir_def *handle;
+   nir_def *indirect; /** < NULL for no indirect offset */
    unsigned base_offset;
 } nir_reg_src;
 
 typedef struct {
-   nir_ssa_def *handle;
-   nir_ssa_def *indirect; /** < NULL for no indirect offset */
+   nir_def *handle;
+   nir_def *indirect; /** < NULL for no indirect offset */
    unsigned base_offset;
 } nir_reg_dest;
 
@@ -26,7 +26,7 @@ typedef struct {
 
    union {
       nir_reg_src reg;
-      nir_ssa_def *ssa;
+      nir_def *ssa;
    };
 } nir_legacy_src;
 
@@ -35,7 +35,7 @@ typedef struct {
 
    union {
       nir_reg_dest reg;
-      nir_ssa_def *ssa;
+      nir_def *ssa;
    };
 } nir_legacy_dest;
 
index 5fe8177..8ac46f0 100644
@@ -1073,11 +1073,11 @@ replace_varying_input_by_constant_load(nir_shader *shader,
             nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);
 
          /* Add new const to replace the input */
-         nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
-                                             intr->dest.ssa.bit_size,
-                                             out_const->value);
+         nir_def *nconst = nir_build_imm(&b, store_intr->num_components,
+                                         intr->dest.ssa.bit_size,
+                                         out_const->value);
 
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nconst);
+         nir_def_rewrite_uses(&intr->dest.ssa, nconst);
 
          progress = true;
       }
@@ -1122,8 +1122,8 @@ replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
 
          b.cursor = nir_before_instr(instr);
 
-         nir_ssa_def *load = nir_load_var(&b, input_var);
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, load);
+         nir_def *load = nir_load_var(&b, input_var);
+         nir_def_rewrite_uses(&intr->dest.ssa, load);
 
          progress = true;
       }
@@ -1133,7 +1133,7 @@ replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
 }
 
 static bool
-is_direct_uniform_load(nir_ssa_def *def, nir_ssa_scalar *s)
+is_direct_uniform_load(nir_def *def, nir_scalar *s)
 {
   /* def is sure to be scalar, as can_replace_varying() filters out the vector case. */
    assert(def->num_components == 1);
@@ -1147,9 +1147,9 @@ is_direct_uniform_load(nir_ssa_def *def, nir_ssa_scalar *s)
     *     vec1 32 ssa_4 = deref_var &color_out (shader_out float)
     *     intrinsic store_deref (ssa_4, ssa_3) (1, 0)
     */
-   *s = nir_ssa_scalar_resolved(def, 0);
+   *s = nir_scalar_resolved(def, 0);
 
-   nir_ssa_def *ssa = s->def;
+   nir_def *ssa = s->def;
    if (ssa->parent_instr->type != nir_instr_type_intrinsic)
       return false;
 
@@ -1209,8 +1209,8 @@ clone_deref_instr(nir_builder *b, nir_variable *var, nir_deref_instr *deref)
    case nir_deref_type_ptr_as_array: {
       nir_load_const_instr *index =
          nir_instr_as_load_const(deref->arr.index.ssa->parent_instr);
-      nir_ssa_def *ssa = nir_imm_intN_t(b, index->value->i64,
-                                        parent->dest.ssa.bit_size);
+      nir_def *ssa = nir_imm_intN_t(b, index->value->i64,
+                                    parent->dest.ssa.bit_size);
       return nir_build_deref_ptr_as_array(b, parent, ssa);
    }
    case nir_deref_type_struct:
@@ -1224,7 +1224,7 @@ clone_deref_instr(nir_builder *b, nir_variable *var, nir_deref_instr *deref)
 static bool
 replace_varying_input_by_uniform_load(nir_shader *shader,
                                       nir_intrinsic_instr *store_intr,
-                                      nir_ssa_scalar *scalar)
+                                      nir_scalar *scalar)
 {
    nir_function_impl *impl = nir_shader_get_entrypoint(shader);
 
@@ -1260,7 +1260,7 @@ replace_varying_input_by_uniform_load(nir_shader *shader,
 
         /* Clone the instructions, starting from the deref load down to the variable deref. */
          nir_deref_instr *uni_deref = clone_deref_instr(&b, uni_var, deref);
-         nir_ssa_def *uni_def = nir_load_deref(&b, uni_deref);
+         nir_def *uni_def = nir_load_deref(&b, uni_deref);
 
         /* Add a vector-to-scalar move if the uniform is a vector. */
          if (uni_def->num_components > 1) {
@@ -1271,7 +1271,7 @@ replace_varying_input_by_uniform_load(nir_shader *shader,
          }
 
          /* Replace load input with load uniform. */
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, uni_def);
+         nir_def_rewrite_uses(&intr->dest.ssa, uni_def);
 
          progress = true;
       }
@@ -1393,13 +1393,13 @@ nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
       if (!can_replace_varying(out_var))
          continue;
 
-      nir_ssa_def *ssa = intr->src[1].ssa;
+      nir_def *ssa = intr->src[1].ssa;
       if (ssa->parent_instr->type == nir_instr_type_load_const) {
          progress |= replace_varying_input_by_constant_load(consumer, intr);
          continue;
       }
 
-      nir_ssa_scalar uni_scalar;
+      nir_scalar uni_scalar;
       if (is_direct_uniform_load(ssa, &uni_scalar)) {
          if (consumer->options->lower_varying_from_uniform) {
             progress |= replace_varying_input_by_uniform_load(consumer, intr,
index e485c9b..a715534 100644
@@ -80,7 +80,7 @@ set_src_live(nir_src *src, void *void_live)
 }
 
 static bool
-set_ssa_def_dead(nir_ssa_def *def, void *void_live)
+set_ssa_def_dead(nir_def *def, void *void_live)
 {
    BITSET_WORD *live = void_live;
 
@@ -257,11 +257,11 @@ nir_get_live_ssa_defs(nir_cursor cursor, void *mem_ctx)
 static bool
 src_does_not_use_def(nir_src *src, void *def)
 {
-   return src->ssa != (nir_ssa_def *)def;
+   return src->ssa != (nir_def *)def;
 }
 
 static bool
-search_for_use_after_instr(nir_instr *start, nir_ssa_def *def)
+search_for_use_after_instr(nir_instr *start, nir_def *def)
 {
    /* Only look for a use strictly after the given instruction */
    struct exec_node *node = start->node.next;
@@ -286,7 +286,7 @@ search_for_use_after_instr(nir_instr *start, nir_ssa_def *def)
  * instr in a pre-order DFS search of the dominance tree.
  */
 static bool
-nir_ssa_def_is_live_at(nir_ssa_def *def, nir_instr *instr)
+nir_def_is_live_at(nir_def *def, nir_instr *instr)
 {
    if (BITSET_TEST(instr->block->live_out, def->index)) {
       /* Since def dominates instr, if def is in the liveout of the block,
@@ -308,7 +308,7 @@ nir_ssa_def_is_live_at(nir_ssa_def *def, nir_instr *instr)
 }
 
 bool
-nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b)
+nir_defs_interfere(nir_def *a, nir_def *b)
 {
    if (a->parent_instr == b->parent_instr) {
       /* Two variables defined at the same time interfere assuming at
@@ -320,8 +320,8 @@ nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b)
       /* If either variable is an ssa_undef, then there's no interference */
       return false;
    } else if (a->parent_instr->index < b->parent_instr->index) {
-      return nir_ssa_def_is_live_at(a, b->parent_instr);
+      return nir_def_is_live_at(a, b->parent_instr);
    } else {
-      return nir_ssa_def_is_live_at(b, a->parent_instr);
+      return nir_def_is_live_at(b, a->parent_instr);
    }
 }
index 6ff65d2..23aca26 100644
@@ -40,7 +40,7 @@ typedef struct {
    bool in_loop;
 
    /* The ssa_def associated with this info */
-   nir_ssa_def *def;
+   nir_def *def;
 
    /* The type of this ssa_def */
    nir_loop_variable_type type;
@@ -63,7 +63,7 @@ typedef struct {
     * is the increment of the induction variable, this will point to the SSA
     * def being incremented.
     */
-   nir_ssa_def *basis;
+   nir_def *basis;
 } nir_loop_variable;
 
 typedef struct {
@@ -83,7 +83,7 @@ typedef struct {
 } loop_info_state;
 
 static nir_loop_variable *
-get_loop_var(nir_ssa_def *value, loop_info_state *state)
+get_loop_var(nir_def *value, loop_info_state *state)
 {
    nir_loop_variable *var = &(state->loop_vars[value->index]);
 
@@ -112,7 +112,7 @@ typedef struct {
 } init_loop_state;
 
 static bool
-init_loop_def(nir_ssa_def *def, void *void_init_loop_state)
+init_loop_def(nir_def *def, void *void_init_loop_state)
 {
    init_loop_state *loop_init_state = void_init_loop_state;
    nir_loop_variable *var = get_loop_var(def, loop_init_state->state);
@@ -158,14 +158,14 @@ instr_cost(loop_info_state *state, nir_instr *instr,
    unsigned cost = 1;
 
    if (nir_op_is_selection(alu->op)) {
-      nir_ssa_scalar cond_scalar = { alu->src[0].src.ssa, 0 };
+      nir_scalar cond_scalar = { alu->src[0].src.ssa, 0 };
       if (nir_is_terminator_condition_with_two_inputs(cond_scalar)) {
          nir_instr *sel_cond = alu->src[0].src.ssa->parent_instr;
          nir_alu_instr *sel_alu = nir_instr_as_alu(sel_cond);
 
-         nir_ssa_scalar rhs, lhs;
-         lhs = nir_ssa_scalar_chase_alu_src(cond_scalar, 0);
-         rhs = nir_ssa_scalar_chase_alu_src(cond_scalar, 1);
+         nir_scalar rhs, lhs;
+         lhs = nir_scalar_chase_alu_src(cond_scalar, 0);
+         rhs = nir_scalar_chase_alu_src(cond_scalar, 1);
 
          /* If the select's condition is a comparison between a constant and
           * a basic induction variable, we know that it will be eliminated once
@@ -179,7 +179,7 @@ instr_cost(loop_info_state *state, nir_instr *instr,
             * remove that alu instruction's cost from the cost total also.
              */
             if (!list_is_singular(&sel_alu->dest.dest.ssa.uses) ||
-                nir_ssa_def_used_by_if(&sel_alu->dest.dest.ssa))
+                nir_def_used_by_if(&sel_alu->dest.dest.ssa))
                return 0;
             else
                return -1;
@@ -269,7 +269,7 @@ is_var_phi(nir_loop_variable *var)
 }
 
 static inline bool
-mark_invariant(nir_ssa_def *def, loop_info_state *state)
+mark_invariant(nir_def *def, loop_info_state *state)
 {
    nir_loop_variable *var = get_loop_var(def, state);
 
@@ -617,7 +617,7 @@ find_array_access_via_induction(loop_info_state *state,
 
 static bool
 guess_loop_limit(loop_info_state *state, nir_const_value *limit_val,
-                 nir_ssa_scalar basic_ind)
+                 nir_scalar basic_ind)
 {
    unsigned min_array_size = 0;
 
@@ -672,18 +672,18 @@ guess_loop_limit(loop_info_state *state, nir_const_value *limit_val,
 }
 
 static bool
-try_find_limit_of_alu(nir_ssa_scalar limit, nir_const_value *limit_val,
+try_find_limit_of_alu(nir_scalar limit, nir_const_value *limit_val,
                       nir_loop_terminator *terminator, loop_info_state *state)
 {
-   if (!nir_ssa_scalar_is_alu(limit))
+   if (!nir_scalar_is_alu(limit))
       return false;
 
-   nir_op limit_op = nir_ssa_scalar_alu_op(limit);
+   nir_op limit_op = nir_scalar_alu_op(limit);
    if (limit_op == nir_op_imin || limit_op == nir_op_fmin) {
       for (unsigned i = 0; i < 2; i++) {
-         nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(limit, i);
-         if (nir_ssa_scalar_is_const(src)) {
-            *limit_val = nir_ssa_scalar_as_const_value(src);
+         nir_scalar src = nir_scalar_chase_alu_src(limit, i);
+         if (nir_scalar_is_const(src)) {
+            *limit_val = nir_scalar_as_const_value(src);
             terminator->exact_trip_count_unknown = true;
             return true;
          }
@@ -717,7 +717,7 @@ eval_const_binop(nir_op op, unsigned bit_size,
 }
 
 static int
-find_replacement(const nir_ssa_def **originals, const nir_ssa_def *key,
+find_replacement(const nir_def **originals, const nir_def *key,
                  unsigned num_replacements)
 {
    for (int i = 0; i < num_replacements; i++) {
@@ -751,7 +751,7 @@ find_replacement(const nir_ssa_def **originals, const nir_ssa_def *key,
  */
 static bool
 try_eval_const_alu(nir_const_value *dest, nir_alu_instr *alu,
-                   const nir_ssa_def **originals,
+                   const nir_def **originals,
                    const nir_const_value **replacements,
                    unsigned num_replacements, unsigned execution_mode)
 {
@@ -891,7 +891,7 @@ get_iteration(nir_op cond_op, nir_const_value initial, nir_const_value step,
 
 static int32_t
 get_iteration_empirical(nir_alu_instr *cond_alu, nir_alu_instr *incr_alu,
-                        nir_ssa_def *basis, nir_const_value initial,
+                        nir_def *basis, nir_const_value initial,
                         bool invert_cond, unsigned execution_mode,
                         unsigned max_unroll_iterations)
 {
@@ -899,7 +899,7 @@ get_iteration_empirical(nir_alu_instr *cond_alu, nir_alu_instr *incr_alu,
    nir_const_value result;
    nir_const_value iter = initial;
 
-   const nir_ssa_def *originals[2] = { basis, NULL };
+   const nir_def *originals[2] = { basis, NULL };
    const nir_const_value *replacements[2] = { &iter, NULL };
 
    while (iter_count <= max_unroll_iterations) {
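As the hunk headers show, get_iteration_empirical() computes the trip count by brute force: starting from the known initial value, it constant-folds the increment and the terminator condition one iteration at a time until the loop would break or the unroll budget runs out. A minimal sketch of that strategy in plain C, with illustrative function-pointer names standing in for the nir_alu_instr constant folding:

   #include <stdint.h>

   /* Sketch only: walk the induction value forward until the terminator
    * would break the loop or the unroll budget is exhausted. */
   static int32_t trip_count_empirical(int32_t initial,
                                       int32_t (*incr)(int32_t),
                                       int (*breaks)(int32_t),
                                       unsigned max_iter)
   {
      int32_t v = initial;
      for (unsigned n = 0; n <= max_iter; n++) {
         if (breaks(v))
            return (int32_t)n;   /* loop exits after n full iterations */
         v = incr(v);
      }
      return -1;                 /* unknown within the budget */
   }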
@@ -927,14 +927,14 @@ get_iteration_empirical(nir_alu_instr *cond_alu, nir_alu_instr *incr_alu,
 }
 
 static bool
-will_break_on_first_iteration(nir_alu_instr *cond_alu, nir_ssa_def *basis,
-                              nir_ssa_def *limit_basis,
+will_break_on_first_iteration(nir_alu_instr *cond_alu, nir_def *basis,
+                              nir_def *limit_basis,
                               nir_const_value initial, nir_const_value limit,
                               bool invert_cond, unsigned execution_mode)
 {
    nir_const_value result;
 
-   const nir_ssa_def *originals[2] = { basis, limit_basis };
+   const nir_def *originals[2] = { basis, limit_basis };
    const nir_const_value *replacements[2] = { &initial, &limit };
 
    ASSERTED bool success = try_eval_const_alu(&result, cond_alu, originals,
@@ -995,10 +995,10 @@ test_iterations(int32_t iter_int, nir_const_value step,
 }
 
 static int
-calculate_iterations(nir_ssa_def *basis, nir_ssa_def *limit_basis,
+calculate_iterations(nir_def *basis, nir_def *limit_basis,
                      nir_const_value initial, nir_const_value step,
                      nir_const_value limit, nir_alu_instr *alu,
-                     nir_ssa_scalar cond, nir_op alu_op, bool limit_rhs,
+                     nir_scalar cond, nir_op alu_op, bool limit_rhs,
                      bool invert_cond, unsigned execution_mode,
                      unsigned max_unroll_iterations)
 {
@@ -1113,15 +1113,15 @@ calculate_iterations(nir_ssa_def *basis, nir_ssa_def *limit_basis,
 }
 
 static bool
-get_induction_and_limit_vars(nir_ssa_scalar cond,
-                             nir_ssa_scalar *ind,
-                             nir_ssa_scalar *limit,
+get_induction_and_limit_vars(nir_scalar cond,
+                             nir_scalar *ind,
+                             nir_scalar *limit,
                              bool *limit_rhs,
                              loop_info_state *state)
 {
-   nir_ssa_scalar rhs, lhs;
-   lhs = nir_ssa_scalar_chase_alu_src(cond, 0);
-   rhs = nir_ssa_scalar_chase_alu_src(cond, 1);
+   nir_scalar rhs, lhs;
+   lhs = nir_scalar_chase_alu_src(cond, 0);
+   rhs = nir_scalar_chase_alu_src(cond, 1);
 
    nir_loop_variable *src0_lv = get_loop_var(lhs.def, state);
    nir_loop_variable *src1_lv = get_loop_var(rhs.def, state);
@@ -1148,40 +1148,40 @@ get_induction_and_limit_vars(nir_ssa_scalar cond,
 }
 
 static bool
-try_find_trip_count_vars_in_iand(nir_ssa_scalar *cond,
-                                 nir_ssa_scalar *ind,
-                                 nir_ssa_scalar *limit,
+try_find_trip_count_vars_in_iand(nir_scalar *cond,
+                                 nir_scalar *ind,
+                                 nir_scalar *limit,
                                  bool *limit_rhs,
                                  loop_info_state *state)
 {
-   const nir_op alu_op = nir_ssa_scalar_alu_op(*cond);
+   const nir_op alu_op = nir_scalar_alu_op(*cond);
    assert(alu_op == nir_op_ieq || alu_op == nir_op_inot);
 
-   nir_ssa_scalar iand = nir_ssa_scalar_chase_alu_src(*cond, 0);
+   nir_scalar iand = nir_scalar_chase_alu_src(*cond, 0);
 
    if (alu_op == nir_op_ieq) {
-      nir_ssa_scalar zero = nir_ssa_scalar_chase_alu_src(*cond, 1);
+      nir_scalar zero = nir_scalar_chase_alu_src(*cond, 1);
 
-      if (!nir_ssa_scalar_is_alu(iand) || !nir_ssa_scalar_is_const(zero)) {
+      if (!nir_scalar_is_alu(iand) || !nir_scalar_is_const(zero)) {
          /* Maybe we had it the wrong way, flip things around */
-         nir_ssa_scalar tmp = zero;
+         nir_scalar tmp = zero;
          zero = iand;
          iand = tmp;
 
          /* If we still didn't find what we need then return */
-         if (!nir_ssa_scalar_is_const(zero))
+         if (!nir_scalar_is_const(zero))
             return false;
       }
 
       /* If the loop is not breaking on (x && y) == 0 then return */
-      if (nir_ssa_scalar_as_uint(zero) != 0)
+      if (nir_scalar_as_uint(zero) != 0)
          return false;
    }
 
-   if (!nir_ssa_scalar_is_alu(iand))
+   if (!nir_scalar_is_alu(iand))
       return false;
 
-   if (nir_ssa_scalar_alu_op(iand) != nir_op_iand)
+   if (nir_scalar_alu_op(iand) != nir_op_iand)
       return false;
 
    /* Check if an iand src is a terminator condition and try to get the induction var
@@ -1189,14 +1189,14 @@ try_find_trip_count_vars_in_iand(nir_ssa_scalar *cond,
     */
    bool found_induction_var = false;
    for (unsigned i = 0; i < 2; i++) {
-      nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(iand, i);
+      nir_scalar src = nir_scalar_chase_alu_src(iand, i);
       if (nir_is_terminator_condition_with_two_inputs(src) &&
           get_induction_and_limit_vars(src, ind, limit, limit_rhs, state)) {
          *cond = src;
          found_induction_var = true;
 
          /* If we've found one with a constant limit, stop. */
-         if (nir_ssa_scalar_is_const(*limit))
+         if (nir_scalar_is_const(*limit))
             return true;
       }
    }
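The shape try_find_trip_count_vars_in_iand() matches comes from loops that break when a conjunction fails. An illustrative C source (not from the tree) whose terminator lowers to an iand of a comparison on the induction variable and an unrelated condition:

   /* Illustrative only: "i < count" is the half the pass digs out as the
    * induction comparison; "data[i] != key" is the other iand source. The
    * loop breaks when (x && y) == 0. */
   static int find_key(const int *data, int count, int key)
   {
      int i = 0;
      while (i < count && data[i] != key)
         i++;
      return i;
   }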
@@ -1222,9 +1222,9 @@ find_trip_count(loop_info_state *state, unsigned execution_mode,
    list_for_each_entry(nir_loop_terminator, terminator,
                        &state->loop->info->loop_terminator_list,
                        loop_terminator_link) {
-      nir_ssa_scalar cond = { terminator->nif->condition.ssa, 0 };
+      nir_scalar cond = { terminator->nif->condition.ssa, 0 };
 
-      if (!nir_ssa_scalar_is_alu(cond)) {
+      if (!nir_scalar_is_alu(cond)) {
          /* If we get here the loop is dead and will get cleaned up by the
           * nir_opt_dead_cf pass.
           */
@@ -1233,13 +1233,13 @@ find_trip_count(loop_info_state *state, unsigned execution_mode,
          continue;
       }
 
-      nir_op alu_op = nir_ssa_scalar_alu_op(cond);
+      nir_op alu_op = nir_scalar_alu_op(cond);
 
       bool invert_cond = terminator->continue_from_then;
 
       bool limit_rhs;
-      nir_ssa_scalar basic_ind = { NULL, 0 };
-      nir_ssa_scalar limit;
+      nir_scalar basic_ind = { NULL, 0 };
+      nir_scalar limit;
       if ((alu_op == nir_op_inot || alu_op == nir_op_ieq) &&
           try_find_trip_count_vars_in_iand(&cond, &basic_ind, &limit,
                                            &limit_rhs, state)) {
@@ -1248,7 +1248,7 @@ find_trip_count(loop_info_state *state, unsigned execution_mode,
           * inverse of x or y (i.e. whichever contained the induction var) in
           * order to compute the trip count.
           */
-         alu_op = nir_ssa_scalar_alu_op(cond);
+         alu_op = nir_scalar_alu_op(cond);
          invert_cond = !invert_cond;
          trip_count_known = false;
          terminator->exact_trip_count_unknown = true;
@@ -1259,8 +1259,8 @@ find_trip_count(loop_info_state *state, unsigned execution_mode,
            /* Extract and invert the comparison if it is wrapped in an inot
              */
             if (alu_op == nir_op_inot) {
-               cond = nir_ssa_scalar_chase_alu_src(cond, 0);
-               alu_op = nir_ssa_scalar_alu_op(cond);
+               cond = nir_scalar_chase_alu_src(cond, 0);
+               alu_op = nir_scalar_alu_op(cond);
                invert_cond = !invert_cond;
             }
 
@@ -1282,8 +1282,8 @@ find_trip_count(loop_info_state *state, unsigned execution_mode,
 
       /* Attempt to find a constant limit for the loop */
       nir_const_value limit_val;
-      if (nir_ssa_scalar_is_const(limit)) {
-         limit_val = nir_ssa_scalar_as_const_value(limit);
+      if (nir_scalar_is_const(limit)) {
+         limit_val = nir_scalar_as_const_value(limit);
       } else {
          trip_count_known = false;
 
@@ -1312,8 +1312,8 @@ find_trip_count(loop_info_state *state, unsigned execution_mode,
        * earlier that the phi source has a scalar swizzle, we can take the
        * component from basic_ind.
        */
-      nir_ssa_scalar initial_s = { lv->init_src->ssa, basic_ind.comp };
-      nir_ssa_scalar alu_s = {
+      nir_scalar initial_s = { lv->init_src->ssa, basic_ind.comp };
+      nir_scalar alu_s = {
          lv->update_src->src.ssa,
          lv->update_src->swizzle[basic_ind.comp]
       };
@@ -1321,12 +1321,12 @@ find_trip_count(loop_info_state *state, unsigned execution_mode,
      /* We are not guaranteed that one of these sources is a constant.
        * Try to find one.
        */
-      if (!nir_ssa_scalar_is_const(initial_s) ||
-          !nir_ssa_scalar_is_const(alu_s))
+      if (!nir_scalar_is_const(initial_s) ||
+          !nir_scalar_is_const(alu_s))
          continue;
 
-      nir_const_value initial_val = nir_ssa_scalar_as_const_value(initial_s);
-      nir_const_value step_val = nir_ssa_scalar_as_const_value(alu_s);
+      nir_const_value initial_val = nir_scalar_as_const_value(initial_s);
+      nir_const_value step_val = nir_scalar_as_const_value(alu_s);
 
       int iterations = calculate_iterations(lv->basis, limit.def,
                                             initial_val, step_val, limit_val,
index 1317044..f8a5986 100644 (file)
@@ -94,9 +94,9 @@ nir_is_trivial_loop_if(nir_if *nif, nir_block *break_block)
 }
 
 static inline bool
-nir_is_terminator_condition_with_two_inputs(nir_ssa_scalar cond)
+nir_is_terminator_condition_with_two_inputs(nir_scalar cond)
 {
-   if (!nir_ssa_scalar_is_alu(cond))
+   if (!nir_scalar_is_alu(cond))
       return false;
 
    nir_alu_instr *alu = nir_instr_as_alu(cond.def->parent_instr);
@@ -105,16 +105,16 @@ nir_is_terminator_condition_with_two_inputs(nir_ssa_scalar cond)
 }
 
 static inline bool
-nir_is_supported_terminator_condition(nir_ssa_scalar cond)
+nir_is_supported_terminator_condition(nir_scalar cond)
 {
-   if (!nir_ssa_scalar_is_alu(cond))
+   if (!nir_scalar_is_alu(cond))
       return false;
 
    nir_alu_instr *alu = nir_instr_as_alu(cond.def->parent_instr);
    return nir_alu_instr_is_comparison(alu) &&
           (nir_op_infos[alu->op].num_inputs == 2 ||
            (alu->op == nir_op_inot &&
-            nir_is_terminator_condition_with_two_inputs(nir_ssa_scalar_chase_alu_src(cond, 0))));
+            nir_is_terminator_condition_with_two_inputs(nir_scalar_chase_alu_src(cond, 0))));
 }
 
 #endif /* NIR_LOOP_ANALYZE_H */
index 2ce8dd0..f70c1f7 100644
@@ -81,7 +81,7 @@ nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
 
                b.cursor = nir_before_instr(&intr->instr);
 
-               nir_ssa_def *alpha;
+               nir_def *alpha;
                if (alpha_to_one) {
                   alpha = nir_imm_float(&b, 1.0);
                } else if (intr->intrinsic == nir_intrinsic_store_deref) {
@@ -95,9 +95,9 @@ nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
                nir_variable *var = nir_state_variable_create(shader, glsl_float_type(),
                                                              "gl_AlphaRefMESA",
                                                              alpha_ref_state_tokens);
-               nir_ssa_def *alpha_ref = nir_load_var(&b, var);
+               nir_def *alpha_ref = nir_load_var(&b, var);
 
-               nir_ssa_def *condition =
+               nir_def *condition =
                   nir_compare_func(&b, func, alpha, alpha_ref);
 
                nir_discard_if(&b, nir_inot(&b, condition));
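The lowered alpha test, restated as plain C for clarity (COMPARE_FUNC_GREATER shown; with alpha_to_one the written alpha is replaced by 1.0 before the comparison). This is a sketch of the emitted semantics, not the pass itself:

   static int alpha_test_passes(float alpha, float alpha_ref, int alpha_to_one)
   {
      if (alpha_to_one)
         alpha = 1.0f;
      return alpha > alpha_ref;   /* the fragment is discarded when false */
   }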
index a0d4bee..c967365 100644
@@ -47,7 +47,7 @@ lower_alu_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
 
    nir_alu_instr *instr = nir_instr_as_alu(instr_);
 
-   nir_ssa_def *lowered = NULL;
+   nir_def *lowered = NULL;
 
    b->cursor = nir_before_instr(&instr->instr);
    b->exact = instr->exact;
@@ -59,15 +59,15 @@ lower_alu_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
           *
           * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel
           */
-         nir_ssa_def *c1 = nir_imm_int(b, 1);
-         nir_ssa_def *c2 = nir_imm_int(b, 2);
-         nir_ssa_def *c4 = nir_imm_int(b, 4);
-         nir_ssa_def *c8 = nir_imm_int(b, 8);
-         nir_ssa_def *c16 = nir_imm_int(b, 16);
-         nir_ssa_def *c33333333 = nir_imm_int(b, 0x33333333);
-         nir_ssa_def *c55555555 = nir_imm_int(b, 0x55555555);
-         nir_ssa_def *c0f0f0f0f = nir_imm_int(b, 0x0f0f0f0f);
-         nir_ssa_def *c00ff00ff = nir_imm_int(b, 0x00ff00ff);
+         nir_def *c1 = nir_imm_int(b, 1);
+         nir_def *c2 = nir_imm_int(b, 2);
+         nir_def *c4 = nir_imm_int(b, 4);
+         nir_def *c8 = nir_imm_int(b, 8);
+         nir_def *c16 = nir_imm_int(b, 16);
+         nir_def *c33333333 = nir_imm_int(b, 0x33333333);
+         nir_def *c55555555 = nir_imm_int(b, 0x55555555);
+         nir_def *c0f0f0f0f = nir_imm_int(b, 0x0f0f0f0f);
+         nir_def *c00ff00ff = nir_imm_int(b, 0x00ff00ff);
 
          lowered = nir_ssa_for_alu_src(b, instr, 0);
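The constants in this hunk are exactly the masks from the referenced Stanford bithack. For reference, the 32-bit reversal the lowering open-codes, as plain C:

   #include <stdint.h>

   static uint32_t bitfield_reverse32(uint32_t v)
   {
      v = ((v >> 1) & 0x55555555u) | ((v & 0x55555555u) << 1);   /* swap odd/even bits */
      v = ((v >> 2) & 0x33333333u) | ((v & 0x33333333u) << 2);   /* swap bit pairs */
      v = ((v >> 4) & 0x0f0f0f0fu) | ((v & 0x0f0f0f0fu) << 4);   /* swap nibbles */
      v = ((v >> 8) & 0x00ff00ffu) | ((v & 0x00ff00ffu) << 8);   /* swap bytes */
      return (v >> 16) | (v << 16);                              /* swap halves */
   }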
 
@@ -103,14 +103,14 @@ lower_alu_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
           *
           * http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
           */
-         nir_ssa_def *c1 = nir_imm_int(b, 1);
-         nir_ssa_def *c2 = nir_imm_int(b, 2);
-         nir_ssa_def *c4 = nir_imm_int(b, 4);
-         nir_ssa_def *c24 = nir_imm_int(b, 24);
-         nir_ssa_def *c33333333 = nir_imm_int(b, 0x33333333);
-         nir_ssa_def *c55555555 = nir_imm_int(b, 0x55555555);
-         nir_ssa_def *c0f0f0f0f = nir_imm_int(b, 0x0f0f0f0f);
-         nir_ssa_def *c01010101 = nir_imm_int(b, 0x01010101);
+         nir_def *c1 = nir_imm_int(b, 1);
+         nir_def *c2 = nir_imm_int(b, 2);
+         nir_def *c4 = nir_imm_int(b, 4);
+         nir_def *c24 = nir_imm_int(b, 24);
+         nir_def *c33333333 = nir_imm_int(b, 0x33333333);
+         nir_def *c55555555 = nir_imm_int(b, 0x55555555);
+         nir_def *c0f0f0f0f = nir_imm_int(b, 0x0f0f0f0f);
+         nir_def *c01010101 = nir_imm_int(b, 0x01010101);
 
          lowered = nir_ssa_for_alu_src(b, instr, 0);
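Likewise, the bit_count lowering follows the referenced parallel popcount hack; the same computation as plain C:

   #include <stdint.h>

   static uint32_t bit_count32(uint32_t v)
   {
      v = v - ((v >> 1) & 0x55555555u);                   /* 2-bit partial sums */
      v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);   /* 4-bit partial sums */
      v = (v + (v >> 4)) & 0x0f0f0f0fu;                   /* 8-bit partial sums */
      return (v * 0x01010101u) >> 24;                     /* horizontal add via multiply */
   }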
 
@@ -136,23 +136,23 @@ lower_alu_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
    case nir_op_imul_high:
    case nir_op_umul_high:
       if (b->shader->options->lower_mul_high) {
-         nir_ssa_def *src0 = nir_ssa_for_alu_src(b, instr, 0);
-         nir_ssa_def *src1 = nir_ssa_for_alu_src(b, instr, 1);
+         nir_def *src0 = nir_ssa_for_alu_src(b, instr, 0);
+         nir_def *src1 = nir_ssa_for_alu_src(b, instr, 1);
          if (src0->bit_size < 32) {
             /* Just do the math in 32-bit space and shift the result */
             nir_alu_type base_type = nir_op_infos[instr->op].output_type;
 
-            nir_ssa_def *src0_32 = nir_type_convert(b, src0, base_type, base_type | 32, nir_rounding_mode_undef);
-            nir_ssa_def *src1_32 = nir_type_convert(b, src1, base_type, base_type | 32, nir_rounding_mode_undef);
-            nir_ssa_def *dest_32 = nir_imul(b, src0_32, src1_32);
-            nir_ssa_def *dest_shifted = nir_ishr_imm(b, dest_32, src0->bit_size);
+            nir_def *src0_32 = nir_type_convert(b, src0, base_type, base_type | 32, nir_rounding_mode_undef);
+            nir_def *src1_32 = nir_type_convert(b, src1, base_type, base_type | 32, nir_rounding_mode_undef);
+            nir_def *dest_32 = nir_imul(b, src0_32, src1_32);
+            nir_def *dest_shifted = nir_ishr_imm(b, dest_32, src0->bit_size);
             lowered = nir_type_convert(b, dest_shifted, base_type, base_type | src0->bit_size, nir_rounding_mode_undef);
          } else {
-            nir_ssa_def *cshift = nir_imm_int(b, src0->bit_size / 2);
-            nir_ssa_def *cmask = nir_imm_intN_t(b, (1ull << (src0->bit_size / 2)) - 1, src0->bit_size);
-            nir_ssa_def *different_signs = NULL;
+            nir_def *cshift = nir_imm_int(b, src0->bit_size / 2);
+            nir_def *cmask = nir_imm_intN_t(b, (1ull << (src0->bit_size / 2)) - 1, src0->bit_size);
+            nir_def *different_signs = NULL;
             if (instr->op == nir_op_imul_high) {
-               nir_ssa_def *c0 = nir_imm_intN_t(b, 0, src0->bit_size);
+               nir_def *c0 = nir_imm_intN_t(b, 0, src0->bit_size);
                different_signs = nir_ixor(b,
                                           nir_ilt(b, src0, c0),
                                           nir_ilt(b, src1, c0));
@@ -167,17 +167,17 @@ lower_alu_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
              *
              * Start by splitting into the 4 multiplies.
              */
-            nir_ssa_def *src0l = nir_iand(b, src0, cmask);
-            nir_ssa_def *src1l = nir_iand(b, src1, cmask);
-            nir_ssa_def *src0h = nir_ushr(b, src0, cshift);
-            nir_ssa_def *src1h = nir_ushr(b, src1, cshift);
+            nir_def *src0l = nir_iand(b, src0, cmask);
+            nir_def *src1l = nir_iand(b, src1, cmask);
+            nir_def *src0h = nir_ushr(b, src0, cshift);
+            nir_def *src1h = nir_ushr(b, src1, cshift);
 
-            nir_ssa_def *lo = nir_imul(b, src0l, src1l);
-            nir_ssa_def *m1 = nir_imul(b, src0l, src1h);
-            nir_ssa_def *m2 = nir_imul(b, src0h, src1l);
-            nir_ssa_def *hi = nir_imul(b, src0h, src1h);
+            nir_def *lo = nir_imul(b, src0l, src1l);
+            nir_def *m1 = nir_imul(b, src0l, src1h);
+            nir_def *m2 = nir_imul(b, src0h, src1l);
+            nir_def *hi = nir_imul(b, src0h, src1h);
 
-            nir_ssa_def *tmp;
+            nir_def *tmp;
 
             tmp = nir_ishl(b, m1, cshift);
             hi = nir_iadd(b, hi, nir_uadd_carry(b, lo, tmp));
@@ -195,7 +195,7 @@ lower_alu_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
                * high 32 bits.  Consider -3 * 2.  The high 32 bits are 0, but the
                * desired result is -1, not -0!  Recall -x == ~x + 1.
                 * desired result is -1, not -0!  Recall -x == ~x + 1.
                 */
-               nir_ssa_def *c1 = nir_imm_intN_t(b, 1, src0->bit_size);
+               nir_def *c1 = nir_imm_intN_t(b, 1, src0->bit_size);
                hi = nir_bcsel(b, different_signs,
                               nir_iadd(b,
                                        nir_inot(b, hi),
@@ -213,7 +213,7 @@ lower_alu_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
    }
 
    if (lowered) {
-      nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, lowered);
+      nir_def_rewrite_uses(&instr->dest.dest.ssa, lowered);
       nir_instr_remove(&instr->instr);
       return true;
    } else {
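The imul_high/umul_high lowering above splits each operand into halves and reassembles the upper word from the four partial products. A reference version of the unsigned case in plain C, mirroring the nir_uadd_carry/ushr structure; for imul_high on operands of different signs, the fixup (truncated in the hunk) negates the 64-bit magnitude per the quoted -x == ~x + 1 identity:

   #include <stdint.h>

   static uint32_t umul_high32(uint32_t a, uint32_t b)
   {
      uint32_t cmask = 0xffffu;            /* (1 << (bit_size / 2)) - 1 */
      uint32_t al = a & cmask, ah = a >> 16;
      uint32_t bl = b & cmask, bh = b >> 16;

      uint32_t lo = al * bl;               /* bits  0..31 of the product */
      uint32_t m1 = al * bh;               /* cross terms, bits 16..47 */
      uint32_t m2 = ah * bl;
      uint32_t hi = ah * bh;               /* bits 32..63 */

      uint32_t t = m1 << 16;
      hi += (lo + t) < lo;                 /* nir_uadd_carry */
      lo += t;
      hi += m1 >> 16;

      t = m2 << 16;
      hi += (lo + t) < lo;
      lo += t;
      hi += m2 >> 16;

      return hi;
   }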
index 60bf4a6..2786140 100644
@@ -92,13 +92,13 @@ nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
    nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components, bit_size);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
                 nir_builder *builder, bool reverse_order)
 {
    unsigned num_components = nir_op_infos[alu->op].input_sizes[0];
 
-   nir_ssa_def *last = NULL;
+   nir_def *last = NULL;
    for (int i = 0; i < num_components; i++) {
       int channel = reverse_order ? num_components - 1 - i : i;
       nir_alu_instr *chan = nir_alu_instr_create(builder->shader, chan_op);
@@ -139,7 +139,7 @@ will_lower_ffma(nir_shader *shader, unsigned bit_size)
    unreachable("bad bit size");
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_fdot(nir_alu_instr *alu, nir_builder *builder)
 {
    /* Reversed order can result in lower instruction count because it
@@ -156,7 +156,7 @@ lower_fdot(nir_alu_instr *alu, nir_builder *builder)
 
    unsigned num_components = nir_op_infos[alu->op].input_sizes[0];
 
-   nir_ssa_def *prev = NULL;
+   nir_def *prev = NULL;
    for (int i = 0; i < num_components; i++) {
       int channel = reverse_order ? num_components - 1 - i : i;
       nir_alu_instr *instr = nir_alu_instr_create(
@@ -178,7 +178,7 @@ lower_fdot(nir_alu_instr *alu, nir_builder *builder)
    return prev;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
 {
    struct alu_width_data *data = _data;
@@ -223,7 +223,7 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
       if (!b->shader->options->lower_pack_half_2x16)
          return NULL;
 
-      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
+      nir_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
       return nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
                                       nir_channel(b, src_vec2, 1));
    }
@@ -242,7 +242,7 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
       if (!b->shader->options->lower_unpack_half_2x16)
          return NULL;
 
-      nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);
+      nir_def *packed = nir_ssa_for_alu_src(b, alu, 0);
       if (alu->op == nir_op_unpack_half_2x16_flush_to_zero) {
          return nir_vec2(b,
                          nir_unpack_half_2x16_split_x_flush_to_zero(b,
@@ -260,8 +260,8 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
       assert(b->shader->options->lower_pack_snorm_2x16 ||
              b->shader->options->lower_pack_unorm_2x16);
 
-      nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
-                                          nir_imm_int(b, 0));
+      nir_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
+                                      nir_imm_int(b, 0));
       return nir_ior(b, nir_ishl(b, nir_channel(b, word, 1), nir_imm_int(b, 16)),
                      nir_channel(b, word, 0));
    }
@@ -270,21 +270,21 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
       assert(b->shader->options->lower_pack_snorm_4x8 ||
              b->shader->options->lower_pack_unorm_4x8);
 
-      nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
-                                         nir_imm_int(b, 0));
+      nir_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
+                                     nir_imm_int(b, 0));
       return nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3), nir_imm_int(b, 24)), nir_ishl(b, nir_channel(b, byte, 2), nir_imm_int(b, 16))),
                      nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1), nir_imm_int(b, 8)),
                              nir_channel(b, byte, 0)));
    }
 
    case nir_op_fdph: {
-      nir_ssa_def *src0_vec = nir_ssa_for_alu_src(b, alu, 0);
-      nir_ssa_def *src1_vec = nir_ssa_for_alu_src(b, alu, 1);
+      nir_def *src0_vec = nir_ssa_for_alu_src(b, alu, 0);
+      nir_def *src1_vec = nir_ssa_for_alu_src(b, alu, 1);
 
       /* Only use reverse order for imprecise fdph, see explanation in lower_fdot. */
       bool reverse_order = !b->exact;
       if (will_lower_ffma(b->shader, alu->dest.dest.ssa.bit_size)) {
-         nir_ssa_def *sum[4];
+         nir_def *sum[4];
          for (unsigned i = 0; i < 3; i++) {
             int dest = reverse_order ? 3 - i : i;
             sum[dest] = nir_fmul(b, nir_channel(b, src0_vec, i),
@@ -294,12 +294,12 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
 
          return nir_fadd(b, nir_fadd(b, nir_fadd(b, sum[0], sum[1]), sum[2]), sum[3]);
       } else if (reverse_order) {
-         nir_ssa_def *sum = nir_channel(b, src1_vec, 3);
+         nir_def *sum = nir_channel(b, src1_vec, 3);
          for (int i = 2; i >= 0; i--)
             sum = nir_ffma(b, nir_channel(b, src0_vec, i), nir_channel(b, src1_vec, i), sum);
          return sum;
       } else {
-         nir_ssa_def *sum = nir_fmul(b, nir_channel(b, src0_vec, 0), nir_channel(b, src1_vec, 0));
+         nir_def *sum = nir_fmul(b, nir_channel(b, src0_vec, 0), nir_channel(b, src1_vec, 0));
          sum = nir_ffma(b, nir_channel(b, src0_vec, 1), nir_channel(b, src1_vec, 1), sum);
          sum = nir_ffma(b, nir_channel(b, src0_vec, 2), nir_channel(b, src1_vec, 2), sum);
          return nir_fadd(b, sum, nir_channel(b, src1_vec, 3));
@@ -310,7 +310,7 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
       if (!b->shader->options->lower_pack_64_2x32)
          return NULL;
 
-      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
+      nir_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
       return nir_pack_64_2x32_split(b, nir_channel(b, src_vec2, 0),
                                     nir_channel(b, src_vec2, 1));
    }
@@ -318,11 +318,11 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
       if (!b->shader->options->lower_pack_64_4x16)
          return NULL;
 
-      nir_ssa_def *src_vec4 = nir_ssa_for_alu_src(b, alu, 0);
-      nir_ssa_def *xy = nir_pack_32_2x16_split(b, nir_channel(b, src_vec4, 0),
-                                               nir_channel(b, src_vec4, 1));
-      nir_ssa_def *zw = nir_pack_32_2x16_split(b, nir_channel(b, src_vec4, 2),
-                                               nir_channel(b, src_vec4, 3));
+      nir_def *src_vec4 = nir_ssa_for_alu_src(b, alu, 0);
+      nir_def *xy = nir_pack_32_2x16_split(b, nir_channel(b, src_vec4, 0),
+                                           nir_channel(b, src_vec4, 1));
+      nir_def *zw = nir_pack_32_2x16_split(b, nir_channel(b, src_vec4, 2),
+                                           nir_channel(b, src_vec4, 3));
 
       return nir_pack_64_2x32_split(b, xy, zw);
    }
@@ -330,7 +330,7 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
       if (!b->shader->options->lower_pack_32_2x16)
          return NULL;
 
-      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
+      nir_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
       return nir_pack_32_2x16_split(b, nir_channel(b, src_vec2, 0),
                                     nir_channel(b, src_vec2, 1));
    }
index 7317105..08a8d65 100644
 
 static void
 build_write_masked_store(nir_builder *b, nir_deref_instr *vec_deref,
-                         nir_ssa_def *value, unsigned component)
+                         nir_def *value, unsigned component)
 {
    assert(value->num_components == 1);
    unsigned num_components = glsl_get_components(vec_deref->type);
    assert(num_components > 1 && num_components <= NIR_MAX_VEC_COMPONENTS);
 
-   nir_ssa_def *u = nir_ssa_undef(b, 1, value->bit_size);
-   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+   nir_def *u = nir_undef(b, 1, value->bit_size);
+   nir_def *comps[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < num_components; i++)
       comps[i] = (i == component) ? value : u;
 
-   nir_ssa_def *vec = nir_vec(b, comps, num_components);
+   nir_def *vec = nir_vec(b, comps, num_components);
    nir_store_deref(b, vec_deref, vec, (1u << component));
 }
 
 static void
 build_write_masked_stores(nir_builder *b, nir_deref_instr *vec_deref,
-                          nir_ssa_def *value, nir_ssa_def *index,
+                          nir_def *value, nir_def *index,
                           unsigned start, unsigned end)
 {
    if (start == end - 1) {
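build_write_masked_stores(), whose body is cut off by the hunk above, recurses with a binary search over the runtime index so that each leaf emits a single single-component masked store. A plain-C transplant of the recursion, with an ordinary if in place of nir_push_if:

   /* Sketch only: idx is assumed to be in [start, end). Each leaf writes
    * exactly one component, matching the (1u << component) write mask in
    * build_write_masked_store() above. */
   static void store_component(float *vec, unsigned idx, float value,
                               unsigned start, unsigned end)
   {
      if (start == end - 1) {
         vec[start] = value;            /* leaf: one-component store */
      } else {
         unsigned mid = start + (end - start) / 2;
         if (idx < mid)                 /* becomes nir_push_if in the pass */
            store_component(vec, idx, value, start, mid);
         else
            store_component(vec, idx, value, mid, end);
      }
   }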
@@ -106,7 +106,7 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
          b.cursor = nir_after_instr(&intrin->instr);
 
          if (intrin->intrinsic == nir_intrinsic_store_deref) {
-            nir_ssa_def *value = intrin->src[1].ssa;
+            nir_def *value = intrin->src[1].ssa;
 
             if (nir_src_is_const(deref->arr.index)) {
                if (!(options & nir_lower_direct_array_deref_of_vec_store))
@@ -122,7 +122,7 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
                if (!(options & nir_lower_indirect_array_deref_of_vec_store))
                   continue;
 
-               nir_ssa_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
+               nir_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
                build_write_masked_stores(&b, vec_deref, value, index,
                                          0, num_components);
             }
@@ -144,17 +144,17 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
             intrin->dest.ssa.num_components = num_components;
             intrin->num_components = num_components;
 
-            nir_ssa_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
-            nir_ssa_def *scalar =
+            nir_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
+            nir_def *scalar =
                nir_vector_extract(&b, &intrin->dest.ssa, index);
             if (scalar->parent_instr->type == nir_instr_type_ssa_undef) {
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                        scalar);
+               nir_def_rewrite_uses(&intrin->dest.ssa,
+                                    scalar);
                nir_instr_remove(&intrin->instr);
             } else {
-               nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
-                                              scalar,
-                                              scalar->parent_instr);
+               nir_def_rewrite_uses_after(&intrin->dest.ssa,
+                                          scalar,
+                                          scalar->parent_instr);
             }
             progress = true;
          }
index b9632d3..c908d65 100644
@@ -91,10 +91,10 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b, un
       return false;
    }
 
-   nir_ssa_def *buffer = nir_imm_int(b, ssbo_offset + nir_intrinsic_base(instr));
-   nir_ssa_def *temp = NULL;
+   nir_def *buffer = nir_imm_int(b, ssbo_offset + nir_intrinsic_base(instr));
+   nir_def *temp = NULL;
 
-   nir_ssa_def *offset_load = NULL;
+   nir_def *offset_load = NULL;
    if (offset_align_state) {
       nir_deref_instr *deref_offset = deref_offset_var(b, nir_intrinsic_base(instr), offset_align_state);
       offset_load = nir_load_deref(b, deref_offset);
@@ -163,10 +163,10 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b, un
 
    if (instr->intrinsic == nir_intrinsic_atomic_counter_pre_dec) {
       b->cursor = nir_after_instr(&new_instr->instr);
-      nir_ssa_def *result = nir_iadd(b, &new_instr->dest.ssa, temp);
-      nir_ssa_def_rewrite_uses(&instr->dest.ssa, result);
+      nir_def *result = nir_iadd(b, &new_instr->dest.ssa, temp);
+      nir_def_rewrite_uses(&instr->dest.ssa, result);
    } else {
-      nir_ssa_def_rewrite_uses(&instr->dest.ssa, &new_instr->dest.ssa);
+      nir_def_rewrite_uses(&instr->dest.ssa, &new_instr->dest.ssa);
    }
 
    return true;
index 467a3cb..8a6e148 100644
@@ -30,8 +30,8 @@
  * the original bit-size.
  */
 
-static nir_ssa_def *
-convert_to_bit_size(nir_builder *bld, nir_ssa_def *src,
+static nir_def *
+convert_to_bit_size(nir_builder *bld, nir_def *src,
                     nir_alu_type type, unsigned bit_size)
 {
    assert(src->bit_size < bit_size);
@@ -57,9 +57,9 @@ lower_alu_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)
    bld->cursor = nir_before_instr(&alu->instr);
 
    /* Convert each source to the requested bit-size */
-   nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS] = { NULL };
+   nir_def *srcs[NIR_MAX_VEC_COMPONENTS] = { NULL };
    for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
-      nir_ssa_def *src = nir_ssa_for_alu_src(bld, alu, i);
+      nir_def *src = nir_ssa_for_alu_src(bld, alu, i);
 
       nir_alu_type type = nir_op_infos[op].input_types[i];
       if (nir_alu_type_get_type_size(type) == 0)
@@ -77,7 +77,7 @@ lower_alu_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)
    }
 
    /* Emit the lowered ALU instruction */
-   nir_ssa_def *lowered_dst = NULL;
+   nir_def *lowered_dst = NULL;
    if (op == nir_op_imul_high || op == nir_op_umul_high) {
       assert(dst_bit_size * 2 <= bit_size);
       lowered_dst = nir_imul(bld, srcs[0], srcs[1]);
@@ -119,10 +119,10 @@ lower_alu_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)
    if (nir_alu_type_get_type_size(nir_op_infos[op].output_type) == 0 &&
        dst_bit_size != bit_size) {
       nir_alu_type type = nir_op_infos[op].output_type;
-      nir_ssa_def *dst = nir_convert_to_bit_size(bld, lowered_dst, type, dst_bit_size);
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, dst);
+      nir_def *dst = nir_convert_to_bit_size(bld, lowered_dst, type, dst_bit_size);
+      nir_def_rewrite_uses(&alu->dest.dest.ssa, dst);
    } else {
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, lowered_dst);
+      nir_def_rewrite_uses(&alu->dest.dest.ssa, lowered_dst);
    }
 }
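The overall round trip this pass performs, sketched for a hypothetical 16-bit add lowered to 32 bits: each source is widened (convert_to_bit_size above), the op runs at the wider size, and the result is narrowed back, which is what the nir_u2uN/nir_convert_to_bit_size calls do:

   #include <stdint.h>

   static uint16_t iadd16_via_32(uint16_t a, uint16_t b)
   {
      uint32_t wide = (uint32_t)a + (uint32_t)b;   /* op at the lifted bit size */
      return (uint16_t)wide;                       /* convert back, dropping high bits */
   }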
 
@@ -159,8 +159,8 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
       nir_intrinsic_instr *new_intrin =
          nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intrin->instr));
 
-      nir_ssa_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,
-                                                     type, bit_size);
+      nir_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,
+                                                 type, bit_size);
       new_intrin->src[0] = nir_src_for_ssa(new_src);
 
       if (intrin->intrinsic == nir_intrinsic_vote_feq ||
@@ -177,7 +177,7 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
 
       nir_builder_instr_insert(b, &new_intrin->instr);
 
-      nir_ssa_def *res = &new_intrin->dest.ssa;
+      nir_def *res = &new_intrin->dest.ssa;
       if (intrin->intrinsic == nir_intrinsic_exclusive_scan) {
          /* For exclusive scan, we have to be careful because the identity
           * value for the higher bit size may get added into the mix by
@@ -205,7 +205,7 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
           intrin->intrinsic != nir_intrinsic_vote_ieq)
          res = nir_u2uN(b, res, old_bit_size);
 
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, res);
+      nir_def_rewrite_uses(&intrin->dest.ssa, res);
       break;
    }
 
@@ -223,7 +223,7 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
 
    nir_foreach_phi_src(src, phi) {
       b->cursor = nir_after_block_before_jump(src->pred);
-      nir_ssa_def *new_src = nir_u2uN(b, src->src.ssa, bit_size);
+      nir_def *new_src = nir_u2uN(b, src->src.ssa, bit_size);
 
       nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));
    }
@@ -232,9 +232,9 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
 
    b->cursor = nir_after_instr(&last_phi->instr);
 
-   nir_ssa_def *new_dest = nir_u2uN(b, &phi->dest.ssa, old_bit_size);
-   nir_ssa_def_rewrite_uses_after(&phi->dest.ssa, new_dest,
-                                  new_dest->parent_instr);
+   nir_def *new_dest = nir_u2uN(b, &phi->dest.ssa, old_bit_size);
+   nir_def_rewrite_uses_after(&phi->dest.ssa, new_dest,
+                              new_dest->parent_instr);
 }
 
 static bool
@@ -315,8 +315,8 @@ split_phi(nir_builder *b, nir_phi_instr *phi)
 
       b->cursor = nir_before_src(&src->src);
 
-      nir_ssa_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);
-      nir_ssa_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);
+      nir_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);
+      nir_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);
 
       nir_phi_instr_add_src(lowered[0], src->pred, nir_src_for_ssa(x));
       nir_phi_instr_add_src(lowered[1], src->pred, nir_src_for_ssa(y));
@@ -332,8 +332,8 @@ split_phi(nir_builder *b, nir_phi_instr *phi)
    nir_builder_instr_insert(b, &lowered[1]->instr);
 
    b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
-   nir_ssa_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa);
-   nir_ssa_def_rewrite_uses(&phi->dest.ssa, merged);
+   nir_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa);
+   nir_def_rewrite_uses(&phi->dest.ssa, merged);
    nir_instr_remove(&phi->instr);
 }
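split_phi() carries a 64-bit value through the phi as two 32-bit halves and repacks it after the phis. The pack/unpack semantics, as scalar C:

   #include <stdint.h>

   static void unpack_64_2x32(uint64_t v, uint32_t *x, uint32_t *y)
   {
      *x = (uint32_t)v;           /* nir_unpack_64_2x32_split_x */
      *y = (uint32_t)(v >> 32);   /* nir_unpack_64_2x32_split_y */
   }

   static uint64_t pack_64_2x32(uint32_t x, uint32_t y)
   {
      return (uint64_t)x | ((uint64_t)y << 32);   /* nir_pack_64_2x32_split */
   }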
 
index 12c3b0b..9fa33f1 100644
@@ -56,9 +56,9 @@ static void
 lower_bitmap(nir_shader *shader, nir_builder *b,
              const nir_lower_bitmap_options *options)
 {
-   nir_ssa_def *texcoord;
+   nir_def *texcoord;
    nir_tex_instr *tex;
-   nir_ssa_def *cond;
+   nir_def *cond;
 
    texcoord = nir_load_var(b, nir_get_variable_with_location(shader, nir_var_shader_in,
                                                              VARYING_SLOT_TEX0, glsl_vec4_type()));
index 71ad9da..b1b2f37 100644
 
 struct ctx {
    const nir_lower_blend_options *options;
-   nir_ssa_def *src1[8];
+   nir_def *src1[8];
 };
 
 /* Given processed factors, combine them per a blend function */
 
-static nir_ssa_def *
+static nir_def *
 nir_blend_func(
    nir_builder *b,
    enum pipe_blend_func func,
-   nir_ssa_def *src, nir_ssa_def *dst)
+   nir_def *src, nir_def *dst)
 {
    switch (func) {
    case PIPE_BLEND_ADD:
@@ -82,26 +82,26 @@ nir_blend_factored(enum pipe_blend_func func)
 }
 
 /* Compute a src_alpha_saturate factor */
-static nir_ssa_def *
+static nir_def *
 nir_alpha_saturate(
    nir_builder *b,
-   nir_ssa_def *src, nir_ssa_def *dst,
+   nir_def *src, nir_def *dst,
    unsigned chan)
 {
-   nir_ssa_def *Asrc = nir_channel(b, src, 3);
-   nir_ssa_def *Adst = nir_channel(b, dst, 3);
-   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0, src->bit_size);
-   nir_ssa_def *Adsti = nir_fsub(b, one, Adst);
+   nir_def *Asrc = nir_channel(b, src, 3);
+   nir_def *Adst = nir_channel(b, dst, 3);
+   nir_def *one = nir_imm_floatN_t(b, 1.0, src->bit_size);
+   nir_def *Adsti = nir_fsub(b, one, Adst);
 
    return (chan < 3) ? nir_fmin(b, Asrc, Adsti) : one;
 }
 
 /* Returns a scalar single factor, unmultiplied */
 
-static nir_ssa_def *
+static nir_def *
 nir_blend_factor_value(
    nir_builder *b,
-   nir_ssa_def *src, nir_ssa_def *src1, nir_ssa_def *dst, nir_ssa_def *bconst,
+   nir_def *src, nir_def *src1, nir_def *dst, nir_def *bconst,
    unsigned chan,
    enum pipe_blendfactor factor_without_invert)
 {
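nir_alpha_saturate() in the hunk above is the classic SRC_ALPHA_SATURATE blend factor: min(As, 1 - Ad) for the RGB channels and 1.0 for alpha. As scalar C:

   #include <math.h>

   static float src_alpha_saturate(float a_src, float a_dst, unsigned chan)
   {
      return chan < 3 ? fminf(a_src, 1.0f - a_dst) : 1.0f;
   }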
@@ -132,15 +132,15 @@ nir_blend_factor_value(
    }
 }
 
-static nir_ssa_def *
-nir_fsat_signed(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+nir_fsat_signed(nir_builder *b, nir_def *x)
 {
    return nir_fclamp(b, x, nir_imm_floatN_t(b, -1.0, x->bit_size),
                      nir_imm_floatN_t(b, +1.0, x->bit_size));
 }
 
-static nir_ssa_def *
-nir_fsat_to_format(nir_builder *b, nir_ssa_def *x, enum pipe_format format)
+static nir_def *
+nir_fsat_to_format(nir_builder *b, nir_def *x, enum pipe_format format)
 {
    if (util_format_is_unorm(format))
       return nir_fsat(b, x);
@@ -213,16 +213,16 @@ channel_uses_dest(nir_lower_blend_channel chan)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 nir_blend_factor(
    nir_builder *b,
-   nir_ssa_def *raw_scalar,
-   nir_ssa_def *src, nir_ssa_def *src1, nir_ssa_def *dst, nir_ssa_def *bconst,
+   nir_def *raw_scalar,
+   nir_def *src, nir_def *src1, nir_def *dst, nir_def *bconst,
    unsigned chan,
    enum pipe_blendfactor factor,
    enum pipe_format format)
 {
-   nir_ssa_def *f =
+   nir_def *f =
       nir_blend_factor_value(b, src, src1, dst, bconst, chan,
                              util_blendfactor_without_invert(factor));
 
@@ -237,12 +237,12 @@ nir_blend_factor(
 
 /* Given a colormask, "blend" with the destination */
 
-static nir_ssa_def *
+static nir_def *
 nir_color_mask(
    nir_builder *b,
    unsigned mask,
-   nir_ssa_def *src,
-   nir_ssa_def *dst)
+   nir_def *src,
+   nir_def *dst)
 {
    return nir_vec4(b,
                    nir_channel(b, (mask & (1 << 0)) ? src : dst, 0),
@@ -251,11 +251,11 @@ nir_color_mask(
                    nir_channel(b, (mask & (1 << 3)) ? src : dst, 3));
 }
 
-static nir_ssa_def *
+static nir_def *
 nir_logicop_func(
    nir_builder *b,
    enum pipe_logicop func,
-   nir_ssa_def *src, nir_ssa_def *dst, nir_ssa_def *bitmask)
+   nir_def *src, nir_def *dst, nir_def *bitmask)
 {
    switch (func) {
    case PIPE_LOGICOP_CLEAR:
@@ -295,12 +295,12 @@ nir_logicop_func(
    unreachable("Invalid logciop function");
 }
 
-static nir_ssa_def *
+static nir_def *
 nir_blend_logicop(
    nir_builder *b,
    const nir_lower_blend_options *options,
    unsigned rt,
-   nir_ssa_def *src, nir_ssa_def *dst)
+   nir_def *src, nir_def *dst)
 {
    unsigned bit_size = src->bit_size;
 
@@ -345,8 +345,8 @@ nir_blend_logicop(
    for (int i = 0; i < 4; ++i)
       mask[i] = nir_const_value_for_uint(BITFIELD_MASK(bits[i]), 32);
 
-   nir_ssa_def *out = nir_logicop_func(b, options->logicop_func, src, dst,
-                                       nir_build_imm(b, 4, 32, mask));
+   nir_def *out = nir_logicop_func(b, options->logicop_func, src, dst,
+                                   nir_build_imm(b, 4, 32, mask));
 
    if (util_format_is_unorm(format)) {
       out = nir_format_unorm_to_float(b, out, bits);
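The unorm path snaps both colors to N-bit integers, applies the logic op under the per-channel bit mask, and converts back to float. A sketch for one 8-bit channel with PIPE_LOGICOP_XOR; roundf stands in for the pass's format conversion helpers:

   #include <math.h>
   #include <stdint.h>

   static float logicop_xor_unorm8(float src, float dst)
   {
      uint32_t s = (uint32_t)roundf(src * 255.0f);
      uint32_t d = (uint32_t)roundf(dst * 255.0f);
      uint32_t out = (s ^ d) & 0xffu;   /* BITFIELD_MASK(8) */
      return (float)out / 255.0f;
   }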
@@ -375,12 +375,12 @@ channel_exists(const struct util_format_description *desc, unsigned i)
  * return the blended color
  */
 
-static nir_ssa_def *
+static nir_def *
 nir_blend(
    nir_builder *b,
    const nir_lower_blend_options *options,
    unsigned rt,
-   nir_ssa_def *src, nir_ssa_def *src1, nir_ssa_def *dst)
+   nir_def *src, nir_def *src1, nir_def *dst)
 {
    /* Don't crash if src1 isn't written. It doesn't matter what dual colour we
     * blend with in that case, as long as we don't dereference NULL.
@@ -389,7 +389,7 @@ nir_blend(
       src1 = nir_imm_zero(b, 4, src->bit_size);
 
    /* Grab the blend constant ahead of time */
-   nir_ssa_def *bconst;
+   nir_def *bconst;
    if (options->scalar_blend_const) {
       bconst = nir_vec4(b,
                         nir_load_blend_const_color_r_float(b),
@@ -430,8 +430,8 @@ nir_blend(
    const struct util_format_description *desc =
       util_format_description(format);
 
-   nir_ssa_def *zero = nir_imm_floatN_t(b, 0.0, dst->bit_size);
-   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0, dst->bit_size);
+   nir_def *zero = nir_imm_floatN_t(b, 0.0, dst->bit_size);
+   nir_def *one = nir_imm_floatN_t(b, 1.0, dst->bit_size);
 
    dst = nir_vec4(b,
                   channel_exists(desc, 0) ? nir_channel(b, dst, 0) : zero,
@@ -440,15 +440,15 @@ nir_blend(
                   channel_exists(desc, 3) ? nir_channel(b, dst, 3) : one);
 
    /* We blend per channel and recombine later */
-   nir_ssa_def *channels[4];
+   nir_def *channels[4];
 
    for (unsigned c = 0; c < 4; ++c) {
       /* Decide properties based on channel */
       nir_lower_blend_channel chan =
          (c < 3) ? options->rt[rt].rgb : options->rt[rt].alpha;
 
-      nir_ssa_def *psrc = nir_channel(b, src, c);
-      nir_ssa_def *pdst = nir_channel(b, dst, c);
+      nir_def *psrc = nir_channel(b, src, c);
+      nir_def *pdst = nir_channel(b, dst, c);
 
       if (nir_blend_factored(chan.func)) {
          psrc = nir_blend_factor(
@@ -540,12 +540,12 @@ nir_lower_blend_instr(nir_builder *b, nir_instr *instr, void *data)
    /* Grab the input color.  We always want 4 channels during blend.  Dead
     * code will clean up any channels we don't need.
     */
-   nir_ssa_def *src = nir_pad_vector(b, store->src[0].ssa, 4);
+   nir_def *src = nir_pad_vector(b, store->src[0].ssa, 4);
 
    assert(nir_src_as_uint(store->src[1]) == 0 && "store_output invariant");
 
    /* Grab the previous fragment color if we need it */
-   nir_ssa_def *dst;
+   nir_def *dst;
 
    if (channel_uses_dest(options->rt[rt].rgb) ||
        channel_uses_dest(options->rt[rt].alpha) ||
@@ -562,7 +562,7 @@ nir_lower_blend_instr(nir_builder *b, nir_instr *instr, void *data)
                             .dest_type = nir_intrinsic_src_type(store),
                             .io_semantics = sem);
    } else {
-      dst = nir_ssa_undef(b, 4, nir_src_bit_size(store->src[0]));
+      dst = nir_undef(b, 4, nir_src_bit_size(store->src[0]));
    }
 
    /* Blend the two colors per the passed options. We only call nir_blend if
@@ -571,7 +571,7 @@ nir_lower_blend_instr(nir_builder *b, nir_instr *instr, void *data)
     * case where blending is disabled at an API level, but the driver calls
     * nir_blend (possibly for color masking).
     */
-   nir_ssa_def *blended = src;
+   nir_def *blended = src;
 
    if (options->logicop_enable) {
       blended = nir_blend_logicop(b, options, rt, src, dst);
@@ -613,7 +613,7 @@ nir_lower_blend_instr(nir_builder *b, nir_instr *instr, void *data)
 static bool
 consume_dual_stores(nir_builder *b, nir_instr *instr, void *data)
 {
-   nir_ssa_def **outputs = data;
+   nir_def **outputs = data;
    if (instr->type != nir_instr_type_intrinsic)
       return false;
 
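
The nir_color_mask helper above "blends" per channel by selection: bit i of the
colormask keeps the newly written source channel, a clear bit preserves the
destination. As a rough scalar model in plain C (illustrative only, names
hypothetical, not part of this change):

   /* Bit i of mask selects the new source channel; a clear bit keeps
    * whatever the destination already holds. */
   static void
   color_mask(unsigned mask, const float src[4], const float dst[4],
              float out[4])
   {
      for (unsigned i = 0; i < 4; i++)
         out[i] = (mask & (1u << i)) ? src[i] : dst[i];
   }
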
index ef47200..aebf1c8 100644
 #include "nir_builder.h"
 
 static bool
-assert_ssa_def_is_not_1bit(nir_ssa_def *def, UNUSED void *unused)
+assert_ssa_def_is_not_1bit(nir_def *def, UNUSED void *unused)
 {
    assert(def->bit_size > 1);
    return true;
 }
 
 static bool
-rewrite_1bit_ssa_def_to_32bit(nir_ssa_def *def, void *_progress)
+rewrite_1bit_ssa_def_to_32bit(nir_def *def, void *_progress)
 {
    bool *progress = _progress;
    if (def->bit_size == 1) {
@@ -69,7 +69,7 @@ make_sources_canonical(nir_builder *b, nir_alu_instr *alu, uint32_t start_idx)
       if (nir_src_bit_size(alu->src[i].src) != bit_size) {
          b->cursor = nir_before_instr(&alu->instr);
          nir_op convert_op = get_bool_convert_opcode(bit_size);
-         nir_ssa_def *new_src =
+         nir_def *new_src =
             nir_build_alu(b, convert_op, alu->src[i].src.ssa, NULL, NULL, NULL);
          /* Retain the write mask and swizzle of the original instruction so
           * that we don’t unnecessarily create a vectorized instruction.
@@ -356,7 +356,7 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi)
       } else if (src_bit_size != dst_bit_size) {
          b->cursor = nir_before_src(&phi_src->src);
          nir_op convert_op = get_bool_convert_opcode(dst_bit_size);
-         nir_ssa_def *new_src =
+         nir_def *new_src =
             nir_build_alu(b, convert_op, phi_src->src.ssa, NULL, NULL, NULL);
          nir_instr_rewrite_src(&phi->instr, &phi_src->src,
                                nir_src_for_ssa(new_src));
index 2ca9ddb..caabd73 100644
 #include "nir_builder.h"
 
 static bool
-assert_ssa_def_is_not_1bit(nir_ssa_def *def, UNUSED void *unused)
+assert_ssa_def_is_not_1bit(nir_def *def, UNUSED void *unused)
 {
    assert(def->bit_size > 1);
    return true;
 }
 
 static bool
-rewrite_1bit_ssa_def_to_32bit(nir_ssa_def *def, void *_progress)
+rewrite_1bit_ssa_def_to_32bit(nir_def *def, void *_progress)
 {
    bool *progress = _progress;
    if (def->bit_size == 1) {
@@ -51,7 +51,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu, bool has_fcsel_ne,
    b->cursor = nir_before_instr(&alu->instr);
 
    /* Replacement SSA value */
-   nir_ssa_def *rep = NULL;
+   nir_def *rep = NULL;
    switch (alu->op) {
    case nir_op_mov:
    case nir_op_vec2:
@@ -184,7 +184,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu, bool has_fcsel_ne,
 
    if (rep) {
       /* We've emitted a replacement instruction */
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, rep);
+      nir_def_rewrite_uses(&alu->dest.dest.ssa, rep);
       nir_instr_remove(&alu->instr);
    } else {
       if (alu->dest.dest.ssa.bit_size == 1)
index 2f719f1..a9581f8 100644
 #include "nir_builder.h"
 
 static bool
-assert_ssa_def_is_not_1bit(nir_ssa_def *def, UNUSED void *unused)
+assert_ssa_def_is_not_1bit(nir_def *def, UNUSED void *unused)
 {
    assert(def->bit_size > 1);
    return true;
 }
 
 static bool
-rewrite_1bit_ssa_def_to_32bit(nir_ssa_def *def, void *_progress)
+rewrite_1bit_ssa_def_to_32bit(nir_def *def, void *_progress)
 {
    bool *progress = _progress;
    if (def->bit_size == 1) {
index db6ee0d..14e953e 100644
@@ -186,10 +186,10 @@ nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_samp
                break;
 
             b.cursor = nir_instr_remove(&deref->instr);
-            nir_ssa_def *loc =
+            nir_def *loc =
                nir_imm_intN_t(&b, deref->var->data.driver_location,
                               deref->dest.ssa.bit_size);
-            nir_ssa_def_rewrite_uses(&deref->dest.ssa, loc);
+            nir_def_rewrite_uses(&deref->dest.ssa, loc);
             progress = true;
             break;
          }
@@ -217,7 +217,7 @@ nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_samp
                   } else {
                      b.cursor = nir_before_instr(&tex->instr);
                      /* Back-ends expect a 32-bit thing, not 64-bit */
-                     nir_ssa_def *offset = nir_u2u32(&b, tex->src[i].src.ssa);
+                     nir_def *offset = nir_u2u32(&b, tex->src[i].src.ssa);
                      if (tex->src[i].src_type == nir_tex_src_texture_deref)
                         tex->src[count].src_type = nir_tex_src_texture_offset;
                      else
@@ -255,7 +255,7 @@ nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_samp
 
                b.cursor = nir_before_instr(&intrin->instr);
                /* Back-ends expect a 32-bit thing, not 64-bit */
-               nir_ssa_def *offset = nir_u2u32(&b, intrin->src[0].ssa);
+               nir_def *offset = nir_u2u32(&b, intrin->src[0].ssa);
                nir_rewrite_image_intrinsic(intrin, offset, false);
                progress = true;
                break;
index 71a0853..2ded057 100644
@@ -53,7 +53,7 @@ static bool
 lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr, nir_shader *shader)
 {
    nir_variable *out = NULL;
-   nir_ssa_def *s;
+   nir_def *s;
 
    switch (intr->intrinsic) {
    case nir_intrinsic_store_deref:
index e10440d..7f73d35 100644
@@ -94,7 +94,7 @@ create_clipdist_vars(nir_shader *shader, nir_variable **io_vars,
 
 static void
 store_clipdist_output(nir_builder *b, nir_variable *out, int location_offset,
-                      nir_ssa_def **val)
+                      nir_def **val)
 {
    nir_io_semantics semantics = {
       .location = out->data.location,
@@ -110,17 +110,17 @@ store_clipdist_output(nir_builder *b, nir_variable *out, int location_offset,
 
 static void
 load_clipdist_input(nir_builder *b, nir_variable *in, int location_offset,
-                    nir_ssa_def **val)
+                    nir_def **val)
 {
    nir_io_semantics semantics = {
       .location = in->data.location,
       .num_slots = 1,
    };
 
-   nir_ssa_def *load;
+   nir_def *load;
    if (b->shader->options->use_interpolated_input_intrinsics) {
       /* TODO: use sample when per-sample shading? */
-      nir_ssa_def *barycentric = nir_load_barycentric(
+      nir_def *barycentric = nir_load_barycentric(
          b, nir_intrinsic_load_barycentric_pixel, INTERP_MODE_NONE);
       load = nir_load_interpolated_input(
          b, 4, 32, barycentric, nir_imm_int(b, location_offset),
@@ -141,7 +141,7 @@ load_clipdist_input(nir_builder *b, nir_variable *in, int location_offset,
    val[3] = nir_channel(b, load, 3);
 }
 
-static nir_ssa_def *
+static nir_def *
 find_output_in_block(nir_block *block, unsigned drvloc)
 {
    nir_foreach_instr(instr, block) {
@@ -163,13 +163,13 @@ find_output_in_block(nir_block *block, unsigned drvloc)
  * NOTE: assumes each output is written exactly once (and unconditionally)
  * so if needed nir_lower_outputs_to_temporaries()
  */
-static nir_ssa_def *
+static nir_def *
 find_output(nir_shader *shader, unsigned drvloc)
 {
-   nir_ssa_def *def = NULL;
+   nir_def *def = NULL;
    nir_foreach_function_impl(impl, shader) {
       nir_foreach_block_reverse(block, impl) {
-         nir_ssa_def *new_def = find_output_in_block(block, drvloc);
+         nir_def *new_def = find_output_in_block(block, drvloc);
          assert(!(new_def && def));
          def = new_def;
 #if !defined(DEBUG)
@@ -216,7 +216,7 @@ find_clipvertex_and_position_outputs(nir_shader *shader,
    return *clipvertex || *position;
 }
 
-static nir_ssa_def *
+static nir_def *
 get_ucp(nir_builder *b, int plane,
         const gl_state_index16 clipplane_state_tokens[][STATE_LENGTH])
 {
@@ -238,8 +238,8 @@ lower_clip_outputs(nir_builder *b, nir_variable *position,
                    bool use_clipdist_array,
                    const gl_state_index16 clipplane_state_tokens[][STATE_LENGTH])
 {
-   nir_ssa_def *clipdist[MAX_CLIP_PLANES];
-   nir_ssa_def *cv;
+   nir_def *clipdist[MAX_CLIP_PLANES];
+   nir_def *cv;
 
    if (use_vars) {
       cv = nir_load_var(b, clipvertex ? clipvertex : position);
@@ -259,7 +259,7 @@ lower_clip_outputs(nir_builder *b, nir_variable *position,
 
    for (int plane = 0; plane < MAX_CLIP_PLANES; plane++) {
       if (ucp_enables & (1 << plane)) {
-         nir_ssa_def *ucp = get_ucp(b, plane, clipplane_state_tokens);
+         nir_def *ucp = get_ucp(b, plane, clipplane_state_tokens);
 
          /* calculate clipdist[plane] - dot(ucp, cv): */
          clipdist[plane] = nir_fdot(b, ucp, cv);
@@ -424,7 +424,7 @@ static void
 lower_clip_fs(nir_function_impl *impl, unsigned ucp_enables,
               nir_variable **in, bool use_clipdist_array)
 {
-   nir_ssa_def *clipdist[MAX_CLIP_PLANES];
+   nir_def *clipdist[MAX_CLIP_PLANES];
    nir_builder b = nir_builder_at(nir_before_cf_list(&impl->body));
 
    if (!use_clipdist_array) {
@@ -439,11 +439,11 @@ lower_clip_fs(nir_function_impl *impl, unsigned ucp_enables,
          load_clipdist_input(&b, in[0], 1, &clipdist[4]);
    }
 
-   nir_ssa_def *cond = NULL;
+   nir_def *cond = NULL;
 
    for (int plane = 0; plane < MAX_CLIP_PLANES; plane++) {
       if (ucp_enables & (1 << plane)) {
-         nir_ssa_def *this_cond =
+         nir_def *this_cond =
             nir_flt_imm(&b, clipdist[plane], 0.0);
 
          cond = cond ? nir_ior(&b, cond, this_cond) : this_cond;
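
In the hunks above, each enabled user clip plane contributes
clipdist[plane] = dot(ucp, cv), and the fragment path discards when any
enabled distance is negative. A hedged host-side sketch of that per-plane
test (plain C, not NIR):

   /* Signed distance of a clip-space point cv to a user clip plane ucp;
    * a negative value on any enabled plane culls the fragment. */
   static int
   point_is_clipped(const float ucp[4], const float cv[4])
   {
      float d = ucp[0] * cv[0] + ucp[1] * cv[1] +
                ucp[2] * cv[2] + ucp[3] * cv[3];
      return d < 0.0f;
   }
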
index 7e5cba6..4a8ddbd 100644
@@ -37,7 +37,7 @@
  * then overwrite it if that plane isn't enabled
  */
 static void
-recursive_if_chain(nir_builder *b, nir_deref_instr *deref, nir_ssa_def *value, unsigned clip_plane_enable, nir_ssa_def *index, unsigned start, unsigned end)
+recursive_if_chain(nir_builder *b, nir_deref_instr *deref, nir_def *value, unsigned clip_plane_enable, nir_def *index, unsigned start, unsigned end)
 {
    if (start == end - 1) {
       /* store the original value again if the clip plane is enabled */
@@ -86,7 +86,7 @@ lower_clip_plane_store(nir_builder *b, nir_instr *instr_, void *cb_data)
    if (deref->deref_type == nir_deref_type_var) {
       int wrmask = nir_intrinsic_write_mask(instr);
 
-      nir_ssa_def *components[4];
+      nir_def *components[4];
       int start = out->data.location == VARYING_SLOT_CLIP_DIST1 ? 4 : 0;
       /* rewrite components as zeroes for planes that aren't enabled */
       for (int i = 0; i < 4; i++) {
@@ -96,7 +96,7 @@ lower_clip_plane_store(nir_builder *b, nir_instr *instr_, void *cb_data)
             else
                components[i] = nir_channel(b, nir_ssa_for_src(b, instr->src[1], nir_src_num_components(instr->src[1])), i);
          } else
-            components[i] = nir_ssa_undef(b, 1, 32);
+            components[i] = nir_undef(b, 1, 32);
       }
       nir_store_deref(b, deref, nir_vec(b, components, instr->num_components), wrmask);
    } else if (nir_src_is_const(deref->arr.index)) {
@@ -110,7 +110,7 @@ lower_clip_plane_store(nir_builder *b, nir_instr *instr_, void *cb_data)
       nir_store_deref(b, deref, nir_imm_int(b, 0), 1);
    } else {
       /* storing using a variable index */
-      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
       unsigned length = glsl_get_length(nir_deref_instr_parent(deref)->type);
 
       recursive_if_chain(b, deref, instr->src[1].ssa, clip_plane_enable, index, 0, length);
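
recursive_if_chain above lowers a store through a non-constant array index by
emitting nested if/else comparisons on the index. Assuming the usual
binary-split strategy (the function body is elided from this hunk), its
control-flow shape corresponds to this sketch:

   /* Halve [start, end) at each level and recurse, so a variable index
    * costs O(log n) comparisons; the start == end - 1 leaf does the store. */
   static void
   if_chain(unsigned index, unsigned start, unsigned end)
   {
      if (start == end - 1) {
         /* handle element 'start' here */
      } else {
         unsigned mid = start + (end - start) / 2;
         if (index < mid)
            if_chain(index, start, mid);
         else
            if_chain(index, mid, end);
      }
   }
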
index 5418d96..8141b99 100644
@@ -40,16 +40,16 @@ lower_pos_write(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
-   nir_ssa_def *def = nir_vec4(b,
-                               nir_channel(b, pos, 0),
-                               nir_channel(b, pos, 1),
-                               nir_fmul_imm(b,
-                                            nir_fadd(b,
-                                                     nir_channel(b, pos, 2),
-                                                     nir_channel(b, pos, 3)),
-                                            0.5),
-                               nir_channel(b, pos, 3));
+   nir_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
+   nir_def *def = nir_vec4(b,
+                           nir_channel(b, pos, 0),
+                           nir_channel(b, pos, 1),
+                           nir_fmul_imm(b,
+                                        nir_fadd(b,
+                                                 nir_channel(b, pos, 2),
+                                                 nir_channel(b, pos, 3)),
+                                        0.5),
+                           nir_channel(b, pos, 3));
    nir_instr_rewrite_src(&intr->instr, intr->src + 1, nir_src_for_ssa(def));
    return true;
 }
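
The rewrite above replaces the position's z with (z + w) * 0.5, remapping GL's
[-w, w] clip volume onto the [0, w] half-z convention: z = -w lands at 0 and
z = w stays at w, so z/w in [-1, 1] becomes z'/w in [0, 1]. As a one-line
numeric sketch:

   /* Half-z remap used by the pass: GL-style depth to Vulkan/D3D-style. */
   static float
   halfz_remap(float z, float w)
   {
      return (z + w) * 0.5f;
   }
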
index ba113ef..b80ba5c 100644
@@ -400,9 +400,9 @@ nir_lower_const_arrays_to_uniforms(nir_shader *shader,
          }
          nir_deref_path_finish(&path);
 
-         nir_ssa_def *new_def = nir_load_deref(&b, new_deref_instr);
+         nir_def *new_def = nir_load_deref(&b, new_deref_instr);
 
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, new_def);
+         nir_def_rewrite_uses(&intrin->dest.ssa, new_def);
          nir_instr_remove(&intrin->instr);
       }
    }
index b895370..52ba0d3 100644
@@ -56,13 +56,13 @@ lower_convert_alu_types_instr(nir_builder *b, nir_intrinsic_instr *conv)
    assert(conv->intrinsic == nir_intrinsic_convert_alu_types);
 
    b->cursor = nir_instr_remove(&conv->instr);
-   nir_ssa_def *val =
+   nir_def *val =
       nir_convert_with_rounding(b, conv->src[0].ssa,
                                 nir_intrinsic_src_type(conv),
                                 nir_intrinsic_dest_type(conv),
                                 nir_intrinsic_rounding_mode(conv),
                                 nir_intrinsic_saturate(conv));
-   nir_ssa_def_rewrite_uses(&conv->dest.ssa, val);
+   nir_def_rewrite_uses(&conv->dest.ssa, val);
 }
 
 static bool
@@ -185,11 +185,11 @@ is_alu_conversion(const nir_instr *instr, UNUSED const void *_data)
           nir_op_infos[nir_instr_as_alu(instr)->op].is_conversion;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_alu_conversion(nir_builder *b, nir_instr *instr, UNUSED void *_data)
 {
    nir_alu_instr *alu = nir_instr_as_alu(instr);
-   nir_ssa_def *src = nir_ssa_for_alu_src(b, alu, 0);
+   nir_def *src = nir_ssa_for_alu_src(b, alu, 0);
    nir_alu_type src_type = nir_op_infos[alu->op].input_types[0] | src->bit_size;
    nir_alu_type dst_type = nir_op_infos[alu->op].output_type;
    return nir_convert_alu_types(b, alu->dest.dest.ssa.bit_size, src,
index 5276c60..4553134 100644
@@ -66,8 +66,8 @@ nir_lower_demote_to_discard_instr(nir_builder *b, nir_instr *instr, void *data)
       /* If the shader doesn't need helper invocations,
        * we can assume there are none */
       b->cursor = nir_before_instr(instr);
-      nir_ssa_def *zero = nir_imm_false(b);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, zero);
+      nir_def *zero = nir_imm_false(b);
+      nir_def_rewrite_uses(&intrin->dest.ssa, zero);
       nir_instr_remove_v(instr);
       return true;
    }
@@ -76,7 +76,7 @@ nir_lower_demote_to_discard_instr(nir_builder *b, nir_instr *instr, void *data)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 insert_is_helper(nir_builder *b, nir_instr *instr)
 {
    /* find best place to insert is_helper */
@@ -99,14 +99,14 @@ nir_lower_load_helper_to_is_helper(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-   nir_ssa_def *is_helper = *(nir_ssa_def **)data;
+   nir_def *is_helper = *(nir_def **)data;
    switch (intrin->intrinsic) {
    case nir_intrinsic_demote:
    case nir_intrinsic_demote_if:
       /* insert is_helper at the last top-level opportunity */
       if (is_helper == NULL) {
          is_helper = insert_is_helper(b, instr);
-         *(nir_ssa_def **)data = is_helper;
+         *(nir_def **)data = is_helper;
          return true;
       } else {
          return false;
@@ -117,7 +117,7 @@ nir_lower_load_helper_to_is_helper(nir_builder *b, nir_instr *instr, void *data)
        * top-level blocks to ensure correct behavior w.r.t. loops */
       if (is_helper == NULL)
          is_helper = insert_is_helper(b, instr);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, is_helper);
+      nir_def_rewrite_uses(&intrin->dest.ssa, is_helper);
       nir_instr_remove_v(instr);
       return true;
    default:
@@ -184,7 +184,7 @@ nir_lower_discard_or_demote(nir_shader *shader,
                           nir_system_value_from_intrinsic(nir_intrinsic_load_helper_invocation))) {
       /* load_helper needs to preserve the value (whether an invocation is
        * a helper lane) from the beginning of the shader. */
-      nir_ssa_def *is_helper = NULL;
+      nir_def *is_helper = NULL;
       progress = nir_shader_instructions_pass(shader,
                                               nir_lower_load_helper_to_is_helper,
                                               nir_metadata_block_index |
index 5545765..1fcc721 100644
  */
 
 /* Creates a double with the exponent bits set to a given integer value */
-static nir_ssa_def *
-set_exponent(nir_builder *b, nir_ssa_def *src, nir_ssa_def *exp)
+static nir_def *
+set_exponent(nir_builder *b, nir_def *src, nir_def *exp)
 {
    /* Split into bits 0-31 and 32-63 */
-   nir_ssa_def *lo = nir_unpack_64_2x32_split_x(b, src);
-   nir_ssa_def *hi = nir_unpack_64_2x32_split_y(b, src);
+   nir_def *lo = nir_unpack_64_2x32_split_x(b, src);
+   nir_def *hi = nir_unpack_64_2x32_split_y(b, src);
 
    /* The exponent is bits 52-62, or 20-30 of the high word, so set the exponent
     * to 1023
     */
-   nir_ssa_def *new_hi = nir_bitfield_insert(b, hi, exp,
-                                             nir_imm_int(b, 20),
-                                             nir_imm_int(b, 11));
+   nir_def *new_hi = nir_bitfield_insert(b, hi, exp,
+                                         nir_imm_int(b, 20),
+                                         nir_imm_int(b, 11));
    /* recombine */
    return nir_pack_64_2x32_split(b, lo, new_hi);
 }
 
-static nir_ssa_def *
-get_exponent(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+get_exponent(nir_builder *b, nir_def *src)
 {
    /* get bits 32-63 */
-   nir_ssa_def *hi = nir_unpack_64_2x32_split_y(b, src);
+   nir_def *hi = nir_unpack_64_2x32_split_y(b, src);
 
    /* extract bits 20-30 of the high word */
    return nir_ubitfield_extract(b, hi, nir_imm_int(b, 20), nir_imm_int(b, 11));
@@ -68,10 +68,10 @@ get_exponent(nir_builder *b, nir_ssa_def *src)
 
 /* Return infinity with the sign of the given source which is +/-0 */
 
-static nir_ssa_def *
-get_signed_inf(nir_builder *b, nir_ssa_def *zero)
+static nir_def *
+get_signed_inf(nir_builder *b, nir_def *zero)
 {
-   nir_ssa_def *zero_hi = nir_unpack_64_2x32_split_y(b, zero);
+   nir_def *zero_hi = nir_unpack_64_2x32_split_y(b, zero);
 
    /* The bit pattern for infinity is 0x7ff0000000000000, where the sign bit
     * is the highest bit. Only the sign bit can be non-zero in the passed in
@@ -79,7 +79,7 @@ get_signed_inf(nir_builder *b, nir_ssa_def *zero)
     * the low 32 bits are always 0 so we can construct the correct high 32
     * bits and then pack it together with zero low 32 bits.
     */
-   nir_ssa_def *inf_hi = nir_ior_imm(b, zero_hi, 0x7ff00000);
+   nir_def *inf_hi = nir_ior_imm(b, zero_hi, 0x7ff00000);
    return nir_pack_64_2x32_split(b, nir_imm_int(b, 0), inf_hi);
 }
 
@@ -89,9 +89,9 @@ get_signed_inf(nir_builder *b, nir_ssa_def *zero)
  * too small to be representable.
  */
 
-static nir_ssa_def *
-fix_inv_result(nir_builder *b, nir_ssa_def *res, nir_ssa_def *src,
-               nir_ssa_def *exp)
+static nir_def *
+fix_inv_result(nir_builder *b, nir_def *res, nir_def *src,
+               nir_def *exp)
 {
    /* If the exponent is too small or the original input was infinity/NaN,
     * force the result to 0 (flush denorms) to avoid the work of handling
@@ -108,23 +108,23 @@ fix_inv_result(nir_builder *b, nir_ssa_def *res, nir_ssa_def *src,
    return res;
 }
 
-static nir_ssa_def *
-lower_rcp(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_rcp(nir_builder *b, nir_def *src)
 {
    /* normalize the input to avoid range issues */
-   nir_ssa_def *src_norm = set_exponent(b, src, nir_imm_int(b, 1023));
+   nir_def *src_norm = set_exponent(b, src, nir_imm_int(b, 1023));
 
    /* cast to float, do an rcp, and then cast back to get an approximate
     * result
     */
-   nir_ssa_def *ra = nir_f2f64(b, nir_frcp(b, nir_f2f32(b, src_norm)));
+   nir_def *ra = nir_f2f64(b, nir_frcp(b, nir_f2f32(b, src_norm)));
 
    /* Fixup the exponent of the result - note that we check if this is too
     * small below.
     */
-   nir_ssa_def *new_exp = nir_isub(b, get_exponent(b, ra),
-                                   nir_iadd_imm(b, get_exponent(b, src),
-                                                -1023));
+   nir_def *new_exp = nir_isub(b, get_exponent(b, ra),
+                               nir_iadd_imm(b, get_exponent(b, src),
+                                            -1023));
 
    ra = set_exponent(b, ra, new_exp);
 
@@ -149,8 +149,8 @@ lower_rcp(nir_builder *b, nir_ssa_def *src)
    return fix_inv_result(b, ra, src, new_exp);
 }
 
-static nir_ssa_def *
-lower_sqrt_rsq(nir_builder *b, nir_ssa_def *src, bool sqrt)
+static nir_def *
+lower_sqrt_rsq(nir_builder *b, nir_def *src, bool sqrt)
 {
    /* We want to compute:
     *
@@ -172,16 +172,16 @@ lower_sqrt_rsq(nir_builder *b, nir_ssa_def *src, bool sqrt)
     * shifting right by 1.
     */
 
-   nir_ssa_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
-                                            -1023);
-   nir_ssa_def *even = nir_iand_imm(b, unbiased_exp, 1);
-   nir_ssa_def *half = nir_ishr_imm(b, unbiased_exp, 1);
+   nir_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
+                                        -1023);
+   nir_def *even = nir_iand_imm(b, unbiased_exp, 1);
+   nir_def *half = nir_ishr_imm(b, unbiased_exp, 1);
 
-   nir_ssa_def *src_norm = set_exponent(b, src,
-                                        nir_iadd_imm(b, even, 1023));
+   nir_def *src_norm = set_exponent(b, src,
+                                    nir_iadd_imm(b, even, 1023));
 
-   nir_ssa_def *ra = nir_f2f64(b, nir_frsq(b, nir_f2f32(b, src_norm)));
-   nir_ssa_def *new_exp = nir_isub(b, get_exponent(b, ra), half);
+   nir_def *ra = nir_f2f64(b, nir_frsq(b, nir_f2f32(b, src_norm)));
+   nir_def *new_exp = nir_isub(b, get_exponent(b, ra), half);
    ra = set_exponent(b, ra, new_exp);
 
    /*
@@ -267,20 +267,20 @@ lower_sqrt_rsq(nir_builder *b, nir_ssa_def *src, bool sqrt)
     * (https://en.wikipedia.org/wiki/Methods_of_computing_square_roots).
     */
 
-   nir_ssa_def *one_half = nir_imm_double(b, 0.5);
-   nir_ssa_def *h_0 = nir_fmul(b, one_half, ra);
-   nir_ssa_def *g_0 = nir_fmul(b, src, ra);
-   nir_ssa_def *r_0 = nir_ffma(b, nir_fneg(b, h_0), g_0, one_half);
-   nir_ssa_def *h_1 = nir_ffma(b, h_0, r_0, h_0);
-   nir_ssa_def *res;
+   nir_def *one_half = nir_imm_double(b, 0.5);
+   nir_def *h_0 = nir_fmul(b, one_half, ra);
+   nir_def *g_0 = nir_fmul(b, src, ra);
+   nir_def *r_0 = nir_ffma(b, nir_fneg(b, h_0), g_0, one_half);
+   nir_def *h_1 = nir_ffma(b, h_0, r_0, h_0);
+   nir_def *res;
    if (sqrt) {
-      nir_ssa_def *g_1 = nir_ffma(b, g_0, r_0, g_0);
-      nir_ssa_def *r_1 = nir_ffma(b, nir_fneg(b, g_1), g_1, src);
+      nir_def *g_1 = nir_ffma(b, g_0, r_0, g_0);
+      nir_def *r_1 = nir_ffma(b, nir_fneg(b, g_1), g_1, src);
       res = nir_ffma(b, h_1, r_1, g_1);
    } else {
-      nir_ssa_def *y_1 = nir_fmul_imm(b, h_1, 2.0);
-      nir_ssa_def *r_1 = nir_ffma(b, nir_fneg(b, y_1), nir_fmul(b, h_1, src),
-                                  one_half);
+      nir_def *y_1 = nir_fmul_imm(b, h_1, 2.0);
+      nir_def *r_1 = nir_ffma(b, nir_fneg(b, y_1), nir_fmul(b, h_1, src),
+                              one_half);
       res = nir_ffma(b, y_1, r_1, y_1);
    }
 
@@ -292,7 +292,7 @@ lower_sqrt_rsq(nir_builder *b, nir_ssa_def *src, bool sqrt)
       const bool preserve_denorms =
          b->shader->info.float_controls_execution_mode &
          FLOAT_CONTROLS_DENORM_PRESERVE_FP64;
-      nir_ssa_def *src_flushed = src;
+      nir_def *src_flushed = src;
       if (!preserve_denorms) {
          src_flushed = nir_bcsel(b,
                                  nir_flt_imm(b, nir_fabs(b, src), DBL_MIN),
@@ -308,13 +308,13 @@ lower_sqrt_rsq(nir_builder *b, nir_ssa_def *src, bool sqrt)
    return res;
 }
 
-static nir_ssa_def *
-lower_trunc(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_trunc(nir_builder *b, nir_def *src)
 {
-   nir_ssa_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
-                                            -1023);
+   nir_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
+                                        -1023);
 
-   nir_ssa_def *frac_bits = nir_isub_imm(b, 52, unbiased_exp);
+   nir_def *frac_bits = nir_isub_imm(b, 52, unbiased_exp);
 
    /*
     * Decide the operation to apply depending on the unbiased exponent:
@@ -332,13 +332,13 @@ lower_trunc(nir_builder *b, nir_ssa_def *src)
     */
 
    /* Compute "~0 << frac_bits" in terms of hi/lo 32-bit integer math */
-   nir_ssa_def *mask_lo =
+   nir_def *mask_lo =
       nir_bcsel(b,
                 nir_ige_imm(b, frac_bits, 32),
                 nir_imm_int(b, 0),
                 nir_ishl(b, nir_imm_int(b, ~0), frac_bits));
 
-   nir_ssa_def *mask_hi =
+   nir_def *mask_hi =
       nir_bcsel(b,
                 nir_ilt_imm(b, frac_bits, 33),
                 nir_imm_int(b, ~0),
@@ -346,8 +346,8 @@ lower_trunc(nir_builder *b, nir_ssa_def *src)
                          nir_imm_int(b, ~0),
                          nir_iadd_imm(b, frac_bits, -32)));
 
-   nir_ssa_def *src_lo = nir_unpack_64_2x32_split_x(b, src);
-   nir_ssa_def *src_hi = nir_unpack_64_2x32_split_y(b, src);
+   nir_def *src_lo = nir_unpack_64_2x32_split_x(b, src);
+   nir_def *src_hi = nir_unpack_64_2x32_split_y(b, src);
 
    return nir_bcsel(b,
                     nir_ilt_imm(b, unbiased_exp, 0),
@@ -359,8 +359,8 @@ lower_trunc(nir_builder *b, nir_ssa_def *src)
                                                      nir_iand(b, mask_hi, src_hi))));
 }
 
-static nir_ssa_def *
-lower_floor(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_floor(nir_builder *b, nir_def *src)
 {
    /*
     * For x >= 0, floor(x) = trunc(x)
@@ -368,45 +368,45 @@ lower_floor(nir_builder *b, nir_ssa_def *src)
     *    - if x is integer, floor(x) = x
     *    - otherwise, floor(x) = trunc(x) - 1
     */
-   nir_ssa_def *tr = nir_ftrunc(b, src);
-   nir_ssa_def *positive = nir_fge_imm(b, src, 0.0);
+   nir_def *tr = nir_ftrunc(b, src);
+   nir_def *positive = nir_fge_imm(b, src, 0.0);
    return nir_bcsel(b,
                     nir_ior(b, positive, nir_feq(b, src, tr)),
                     tr,
                     nir_fadd_imm(b, tr, -1.0));
 }
 
-static nir_ssa_def *
-lower_ceil(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_ceil(nir_builder *b, nir_def *src)
 {
    /* if x < 0,                    ceil(x) = trunc(x)
     * else if (x - trunc(x) == 0), ceil(x) = x
     * else,                        ceil(x) = trunc(x) + 1
     */
-   nir_ssa_def *tr = nir_ftrunc(b, src);
-   nir_ssa_def *negative = nir_flt_imm(b, src, 0.0);
+   nir_def *tr = nir_ftrunc(b, src);
+   nir_def *negative = nir_flt_imm(b, src, 0.0);
    return nir_bcsel(b,
                     nir_ior(b, negative, nir_feq(b, src, tr)),
                     tr,
                     nir_fadd_imm(b, tr, 1.0));
 }
 
-static nir_ssa_def *
-lower_fract(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_fract(nir_builder *b, nir_def *src)
 {
    return nir_fsub(b, src, nir_ffloor(b, src));
 }
 
-static nir_ssa_def *
-lower_round_even(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_round_even(nir_builder *b, nir_def *src)
 {
    /* Add and subtract 2**52 to round off any fractional bits. */
-   nir_ssa_def *two52 = nir_imm_double(b, (double)(1ull << 52));
-   nir_ssa_def *sign = nir_iand_imm(b, nir_unpack_64_2x32_split_y(b, src),
-                                    1ull << 31);
+   nir_def *two52 = nir_imm_double(b, (double)(1ull << 52));
+   nir_def *sign = nir_iand_imm(b, nir_unpack_64_2x32_split_y(b, src),
+                                1ull << 31);
 
    b->exact = true;
-   nir_ssa_def *res = nir_fsub(b, nir_fadd(b, nir_fabs(b, src), two52), two52);
+   nir_def *res = nir_fsub(b, nir_fadd(b, nir_fabs(b, src), two52), two52);
    b->exact = false;
 
    return nir_bcsel(b, nir_flt(b, nir_fabs(b, src), two52),
@@ -415,8 +415,8 @@ lower_round_even(nir_builder *b, nir_ssa_def *src)
                     src);
 }
 
-static nir_ssa_def *
-lower_mod(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
+static nir_def *
+lower_mod(nir_builder *b, nir_def *src0, nir_def *src1)
 {
    /* mod(x,y) = x - y * floor(x/y)
     *
@@ -445,12 +445,12 @@ lower_mod(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
     * In summary, in practice mod(a,a) can be "a" both for OpenGL and
     * Vulkan.
     */
-   nir_ssa_def *floor = nir_ffloor(b, nir_fdiv(b, src0, src1));
+   nir_def *floor = nir_ffloor(b, nir_fdiv(b, src0, src1));
 
    return nir_fsub(b, src0, nir_fmul(b, src1, floor));
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr,
                             const nir_shader *softfp64,
                             nir_lower_doubles_options options)
@@ -612,7 +612,7 @@ lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr,
       assert(func);
    }
 
-   nir_ssa_def *params[4] = {
+   nir_def *params[4] = {
       NULL,
    };
 
@@ -695,14 +695,14 @@ should_lower_double_instr(const nir_instr *instr, const void *_data)
    return options & nir_lower_doubles_op_to_options_mask(alu->op);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_doubles_instr(nir_builder *b, nir_instr *instr, void *_data)
 {
    const struct lower_doubles_data *data = _data;
    const nir_lower_doubles_options options = data->options;
    nir_alu_instr *alu = nir_instr_as_alu(instr);
 
-   nir_ssa_def *soft_def =
+   nir_def *soft_def =
       lower_doubles_instr_to_soft(b, alu, data->softfp64, options);
    if (soft_def)
       return soft_def;
@@ -710,8 +710,8 @@ lower_doubles_instr(nir_builder *b, nir_instr *instr, void *_data)
    if (!(options & nir_lower_doubles_op_to_options_mask(alu->op)))
       return NULL;
 
-   nir_ssa_def *src = nir_mov_alu(b, alu->src[0],
-                                  alu->dest.dest.ssa.num_components);
+   nir_def *src = nir_mov_alu(b, alu->src[0],
+                              alu->dest.dest.ssa.num_components);
 
    switch (alu->op) {
    case nir_op_frcp:
@@ -734,8 +734,8 @@ lower_doubles_instr(nir_builder *b, nir_instr *instr, void *_data)
    case nir_op_fdiv:
    case nir_op_fsub:
    case nir_op_fmod: {
-      nir_ssa_def *src1 = nir_mov_alu(b, alu->src[1],
-                                      alu->dest.dest.ssa.num_components);
+      nir_def *src1 = nir_mov_alu(b, alu->src[1],
+                                  alu->dest.dest.ssa.num_components);
       switch (alu->op) {
       case nir_op_fdiv:
          return nir_fmul(b, src, nir_frcp(b, src1));
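
The rcp/rsq lowerings above work by reading and patching the biased exponent
field of a binary64 value: normalize the input's exponent, approximate in
32-bit float, then rebias the result. A host-side sketch of the two exponent
helpers, assuming IEEE-754 binary64 layout (11 exponent bits at positions
52-62; names hypothetical):

   #include <stdint.h>
   #include <string.h>

   static uint32_t
   get_exponent64(double x)
   {
      uint64_t bits;
      memcpy(&bits, &x, sizeof(bits));
      return (uint32_t)((bits >> 52) & 0x7ff);  /* biased exponent */
   }

   static double
   set_exponent64(double x, uint32_t exp)
   {
      uint64_t bits;
      memcpy(&bits, &x, sizeof(bits));
      bits = (bits & ~(0x7ffull << 52)) | ((uint64_t)(exp & 0x7ff) << 52);
      memcpy(&x, &bits, sizeof(x));
      return x;
   }

lower_rcp, for instance, normalizes the source to exponent 1023 (unbiased 0),
takes a 32-bit frcp, and then subtracts the source's unbiased exponent from
the result's.
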
index cabe2cb..18ac998 100644
@@ -37,7 +37,7 @@ typedef struct {
    nir_variable *texcoord, *texcoord_const, *scale, *bias, *tex, *pixelmap;
 } lower_drawpixels_state;
 
-static nir_ssa_def *
+static nir_def *
 get_texcoord(nir_builder *b, lower_drawpixels_state *state)
 {
    if (state->texcoord == NULL) {
@@ -47,7 +47,7 @@ get_texcoord(nir_builder *b, lower_drawpixels_state *state)
    return nir_load_var(b, state->texcoord);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_scale(nir_builder *b, lower_drawpixels_state *state)
 {
    if (state->scale == NULL) {
@@ -57,7 +57,7 @@ get_scale(nir_builder *b, lower_drawpixels_state *state)
    return nir_load_var(b, state->scale);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_bias(nir_builder *b, lower_drawpixels_state *state)
 {
    if (state->bias == NULL) {
@@ -67,7 +67,7 @@ get_bias(nir_builder *b, lower_drawpixels_state *state)
    return nir_load_var(b, state->bias);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_texcoord_const(nir_builder *b, lower_drawpixels_state *state)
 {
    if (state->texcoord_const == NULL) {
@@ -81,9 +81,9 @@ get_texcoord_const(nir_builder *b, lower_drawpixels_state *state)
 static bool
 lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *intr)
 {
-   nir_ssa_def *texcoord;
+   nir_def *texcoord;
    nir_tex_instr *tex;
-   nir_ssa_def *def;
+   nir_def *def;
 
    b->cursor = nir_before_instr(&intr->instr);
 
@@ -141,7 +141,7 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
          nir_build_deref_var(b, state->pixelmap);
 
       /* do four pixel map look-ups with two TEX instructions: */
-      nir_ssa_def *def_xy, *def_zw;
+      nir_def *def_xy, *def_zw;
 
       /* TEX def.xy, def.xyyy, pixelmap_sampler, 2D; */
       tex = nir_tex_instr_create(state->shader, 3);
@@ -184,7 +184,7 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
                      nir_channel(b, def_zw, 1));
    }
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
+   nir_def_rewrite_uses(&intr->dest.ssa, def);
    return true;
 }
 
@@ -193,8 +193,8 @@ lower_texcoord(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_inst
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *texcoord_const = get_texcoord_const(b, state);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, texcoord_const);
+   nir_def *texcoord_const = get_texcoord_const(b, state);
+   nir_def_rewrite_uses(&intr->dest.ssa, texcoord_const);
    return true;
 }
 
index 65c2af7..02be941 100644
@@ -58,9 +58,9 @@ nir_lower_fb_read_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *fragcoord = nir_load_frag_coord(b);
-   nir_ssa_def *sampid = nir_load_sample_id(b);
-   nir_ssa_def *layer = nir_load_layer_id(b);
+   nir_def *fragcoord = nir_load_frag_coord(b);
+   nir_def *sampid = nir_load_sample_id(b);
+   nir_def *layer = nir_load_layer_id(b);
    fragcoord = nir_f2i32(b, fragcoord);
 
    nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3);
@@ -79,7 +79,7 @@ nir_lower_fb_read_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
    nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
    nir_builder_instr_insert(b, &tex->instr);
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, &tex->dest.ssa);
+   nir_def_rewrite_uses(&intr->dest.ssa, &tex->dest.ssa);
 
    return true;
 }
index 9b84e4b..d081bf0 100644
@@ -47,20 +47,20 @@ static void
 replace_with_strict_ffma(struct nir_builder *bld, struct u_vector *dead_flrp,
                          struct nir_alu_instr *alu)
 {
-   nir_ssa_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
-   nir_ssa_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
-   nir_ssa_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
+   nir_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
+   nir_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
+   nir_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
 
-   nir_ssa_def *const neg_a = nir_fneg(bld, a);
+   nir_def *const neg_a = nir_fneg(bld, a);
    nir_instr_as_alu(neg_a->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const inner_ffma = nir_ffma(bld, neg_a, c, a);
+   nir_def *const inner_ffma = nir_ffma(bld, neg_a, c, a);
    nir_instr_as_alu(inner_ffma->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const outer_ffma = nir_ffma(bld, b, c, inner_ffma);
+   nir_def *const outer_ffma = nir_ffma(bld, b, c, inner_ffma);
    nir_instr_as_alu(outer_ffma->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, outer_ffma);
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, outer_ffma);
 
    /* DO NOT REMOVE the original flrp yet.  Many of the lowering choices are
     * based on other uses of the sources.  Removing the flrp may cause the
@@ -76,24 +76,24 @@ static void
 replace_with_single_ffma(struct nir_builder *bld, struct u_vector *dead_flrp,
                          struct nir_alu_instr *alu)
 {
-   nir_ssa_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
-   nir_ssa_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
-   nir_ssa_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
+   nir_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
+   nir_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
+   nir_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
 
-   nir_ssa_def *const neg_c = nir_fneg(bld, c);
+   nir_def *const neg_c = nir_fneg(bld, c);
    nir_instr_as_alu(neg_c->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const one_minus_c =
+   nir_def *const one_minus_c =
       nir_fadd(bld, nir_imm_floatN_t(bld, 1.0f, c->bit_size), neg_c);
    nir_instr_as_alu(one_minus_c->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const b_times_c = nir_fmul(bld, b, c);
+   nir_def *const b_times_c = nir_fmul(bld, b, c);
    nir_instr_as_alu(b_times_c->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const final_ffma = nir_ffma(bld, a, one_minus_c, b_times_c);
+   nir_def *const final_ffma = nir_ffma(bld, a, one_minus_c, b_times_c);
    nir_instr_as_alu(final_ffma->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, final_ffma);
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, final_ffma);
 
    /* DO NOT REMOVE the original flrp yet.  Many of the lowering choices are
     * based on other uses of the sources.  Removing the flrp may cause the
@@ -109,27 +109,27 @@ static void
 replace_with_strict(struct nir_builder *bld, struct u_vector *dead_flrp,
                     struct nir_alu_instr *alu)
 {
-   nir_ssa_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
-   nir_ssa_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
-   nir_ssa_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
+   nir_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
+   nir_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
+   nir_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
 
-   nir_ssa_def *const neg_c = nir_fneg(bld, c);
+   nir_def *const neg_c = nir_fneg(bld, c);
    nir_instr_as_alu(neg_c->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const one_minus_c =
+   nir_def *const one_minus_c =
       nir_fadd(bld, nir_imm_floatN_t(bld, 1.0f, c->bit_size), neg_c);
    nir_instr_as_alu(one_minus_c->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const first_product = nir_fmul(bld, a, one_minus_c);
+   nir_def *const first_product = nir_fmul(bld, a, one_minus_c);
    nir_instr_as_alu(first_product->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const second_product = nir_fmul(bld, b, c);
+   nir_def *const second_product = nir_fmul(bld, b, c);
    nir_instr_as_alu(second_product->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const sum = nir_fadd(bld, first_product, second_product);
+   nir_def *const sum = nir_fadd(bld, first_product, second_product);
    nir_instr_as_alu(sum->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, sum);
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, sum);
 
    /* DO NOT REMOVE the original flrp yet.  Many of the lowering choices are
     * based on other uses of the sources.  Removing the flrp may cause the
@@ -145,23 +145,23 @@ static void
 replace_with_fast(struct nir_builder *bld, struct u_vector *dead_flrp,
                   struct nir_alu_instr *alu)
 {
-   nir_ssa_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
-   nir_ssa_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
-   nir_ssa_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
+   nir_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
+   nir_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
+   nir_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
 
-   nir_ssa_def *const neg_a = nir_fneg(bld, a);
+   nir_def *const neg_a = nir_fneg(bld, a);
    nir_instr_as_alu(neg_a->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const b_minus_a = nir_fadd(bld, b, neg_a);
+   nir_def *const b_minus_a = nir_fadd(bld, b, neg_a);
    nir_instr_as_alu(b_minus_a->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const product = nir_fmul(bld, c, b_minus_a);
+   nir_def *const product = nir_fmul(bld, c, b_minus_a);
    nir_instr_as_alu(product->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const sum = nir_fadd(bld, a, product);
+   nir_def *const sum = nir_fadd(bld, a, product);
    nir_instr_as_alu(sum->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, sum);
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, sum);
 
    /* DO NOT REMOVE the original flrp yet.  Many of the lowering choices are
     * based on other uses of the sources.  Removing the flrp may cause the
@@ -180,17 +180,17 @@ replace_with_expanded_ffma_and_add(struct nir_builder *bld,
                                    struct u_vector *dead_flrp,
                                    struct nir_alu_instr *alu, bool subtract_c)
 {
-   nir_ssa_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
-   nir_ssa_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
-   nir_ssa_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
+   nir_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
+   nir_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
+   nir_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
 
-   nir_ssa_def *const b_times_c = nir_fmul(bld, b, c);
+   nir_def *const b_times_c = nir_fmul(bld, b, c);
    nir_instr_as_alu(b_times_c->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *inner_sum;
+   nir_def *inner_sum;
 
    if (subtract_c) {
-      nir_ssa_def *const neg_c = nir_fneg(bld, c);
+      nir_def *const neg_c = nir_fneg(bld, c);
       nir_instr_as_alu(neg_c->parent_instr)->exact = alu->exact;
 
       inner_sum = nir_fadd(bld, a, neg_c);
@@ -200,10 +200,10 @@ replace_with_expanded_ffma_and_add(struct nir_builder *bld,
 
    nir_instr_as_alu(inner_sum->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def *const outer_sum = nir_fadd(bld, inner_sum, b_times_c);
+   nir_def *const outer_sum = nir_fadd(bld, inner_sum, b_times_c);
    nir_instr_as_alu(outer_sum->parent_instr)->exact = alu->exact;
 
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, outer_sum);
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, outer_sum);
 
    /* DO NOT REMOVE the original flrp yet.  Many of the lowering choices are
     * based on other uses of the sources.  Removing the flrp may cause the
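
The strict-ffma replacement above rests on the identity
flrp(a, b, c) = a*(1 - c) + b*c = ffma(b, c, ffma(-a, c, a)). A quick numeric
check with C's fused multiply-add (values arbitrary, purely illustrative):

   #include <math.h>
   #include <stdio.h>

   int
   main(void)
   {
      float a = 2.0f, b = 8.0f, c = 0.25f;
      float ref = a * (1.0f - c) + b * c;           /* 3.5 */
      float two_ffma = fmaf(b, c, fmaf(-a, c, a));  /* 3.5 */
      printf("%f %f\n", ref, two_ffma);
      return 0;
   }
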
index 23ebd07..f2c74a1 100644
@@ -45,9 +45,9 @@
  * Version 2.1.0
  */
 
-static nir_ssa_def *
-half_rounded(nir_builder *b, nir_ssa_def *value, nir_ssa_def *guard, nir_ssa_def *sticky,
-             nir_ssa_def *sign, nir_rounding_mode mode)
+static nir_def *
+half_rounded(nir_builder *b, nir_def *value, nir_def *guard, nir_def *sticky,
+             nir_def *sign, nir_rounding_mode mode)
 {
    switch (mode) {
    case nir_rounding_mode_rtne:
@@ -63,27 +63,27 @@ half_rounded(nir_builder *b, nir_ssa_def *value, nir_ssa_def *guard, nir_ssa_def
    }
 }
 
-static nir_ssa_def *
-float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode)
+static nir_def *
+float_to_half_impl(nir_builder *b, nir_def *src, nir_rounding_mode mode)
 {
-   nir_ssa_def *f32infinity = nir_imm_int(b, 255 << 23);
-   nir_ssa_def *f16max = nir_imm_int(b, (127 + 16) << 23);
+   nir_def *f32infinity = nir_imm_int(b, 255 << 23);
+   nir_def *f16max = nir_imm_int(b, (127 + 16) << 23);
 
    if (src->bit_size == 64)
       src = nir_f2f32(b, src);
-   nir_ssa_def *sign = nir_iand_imm(b, src, 0x80000000);
-   nir_ssa_def *one = nir_imm_int(b, 1);
+   nir_def *sign = nir_iand_imm(b, src, 0x80000000);
+   nir_def *one = nir_imm_int(b, 1);
 
-   nir_ssa_def *abs = nir_iand_imm(b, src, 0x7FFFFFFF);
+   nir_def *abs = nir_iand_imm(b, src, 0x7FFFFFFF);
    /* NaN or INF. For rtne, overflow also becomes INF, so combine the comparisons */
    nir_push_if(b, nir_ige(b, abs, mode == nir_rounding_mode_rtne ? f16max : f32infinity));
-   nir_ssa_def *inf_nanfp16 = nir_bcsel(b,
-                                        nir_ilt(b, f32infinity, abs),
-                                        nir_imm_int(b, 0x7E00),
-                                        nir_imm_int(b, 0x7C00));
+   nir_def *inf_nanfp16 = nir_bcsel(b,
+                                    nir_ilt(b, f32infinity, abs),
+                                    nir_imm_int(b, 0x7E00),
+                                    nir_imm_int(b, 0x7C00));
    nir_push_else(b, NULL);
 
-   nir_ssa_def *overflowed_fp16 = NULL;
+   nir_def *overflowed_fp16 = NULL;
    if (mode != nir_rounding_mode_rtne) {
       /* Handle overflow */
       nir_push_if(b, nir_ige(b, abs, f16max));
@@ -105,37 +105,37 @@ float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode)
       nir_push_else(b, NULL);
    }
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
 
    nir_push_if(b, nir_ige_imm(b, abs, 113 << 23));
 
    /* FP16 will be normal */
-   nir_ssa_def *value = nir_ior(b,
-                                nir_ishl_imm(b,
-                                             nir_iadd_imm(b,
-                                                          nir_ushr_imm(b, abs, 23),
-                                                          -112),
-                                             10),
-                                nir_iand_imm(b, nir_ushr_imm(b, abs, 13), 0x3FFF));
-   nir_ssa_def *guard = nir_iand(b, nir_ushr_imm(b, abs, 12), one);
-   nir_ssa_def *sticky = nir_bcsel(b, nir_ine(b, nir_iand_imm(b, abs, 0xFFF), zero), one, zero);
-   nir_ssa_def *normal_fp16 = half_rounded(b, value, guard, sticky, sign, mode);
+   nir_def *value = nir_ior(b,
+                            nir_ishl_imm(b,
+                                         nir_iadd_imm(b,
+                                                      nir_ushr_imm(b, abs, 23),
+                                                      -112),
+                                         10),
+                            nir_iand_imm(b, nir_ushr_imm(b, abs, 13), 0x3FFF));
+   nir_def *guard = nir_iand(b, nir_ushr_imm(b, abs, 12), one);
+   nir_def *sticky = nir_bcsel(b, nir_ine(b, nir_iand_imm(b, abs, 0xFFF), zero), one, zero);
+   nir_def *normal_fp16 = half_rounded(b, value, guard, sticky, sign, mode);
 
    nir_push_else(b, NULL);
    nir_push_if(b, nir_ige_imm(b, abs, 102 << 23));
 
    /* FP16 will be denormal */
-   nir_ssa_def *i = nir_isub_imm(b, 125, nir_ushr_imm(b, abs, 23));
-   nir_ssa_def *masked = nir_ior_imm(b, nir_iand_imm(b, abs, 0x7FFFFF), 0x800000);
+   nir_def *i = nir_isub_imm(b, 125, nir_ushr_imm(b, abs, 23));
+   nir_def *masked = nir_ior_imm(b, nir_iand_imm(b, abs, 0x7FFFFF), 0x800000);
    value = nir_ushr(b, masked, nir_iadd(b, i, one));
    guard = nir_iand(b, nir_ushr(b, masked, i), one);
    sticky = nir_bcsel(b, nir_ine(b, nir_iand(b, masked, nir_isub(b, nir_ishl(b, one, i), one)), zero), one, zero);
-   nir_ssa_def *denormal_fp16 = half_rounded(b, value, guard, sticky, sign, mode);
+   nir_def *denormal_fp16 = half_rounded(b, value, guard, sticky, sign, mode);
 
    nir_push_else(b, NULL);
 
    /* Handle underflow. Nonzero values need to shift up or down for round-up or round-down */
-   nir_ssa_def *underflowed_fp16 = zero;
+   nir_def *underflowed_fp16 = zero;
    if (mode == nir_rounding_mode_ru ||
        mode == nir_rounding_mode_rd) {
       nir_push_if(b, nir_i2b(b, abs));
@@ -151,19 +151,19 @@ float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode)
    }
 
    nir_pop_if(b, NULL);
-   nir_ssa_def *underflowed_or_denorm_fp16 = nir_if_phi(b, denormal_fp16, underflowed_fp16);
+   nir_def *underflowed_or_denorm_fp16 = nir_if_phi(b, denormal_fp16, underflowed_fp16);
 
    nir_pop_if(b, NULL);
-   nir_ssa_def *finite_fp16 = nir_if_phi(b, normal_fp16, underflowed_or_denorm_fp16);
+   nir_def *finite_fp16 = nir_if_phi(b, normal_fp16, underflowed_or_denorm_fp16);
 
-   nir_ssa_def *finite_or_overflowed_fp16 = finite_fp16;
+   nir_def *finite_or_overflowed_fp16 = finite_fp16;
    if (mode != nir_rounding_mode_rtne) {
       nir_pop_if(b, NULL);
       finite_or_overflowed_fp16 = nir_if_phi(b, overflowed_fp16, finite_fp16);
    }
 
    nir_pop_if(b, NULL);
-   nir_ssa_def *fp16 = nir_if_phi(b, inf_nanfp16, finite_or_overflowed_fp16);
+   nir_def *fp16 = nir_if_phi(b, inf_nanfp16, finite_or_overflowed_fp16);
 
    return nir_u2u16(b, nir_ior(b, fp16, nir_ushr_imm(b, sign, 16)));
 }
@@ -171,7 +171,7 @@ float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode)
 static bool
 lower_fp16_cast_impl(nir_builder *b, nir_instr *instr, void *data)
 {
-   nir_ssa_def *src, *dst;
+   nir_def *src, *dst;
    uint8_t *swizzle = NULL;
    nir_rounding_mode mode = nir_rounding_mode_undef;
 
@@ -238,15 +238,15 @@ lower_fp16_cast_impl(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *rets[NIR_MAX_VEC_COMPONENTS] = { NULL };
+   nir_def *rets[NIR_MAX_VEC_COMPONENTS] = { NULL };
 
    for (unsigned i = 0; i < dst->num_components; i++) {
-      nir_ssa_def *comp = nir_channel(b, src, swizzle ? swizzle[i] : i);
+      nir_def *comp = nir_channel(b, src, swizzle ? swizzle[i] : i);
       rets[i] = float_to_half_impl(b, comp, mode);
    }
 
-   nir_ssa_def *new_val = nir_vec(b, rets, dst->num_components);
-   nir_ssa_def_rewrite_uses(dst, new_val);
+   nir_def *new_val = nir_vec(b, rets, dst->num_components);
+   nir_def_rewrite_uses(dst, new_val);
    return true;
 }
 
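
The float-to-half path above rounds with guard and sticky bits; for RTNE the
rule is: round up exactly when the guard bit is set and either the sticky bit
or the value's low bit is, which breaks ties toward even. A scalar sketch of
that rule, assuming 0/1 guard and sticky values as in half_rounded:

   #include <stdint.h>

   /* RTNE increment: guard & (sticky | lsb) rounds half-way cases to even. */
   static uint32_t
   round_rtne(uint32_t value, uint32_t guard, uint32_t sticky)
   {
      return value + (guard & (sticky | (value & 1u)));
   }
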
index 20033f4..3425075 100644
@@ -21,13 +21,13 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *data)
     * should return the centre of the pixel.
     */
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *top_left_xy = nir_u2f32(b, nir_load_pixel_coord(b));
-   nir_ssa_def *xy = nir_fadd_imm(b, top_left_xy, 0.5);
+   nir_def *top_left_xy = nir_u2f32(b, nir_load_pixel_coord(b));
+   nir_def *xy = nir_fadd_imm(b, top_left_xy, 0.5);
 
-   nir_ssa_def *vec = nir_vec4(b, nir_channel(b, xy, 0), nir_channel(b, xy, 1),
-                               nir_load_frag_coord_zw(b, .component = 2),
-                               nir_load_frag_coord_zw(b, .component = 3));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, vec);
+   nir_def *vec = nir_vec4(b, nir_channel(b, xy, 0), nir_channel(b, xy, 1),
+                           nir_load_frag_coord_zw(b, .component = 2),
+                           nir_load_frag_coord_zw(b, .component = 3));
+   nir_def_rewrite_uses(&intr->dest.ssa, vec);
    return true;
 }
 
index 1eacc98..9079a6a 100644
@@ -65,7 +65,7 @@ lower_fragcolor_instr(nir_builder *b, nir_instr *intr, void *data)
       return false;
    b->cursor = nir_after_instr(&instr->instr);
 
-   nir_ssa_def *frag_color = instr->src[1].ssa;
+   nir_def *frag_color = instr->src[1].ssa;
    ralloc_free(out->name);
 
    const char *name = out->data.index == 0 ? "gl_FragData[0]" : "gl_SecondaryFragDataEXT[0]";
index 5fc1fa2..d92fe96 100644
@@ -53,7 +53,7 @@ lower_fragcoord_wtrans_filter(const nir_instr *instr, UNUSED const void *_option
    return var->data.location == VARYING_SLOT_POS;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_fragcoord_wtrans_impl(nir_builder *b, nir_instr *instr,
                             UNUSED void *_options)
 {
index 6395a8c..7e0a5c2 100644
 #include "nir.h"
 #include "nir_builder.h"
 
-static nir_ssa_def *
-lower_frexp_sig(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_frexp_sig(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *abs_x = nir_fabs(b, x);
-   nir_ssa_def *zero = nir_imm_floatN_t(b, 0, x->bit_size);
-   nir_ssa_def *sign_mantissa_mask, *exponent_value;
+   nir_def *abs_x = nir_fabs(b, x);
+   nir_def *zero = nir_imm_floatN_t(b, 0, x->bit_size);
+   nir_def *sign_mantissa_mask, *exponent_value;
 
    switch (x->bit_size) {
    case 16:
@@ -83,10 +83,10 @@ lower_frexp_sig(nir_builder *b, nir_ssa_def *x)
       /* We only need to deal with the exponent so first we extract the upper
        * 32 bits using nir_unpack_64_2x32_split_y.
        */
-      nir_ssa_def *upper_x = nir_unpack_64_2x32_split_y(b, x);
+      nir_def *upper_x = nir_unpack_64_2x32_split_y(b, x);
 
      /* If x is ±0, ±Inf, or NaN, return x unmodified. */
-      nir_ssa_def *new_upper =
+      nir_def *new_upper =
          nir_bcsel(b,
                    nir_iand(b,
                             nir_flt(b, zero, abs_x),
@@ -96,7 +96,7 @@ lower_frexp_sig(nir_builder *b, nir_ssa_def *x)
                            exponent_value),
                    upper_x);
 
-      nir_ssa_def *lower_x = nir_unpack_64_2x32_split_x(b, x);
+      nir_def *lower_x = nir_unpack_64_2x32_split_x(b, x);
 
       return nir_pack_64_2x32_split(b, lower_x, new_upper);
    } else {
@@ -112,18 +112,18 @@ lower_frexp_sig(nir_builder *b, nir_ssa_def *x)
    }
 }
 
-static nir_ssa_def *
-lower_frexp_exp(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_frexp_exp(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *abs_x = nir_fabs(b, x);
-   nir_ssa_def *zero = nir_imm_floatN_t(b, 0, x->bit_size);
-   nir_ssa_def *is_not_zero = nir_fneu(b, abs_x, zero);
-   nir_ssa_def *exponent;
+   nir_def *abs_x = nir_fabs(b, x);
+   nir_def *zero = nir_imm_floatN_t(b, 0, x->bit_size);
+   nir_def *is_not_zero = nir_fneu(b, abs_x, zero);
+   nir_def *exponent;
 
    switch (x->bit_size) {
    case 16: {
-      nir_ssa_def *exponent_shift = nir_imm_int(b, 10);
-      nir_ssa_def *exponent_bias = nir_imm_intN_t(b, -14, 16);
+      nir_def *exponent_shift = nir_imm_int(b, 10);
+      nir_def *exponent_bias = nir_imm_intN_t(b, -14, 16);
 
       /* Significand return must be of the same type as the input, but the
        * exponent must be a 32-bit integer.
@@ -133,19 +133,19 @@ lower_frexp_exp(nir_builder *b, nir_ssa_def *x)
       break;
    }
    case 32: {
-      nir_ssa_def *exponent_shift = nir_imm_int(b, 23);
-      nir_ssa_def *exponent_bias = nir_imm_int(b, -126);
+      nir_def *exponent_shift = nir_imm_int(b, 23);
+      nir_def *exponent_bias = nir_imm_int(b, -126);
 
       exponent = nir_iadd(b, nir_ushr(b, abs_x, exponent_shift),
                           nir_bcsel(b, is_not_zero, exponent_bias, zero));
       break;
    }
    case 64: {
-      nir_ssa_def *exponent_shift = nir_imm_int(b, 20);
-      nir_ssa_def *exponent_bias = nir_imm_int(b, -1022);
+      nir_def *exponent_shift = nir_imm_int(b, 20);
+      nir_def *exponent_bias = nir_imm_int(b, -1022);
 
-      nir_ssa_def *zero32 = nir_imm_int(b, 0);
-      nir_ssa_def *abs_upper_x = nir_unpack_64_2x32_split_y(b, abs_x);
+      nir_def *zero32 = nir_imm_int(b, 0);
+      nir_def *abs_upper_x = nir_unpack_64_2x32_split_y(b, abs_x);
 
       exponent = nir_iadd(b, nir_ushr(b, abs_upper_x, exponent_shift),
                           nir_bcsel(b, is_not_zero, exponent_bias, zero32));
@@ -165,7 +165,7 @@ lower_frexp_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
       return false;
 
    nir_alu_instr *alu_instr = nir_instr_as_alu(instr);
-   nir_ssa_def *lower;
+   nir_def *lower;
 
    b->cursor = nir_before_instr(instr);
 
@@ -180,7 +180,7 @@ lower_frexp_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
       return false;
    }
 
-   nir_ssa_def_rewrite_uses(&alu_instr->dest.dest.ssa, lower);
+   nir_def_rewrite_uses(&alu_instr->dest.dest.ssa, lower);
    nir_instr_remove(instr);
    return true;
 }
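
For reference, a scalar C sketch of the 32-bit path of this lowering, assuming
IEEE-754 single precision and the usual 0x807fffff / 0x3f000000 mask pair
(NaN/Inf handling elided; the pass itself returns x unmodified for those):

   #include <stdint.h>
   #include <string.h>

   static float frexp_sig32(float x, int *exp)
   {
      uint32_t bits;
      memcpy(&bits, &x, sizeof(bits));
      /* exponent = ushr(abs(x), 23) + (x != 0 ? -126 : 0) */
      *exp = (x == 0.0f) ? 0 : (int)((bits >> 23) & 0xff) - 126;
      /* significand: keep sign + mantissa, force the exponent field to
       * 0.5f's, so the result lands in [0.5, 1) */
      bits = (bits & 0x807fffffu) | 0x3f000000u;
      float sig;
      memcpy(&sig, &bits, sizeof(sig));
      return (x == 0.0f) ? x : sig;
   }
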
index c3874e4..ad1c849 100644 (file)
@@ -44,7 +44,7 @@ struct path_fork {
    bool is_var;
    union {
       nir_variable *path_var;
-      nir_ssa_def *path_ssa;
+      nir_def *path_ssa;
    };
    struct path paths[2];
 };
@@ -175,7 +175,7 @@ set_path_vars_cond(nir_builder *b, struct path_fork *fork, nir_src condition,
                fork = fork->paths[i].fork;
                break;
             } else {
-               nir_ssa_def *ssa_def = condition.ssa;
+               nir_def *ssa_def = condition.ssa;
                assert(ssa_def->bit_size == 1);
                assert(ssa_def->num_components == 1);
                if (!i)
@@ -353,10 +353,10 @@ loop_routing_start(struct routes *routing, nir_builder *b,
  * Gets a fork's condition as an ssa def; if the condition lives in a
  * helper var, the variable is read into an ssa def
  */
-static nir_ssa_def *
+static nir_def *
 fork_condition(nir_builder *b, struct path_fork *fork)
 {
-   nir_ssa_def *ret;
+   nir_def *ret;
    if (fork->is_var) {
       ret = nir_load_var(b, fork->path_var);
    } else
index 4313d32..dbf4e51 100644 (file)
@@ -85,15 +85,15 @@ rewrite_emit_vertex(nir_intrinsic_instr *intrin, struct state *state)
    /* Load the vertex count */
    b->cursor = nir_before_instr(&intrin->instr);
    assert(state->vertex_count_vars[stream] != NULL);
-   nir_ssa_def *count = nir_load_var(b, state->vertex_count_vars[stream]);
-   nir_ssa_def *count_per_primitive;
+   nir_def *count = nir_load_var(b, state->vertex_count_vars[stream]);
+   nir_def *count_per_primitive;
 
    if (state->count_vtx_per_prim)
       count_per_primitive = nir_load_var(b, state->vtxcnt_per_prim_vars[stream]);
    else if (state->is_points)
       count_per_primitive = nir_imm_int(b, 0);
    else
-      count_per_primitive = nir_ssa_undef(b, 1, 32);
+      count_per_primitive = nir_undef(b, 1, 32);
 
    /* Create: if (vertex_count < max_vertices) and insert it.
     *
@@ -112,7 +112,7 @@ rewrite_emit_vertex(nir_intrinsic_instr *intrin, struct state *state)
    if (state->count_vtx_per_prim) {
       /* Increment the per-primitive vertex count by 1 */
       nir_variable *var = state->vtxcnt_per_prim_vars[stream];
-      nir_ssa_def *vtx_per_prim_cnt = nir_load_var(b, var);
+      nir_def *vtx_per_prim_cnt = nir_load_var(b, var);
       nir_store_var(b, var,
                     nir_iadd_imm(b, vtx_per_prim_cnt, 1),
                     0x1); /* .x */
@@ -160,19 +160,19 @@ overwrite_incomplete_primitives(struct state *state, unsigned stream)
       unreachable("Invalid GS output primitive type.");
 
    /* Total count of vertices emitted so far. */
-   nir_ssa_def *vtxcnt_total =
+   nir_def *vtxcnt_total =
       nir_load_var(b, state->vertex_count_vars[stream]);
 
    /* Number of vertices emitted for the last primitive */
-   nir_ssa_def *vtxcnt_per_primitive =
+   nir_def *vtxcnt_per_primitive =
       nir_load_var(b, state->vtxcnt_per_prim_vars[stream]);
 
    /* See if the current primitive is incomplete */
-   nir_ssa_def *is_inc_prim =
+   nir_def *is_inc_prim =
       nir_ilt_imm(b, vtxcnt_per_primitive, outprim_min_vertices);
 
    /* Number of vertices in the incomplete primitive */
-   nir_ssa_def *num_inc_vtx =
+   nir_def *num_inc_vtx =
       nir_bcsel(b, is_inc_prim, vtxcnt_per_primitive, nir_imm_int(b, 0));
 
    /* Store corrected total vertex count */
@@ -182,10 +182,10 @@ overwrite_incomplete_primitives(struct state *state, unsigned stream)
 
    if (state->count_prims) {
       /* Number of incomplete primitives (0 or 1) */
-      nir_ssa_def *num_inc_prim = nir_b2i32(b, is_inc_prim);
+      nir_def *num_inc_prim = nir_b2i32(b, is_inc_prim);
 
       /* Store corrected primitive count */
-      nir_ssa_def *prim_cnt = nir_load_var(b, state->primitive_count_vars[stream]);
+      nir_def *prim_cnt = nir_load_var(b, state->primitive_count_vars[stream]);
       nir_store_var(b, state->primitive_count_vars[stream],
                     nir_isub(b, prim_cnt, num_inc_prim),
                     0x1); /* .x */
@@ -203,21 +203,21 @@ rewrite_end_primitive(nir_intrinsic_instr *intrin, struct state *state)
 
    b->cursor = nir_before_instr(&intrin->instr);
    assert(state->vertex_count_vars[stream] != NULL);
-   nir_ssa_def *count = nir_load_var(b, state->vertex_count_vars[stream]);
-   nir_ssa_def *count_per_primitive;
+   nir_def *count = nir_load_var(b, state->vertex_count_vars[stream]);
+   nir_def *count_per_primitive;
 
    if (state->count_vtx_per_prim)
       count_per_primitive = nir_load_var(b, state->vtxcnt_per_prim_vars[stream]);
    else if (state->is_points)
       count_per_primitive = nir_imm_int(b, 0);
    else
-      count_per_primitive = nir_ssa_undef(b, count->num_components, count->bit_size);
+      count_per_primitive = nir_undef(b, count->num_components, count->bit_size);
 
    nir_end_primitive_with_counter(b, count, count_per_primitive, stream);
 
    if (state->count_prims) {
       /* Increment the primitive count by 1 */
-      nir_ssa_def *prim_cnt = nir_load_var(b, state->primitive_count_vars[stream]);
+      nir_def *prim_cnt = nir_load_var(b, state->primitive_count_vars[stream]);
       nir_store_var(b, state->primitive_count_vars[stream],
                     nir_iadd_imm(b, prim_cnt, 1),
                     0x1); /* .x */
@@ -286,15 +286,15 @@ append_set_vertex_and_primitive_count(nir_block *end_block, struct state *state)
          if (!state->per_stream && stream != 0)
             continue;
 
-         nir_ssa_def *vtx_cnt;
-         nir_ssa_def *prim_cnt;
+         nir_def *vtx_cnt;
+         nir_def *prim_cnt;
 
          if (state->per_stream && !(shader->info.gs.active_stream_mask & (1 << stream))) {
             /* Inactive stream: vertex count is 0, primitive count is 0 or undef. */
             vtx_cnt = nir_imm_int(b, 0);
             prim_cnt = state->count_prims || state->is_points
                           ? nir_imm_int(b, 0)
-                          : nir_ssa_undef(b, 1, 32);
+                          : nir_undef(b, 1, 32);
          } else {
             if (state->overwrite_incomplete)
                overwrite_incomplete_primitives(state, stream);
@@ -309,7 +309,7 @@ append_set_vertex_and_primitive_count(nir_block *end_block, struct state *state)
                 */
                prim_cnt = vtx_cnt;
             else
-               prim_cnt = nir_ssa_undef(b, 1, 32);
+               prim_cnt = nir_undef(b, 1, 32);
          }
 
          nir_set_vertex_and_primitive_count(b, vtx_cnt, prim_cnt, stream);
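
A worked example of the correction above, assuming a triangle-strip output
primitive (outprim_min_vertices = 3):

   /* Example: two strips were started; the first got 3 vertices, the
    * current one only 2, so vtxcnt_total = 5 and vtxcnt_per_primitive = 2.
    * Since 2 < 3, is_inc_prim is true, num_inc_vtx = 2, and the stored
    * totals become 5 - 2 = 3 vertices and prim_cnt - 1 primitives:
    * the dangling 2-vertex strip is discarded. */
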
index a0f6b01..c59489c 100644 (file)
@@ -52,9 +52,9 @@ lower(nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_before_instr(instr);
    bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
-   nir_ssa_def *undef = NULL;
+   nir_def *undef = NULL;
 
-   nir_ssa_def *helper = nir_load_helper_invocation(b, 1);
+   nir_def *helper = nir_load_helper_invocation(b, 1);
    nir_push_if(b, nir_inot(b, helper));
    nir_instr_remove(instr);
    nir_builder_instr_insert(b, instr);
@@ -76,19 +76,19 @@ lower(nir_builder *b, nir_instr *instr, void *data)
     */
    if (has_dest) {
       nir_push_else(b, NULL);
-      undef = nir_ssa_undef(b, nir_dest_num_components(intr->dest),
-                            nir_dest_bit_size(intr->dest));
+      undef = nir_undef(b, nir_dest_num_components(intr->dest),
+                        nir_dest_bit_size(intr->dest));
    }
 
    nir_pop_if(b, NULL);
 
    if (has_dest) {
-      nir_ssa_def *phi = nir_if_phi(b, &intr->dest.ssa, undef);
+      nir_def *phi = nir_if_phi(b, &intr->dest.ssa, undef);
 
-      /* We can't use nir_ssa_def_rewrite_uses_after on phis, so use the global
+      /* We can't use nir_def_rewrite_uses_after on phis, so use the global
        * version and fixup the phi manually
        */
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, phi);
+      nir_def_rewrite_uses(&intr->dest.ssa, phi);
 
       nir_instr *phi_instr = phi->parent_instr;
       nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
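
In pseudocode, the shape of the rewrite this pass performs (names illustrative):

   /* Before:                  After:
    *    x = intrin(...);         if (!nir_load_helper_invocation()) {
    *                                x1 = intrin(...);
    *                             } else {
    *                                x2 = undef;
    *                             }
    *                             x = phi(x1, x2);
    * Helper invocations skip the side effect, while uses of the result
    * remain SSA-valid via the phi. */
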
index f7b8df4..b828254 100644 (file)
 #include "nir_builder.h"
 
 /* ported from LLVM's AMDGPUTargetLowering::LowerUDIVREM */
-static nir_ssa_def *
-emit_udiv(nir_builder *bld, nir_ssa_def *numer, nir_ssa_def *denom, bool modulo)
+static nir_def *
+emit_udiv(nir_builder *bld, nir_def *numer, nir_def *denom, bool modulo)
 {
-   nir_ssa_def *rcp = nir_frcp(bld, nir_u2f32(bld, denom));
+   nir_def *rcp = nir_frcp(bld, nir_u2f32(bld, denom));
    rcp = nir_f2u32(bld, nir_fmul_imm(bld, rcp, 4294966784.0));
 
-   nir_ssa_def *neg_rcp_times_denom =
+   nir_def *neg_rcp_times_denom =
       nir_imul(bld, rcp, nir_ineg(bld, denom));
    rcp = nir_iadd(bld, rcp, nir_umul_high(bld, rcp, neg_rcp_times_denom));
 
    /* Get initial estimate for quotient/remainder, then refine the estimate
     * in two iterations after */
-   nir_ssa_def *quotient = nir_umul_high(bld, numer, rcp);
-   nir_ssa_def *num_s_remainder = nir_imul(bld, quotient, denom);
-   nir_ssa_def *remainder = nir_isub(bld, numer, num_s_remainder);
+   nir_def *quotient = nir_umul_high(bld, numer, rcp);
+   nir_def *num_s_remainder = nir_imul(bld, quotient, denom);
+   nir_def *remainder = nir_isub(bld, numer, num_s_remainder);
 
    /* First refinement step */
-   nir_ssa_def *remainder_ge_den = nir_uge(bld, remainder, denom);
+   nir_def *remainder_ge_den = nir_uge(bld, remainder, denom);
    if (!modulo) {
       quotient = nir_bcsel(bld, remainder_ge_den,
                            nir_iadd_imm(bld, quotient, 1), quotient);
@@ -65,24 +65,24 @@ emit_udiv(nir_builder *bld, nir_ssa_def *numer, nir_ssa_def *denom, bool modulo)
 }
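
A scalar C model of the refinement above, as a sketch; it assumes d != 0 and
that the float reciprocal is accurate to roughly one ulp, which is what the
scale factor 4294966784.0 (2^32 - 512, a deliberate under-estimate) is sized
for:

   #include <stdint.h>

   static uint32_t udiv32_by_rcp(uint32_t n, uint32_t d)
   {
      /* rcp ~= (2^32 - 512) / d */
      uint32_t rcp = (uint32_t)((1.0f / (float)d) * 4294966784.0f);
      /* One Newton-Raphson step: rcp += umul_high(rcp, rcp * -d) */
      uint32_t neg_rcp_times_denom = rcp * (0u - d);
      rcp += (uint32_t)(((uint64_t)rcp * neg_rcp_times_denom) >> 32);

      /* Initial quotient/remainder estimate... */
      uint32_t quotient  = (uint32_t)(((uint64_t)n * rcp) >> 32);
      uint32_t remainder = n - quotient * d;

      /* ...then two conditional correction steps, as in the NIR */
      if (remainder >= d) { quotient++; remainder -= d; }
      if (remainder >= d) { quotient++; remainder -= d; }
      return quotient;
   }
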
 
 /* ported from LLVM's AMDGPUTargetLowering::LowerSDIVREM */
-static nir_ssa_def *
-emit_idiv(nir_builder *bld, nir_ssa_def *numer, nir_ssa_def *denom, nir_op op)
+static nir_def *
+emit_idiv(nir_builder *bld, nir_def *numer, nir_def *denom, nir_op op)
 {
-   nir_ssa_def *lh_sign = nir_ilt_imm(bld, numer, 0);
-   nir_ssa_def *rh_sign = nir_ilt_imm(bld, denom, 0);
+   nir_def *lh_sign = nir_ilt_imm(bld, numer, 0);
+   nir_def *rh_sign = nir_ilt_imm(bld, denom, 0);
 
-   nir_ssa_def *lhs = nir_iabs(bld, numer);
-   nir_ssa_def *rhs = nir_iabs(bld, denom);
+   nir_def *lhs = nir_iabs(bld, numer);
+   nir_def *rhs = nir_iabs(bld, denom);
 
    if (op == nir_op_idiv) {
-      nir_ssa_def *d_sign = nir_ixor(bld, lh_sign, rh_sign);
-      nir_ssa_def *res = emit_udiv(bld, lhs, rhs, false);
+      nir_def *d_sign = nir_ixor(bld, lh_sign, rh_sign);
+      nir_def *res = emit_udiv(bld, lhs, rhs, false);
       return nir_bcsel(bld, d_sign, nir_ineg(bld, res), res);
    } else {
-      nir_ssa_def *res = emit_udiv(bld, lhs, rhs, true);
+      nir_def *res = emit_udiv(bld, lhs, rhs, true);
       res = nir_bcsel(bld, lh_sign, nir_ineg(bld, res), res);
       if (op == nir_op_imod) {
-         nir_ssa_def *cond = nir_ieq_imm(bld, res, 0);
+         nir_def *cond = nir_ieq_imm(bld, res, 0);
          cond = nir_ior(bld, nir_ieq(bld, lh_sign, rh_sign), cond);
          res = nir_bcsel(bld, cond, res, nir_iadd(bld, res, denom));
       }
@@ -90,25 +90,25 @@ emit_idiv(nir_builder *bld, nir_ssa_def *numer, nir_ssa_def *denom, nir_op op)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 convert_instr_small(nir_builder *b, nir_op op,
-                    nir_ssa_def *numer, nir_ssa_def *denom,
+                    nir_def *numer, nir_def *denom,
                     const nir_lower_idiv_options *options)
 {
    unsigned sz = numer->bit_size;
    nir_alu_type int_type = nir_op_infos[op].output_type | sz;
    nir_alu_type float_type = nir_type_float | (options->allow_fp16 ? sz * 2 : 32);
 
-   nir_ssa_def *p = nir_type_convert(b, numer, int_type, float_type, nir_rounding_mode_undef);
-   nir_ssa_def *q = nir_type_convert(b, denom, int_type, float_type, nir_rounding_mode_undef);
+   nir_def *p = nir_type_convert(b, numer, int_type, float_type, nir_rounding_mode_undef);
+   nir_def *q = nir_type_convert(b, denom, int_type, float_type, nir_rounding_mode_undef);
 
    /* Take 1/q but offset mantissa by 1 to correct for rounding. This is
     * needed for correct results and has been checked exhaustively for
     * all pairs of 16-bit integers */
-   nir_ssa_def *rcp = nir_iadd_imm(b, nir_frcp(b, q), 1);
+   nir_def *rcp = nir_iadd_imm(b, nir_frcp(b, q), 1);
 
    /* Divide by multiplying by adjusted reciprocal */
-   nir_ssa_def *res = nir_fmul(b, p, rcp);
+   nir_def *res = nir_fmul(b, p, rcp);
 
    /* Convert back to integer space with rounding inferred by type */
    res = nir_type_convert(b, res, float_type, int_type, nir_rounding_mode_undef);
@@ -119,25 +119,25 @@ convert_instr_small(nir_builder *b, nir_op op,
 
    /* Adjust for sign, see constant folding definition */
    if (op == nir_op_imod) {
-      nir_ssa_def *zero = nir_imm_zero(b, 1, sz);
-      nir_ssa_def *diff_sign =
+      nir_def *zero = nir_imm_zero(b, 1, sz);
+      nir_def *diff_sign =
          nir_ine(b, nir_ige(b, numer, zero), nir_ige(b, denom, zero));
 
-      nir_ssa_def *adjust = nir_iand(b, diff_sign, nir_ine(b, res, zero));
+      nir_def *adjust = nir_iand(b, diff_sign, nir_ine(b, res, zero));
       res = nir_iadd(b, res, nir_bcsel(b, adjust, denom, zero));
    }
 
    return res;
 }
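
A scalar sketch of the same trick for 16-bit unsigned division, assuming
IEEE-754 float32 for the intermediate math; nudging the reciprocal's bit
pattern up by one makes truncation of n * rcp exact for all 16-bit pairs,
per the comment above:

   #include <stdint.h>

   static uint16_t udiv16_via_float(uint16_t n, uint16_t d)
   {
      union { float f; uint32_t u; } rcp = { .f = 1.0f / (float)d };
      rcp.u += 1;   /* the iadd_imm(..., 1) on the reciprocal's bits */
      return (uint16_t)((float)n * rcp.f);
   }
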
 
-static nir_ssa_def *
+static nir_def *
 lower_idiv(nir_builder *b, nir_instr *instr, void *_data)
 {
    const nir_lower_idiv_options *options = _data;
    nir_alu_instr *alu = nir_instr_as_alu(instr);
 
-   nir_ssa_def *numer = nir_ssa_for_alu_src(b, alu, 0);
-   nir_ssa_def *denom = nir_ssa_for_alu_src(b, alu, 1);
+   nir_def *numer = nir_ssa_for_alu_src(b, alu, 0);
+   nir_def *denom = nir_ssa_for_alu_src(b, alu, 1);
 
    b->exact = true;
 
index e66e91f..8847940 100644 (file)
@@ -44,8 +44,8 @@ lower_cube_size(nir_builder *b, nir_intrinsic_instr *intrin)
    nir_intrinsic_set_image_array(_2darray_size, true);
    nir_builder_instr_insert(b, &_2darray_size->instr);
 
-   nir_ssa_def *size = nir_instr_ssa_def(&_2darray_size->instr);
-   nir_ssa_scalar comps[NIR_MAX_VEC_COMPONENTS] = { 0 };
+   nir_def *size = nir_instr_ssa_def(&_2darray_size->instr);
+   nir_scalar comps[NIR_MAX_VEC_COMPONENTS] = { 0 };
    unsigned coord_comps = intrin->dest.ssa.num_components;
    for (unsigned c = 0; c < coord_comps; c++) {
       if (c == 2) {
@@ -55,8 +55,8 @@ lower_cube_size(nir_builder *b, nir_intrinsic_instr *intrin)
       }
    }
 
-   nir_ssa_def *vec = nir_vec_scalars(b, comps, intrin->dest.ssa.num_components);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, vec);
+   nir_def *vec = nir_vec_scalars(b, comps, intrin->dest.ssa.num_components);
+   nir_def_rewrite_uses(&intrin->dest.ssa, vec);
    nir_instr_remove(&intrin->instr);
    nir_instr_free(&intrin->instr);
 }
@@ -101,7 +101,7 @@ lower_image_to_fragment_mask_load(nir_builder *b, nir_intrinsic_instr *intrin)
       break;
    }
 
-   nir_ssa_def *fmask =
+   nir_def *fmask =
       nir_image_fragment_mask_load_amd(b, intrin->src[0].ssa, intrin->src[1].ssa,
                                        .image_dim = nir_intrinsic_image_dim(intrin),
                                        .image_array = nir_intrinsic_image_array(intrin),
@@ -113,10 +113,10 @@ lower_image_to_fragment_mask_load(nir_builder *b, nir_intrinsic_instr *intrin)
    fmask_load->intrinsic = fmask_op;
 
    /* extract real color buffer index from fmask buffer */
-   nir_ssa_def *sample_index_old = intrin->src[2].ssa;
-   nir_ssa_def *fmask_offset = nir_ishl_imm(b, sample_index_old, 2);
-   nir_ssa_def *fmask_width = nir_imm_int(b, 3);
-   nir_ssa_def *sample_index_new = nir_ubfe(b, fmask, fmask_offset, fmask_width);
+   nir_def *sample_index_old = intrin->src[2].ssa;
+   nir_def *fmask_offset = nir_ishl_imm(b, sample_index_old, 2);
+   nir_def *fmask_width = nir_imm_int(b, 3);
+   nir_def *sample_index_new = nir_ubfe(b, fmask, fmask_offset, fmask_width);
 
    /* fix color buffer load */
    nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[2], sample_index_new);
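
Spelled out, the bitfield extraction above amounts to:

   /* Each sample owns a 4-bit slot in the fmask word, of which 3 bits
    * are read: for requested sample i,
    *    new_index = (fmask >> (i << 2)) & 0x7;   // ubfe(fmask, 4*i, 3)
    */
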
@@ -152,8 +152,8 @@ lower_image_samples_identical_to_fragment_mask_load(nir_builder *b, nir_intrinsi
    nir_ssa_dest_init(&fmask_load->instr, &fmask_load->dest, 1, 32);
    nir_builder_instr_insert(b, &fmask_load->instr);
 
-   nir_ssa_def *samples_identical = nir_ieq_imm(b, &fmask_load->dest.ssa, 0);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, samples_identical);
+   nir_def *samples_identical = nir_ieq_imm(b, &fmask_load->dest.ssa, 0);
+   nir_def_rewrite_uses(&intrin->dest.ssa, samples_identical);
 
    nir_instr_remove(&intrin->instr);
    nir_instr_free(&intrin->instr);
@@ -206,8 +206,8 @@ lower_image_instr(nir_builder *b, nir_instr *instr, void *state)
    case nir_intrinsic_bindless_image_samples: {
       if (options->lower_image_samples_to_one) {
          b->cursor = nir_after_instr(&intrin->instr);
-         nir_ssa_def *samples = nir_imm_intN_t(b, 1, nir_dest_bit_size(intrin->dest));
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, samples);
+         nir_def *samples = nir_imm_intN_t(b, 1, nir_dest_bit_size(intrin->dest));
+         nir_def_rewrite_uses(&intrin->dest.ssa, samples);
          return true;
       }
       return false;
index 035b761..d469083 100644 (file)
@@ -64,7 +64,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *_)
    }
 
    /* Get the relevant texel address */
-   nir_ssa_def *address = nir_image_texel_address(
+   nir_def *address = nir_image_texel_address(
       b, 64, intr->src[0].ssa, intr->src[1].ssa, intr->src[2].ssa,
       .image_dim = nir_intrinsic_image_dim(intr),
       .image_array = nir_intrinsic_image_array(intr),
@@ -81,7 +81,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *_)
    }
 
    /* Build the global atomic */
-   nir_ssa_def *global;
+   nir_def *global;
    if (swap) {
       global = nir_global_atomic_swap(b, bit_size, address, intr->src[3].ssa,
                                       intr->src[4].ssa, .atomic_op = atomic_op);
@@ -93,7 +93,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *_)
    /* Replace the image atomic with the global atomic. Remove the image
     * explicitly because it has side effects so is not DCE'd.
     */
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, global);
+   nir_def_rewrite_uses(&intr->dest.ssa, global);
    nir_instr_remove(instr);
    return true;
 }
index 1c0a444..a5cbcc6 100644 (file)
@@ -29,14 +29,14 @@ static void
 emit_load_store_deref(nir_builder *b, nir_intrinsic_instr *orig_instr,
                       nir_deref_instr *parent,
                       nir_deref_instr **deref_arr,
-                      nir_ssa_def **dest, nir_ssa_def *src);
+                      nir_def **dest, nir_def *src);
 
 static void
 emit_indirect_load_store_deref(nir_builder *b, nir_intrinsic_instr *orig_instr,
                                nir_deref_instr *parent,
                                nir_deref_instr **deref_arr,
                                int start, int end,
-                               nir_ssa_def **dest, nir_ssa_def *src)
+                               nir_def **dest, nir_def *src)
 {
    assert(start < end);
    if (start == end - 1) {
@@ -46,7 +46,7 @@ emit_indirect_load_store_deref(nir_builder *b, nir_intrinsic_instr *orig_instr,
    } else {
       int mid = start + (end - start) / 2;
 
-      nir_ssa_def *then_dest, *else_dest;
+      nir_def *then_dest, *else_dest;
 
       nir_deref_instr *deref = *deref_arr;
       assert(deref->deref_type == nir_deref_type_array);
@@ -68,7 +68,7 @@ static void
 emit_load_store_deref(nir_builder *b, nir_intrinsic_instr *orig_instr,
                       nir_deref_instr *parent,
                       nir_deref_instr **deref_arr,
-                      nir_ssa_def **dest, nir_ssa_def *src)
+                      nir_def **dest, nir_def *src)
 {
    for (; *deref_arr; deref_arr++) {
       nir_deref_instr *deref = *deref_arr;
@@ -172,10 +172,10 @@ lower_indirect_derefs_block(nir_block *block, nir_builder *b,
          emit_load_store_deref(b, intrin, base, &path.path[1],
                                NULL, intrin->src[1].ssa);
       } else {
-         nir_ssa_def *result;
+         nir_def *result;
          emit_load_store_deref(b, intrin, base, &path.path[1],
                                &result, NULL);
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, result);
+         nir_def_rewrite_uses(&intrin->dest.ssa, result);
       }
 
       nir_deref_path_finish(&path);
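
For illustration, the if-ladder the recursion above emits for a dynamic index
into a hypothetical 4-element array:

   /* load tmp[i], i dynamic, tmp[4]:
    *    if (i < 2) {
    *       if (i < 1) x = tmp[0]; else x = tmp[1];
    *    } else {
    *       if (i < 3) x = tmp[2]; else x = tmp[3];
    *    }
    * Each level halves [start, end), so the nesting depth is
    * log2(array size). */
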
index 1ed521c..29cead4 100644 (file)
 #include "nir.h"
 #include "nir_builder.h"
 
-static nir_ssa_def *
+static nir_def *
 load_frag_coord(nir_builder *b, nir_deref_instr *deref,
                 const nir_input_attachment_options *options)
 {
    if (options->use_fragcoord_sysval) {
-      nir_ssa_def *frag_coord = nir_load_frag_coord(b);
+      nir_def *frag_coord = nir_load_frag_coord(b);
       if (options->unscaled_input_attachment_ir3) {
          nir_variable *var = nir_deref_instr_get_variable(deref);
          unsigned base = var->data.index;
-         nir_ssa_def *unscaled_frag_coord = nir_load_frag_coord_unscaled_ir3(b);
+         nir_def *unscaled_frag_coord = nir_load_frag_coord_unscaled_ir3(b);
          if (deref->deref_type == nir_deref_type_array) {
-            nir_ssa_def *unscaled =
+            nir_def *unscaled =
                nir_i2b(b, nir_iand(b, nir_ishr(b, nir_imm_int(b, options->unscaled_input_attachment_ir3 >> base), deref->arr.index.ssa),
                                    nir_imm_int(b, 1)));
             frag_coord = nir_bcsel(b, unscaled, unscaled_frag_coord, frag_coord);
@@ -63,7 +63,7 @@ load_frag_coord(nir_builder *b, nir_deref_instr *deref,
    return nir_load_var(b, pos);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_layer_id(nir_builder *b, const nir_input_attachment_options *options)
 {
    if (options->use_layer_id_sysval) {
@@ -97,13 +97,13 @@ try_lower_input_load(nir_builder *b, nir_intrinsic_instr *load,
 
    b->cursor = nir_instr_remove(&load->instr);
 
-   nir_ssa_def *frag_coord = load_frag_coord(b, deref, options);
+   nir_def *frag_coord = load_frag_coord(b, deref, options);
    frag_coord = nir_f2i32(b, frag_coord);
-   nir_ssa_def *offset = nir_ssa_for_src(b, load->src[1], 2);
-   nir_ssa_def *pos = nir_iadd(b, frag_coord, offset);
+   nir_def *offset = nir_ssa_for_src(b, load->src[1], 2);
+   nir_def *pos = nir_iadd(b, frag_coord, offset);
 
-   nir_ssa_def *layer = load_layer_id(b, options);
-   nir_ssa_def *coord =
+   nir_def *layer = load_layer_id(b, options);
+   nir_def *coord =
       nir_vec3(b, nir_channel(b, pos, 0), nir_channel(b, pos, 1), layer);
 
    nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3 + multisampled);
@@ -142,13 +142,13 @@ try_lower_input_load(nir_builder *b, nir_intrinsic_instr *load,
    if (tex->is_sparse) {
       unsigned load_result_size = load->dest.ssa.num_components - 1;
       nir_component_mask_t load_result_mask = nir_component_mask(load_result_size);
-      nir_ssa_def *res = nir_channels(
+      nir_def *res = nir_channels(
          b, &tex->dest.ssa, load_result_mask | 0x10);
 
-      nir_ssa_def_rewrite_uses(&load->dest.ssa, res);
+      nir_def_rewrite_uses(&load->dest.ssa, res);
    } else {
-      nir_ssa_def_rewrite_uses(&load->dest.ssa,
-                               &tex->dest.ssa);
+      nir_def_rewrite_uses(&load->dest.ssa,
+                           &tex->dest.ssa);
    }
 
    return true;
@@ -165,12 +165,12 @@ try_lower_input_texop(nir_builder *b, nir_tex_instr *tex,
 
    b->cursor = nir_before_instr(&tex->instr);
 
-   nir_ssa_def *frag_coord = load_frag_coord(b, deref, options);
+   nir_def *frag_coord = load_frag_coord(b, deref, options);
    frag_coord = nir_f2i32(b, frag_coord);
 
-   nir_ssa_def *layer = load_layer_id(b, options);
-   nir_ssa_def *coord = nir_vec3(b, nir_channel(b, frag_coord, 0),
-                                 nir_channel(b, frag_coord, 1), layer);
+   nir_def *layer = load_layer_id(b, options);
+   nir_def *coord = nir_vec3(b, nir_channel(b, frag_coord, 0),
+                             nir_channel(b, frag_coord, 1), layer);
 
    tex->coord_components = 3;
 
index d51d0c6..e86c4b9 100644 (file)
       ? lower_##name(b, __VA_ARGS__)                   \
       : nir_##name(b, __VA_ARGS__)
 
-static nir_ssa_def *
-lower_b2i64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_b2i64(nir_builder *b, nir_def *x)
 {
    return nir_pack_64_2x32_split(b, nir_b2i32(b, x), nir_imm_int(b, 0));
 }
 
-static nir_ssa_def *
-lower_i2i8(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_i2i8(nir_builder *b, nir_def *x)
 {
    return nir_i2i8(b, nir_unpack_64_2x32_split_x(b, x));
 }
 
-static nir_ssa_def *
-lower_i2i16(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_i2i16(nir_builder *b, nir_def *x)
 {
    return nir_i2i16(b, nir_unpack_64_2x32_split_x(b, x));
 }
 
-static nir_ssa_def *
-lower_i2i32(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_i2i32(nir_builder *b, nir_def *x)
 {
    return nir_unpack_64_2x32_split_x(b, x);
 }
 
-static nir_ssa_def *
-lower_i2i64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_i2i64(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *x32 = x->bit_size == 32 ? x : nir_i2i32(b, x);
+   nir_def *x32 = x->bit_size == 32 ? x : nir_i2i32(b, x);
    return nir_pack_64_2x32_split(b, x32, nir_ishr_imm(b, x32, 31));
 }
 
-static nir_ssa_def *
-lower_u2u8(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_u2u8(nir_builder *b, nir_def *x)
 {
    return nir_u2u8(b, nir_unpack_64_2x32_split_x(b, x));
 }
 
-static nir_ssa_def *
-lower_u2u16(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_u2u16(nir_builder *b, nir_def *x)
 {
    return nir_u2u16(b, nir_unpack_64_2x32_split_x(b, x));
 }
 
-static nir_ssa_def *
-lower_u2u32(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_u2u32(nir_builder *b, nir_def *x)
 {
    return nir_unpack_64_2x32_split_x(b, x);
 }
 
-static nir_ssa_def *
-lower_u2u64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_u2u64(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *x32 = x->bit_size == 32 ? x : nir_u2u32(b, x);
+   nir_def *x32 = x->bit_size == 32 ? x : nir_u2u32(b, x);
    return nir_pack_64_2x32_split(b, x32, nir_imm_int(b, 0));
 }
 
-static nir_ssa_def *
-lower_bcsel64(nir_builder *b, nir_ssa_def *cond, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_bcsel64(nir_builder *b, nir_def *cond, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
-   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
    return nir_pack_64_2x32_split(b, nir_bcsel(b, cond, x_lo, y_lo),
                                  nir_bcsel(b, cond, x_hi, y_hi));
 }
 
-static nir_ssa_def *
-lower_inot64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_inot64(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
 
    return nir_pack_64_2x32_split(b, nir_inot(b, x_lo), nir_inot(b, x_hi));
 }
 
-static nir_ssa_def *
-lower_iand64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_iand64(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
-   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
    return nir_pack_64_2x32_split(b, nir_iand(b, x_lo, y_lo),
                                  nir_iand(b, x_hi, y_hi));
 }
 
-static nir_ssa_def *
-lower_ior64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_ior64(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
-   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
    return nir_pack_64_2x32_split(b, nir_ior(b, x_lo, y_lo),
                                  nir_ior(b, x_hi, y_hi));
 }
 
-static nir_ssa_def *
-lower_ixor64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_ixor64(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
-   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
    return nir_pack_64_2x32_split(b, nir_ixor(b, x_lo, y_lo),
                                  nir_ixor(b, x_hi, y_hi));
 }
 
-static nir_ssa_def *
-lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_ishl64(nir_builder *b, nir_def *x, nir_def *y)
 {
    /* Implemented as
     *
@@ -179,19 +179,19 @@ lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
     *    }
     * }
     */
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
    y = nir_iand_imm(b, y, 0x3f);
 
-   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
-   nir_ssa_def *lo_shifted = nir_ishl(b, x_lo, y);
-   nir_ssa_def *hi_shifted = nir_ishl(b, x_hi, y);
-   nir_ssa_def *lo_shifted_hi = nir_ushr(b, x_lo, reverse_count);
+   nir_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
+   nir_def *lo_shifted = nir_ishl(b, x_lo, y);
+   nir_def *hi_shifted = nir_ishl(b, x_hi, y);
+   nir_def *lo_shifted_hi = nir_ushr(b, x_lo, reverse_count);
 
-   nir_ssa_def *res_if_lt_32 =
+   nir_def *res_if_lt_32 =
       nir_pack_64_2x32_split(b, lo_shifted,
                              nir_ior(b, hi_shifted, lo_shifted_hi));
-   nir_ssa_def *res_if_ge_32 =
+   nir_def *res_if_ge_32 =
       nir_pack_64_2x32_split(b, nir_imm_int(b, 0),
                              nir_ishl(b, x_lo, reverse_count));
 
@@ -200,8 +200,8 @@ lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
                               res_if_ge_32, res_if_lt_32));
 }
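
A scalar C model of the selection above, as a sketch; the NIR computes both
arms and bcsels on y >= 32, while this branches instead, and it guards the
y == 0 case whose 32-bit shift would be undefined in C:

   #include <stdint.h>

   static uint64_t ishl64_sketch(uint64_t x, uint32_t y)
   {
      uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);
      y &= 0x3f;
      if (y == 0)
         return x;
      uint32_t reverse_count = y >= 32 ? y - 32 : 32 - y;  /* iabs(y - 32) */
      if (y < 32) {
         uint32_t res_lo = lo << y;
         uint32_t res_hi = (hi << y) | (lo >> reverse_count);
         return ((uint64_t)res_hi << 32) | res_lo;
      }
      return (uint64_t)(lo << reverse_count) << 32;  /* low word becomes 0 */
   }
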
 
-static nir_ssa_def *
-lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_ishr64(nir_builder *b, nir_def *x, nir_def *y)
 {
    /* Implemented as
     *
@@ -226,19 +226,19 @@ lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
     *    }
     * }
     */
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
    y = nir_iand_imm(b, y, 0x3f);
 
-   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
-   nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
-   nir_ssa_def *hi_shifted = nir_ishr(b, x_hi, y);
-   nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
+   nir_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
+   nir_def *lo_shifted = nir_ushr(b, x_lo, y);
+   nir_def *hi_shifted = nir_ishr(b, x_hi, y);
+   nir_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
 
-   nir_ssa_def *res_if_lt_32 =
+   nir_def *res_if_lt_32 =
       nir_pack_64_2x32_split(b, nir_ior(b, lo_shifted, hi_shifted_lo),
                              hi_shifted);
-   nir_ssa_def *res_if_ge_32 =
+   nir_def *res_if_ge_32 =
       nir_pack_64_2x32_split(b, nir_ishr(b, x_hi, reverse_count),
                              nir_ishr_imm(b, x_hi, 31));
 
@@ -247,8 +247,8 @@ lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
                               res_if_ge_32, res_if_lt_32));
 }
 
-static nir_ssa_def *
-lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_ushr64(nir_builder *b, nir_def *x, nir_def *y)
 {
    /* Implemented as
     *
@@ -272,19 +272,19 @@ lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
     * }
     */
 
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
    y = nir_iand_imm(b, y, 0x3f);
 
-   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
-   nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
-   nir_ssa_def *hi_shifted = nir_ushr(b, x_hi, y);
-   nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
+   nir_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
+   nir_def *lo_shifted = nir_ushr(b, x_lo, y);
+   nir_def *hi_shifted = nir_ushr(b, x_hi, y);
+   nir_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
 
-   nir_ssa_def *res_if_lt_32 =
+   nir_def *res_if_lt_32 =
       nir_pack_64_2x32_split(b, nir_ior(b, lo_shifted, hi_shifted_lo),
                              hi_shifted);
-   nir_ssa_def *res_if_ge_32 =
+   nir_def *res_if_ge_32 =
       nir_pack_64_2x32_split(b, nir_ushr(b, x_hi, reverse_count),
                              nir_imm_int(b, 0));
 
@@ -293,38 +293,38 @@ lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
                               res_if_ge_32, res_if_lt_32));
 }
 
-static nir_ssa_def *
-lower_iadd64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_iadd64(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
-   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
-   nir_ssa_def *res_lo = nir_iadd(b, x_lo, y_lo);
-   nir_ssa_def *carry = nir_b2i32(b, nir_ult(b, res_lo, x_lo));
-   nir_ssa_def *res_hi = nir_iadd(b, carry, nir_iadd(b, x_hi, y_hi));
+   nir_def *res_lo = nir_iadd(b, x_lo, y_lo);
+   nir_def *carry = nir_b2i32(b, nir_ult(b, res_lo, x_lo));
+   nir_def *res_hi = nir_iadd(b, carry, nir_iadd(b, x_hi, y_hi));
 
    return nir_pack_64_2x32_split(b, res_lo, res_hi);
 }
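
The carry trick above in scalar C, for reference: unsigned wraparound makes
res_lo < x_lo exactly the carry-out of the low 32-bit add.

   #include <stdint.h>

   static uint64_t iadd64_sketch(uint64_t x, uint64_t y)
   {
      uint32_t x_lo = (uint32_t)x, x_hi = (uint32_t)(x >> 32);
      uint32_t y_lo = (uint32_t)y, y_hi = (uint32_t)(y >> 32);
      uint32_t res_lo = x_lo + y_lo;
      uint32_t carry  = res_lo < x_lo;   /* b2i32(ult(res_lo, x_lo)) */
      uint32_t res_hi = x_hi + y_hi + carry;
      return ((uint64_t)res_hi << 32) | res_lo;
   }
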
 
-static nir_ssa_def *
-lower_isub64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_isub64(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
-   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
-   nir_ssa_def *res_lo = nir_isub(b, x_lo, y_lo);
-   nir_ssa_def *borrow = nir_ineg(b, nir_b2i32(b, nir_ult(b, x_lo, y_lo)));
-   nir_ssa_def *res_hi = nir_iadd(b, nir_isub(b, x_hi, y_hi), borrow);
+   nir_def *res_lo = nir_isub(b, x_lo, y_lo);
+   nir_def *borrow = nir_ineg(b, nir_b2i32(b, nir_ult(b, x_lo, y_lo)));
+   nir_def *res_hi = nir_iadd(b, nir_isub(b, x_hi, y_hi), borrow);
 
    return nir_pack_64_2x32_split(b, res_lo, res_hi);
 }
 
-static nir_ssa_def *
-lower_ineg64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_ineg64(nir_builder *b, nir_def *x)
 {
    /* Since isub is the same number of instructions (with better dependencies)
     * as iadd, subtraction is actually more efficient for ineg than the usual
@@ -333,21 +333,21 @@ lower_ineg64(nir_builder *b, nir_ssa_def *x)
    return lower_isub64(b, nir_imm_int64(b, 0), x);
 }
 
-static nir_ssa_def *
-lower_iabs64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_iabs64(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *x_is_neg = nir_ilt_imm(b, x_hi, 0);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *x_is_neg = nir_ilt_imm(b, x_hi, 0);
    return nir_bcsel(b, x_is_neg, nir_ineg(b, x), x);
 }
 
-static nir_ssa_def *
-lower_int64_compare(nir_builder *b, nir_op op, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_int64_compare(nir_builder *b, nir_op op, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
-   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
    switch (op) {
    case nir_op_ieq:
@@ -374,62 +374,62 @@ lower_int64_compare(nir_builder *b, nir_op op, nir_ssa_def *x, nir_ssa_def *y)
    }
 }
 
-static nir_ssa_def *
-lower_umax64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_umax64(nir_builder *b, nir_def *x, nir_def *y)
 {
    return nir_bcsel(b, lower_int64_compare(b, nir_op_ult, x, y), y, x);
 }
 
-static nir_ssa_def *
-lower_imax64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_imax64(nir_builder *b, nir_def *x, nir_def *y)
 {
    return nir_bcsel(b, lower_int64_compare(b, nir_op_ilt, x, y), y, x);
 }
 
-static nir_ssa_def *
-lower_umin64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_umin64(nir_builder *b, nir_def *x, nir_def *y)
 {
    return nir_bcsel(b, lower_int64_compare(b, nir_op_ult, x, y), x, y);
 }
 
-static nir_ssa_def *
-lower_imin64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_imin64(nir_builder *b, nir_def *x, nir_def *y)
 {
    return nir_bcsel(b, lower_int64_compare(b, nir_op_ilt, x, y), x, y);
 }
 
-static nir_ssa_def *
-lower_mul_2x32_64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
+static nir_def *
+lower_mul_2x32_64(nir_builder *b, nir_def *x, nir_def *y,
                   bool sign_extend)
 {
-   nir_ssa_def *res_hi = sign_extend ? nir_imul_high(b, x, y)
-                                     : nir_umul_high(b, x, y);
+   nir_def *res_hi = sign_extend ? nir_imul_high(b, x, y)
+                                 : nir_umul_high(b, x, y);
 
    return nir_pack_64_2x32_split(b, nir_imul(b, x, y), res_hi);
 }
 
-static nir_ssa_def *
-lower_imul64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+static nir_def *
+lower_imul64(nir_builder *b, nir_def *x, nir_def *y)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
-   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
-   nir_ssa_def *mul_lo = nir_umul_2x32_64(b, x_lo, y_lo);
-   nir_ssa_def *res_hi = nir_iadd(b, nir_unpack_64_2x32_split_y(b, mul_lo),
-                                  nir_iadd(b, nir_imul(b, x_lo, y_hi),
-                                           nir_imul(b, x_hi, y_lo)));
+   nir_def *mul_lo = nir_umul_2x32_64(b, x_lo, y_lo);
+   nir_def *res_hi = nir_iadd(b, nir_unpack_64_2x32_split_y(b, mul_lo),
+                              nir_iadd(b, nir_imul(b, x_lo, y_hi),
+                                       nir_imul(b, x_hi, y_lo)));
 
    return nir_pack_64_2x32_split(b, nir_unpack_64_2x32_split_x(b, mul_lo),
                                  res_hi);
 }
 
-static nir_ssa_def *
-lower_mul_high64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
+static nir_def *
+lower_mul_high64(nir_builder *b, nir_def *x, nir_def *y,
                  bool sign_extend)
 {
-   nir_ssa_def *x32[4], *y32[4];
+   nir_def *x32[4], *y32[4];
    x32[0] = nir_unpack_64_2x32_split_x(b, x);
    x32[1] = nir_unpack_64_2x32_split_y(b, x);
    if (sign_extend) {
@@ -446,7 +446,7 @@ lower_mul_high64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
       y32[2] = y32[3] = nir_imm_int(b, 0);
    }
 
-   nir_ssa_def *res[8] = {
+   nir_def *res[8] = {
       NULL,
    };
 
@@ -456,7 +456,7 @@ lower_mul_high64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
     * this up nicely.
     */
    for (unsigned i = 0; i < 4; i++) {
-      nir_ssa_def *carry = NULL;
+      nir_def *carry = NULL;
       for (unsigned j = 0; j < 4; j++) {
          /* The maximum values of x32[i] and y32[j] are UINT32_MAX so the
           * maximum value of tmp is UINT32_MAX * UINT32_MAX.  The maximum
@@ -469,7 +469,7 @@ lower_mul_high64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
           * so we're guaranteed that we can add in two more 32-bit values
           * without overflowing tmp.
           */
-         nir_ssa_def *tmp = nir_umul_2x32_64(b, x32[i], y32[j]);
+         nir_def *tmp = nir_umul_2x32_64(b, x32[i], y32[j]);
 
          if (res[i + j])
             tmp = nir_iadd(b, tmp, nir_u2u64(b, res[i + j]));
@@ -484,44 +484,44 @@ lower_mul_high64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
    return nir_pack_64_2x32_split(b, res[2], res[3]);
 }
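
The loop above is ordinary long multiplication in base 2^32; schematically:

   /* x = x32[1]:x32[0], y = y32[1]:y32[0], sign- or zero-extended into
    * x32[2..3] / y32[2..3]. Each x32[i] * y32[j] is a 64-bit partial
    * product added into column i + j, whose high half carries into
    * column i + j + 1; the high 64 bits of the product are res[3]:res[2]. */
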
 
-static nir_ssa_def *
-lower_isign64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_isign64(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
 
-   nir_ssa_def *is_non_zero = nir_i2b(b, nir_ior(b, x_lo, x_hi));
-   nir_ssa_def *res_hi = nir_ishr_imm(b, x_hi, 31);
-   nir_ssa_def *res_lo = nir_ior(b, res_hi, nir_b2i32(b, is_non_zero));
+   nir_def *is_non_zero = nir_i2b(b, nir_ior(b, x_lo, x_hi));
+   nir_def *res_hi = nir_ishr_imm(b, x_hi, 31);
+   nir_def *res_lo = nir_ior(b, res_hi, nir_b2i32(b, is_non_zero));
 
    return nir_pack_64_2x32_split(b, res_lo, res_hi);
 }
 
 static void
-lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
-                   nir_ssa_def **q, nir_ssa_def **r)
+lower_udiv64_mod64(nir_builder *b, nir_def *n, nir_def *d,
+                   nir_def **q, nir_def **r)
 {
    /* TODO: We should specially handle the case where the denominator is a
     * constant.  In that case, we should be able to reduce it to a multiply by
     * a constant, some shifts, and an add.
     */
-   nir_ssa_def *n_lo = nir_unpack_64_2x32_split_x(b, n);
-   nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
-   nir_ssa_def *d_lo = nir_unpack_64_2x32_split_x(b, d);
-   nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
+   nir_def *n_lo = nir_unpack_64_2x32_split_x(b, n);
+   nir_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
+   nir_def *d_lo = nir_unpack_64_2x32_split_x(b, d);
+   nir_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
 
-   nir_ssa_def *q_lo = nir_imm_zero(b, n->num_components, 32);
-   nir_ssa_def *q_hi = nir_imm_zero(b, n->num_components, 32);
+   nir_def *q_lo = nir_imm_zero(b, n->num_components, 32);
+   nir_def *q_hi = nir_imm_zero(b, n->num_components, 32);
 
-   nir_ssa_def *n_hi_before_if = n_hi;
-   nir_ssa_def *q_hi_before_if = q_hi;
+   nir_def *n_hi_before_if = n_hi;
+   nir_def *q_hi_before_if = q_hi;
 
    /* If the upper 32 bits of denom are non-zero, it is impossible for shifts
     * greater than 32 bits to occur.  If the upper 32 bits of the numerator
     * are zero, it is impossible for (denom << [63, 32]) <= numer unless
     * denom == 0.
     */
-   nir_ssa_def *need_high_div =
+   nir_def *need_high_div =
       nir_iand(b, nir_ieq_imm(b, d_hi, 0), nir_uge(b, n_hi, d_lo));
    nir_push_if(b, nir_bany(b, need_high_div));
    {
@@ -531,7 +531,7 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
       if (n->num_components == 1)
          need_high_div = nir_imm_true(b);
 
-      nir_ssa_def *log2_d_lo = nir_ufind_msb(b, d_lo);
+      nir_def *log2_d_lo = nir_ufind_msb(b, d_lo);
 
       for (int i = 31; i >= 0; i--) {
          /* if ((d.x << i) <= n.y) {
@@ -539,11 +539,11 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
           *    quot.y |= 1U << i;
           * }
           */
-         nir_ssa_def *d_shift = nir_ishl_imm(b, d_lo, i);
-         nir_ssa_def *new_n_hi = nir_isub(b, n_hi, d_shift);
-         nir_ssa_def *new_q_hi = nir_ior_imm(b, q_hi, 1ull << i);
-         nir_ssa_def *cond = nir_iand(b, need_high_div,
-                                      nir_uge(b, n_hi, d_shift));
+         nir_def *d_shift = nir_ishl_imm(b, d_lo, i);
+         nir_def *new_n_hi = nir_isub(b, n_hi, d_shift);
+         nir_def *new_q_hi = nir_ior_imm(b, q_hi, 1ull << i);
+         nir_def *cond = nir_iand(b, need_high_div,
+                                  nir_uge(b, n_hi, d_shift));
          if (i != 0) {
             /* log2_d_lo is always <= 31, so we don't need to bother with it
              * in the last iteration.
@@ -559,7 +559,7 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
    n_hi = nir_if_phi(b, n_hi, n_hi_before_if);
    q_hi = nir_if_phi(b, q_hi, q_hi_before_if);
 
-   nir_ssa_def *log2_denom = nir_ufind_msb(b, d_hi);
+   nir_def *log2_denom = nir_ufind_msb(b, d_hi);
 
    n = nir_pack_64_2x32_split(b, n_lo, n_hi);
    d = nir_pack_64_2x32_split(b, d_lo, d_hi);
@@ -569,10 +569,10 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
        *    quot.x |= 1U << i;
        * }
        */
-      nir_ssa_def *d_shift = nir_ishl_imm(b, d, i);
-      nir_ssa_def *new_n = nir_isub(b, n, d_shift);
-      nir_ssa_def *new_q_lo = nir_ior_imm(b, q_lo, 1ull << i);
-      nir_ssa_def *cond = nir_uge(b, n, d_shift);
+      nir_def *d_shift = nir_ishl_imm(b, d, i);
+      nir_def *new_n = nir_isub(b, n, d_shift);
+      nir_def *new_q_lo = nir_ior_imm(b, q_lo, 1ull << i);
+      nir_def *cond = nir_uge(b, n, d_shift);
       if (i != 0) {
          /* log2_denom is always <= 31, so we don't need to bother with it
           * in the last iteration.
@@ -588,66 +588,66 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
    *r = n;
 }
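
Both loops above are classic restoring (shift-and-subtract) division;
conceptually:

   /* for (i = 31; i >= 0; i--)   (high words first, then the full value)
    *    if ((d << i) <= n) { n -= d << i; q |= 1u << i; }
    * The ufind_msb bounds mask off iterations where d << i would push set
    * denominator bits past the top, since the wrapped comparison would
    * otherwise be meaningless. */
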
 
-static nir_ssa_def *
-lower_udiv64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
+static nir_def *
+lower_udiv64(nir_builder *b, nir_def *n, nir_def *d)
 {
-   nir_ssa_def *q, *r;
+   nir_def *q, *r;
    lower_udiv64_mod64(b, n, d, &q, &r);
    return q;
 }
 
-static nir_ssa_def *
-lower_idiv64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
+static nir_def *
+lower_idiv64(nir_builder *b, nir_def *n, nir_def *d)
 {
-   nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
-   nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
+   nir_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
+   nir_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
 
-   nir_ssa_def *negate = nir_ine(b, nir_ilt_imm(b, n_hi, 0),
-                                 nir_ilt_imm(b, d_hi, 0));
-   nir_ssa_def *q, *r;
+   nir_def *negate = nir_ine(b, nir_ilt_imm(b, n_hi, 0),
+                             nir_ilt_imm(b, d_hi, 0));
+   nir_def *q, *r;
    lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r);
    return nir_bcsel(b, negate, nir_ineg(b, q), q);
 }
 
-static nir_ssa_def *
-lower_umod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
+static nir_def *
+lower_umod64(nir_builder *b, nir_def *n, nir_def *d)
 {
-   nir_ssa_def *q, *r;
+   nir_def *q, *r;
    lower_udiv64_mod64(b, n, d, &q, &r);
    return r;
 }
 
-static nir_ssa_def *
-lower_imod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
+static nir_def *
+lower_imod64(nir_builder *b, nir_def *n, nir_def *d)
 {
-   nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
-   nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
-   nir_ssa_def *n_is_neg = nir_ilt_imm(b, n_hi, 0);
-   nir_ssa_def *d_is_neg = nir_ilt_imm(b, d_hi, 0);
+   nir_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
+   nir_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
+   nir_def *n_is_neg = nir_ilt_imm(b, n_hi, 0);
+   nir_def *d_is_neg = nir_ilt_imm(b, d_hi, 0);
 
-   nir_ssa_def *q, *r;
+   nir_def *q, *r;
    lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r);
 
-   nir_ssa_def *rem = nir_bcsel(b, n_is_neg, nir_ineg(b, r), r);
+   nir_def *rem = nir_bcsel(b, n_is_neg, nir_ineg(b, r), r);
 
    return nir_bcsel(b, nir_ieq_imm(b, r, 0), nir_imm_int64(b, 0),
                     nir_bcsel(b, nir_ieq(b, n_is_neg, d_is_neg), rem,
                               nir_iadd(b, rem, d)));
 }
 
-static nir_ssa_def *
-lower_irem64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
+static nir_def *
+lower_irem64(nir_builder *b, nir_def *n, nir_def *d)
 {
-   nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
-   nir_ssa_def *n_is_neg = nir_ilt_imm(b, n_hi, 0);
+   nir_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
+   nir_def *n_is_neg = nir_ilt_imm(b, n_hi, 0);
 
-   nir_ssa_def *q, *r;
+   nir_def *q, *r;
    lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r);
    return nir_bcsel(b, n_is_neg, nir_ineg(b, r), r);
 }
 
-static nir_ssa_def *
-lower_extract(nir_builder *b, nir_op op, nir_ssa_def *x, nir_ssa_def *c)
+static nir_def *
+lower_extract(nir_builder *b, nir_op op, nir_def *x, nir_def *c)
 {
    assert(op == nir_op_extract_u8 || op == nir_op_extract_i8 ||
           op == nir_op_extract_u16 || op == nir_op_extract_i16);
@@ -657,7 +657,7 @@ lower_extract(nir_builder *b, nir_op op, nir_ssa_def *x, nir_ssa_def *c)
       (op == nir_op_extract_u8 || op == nir_op_extract_i8) ? 8 : 16;
    const int num_chunks_in_32 = 32 / chunk_bits;
 
-   nir_ssa_def *extract32;
+   nir_def *extract32;
    if (chunk < num_chunks_in_32) {
       extract32 = nir_build_alu(b, op, nir_unpack_64_2x32_split_x(b, x),
                                 nir_imm_int(b, chunk),
@@ -674,25 +674,25 @@ lower_extract(nir_builder *b, nir_op op, nir_ssa_def *x, nir_ssa_def *c)
       return lower_u2u64(b, extract32);
 }
 
-static nir_ssa_def *
-lower_ufind_msb64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_ufind_msb64(nir_builder *b, nir_def *x)
 {
 
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *lo_count = nir_ufind_msb(b, x_lo);
-   nir_ssa_def *hi_count = nir_ufind_msb(b, x_hi);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *lo_count = nir_ufind_msb(b, x_lo);
+   nir_def *hi_count = nir_ufind_msb(b, x_hi);
 
    if (b->shader->options->lower_uadd_sat) {
-      nir_ssa_def *valid_hi_bits = nir_ine_imm(b, x_hi, 0);
-      nir_ssa_def *hi_res = nir_iadd_imm(b, hi_count, 32);
+      nir_def *valid_hi_bits = nir_ine_imm(b, x_hi, 0);
+      nir_def *hi_res = nir_iadd_imm(b, hi_count, 32);
       return nir_bcsel(b, valid_hi_bits, hi_res, lo_count);
    } else {
       /* If hi_count was -1, it will still be -1 after this uadd_sat. As a
        * result, hi_count is either -1 or the correct return value for 64-bit
        * ufind_msb.
        */
-      nir_ssa_def *hi_res = nir_uadd_sat(b, nir_imm_intN_t(b, 32, 32), hi_count);
+      nir_def *hi_res = nir_uadd_sat(b, nir_imm_intN_t(b, 32, 32), hi_count);
 
       /* hi_res is either -1 or a value in the range [63, 32]. lo_count is
        * either -1 or a value in the range [31, 0]. The imax will pick
@@ -703,13 +703,13 @@ lower_ufind_msb64(nir_builder *b, nir_ssa_def *x)
    }
 }
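
Two worked cases for the uadd_sat path above:

   /* x = 0x0000000200000001: x_hi = 2 so hi_count = 1 and
    * hi_res = uadd_sat(32, 1) = 33; lo_count = 0; imax(33, 0) = 33,
    * the position of the highest set bit. For x = 0 both counts are -1:
    * uadd_sat(32, 0xffffffff) saturates to 0xffffffff (-1), and
    * imax(-1, -1) = -1, the "not found" value. */
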
 
-static nir_ssa_def *
-lower_find_lsb64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_find_lsb64(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *lo_lsb = nir_find_lsb(b, x_lo);
-   nir_ssa_def *hi_lsb = nir_find_lsb(b, x_hi);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *lo_lsb = nir_find_lsb(b, x_lo);
+   nir_def *hi_lsb = nir_find_lsb(b, x_hi);
 
    /* Use umin so that -1 (no bits found) becomes larger (0xFFFFFFFF)
     * than any actual bit position, so we return a found bit instead.
@@ -717,11 +717,11 @@ lower_find_lsb64(nir_builder *b, nir_ssa_def *x)
    return nir_umin(b, lo_lsb, nir_iadd_imm(b, hi_lsb, 32));
 }
 
-static nir_ssa_def *
-lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
+static nir_def *
+lower_2f(nir_builder *b, nir_def *x, unsigned dest_bit_size,
          bool src_is_signed)
 {
-   nir_ssa_def *x_sign = NULL;
+   nir_def *x_sign = NULL;
 
    if (src_is_signed) {
       x_sign = nir_bcsel(b, COND_LOWER_CMP(b, ilt, x, nir_imm_int64(b, 0)),
@@ -730,7 +730,7 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
       x = COND_LOWER_OP(b, iabs, x);
    }
 
-   nir_ssa_def *exp = COND_LOWER_OP(b, ufind_msb, x);
+   nir_def *exp = COND_LOWER_OP(b, ufind_msb, x);
    unsigned significand_bits;
 
    switch (dest_bit_size) {
@@ -747,10 +747,10 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
       unreachable("Invalid dest_bit_size");
    }
 
-   nir_ssa_def *discard =
+   nir_def *discard =
       nir_imax(b, nir_iadd_imm(b, exp, -significand_bits),
                nir_imm_int(b, 0));
-   nir_ssa_def *significand = COND_LOWER_OP(b, ushr, x, discard);
+   nir_def *significand = COND_LOWER_OP(b, ushr, x, discard);
    if (significand_bits < 32)
       significand = COND_LOWER_CAST(b, u2u32, significand);
 
@@ -762,29 +762,29 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
     *   significand is odd, we round-up
     * - in any other case, we round-down
     */
-   nir_ssa_def *lsb_mask = COND_LOWER_OP(b, ishl, nir_imm_int64(b, 1), discard);
-   nir_ssa_def *rem_mask = COND_LOWER_OP(b, isub, lsb_mask, nir_imm_int64(b, 1));
-   nir_ssa_def *half = COND_LOWER_OP(b, ishr, lsb_mask, nir_imm_int(b, 1));
-   nir_ssa_def *rem = COND_LOWER_OP(b, iand, x, rem_mask);
-   nir_ssa_def *halfway = nir_iand(b, COND_LOWER_CMP(b, ieq, rem, half),
-                                   nir_ine_imm(b, discard, 0));
-   nir_ssa_def *is_odd = COND_LOWER_CMP(b, ine, nir_imm_int64(b, 0),
-                                        COND_LOWER_OP(b, iand, x, lsb_mask));
-   nir_ssa_def *round_up = nir_ior(b, COND_LOWER_CMP(b, ilt, half, rem),
-                                   nir_iand(b, halfway, is_odd));
+   nir_def *lsb_mask = COND_LOWER_OP(b, ishl, nir_imm_int64(b, 1), discard);
+   nir_def *rem_mask = COND_LOWER_OP(b, isub, lsb_mask, nir_imm_int64(b, 1));
+   nir_def *half = COND_LOWER_OP(b, ishr, lsb_mask, nir_imm_int(b, 1));
+   nir_def *rem = COND_LOWER_OP(b, iand, x, rem_mask);
+   nir_def *halfway = nir_iand(b, COND_LOWER_CMP(b, ieq, rem, half),
+                               nir_ine_imm(b, discard, 0));
+   nir_def *is_odd = COND_LOWER_CMP(b, ine, nir_imm_int64(b, 0),
+                                    COND_LOWER_OP(b, iand, x, lsb_mask));
+   nir_def *round_up = nir_ior(b, COND_LOWER_CMP(b, ilt, half, rem),
+                               nir_iand(b, halfway, is_odd));
    if (significand_bits >= 32)
       significand = COND_LOWER_OP(b, iadd, significand,
                                   COND_LOWER_CAST(b, b2i64, round_up));
    else
       significand = nir_iadd(b, significand, nir_b2i32(b, round_up));
 
-   nir_ssa_def *res;
+   nir_def *res;
 
    if (dest_bit_size == 64) {
       /* Compute the left shift required to normalize the original
        * unrounded input manually.
        */
-      nir_ssa_def *shift =
+      nir_def *shift =
          nir_imax(b, nir_isub_imm(b, significand_bits, exp),
                   nir_imm_int(b, 0));
       significand = COND_LOWER_OP(b, ishl, significand, shift);
@@ -797,7 +797,7 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
        * of the significand is guaranteed to be zero if there was
        * overflow.
        */
-      nir_ssa_def *carry = nir_b2i32(
+      nir_def *carry = nir_b2i32(
          b, nir_uge_imm(b, nir_unpack_64_2x32_split_y(b, significand),
                         (uint64_t)(1 << (significand_bits - 31))));
       significand = COND_LOWER_OP(b, ishr, significand, carry);
@@ -806,13 +806,13 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
       /* Compute the biased exponent, taking care to handle a zero
        * input correctly, which would have caused exp to be negative.
        */
-      nir_ssa_def *biased_exp = nir_bcsel(b, nir_ilt_imm(b, exp, 0),
-                                          nir_imm_int(b, 0),
-                                          nir_iadd_imm(b, exp, 1023));
+      nir_def *biased_exp = nir_bcsel(b, nir_ilt_imm(b, exp, 0),
+                                      nir_imm_int(b, 0),
+                                      nir_iadd_imm(b, exp, 1023));
 
       /* Pack the significand and exponent manually. */
-      nir_ssa_def *lo = nir_unpack_64_2x32_split_x(b, significand);
-      nir_ssa_def *hi = nir_bitfield_insert(
+      nir_def *lo = nir_unpack_64_2x32_split_x(b, significand);
+      nir_def *hi = nir_bitfield_insert(
          b, nir_unpack_64_2x32_split_y(b, significand),
          biased_exp, nir_imm_int(b, 20), nir_imm_int(b, 11));
 
@@ -832,11 +832,11 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
    return res;
 }
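
   For reference, the round-to-nearest-even logic above maps onto the
   following standalone scalar sketch (hypothetical function, not part of
   this change; it models only the rounding of the discarded low bits, not
   the exponent handling or packing):

      #include <stdint.h>
      #include <stdbool.h>

      static uint64_t round_rtne(uint64_t x, unsigned discard)
      {
         uint64_t significand = x >> discard;
         if (discard == 0)
            return significand;        /* nothing dropped, nothing to round */
         uint64_t lsb_mask = (uint64_t)1 << discard; /* lowest kept bit of x */
         uint64_t rem_mask = lsb_mask - 1;           /* the discarded bits */
         uint64_t half = lsb_mask >> 1;
         uint64_t rem = x & rem_mask;
         bool halfway = rem == half;
         bool is_odd = (x & lsb_mask) != 0;          /* significand's LSB */
         bool round_up = (half < rem) || (halfway && is_odd);
         return significand + (round_up ? 1u : 0u);
      }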
 
-static nir_ssa_def *
-lower_f2(nir_builder *b, nir_ssa_def *x, bool dst_is_signed)
+static nir_def *
+lower_f2(nir_builder *b, nir_def *x, bool dst_is_signed)
 {
    assert(x->bit_size == 16 || x->bit_size == 32 || x->bit_size == 64);
-   nir_ssa_def *x_sign = NULL;
+   nir_def *x_sign = NULL;
 
    if (dst_is_signed)
       x_sign = nir_fsign(b, x);
@@ -846,13 +846,13 @@ lower_f2(nir_builder *b, nir_ssa_def *x, bool dst_is_signed)
    if (dst_is_signed)
       x = nir_fabs(b, x);
 
-   nir_ssa_def *res;
+   nir_def *res;
    if (x->bit_size < 32) {
       res = nir_pack_64_2x32_split(b, nir_f2u32(b, x), nir_imm_int(b, 0));
    } else {
-      nir_ssa_def *div = nir_imm_floatN_t(b, 1ULL << 32, x->bit_size);
-      nir_ssa_def *res_hi = nir_f2u32(b, nir_fdiv(b, x, div));
-      nir_ssa_def *res_lo = nir_f2u32(b, nir_frem(b, x, div));
+      nir_def *div = nir_imm_floatN_t(b, 1ULL << 32, x->bit_size);
+      nir_def *res_hi = nir_f2u32(b, nir_fdiv(b, x, div));
+      nir_def *res_lo = nir_f2u32(b, nir_frem(b, x, div));
       res = nir_pack_64_2x32_split(b, res_lo, res_hi);
    }
 
@@ -863,13 +863,13 @@ lower_f2(nir_builder *b, nir_ssa_def *x, bool dst_is_signed)
    return res;
 }
 
-static nir_ssa_def *
-lower_bit_count64(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_bit_count64(nir_builder *b, nir_def *x)
 {
-   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
-   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
-   nir_ssa_def *lo_count = nir_bit_count(b, x_lo);
-   nir_ssa_def *hi_count = nir_bit_count(b, x_hi);
+   nir_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_def *lo_count = nir_bit_count(b, x_lo);
+   nir_def *hi_count = nir_bit_count(b, x_hi);
    return nir_iadd(b, lo_count, hi_count);
 }
 
@@ -958,10 +958,10 @@ nir_lower_int64_op_to_options_mask(nir_op opcode)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_int64_alu_instr(nir_builder *b, nir_alu_instr *alu)
 {
-   nir_ssa_def *src[4];
+   nir_def *src[4];
    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
       src[i] = nir_ssa_for_alu_src(b, alu, i);
 
@@ -1138,7 +1138,7 @@ should_lower_int64_alu_instr(const nir_alu_instr *alu,
    return (options->lower_int64_options & mask) != 0;
 }
 
-static nir_ssa_def *
+static nir_def *
 split_64bit_subgroup_op(nir_builder *b, const nir_intrinsic_instr *intrin)
 {
    const nir_intrinsic_info *info = &nir_intrinsic_infos[intrin->intrinsic];
@@ -1147,14 +1147,14 @@ split_64bit_subgroup_op(nir_builder *b, const nir_intrinsic_instr *intrin)
     * trivially lowered by doing the exact same op on both halves.
     */
    assert(nir_src_bit_size(intrin->src[0]) == 64);
-   nir_ssa_def *split_src0[2] = {
+   nir_def *split_src0[2] = {
       nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa),
       nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa),
    };
 
    assert(info->has_dest && intrin->dest.ssa.bit_size == 64);
 
-   nir_ssa_def *res[2];
+   nir_def *res[2];
    for (unsigned i = 0; i < 2; i++) {
       nir_intrinsic_instr *split =
          nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
@@ -1181,8 +1181,8 @@ split_64bit_subgroup_op(nir_builder *b, const nir_intrinsic_instr *intrin)
    return nir_pack_64_2x32_split(b, res[0], res[1]);
 }
 
-static nir_ssa_def *
-build_vote_ieq(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+build_vote_ieq(nir_builder *b, nir_def *x)
 {
    nir_intrinsic_instr *vote =
       nir_intrinsic_instr_create(b->shader, nir_intrinsic_vote_ieq);
@@ -1193,17 +1193,17 @@ build_vote_ieq(nir_builder *b, nir_ssa_def *x)
    return &vote->dest.ssa;
 }
 
-static nir_ssa_def *
-lower_vote_ieq(nir_builder *b, nir_ssa_def *x)
+static nir_def *
+lower_vote_ieq(nir_builder *b, nir_def *x)
 {
    return nir_iand(b, build_vote_ieq(b, nir_unpack_64_2x32_split_x(b, x)),
                    build_vote_ieq(b, nir_unpack_64_2x32_split_y(b, x)));
 }
 
-static nir_ssa_def *
+static nir_def *
 build_scan_intrinsic(nir_builder *b, nir_intrinsic_op scan_op,
                      nir_op reduction_op, unsigned cluster_size,
-                     nir_ssa_def *val)
+                     nir_def *val)
 {
    nir_intrinsic_instr *scan =
       nir_intrinsic_instr_create(b->shader, scan_op);
@@ -1218,7 +1218,7 @@ build_scan_intrinsic(nir_builder *b, nir_intrinsic_op scan_op,
    return &scan->dest.ssa;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_scan_iadd64(nir_builder *b, const nir_intrinsic_instr *intrin)
 {
    unsigned cluster_size =
@@ -1230,22 +1230,22 @@ lower_scan_iadd64(nir_builder *b, const nir_intrinsic_instr *intrin)
     * no larger than 256, which seems reasonable.)  We can then scan on each of
     * the chunks and add them back together at the end.
     */
-   nir_ssa_def *x = intrin->src[0].ssa;
-   nir_ssa_def *x_low =
+   nir_def *x = intrin->src[0].ssa;
+   nir_def *x_low =
       nir_u2u32(b, nir_iand_imm(b, x, 0xffffff));
-   nir_ssa_def *x_mid =
+   nir_def *x_mid =
       nir_u2u32(b, nir_iand_imm(b, nir_ushr_imm(b, x, 24),
                                 0xffffff));
-   nir_ssa_def *x_hi =
+   nir_def *x_hi =
       nir_u2u32(b, nir_ushr_imm(b, x, 48));
 
-   nir_ssa_def *scan_low =
+   nir_def *scan_low =
       build_scan_intrinsic(b, intrin->intrinsic, nir_op_iadd,
                            cluster_size, x_low);
-   nir_ssa_def *scan_mid =
+   nir_def *scan_mid =
       build_scan_intrinsic(b, intrin->intrinsic, nir_op_iadd,
                            cluster_size, x_mid);
-   nir_ssa_def *scan_hi =
+   nir_def *scan_hi =
       build_scan_intrinsic(b, intrin->intrinsic, nir_op_iadd,
                            cluster_size, x_hi);
 
@@ -1301,7 +1301,7 @@ should_lower_int64_intrinsic(const nir_intrinsic_instr *intrin,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_int64_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin)
 {
    switch (intrin->intrinsic) {
@@ -1354,7 +1354,7 @@ should_lower_int64_instr(const nir_instr *instr, const void *_options)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_int64_instr(nir_builder *b, nir_instr *instr, void *_options)
 {
    switch (instr->type) {
index a8136f9..1058eab 100644
@@ -26,7 +26,7 @@
 #include "nir_builder.h"
 
 static bool
-assert_ssa_def_is_not_int(nir_ssa_def *def, void *arg)
+assert_ssa_def_is_not_int(nir_def *def, void *arg)
 {
    ASSERTED BITSET_WORD *int_types = arg;
    assert(!BITSET_TEST(int_types, def->index));
@@ -97,7 +97,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu)
    b->cursor = nir_before_instr(&alu->instr);
 
    /* Replacement SSA value */
-   nir_ssa_def *rep = NULL;
+   nir_def *rep = NULL;
    switch (alu->op) {
    case nir_op_mov:
    case nir_op_vec2:
@@ -178,8 +178,8 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu)
       break;
 
    case nir_op_idiv: {
-      nir_ssa_def *x = nir_ssa_for_alu_src(b, alu, 0);
-      nir_ssa_def *y = nir_ssa_for_alu_src(b, alu, 1);
+      nir_def *x = nir_ssa_for_alu_src(b, alu, 0);
+      nir_def *y = nir_ssa_for_alu_src(b, alu, 1);
 
       /* Hand-lower fdiv, since lower_int_to_float is after nir_opt_algebraic. */
       if (b->shader->options->lower_fdiv) {
@@ -247,7 +247,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu)
 
    if (rep) {
       /* We've emitted a replacement instruction */
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, rep);
+      nir_def_rewrite_uses(&alu->dest.dest.ssa, rep);
       nir_instr_remove(&alu->instr);
    }
 
@@ -267,7 +267,7 @@ nir_lower_int_to_float_impl(nir_function_impl *impl)
                         sizeof(BITSET_WORD));
    int_types = calloc(BITSET_WORDS(impl->ssa_alloc),
                       sizeof(BITSET_WORD));
-   nir_gather_ssa_types(impl, float_types, int_types);
+   nir_gather_types(impl, float_types, int_types);
 
    nir_foreach_block(block, impl) {
       nir_foreach_instr_safe(instr, block) {
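
   The replacement expression for the idiv case is outside the visible hunk;
   a plausible scalar model is a float divide truncated toward zero, with the
   divide expanded to multiply-by-reciprocal when fdiv itself must be lowered
   (the function name and exact ops here are assumptions, not the pass's
   verbatim code):

      #include <math.h>
      #include <stdbool.h>

      static float idiv_via_float(float x, float y, bool lower_fdiv)
      {
         float q = lower_fdiv ? x * (1.0f / y) : x / y; /* frcp-based fallback */
         return truncf(q);                              /* idiv rounds toward zero */
      }
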
index 1678d93..a62a30e 100644
@@ -95,16 +95,16 @@ nir_lower_interpolation_instr(nir_builder *b, nir_instr *instr, void *cb_data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+   nir_def *comps[NIR_MAX_VEC_COMPONENTS];
    for (int i = 0; i < intr->num_components; i++) {
-      nir_ssa_def *iid =
+      nir_def *iid =
          nir_load_fs_input_interp_deltas(b, 32, intr->src[1].ssa,
                                          .base = nir_intrinsic_base(intr),
                                          .component = (nir_intrinsic_component(intr) + i),
                                          .io_semantics = nir_intrinsic_io_semantics(intr));
 
-      nir_ssa_def *bary = intr->src[0].ssa;
-      nir_ssa_def *val;
+      nir_def *bary = intr->src[0].ssa;
+      nir_def *val;
 
       val = nir_ffma(b, nir_channel(b, bary, 1),
                      nir_channel(b, iid, 1),
@@ -115,8 +115,8 @@ nir_lower_interpolation_instr(nir_builder *b, nir_instr *instr, void *cb_data)
 
       comps[i] = val;
    }
-   nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, vec);
+   nir_def *vec = nir_vec(b, comps, intr->num_components);
+   nir_def_rewrite_uses(&intr->dest.ssa, vec);
 
    return true;
 }
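
   Only the first ffma is visible in the hunk above; the interpolation
   presumably finishes with a second ffma against the other barycentric
   channel. A hedged scalar model, where the channel layout of the deltas
   vector (iid) is an assumption:

      #include <math.h>

      static float interp_model(const float bary[2], const float iid[3])
      {
         float val = fmaf(bary[1], iid[1], iid[0]); /* visible first ffma */
         return fmaf(bary[0], iid[2], val);         /* assumed second ffma */
      }
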
index 0532888..2cb61da 100644
@@ -184,9 +184,9 @@ get_number_of_slots(struct lower_io_state *state,
    return state->type_size(type, var->data.bindless);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_io_offset(nir_builder *b, nir_deref_instr *deref,
-              nir_ssa_def **array_index,
+              nir_def **array_index,
               int (*type_size)(const struct glsl_type *, bool),
               unsigned *component, bool bts)
 {
@@ -218,13 +218,13 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
    }
 
    /* Just emit code and let constant-folding go to town */
-   nir_ssa_def *offset = nir_imm_int(b, 0);
+   nir_def *offset = nir_imm_int(b, 0);
 
    for (; *p; p++) {
       if ((*p)->deref_type == nir_deref_type_array) {
          unsigned size = type_size((*p)->type, bts);
 
-         nir_ssa_def *mul =
+         nir_def *mul =
             nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
 
          offset = nir_iadd(b, offset, mul);
@@ -247,16 +247,16 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
    return offset;
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_load(struct lower_io_state *state,
-          nir_ssa_def *array_index, nir_variable *var, nir_ssa_def *offset,
+          nir_def *array_index, nir_variable *var, nir_def *offset,
           unsigned component, unsigned num_components, unsigned bit_size,
           nir_alu_type dest_type)
 {
    nir_builder *b = &state->builder;
    const nir_shader *nir = b->shader;
    nir_variable_mode mode = var->data.mode;
-   nir_ssa_def *barycentric = NULL;
+   nir_def *barycentric = NULL;
 
    nir_intrinsic_op op;
    switch (mode) {
@@ -343,9 +343,9 @@ emit_load(struct lower_io_state *state,
    return &load->dest.ssa;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
-           nir_ssa_def *array_index, nir_variable *var, nir_ssa_def *offset,
+           nir_def *array_index, nir_variable *var, nir_def *offset,
            unsigned component, const struct glsl_type *type)
 {
    const bool lower_double = !glsl_type_is_integer(type) && state->options & nir_lower_io_lower_64bit_float_to_32;
@@ -355,7 +355,7 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
 
       const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
 
-      nir_ssa_def *comp64[4];
+      nir_def *comp64[4];
       assert(component == 0 || component == 2);
       unsigned dest_comp = 0;
       while (dest_comp < intrin->dest.ssa.num_components) {
@@ -363,7 +363,7 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
             MIN2(intrin->dest.ssa.num_components - dest_comp,
                  (4 - component) / 2);
 
-         nir_ssa_def *data32 =
+         nir_def *data32 =
             emit_load(state, array_index, var, offset, component,
                       num_comps * 2, 32, nir_type_uint32);
          for (unsigned i = 0; i < num_comps; i++) {
@@ -394,8 +394,8 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
 }
 
 static void
-emit_store(struct lower_io_state *state, nir_ssa_def *data,
-           nir_ssa_def *array_index, nir_variable *var, nir_ssa_def *offset,
+emit_store(struct lower_io_state *state, nir_def *data,
+           nir_def *array_index, nir_variable *var, nir_def *offset,
            unsigned component, unsigned num_components,
            nir_component_mask_t write_mask, nir_alu_type src_type)
 {
@@ -456,7 +456,7 @@ emit_store(struct lower_io_state *state, nir_ssa_def *data,
 
 static void
 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
-            nir_ssa_def *array_index, nir_variable *var, nir_ssa_def *offset,
+            nir_def *array_index, nir_variable *var, nir_def *offset,
             unsigned component, const struct glsl_type *type)
 {
    const bool lower_double = !glsl_type_is_integer(type) && state->options & nir_lower_io_lower_64bit_float_to_32;
@@ -475,10 +475,10 @@ lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
                  (4 - component) / 2);
 
          if (write_mask & BITFIELD_MASK(num_comps)) {
-            nir_ssa_def *data =
+            nir_def *data =
                nir_channels(b, intrin->src[1].ssa,
                             BITFIELD_RANGE(src_comp, num_comps));
-            nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);
+            nir_def *data32 = nir_bitcast_vector(b, data, 32);
 
             nir_component_mask_t write_mask32 = 0;
             for (unsigned i = 0; i < num_comps; i++) {
@@ -500,7 +500,7 @@ lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    } else if (intrin->dest.ssa.bit_size == 1) {
       /* Booleans are 32-bit */
       assert(glsl_type_is_boolean(type));
-      nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
+      nir_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
       emit_store(state, b32_val, array_index, var, offset,
                  component, intrin->num_components,
                  nir_intrinsic_write_mask(intrin),
@@ -513,9 +513,9 @@ lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
-                     nir_variable *var, nir_ssa_def *offset, unsigned component,
+                     nir_variable *var, nir_def *offset, unsigned component,
                      const struct glsl_type *type)
 {
    nir_builder *b = &state->builder;
@@ -526,7 +526,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
     */
    if (var->data.interpolation == INTERP_MODE_FLAT ||
        var->data.interpolation == INTERP_MODE_EXPLICIT) {
-      nir_ssa_def *vertex_index = NULL;
+      nir_def *vertex_index = NULL;
 
       if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
          assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
@@ -574,7 +574,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
       var->data.precision == GLSL_PRECISION_MEDIUM ||
       var->data.precision == GLSL_PRECISION_LOW;
 
-   nir_ssa_def *load =
+   nir_def *load =
       nir_load_interpolated_input(&state->builder,
                                   intrin->dest.ssa.num_components,
                                   intrin->dest.ssa.bit_size,
@@ -631,8 +631,8 @@ nir_lower_io_block(nir_block *block,
 
       const bool is_arrayed = nir_is_arrayed_io(var, b->shader->info.stage);
 
-      nir_ssa_def *offset;
-      nir_ssa_def *array_index = NULL;
+      nir_def *offset;
+      nir_def *array_index = NULL;
       unsigned component_offset = var->data.location_frac;
       bool bindless_type_size = var->data.mode == nir_var_shader_in ||
                                 var->data.mode == nir_var_shader_out ||
@@ -657,11 +657,11 @@ nir_lower_io_block(nir_block *block,
           * that could cause issues in drivers down the line.
           */
          if (intrin->intrinsic != nir_intrinsic_store_deref) {
-            nir_ssa_def *zero =
+            nir_def *zero =
                nir_imm_zero(b, intrin->dest.ssa.num_components,
                             intrin->dest.ssa.bit_size);
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                     zero);
+            nir_def_rewrite_uses(&intrin->dest.ssa,
+                                 zero);
          }
 
          nir_instr_remove(&intrin->instr);
@@ -673,7 +673,7 @@ nir_lower_io_block(nir_block *block,
                              state->type_size, &component_offset,
                              bindless_type_size);
 
-      nir_ssa_def *replacement = NULL;
+      nir_def *replacement = NULL;
 
       switch (intrin->intrinsic) {
       case nir_intrinsic_load_deref:
@@ -700,8 +700,8 @@ nir_lower_io_block(nir_block *block,
       }
 
       if (replacement) {
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                  replacement);
+         nir_def_rewrite_uses(&intrin->dest.ssa,
+                              replacement);
       }
       nir_instr_remove(&intrin->instr);
       progress = true;
@@ -771,11 +771,11 @@ type_scalar_size_bytes(const struct glsl_type *type)
    return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
 }
 
-nir_ssa_def *
-nir_build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
+nir_def *
+nir_build_addr_iadd(nir_builder *b, nir_def *addr,
                     nir_address_format addr_format,
                     nir_variable_mode modes,
-                    nir_ssa_def *offset)
+                    nir_def *offset)
 {
    assert(offset->num_components == 1);
 
@@ -789,11 +789,11 @@ nir_build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
 
    case nir_address_format_2x32bit_global: {
       assert(addr->num_components == 2);
-      nir_ssa_def *lo = nir_channel(b, addr, 0);
-      nir_ssa_def *hi = nir_channel(b, addr, 1);
-      nir_ssa_def *res_lo = nir_iadd(b, lo, offset);
-      nir_ssa_def *carry = nir_b2i32(b, nir_ult(b, res_lo, lo));
-      nir_ssa_def *res_hi = nir_iadd(b, hi, carry);
+      nir_def *lo = nir_channel(b, addr, 0);
+      nir_def *hi = nir_channel(b, addr, 1);
+      nir_def *res_lo = nir_iadd(b, lo, offset);
+      nir_def *carry = nir_b2i32(b, nir_ult(b, res_lo, lo));
+      nir_def *res_hi = nir_iadd(b, hi, carry);
       return nir_vec2(b, res_lo, res_hi);
    }
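
   The 2x32 case above is a standard add-with-carry; a scalar model of
   exactly that arithmetic (hypothetical helper, not part of the change):

      #include <stdint.h>

      static void addr_iadd_2x32(uint32_t addr[2], uint32_t offset)
      {
         uint32_t lo = addr[0];
         uint32_t res_lo = lo + offset;    /* may wrap around */
         uint32_t carry = res_lo < lo;     /* nir_ult: 1 iff the add wrapped */
         addr[0] = res_lo;
         addr[1] += carry;
      }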
 
@@ -835,8 +835,8 @@ nir_build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
          /* If we're sure it's one of these modes, we can do an easy 32-bit
           * addition and don't need to bother with 64-bit math.
           */
-         nir_ssa_def *addr32 = nir_unpack_64_2x32_split_x(b, addr);
-         nir_ssa_def *type = nir_unpack_64_2x32_split_y(b, addr);
+         nir_def *addr32 = nir_unpack_64_2x32_split_x(b, addr);
+         nir_def *type = nir_unpack_64_2x32_split_y(b, addr);
          addr32 = nir_iadd(b, addr32, nir_u2u32(b, offset));
          return nir_pack_64_2x32_split(b, addr32, type);
       } else {
@@ -850,7 +850,7 @@ nir_build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
 }
 
 static unsigned
-addr_get_offset_bit_size(nir_ssa_def *addr, nir_address_format addr_format)
+addr_get_offset_bit_size(nir_def *addr, nir_address_format addr_format)
 {
    if (addr_format == nir_address_format_32bit_offset_as_64bit ||
        addr_format == nir_address_format_32bit_index_offset_pack64)
@@ -858,8 +858,8 @@ addr_get_offset_bit_size(nir_ssa_def *addr, nir_address_format addr_format)
    return addr->bit_size;
 }
 
-nir_ssa_def *
-nir_build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
+nir_def *
+nir_build_addr_iadd_imm(nir_builder *b, nir_def *addr,
                         nir_address_format addr_format,
                         nir_variable_mode modes,
                         int64_t offset)
@@ -873,7 +873,7 @@ nir_build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
                      addr_get_offset_bit_size(addr, addr_format)));
 }
 
-static nir_ssa_def *
+static nir_def *
 build_addr_for_var(nir_builder *b, nir_variable *var,
                    nir_address_format addr_format)
 {
@@ -890,7 +890,7 @@ build_addr_for_var(nir_builder *b, nir_variable *var,
    case nir_address_format_2x32bit_global:
    case nir_address_format_32bit_global:
    case nir_address_format_64bit_global: {
-      nir_ssa_def *base_addr;
+      nir_def *base_addr;
       switch (var->data.mode) {
       case nir_var_shader_temp:
          base_addr = nir_load_scratch_base_ptr(b, num_comps, bit_size, 0);
@@ -952,8 +952,8 @@ build_addr_for_var(nir_builder *b, nir_variable *var,
    }
 }
 
-static nir_ssa_def *
-build_runtime_addr_mode_check(nir_builder *b, nir_ssa_def *addr,
+static nir_def *
+build_runtime_addr_mode_check(nir_builder *b, nir_def *addr,
                               nir_address_format addr_format,
                               nir_variable_mode mode)
 {
@@ -962,7 +962,7 @@ build_runtime_addr_mode_check(nir_builder *b, nir_ssa_def *addr,
    case nir_address_format_62bit_generic: {
       assert(addr->num_components == 1);
       assert(addr->bit_size == 64);
-      nir_ssa_def *mode_enum = nir_ushr_imm(b, addr, 62);
+      nir_def *mode_enum = nir_ushr_imm(b, addr, 62);
       switch (mode) {
       case nir_var_function_temp:
       case nir_var_shader_temp:
@@ -1049,8 +1049,8 @@ nir_address_format_num_components(nir_address_format addr_format)
    unreachable("Invalid address format");
 }
 
-static nir_ssa_def *
-addr_to_index(nir_builder *b, nir_ssa_def *addr,
+static nir_def *
+addr_to_index(nir_builder *b, nir_def *addr,
               nir_address_format addr_format)
 {
    switch (addr_format) {
@@ -1067,8 +1067,8 @@ addr_to_index(nir_builder *b, nir_ssa_def *addr,
    }
 }
 
-static nir_ssa_def *
-addr_to_offset(nir_builder *b, nir_ssa_def *addr,
+static nir_def *
+addr_to_offset(nir_builder *b, nir_def *addr,
                nir_address_format addr_format)
 {
    switch (addr_format) {
@@ -1116,8 +1116,8 @@ addr_format_is_offset(nir_address_format addr_format,
           addr_format == nir_address_format_32bit_offset_as_64bit;
 }
 
-static nir_ssa_def *
-addr_to_global(nir_builder *b, nir_ssa_def *addr,
+static nir_def *
+addr_to_global(nir_builder *b, nir_def *addr,
                nir_address_format addr_format)
 {
    switch (addr_format) {
@@ -1155,8 +1155,8 @@ addr_format_needs_bounds_check(nir_address_format addr_format)
    return addr_format == nir_address_format_64bit_bounded_global;
 }
 
-static nir_ssa_def *
-addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
+static nir_def *
+addr_is_in_bounds(nir_builder *b, nir_def *addr,
                   nir_address_format addr_format, unsigned size)
 {
    assert(addr_format == nir_address_format_64bit_bounded_global);
@@ -1305,9 +1305,9 @@ get_load_global_op_from_addr_format(nir_address_format addr_format)
       return nir_intrinsic_load_global_2x32;
 }
 
-static nir_ssa_def *
+static nir_def *
 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
-                       nir_ssa_def *addr, nir_address_format addr_format,
+                       nir_def *addr, nir_address_format addr_format,
                        nir_variable_mode modes,
                        uint32_t align_mul, uint32_t align_offset,
                        unsigned num_components)
@@ -1324,13 +1324,13 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
       } else if (modes & nir_var_function_temp) {
          nir_push_if(b, build_runtime_addr_mode_check(b, addr, addr_format,
                                                       nir_var_function_temp));
-         nir_ssa_def *res1 =
+         nir_def *res1 =
             build_explicit_io_load(b, intrin, addr, addr_format,
                                    nir_var_function_temp,
                                    align_mul, align_offset,
                                    num_components);
          nir_push_else(b, NULL);
-         nir_ssa_def *res2 =
+         nir_def *res2 =
             build_explicit_io_load(b, intrin, addr, addr_format,
                                    modes & ~nir_var_function_temp,
                                    align_mul, align_offset,
@@ -1341,14 +1341,14 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
          nir_push_if(b, build_runtime_addr_mode_check(b, addr, addr_format,
                                                       nir_var_mem_shared));
          assert(modes & nir_var_mem_shared);
-         nir_ssa_def *res1 =
+         nir_def *res1 =
             build_explicit_io_load(b, intrin, addr, addr_format,
                                    nir_var_mem_shared,
                                    align_mul, align_offset,
                                    num_components);
          nir_push_else(b, NULL);
          assert(modes & nir_var_mem_global);
-         nir_ssa_def *res2 =
+         nir_def *res2 =
             build_explicit_io_load(b, intrin, addr, addr_format,
                                    nir_var_mem_global,
                                    align_mul, align_offset,
@@ -1509,7 +1509,7 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
 
    assert(bit_size % 8 == 0);
 
-   nir_ssa_def *result;
+   nir_def *result;
    if (addr_format_needs_bounds_check(addr_format) &&
        op != nir_intrinsic_load_global_constant_bounded) {
       /* We don't need to bounds-check global_constant_bounded because bounds
@@ -1519,7 +1519,7 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
        * as to what we can do with an OOB read.  Unfortunately, returning
        * undefined values isn't one of them, so we return an actual zero.
        */
-      nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
+      nir_def *zero = nir_imm_zero(b, load->num_components, bit_size);
 
       /* TODO: Better handle block_intel. */
       const unsigned load_size = (bit_size / 8) * load->num_components;
@@ -1553,10 +1553,10 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
 
 static void
 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
-                        nir_ssa_def *addr, nir_address_format addr_format,
+                        nir_def *addr, nir_address_format addr_format,
                         nir_variable_mode modes,
                         uint32_t align_mul, uint32_t align_offset,
-                        nir_ssa_def *value, nir_component_mask_t write_mask)
+                        nir_def *value, nir_component_mask_t write_mask)
 {
    modes = canonicalize_generic_modes(modes);
 
@@ -1718,9 +1718,9 @@ build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
-                         nir_ssa_def *addr, nir_address_format addr_format,
+                         nir_def *addr, nir_address_format addr_format,
                          nir_variable_mode modes)
 {
    modes = canonicalize_generic_modes(modes);
@@ -1732,11 +1732,11 @@ build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
       } else if (modes & nir_var_function_temp) {
          nir_push_if(b, build_runtime_addr_mode_check(b, addr, addr_format,
                                                       nir_var_function_temp));
-         nir_ssa_def *res1 =
+         nir_def *res1 =
             build_explicit_io_atomic(b, intrin, addr, addr_format,
                                      nir_var_function_temp);
          nir_push_else(b, NULL);
-         nir_ssa_def *res2 =
+         nir_def *res2 =
             build_explicit_io_atomic(b, intrin, addr, addr_format,
                                      modes & ~nir_var_function_temp);
          nir_pop_if(b, NULL);
@@ -1745,12 +1745,12 @@ build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
          nir_push_if(b, build_runtime_addr_mode_check(b, addr, addr_format,
                                                       nir_var_mem_shared));
          assert(modes & nir_var_mem_shared);
-         nir_ssa_def *res1 =
+         nir_def *res1 =
             build_explicit_io_atomic(b, intrin, addr, addr_format,
                                      nir_var_mem_shared);
          nir_push_else(b, NULL);
          assert(modes & nir_var_mem_global);
-         nir_ssa_def *res2 =
+         nir_def *res2 =
             build_explicit_io_atomic(b, intrin, addr, addr_format,
                                      nir_var_mem_global);
          nir_pop_if(b, NULL);
@@ -1825,16 +1825,16 @@ build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
 
       nir_pop_if(b, NULL);
       return nir_if_phi(b, &atomic->dest.ssa,
-                        nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
+                        nir_undef(b, 1, atomic->dest.ssa.bit_size));
    } else {
       nir_builder_instr_insert(b, &atomic->instr);
       return &atomic->dest.ssa;
    }
 }
 
-nir_ssa_def *
+nir_def *
 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
-                                   nir_ssa_def *base_addr,
+                                   nir_def *base_addr,
                                    nir_address_format addr_format)
 {
    switch (deref->deref_type) {
@@ -1847,8 +1847,8 @@ nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
       assert(stride > 0);
 
       unsigned offset_bit_size = addr_get_offset_bit_size(base_addr, addr_format);
-      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
-      nir_ssa_def *offset;
+      nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *offset;
 
       /* If the access chain has been declared in-bounds, then we know it doesn't
        * overflow the type.  For nir_deref_type_array, this implies it cannot be
@@ -1892,7 +1892,7 @@ nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
 void
 nir_lower_explicit_io_instr(nir_builder *b,
                             nir_intrinsic_instr *intrin,
-                            nir_ssa_def *addr,
+                            nir_def *addr,
                             nir_address_format addr_format)
 {
    b->cursor = nir_after_instr(&intrin->instr);
@@ -1912,16 +1912,16 @@ nir_lower_explicit_io_instr(nir_builder *b,
 
    switch (intrin->intrinsic) {
    case nir_intrinsic_load_deref: {
-      nir_ssa_def *value;
+      nir_def *value;
       if (vec_stride > scalar_size) {
-         nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = {
+         nir_def *comps[NIR_MAX_VEC_COMPONENTS] = {
             NULL,
          };
          for (unsigned i = 0; i < intrin->num_components; i++) {
             unsigned comp_offset = i * vec_stride;
-            nir_ssa_def *comp_addr = nir_build_addr_iadd_imm(b, addr, addr_format,
-                                                             deref->modes,
-                                                             comp_offset);
+            nir_def *comp_addr = nir_build_addr_iadd_imm(b, addr, addr_format,
+                                                         deref->modes,
+                                                         comp_offset);
             comps[i] = build_explicit_io_load(b, intrin, comp_addr,
                                               addr_format, deref->modes,
                                               align_mul,
@@ -1935,12 +1935,12 @@ nir_lower_explicit_io_instr(nir_builder *b,
                                         deref->modes, align_mul, align_offset,
                                         intrin->num_components);
       }
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, value);
+      nir_def_rewrite_uses(&intrin->dest.ssa, value);
       break;
    }
 
    case nir_intrinsic_store_deref: {
-      nir_ssa_def *value = intrin->src[1].ssa;
+      nir_def *value = intrin->src[1].ssa;
       nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
       if (vec_stride > scalar_size) {
          for (unsigned i = 0; i < intrin->num_components; i++) {
@@ -1948,9 +1948,9 @@ nir_lower_explicit_io_instr(nir_builder *b,
                continue;
 
             unsigned comp_offset = i * vec_stride;
-            nir_ssa_def *comp_addr = nir_build_addr_iadd_imm(b, addr, addr_format,
-                                                             deref->modes,
-                                                             comp_offset);
+            nir_def *comp_addr = nir_build_addr_iadd_imm(b, addr, addr_format,
+                                                         deref->modes,
+                                                         comp_offset);
             build_explicit_io_store(b, intrin, comp_addr, addr_format,
                                     deref->modes, align_mul,
                                     (align_offset + comp_offset) % align_mul,
@@ -1965,16 +1965,16 @@ nir_lower_explicit_io_instr(nir_builder *b,
    }
 
    case nir_intrinsic_load_deref_block_intel: {
-      nir_ssa_def *value = build_explicit_io_load(b, intrin, addr, addr_format,
-                                                  deref->modes,
-                                                  align_mul, align_offset,
-                                                  intrin->num_components);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, value);
+      nir_def *value = build_explicit_io_load(b, intrin, addr, addr_format,
+                                              deref->modes,
+                                              align_mul, align_offset,
+                                              intrin->num_components);
+      nir_def_rewrite_uses(&intrin->dest.ssa, value);
       break;
    }
 
    case nir_intrinsic_store_deref_block_intel: {
-      nir_ssa_def *value = intrin->src[1].ssa;
+      nir_def *value = intrin->src[1].ssa;
       const nir_component_mask_t write_mask = 0;
       build_explicit_io_store(b, intrin, addr, addr_format,
                               deref->modes, align_mul, align_offset,
@@ -1983,9 +1983,9 @@ nir_lower_explicit_io_instr(nir_builder *b,
    }
 
    default: {
-      nir_ssa_def *value =
+      nir_def *value =
          build_explicit_io_atomic(b, intrin, addr, addr_format, deref->modes);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, value);
+      nir_def_rewrite_uses(&intrin->dest.ssa, value);
       break;
    }
    }
@@ -2108,25 +2108,25 @@ lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
     * one deref which could break our list walking since we walk the list
     * backwards.
     */
-   if (nir_ssa_def_is_unused(&deref->dest.ssa)) {
+   if (nir_def_is_unused(&deref->dest.ssa)) {
       nir_instr_remove(&deref->instr);
       return;
    }
 
    b->cursor = nir_after_instr(&deref->instr);
 
-   nir_ssa_def *base_addr = NULL;
+   nir_def *base_addr = NULL;
    if (deref->deref_type != nir_deref_type_var) {
       base_addr = deref->parent.ssa;
    }
 
-   nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
-                                                          addr_format);
+   nir_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
+                                                      addr_format);
    assert(addr->bit_size == deref->dest.ssa.bit_size);
    assert(addr->num_components == deref->dest.ssa.num_components);
 
    nir_instr_remove(&deref->instr);
-   nir_ssa_def_rewrite_uses(&deref->dest.ssa, addr);
+   nir_def_rewrite_uses(&deref->dest.ssa, addr);
 }
 
 static void
@@ -2150,9 +2150,9 @@ lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
    unsigned stride = glsl_get_explicit_stride(deref->type);
    assert(stride > 0);
 
-   nir_ssa_def *addr = &deref->dest.ssa;
+   nir_def *addr = &deref->dest.ssa;
 
-   nir_ssa_def *offset, *size;
+   nir_def *offset, *size;
    switch (addr_format) {
    case nir_address_format_64bit_global_32bit_offset:
    case nir_address_format_64bit_bounded_global:
@@ -2164,7 +2164,7 @@ lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
    case nir_address_format_32bit_index_offset_pack64:
    case nir_address_format_vec2_index_32bit_offset: {
       offset = addr_to_offset(b, addr, addr_format);
-      nir_ssa_def *index = addr_to_index(b, addr, addr_format);
+      nir_def *index = addr_to_index(b, addr, addr_format);
       unsigned access = nir_intrinsic_access(intrin);
       size = nir_get_ssbo_size(b, index, .access = access);
       break;
@@ -2174,10 +2174,10 @@ lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
       unreachable("Cannot determine SSBO size");
    }
 
-   nir_ssa_def *remaining = nir_usub_sat(b, size, offset);
-   nir_ssa_def *arr_size = nir_udiv_imm(b, remaining, stride);
+   nir_def *remaining = nir_usub_sat(b, size, offset);
+   nir_def *arr_size = nir_udiv_imm(b, remaining, stride);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, arr_size);
+   nir_def_rewrite_uses(&intrin->dest.ssa, arr_size);
    nir_instr_remove(&intrin->instr);
 }
 
@@ -2195,15 +2195,15 @@ lower_explicit_io_mode_check(nir_builder *b, nir_intrinsic_instr *intrin,
       return;
    }
 
-   nir_ssa_def *addr = intrin->src[0].ssa;
+   nir_def *addr = intrin->src[0].ssa;
 
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def *is_mode =
+   nir_def *is_mode =
       build_runtime_addr_mode_check(b, addr, addr_format,
                                     nir_intrinsic_memory_modes(intrin));
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, is_mode);
+   nir_def_rewrite_uses(&intrin->dest.ssa, is_mode);
 }
 
 static bool
@@ -2274,7 +2274,7 @@ nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
                   unsigned size = glsl_get_explicit_size(deref->var->type, false);
 
                   /* Replace the current instruction with the explicit intrinsic. */
-                  nir_ssa_def *dispatch_3d = intrin->src[0].ssa;
+                  nir_def *dispatch_3d = intrin->src[0].ssa;
                   b.cursor = nir_instr_remove(instr);
                   nir_launch_mesh_workgroups(&b, dispatch_3d, .base = base, .range = size);
                   progress = true;
@@ -2732,8 +2732,8 @@ nir_address_format_null_value(nir_address_format addr_format)
    return null_values[addr_format];
 }
 
-nir_ssa_def *
-nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
+nir_def *
+nir_build_addr_ieq(nir_builder *b, nir_def *addr0, nir_def *addr1,
                    nir_address_format addr_format)
 {
    switch (addr_format) {
@@ -2766,8 +2766,8 @@ nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
    unreachable("Invalid address format");
 }
 
-nir_ssa_def *
-nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
+nir_def *
+nir_build_addr_isub(nir_builder *b, nir_def *addr0, nir_def *addr1,
                     nir_address_format addr_format)
 {
    switch (addr_format) {
@@ -2970,7 +2970,7 @@ nir_lower_color_inputs(nir_shader *nir)
          }
 
          b.cursor = nir_before_instr(instr);
-         nir_ssa_def *load = NULL;
+         nir_def *load = NULL;
 
          if (sem.location == VARYING_SLOT_COL0) {
             load = nir_load_color0(&b);
@@ -2991,7 +2991,7 @@ nir_lower_color_inputs(nir_shader *nir)
             load = nir_channels(&b, load, BITFIELD_RANGE(start, count));
          }
 
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, load);
+         nir_def_rewrite_uses(&intrin->dest.ssa, load);
          nir_instr_remove(instr);
          progress = true;
       }
index 225f7f5..4a4dbd6 100644
@@ -35,7 +35,7 @@
 static unsigned
 get_io_offset(nir_builder *b, nir_deref_instr *deref, nir_variable *var,
               unsigned *element_index, unsigned *xfb_offset,
-              nir_ssa_def **array_index)
+              nir_def **array_index)
 {
    nir_deref_path path;
    nir_deref_path_init(&path, deref, NULL);
@@ -114,10 +114,10 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
    if (nir_deref_instr_is_known_out_of_bounds(nir_src_as_deref(intr->src[0]))) {
       /* See Section 5.11 (Out-of-Bounds Accesses) of the GLSL 4.60 spec. */
       if (intr->intrinsic != nir_intrinsic_store_deref) {
-         nir_ssa_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components,
-                                          intr->dest.ssa.bit_size);
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                                  zero);
+         nir_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components,
+                                      intr->dest.ssa.bit_size);
+         nir_def_rewrite_uses(&intr->dest.ssa,
+                              zero);
       }
       nir_instr_remove(&intr->instr);
       return;
@@ -126,7 +126,7 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
    nir_variable **elements =
       get_array_elements(varyings, var, b->shader->info.stage);
 
-   nir_ssa_def *array_index = NULL;
+   nir_def *array_index = NULL;
    unsigned elements_index = 0;
    unsigned xfb_offset = 0;
    unsigned io_offset = get_io_offset(b, nir_src_as_deref(intr->src[0]),
@@ -181,8 +181,8 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
                       &element_intr->instr);
       }
 
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                               &element_intr->dest.ssa);
+      nir_def_rewrite_uses(&intr->dest.ssa,
+                           &element_intr->dest.ssa);
    } else {
       nir_intrinsic_set_write_mask(element_intr,
                                    nir_intrinsic_write_mask(intr));
index 7038849..cab6560 100644
@@ -45,7 +45,7 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
+   nir_def *loads[NIR_MAX_VEC_COMPONENTS];
 
    for (unsigned i = 0; i < intr->num_components; i++) {
       bool is_64bit = (nir_intrinsic_instr_dest_type(intr) & NIR_ALU_TYPE_SIZE_MASK) == 64;
@@ -66,7 +66,7 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
          nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
       if (newc + newi > 3) {
          nir_src *src = nir_get_io_offset_src(chan_intr);
-         nir_ssa_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
+         nir_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
          nir_src new_src = nir_src_for_ssa(offset);
          nir_src_copy(src, &new_src, &chan_intr->instr);
       }
@@ -76,8 +76,8 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
       loads[i] = &chan_intr->dest.ssa;
    }
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                            nir_vec(b, loads, intr->num_components));
+   nir_def_rewrite_uses(&intr->dest.ssa,
+                        nir_vec(b, loads, intr->num_components));
    nir_instr_remove(&intr->instr);
 }
 
@@ -86,8 +86,8 @@ lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
-   nir_ssa_def *base_offset = nir_get_io_offset_src(intr)->ssa;
+   nir_def *loads[NIR_MAX_VEC_COMPONENTS];
+   nir_def *base_offset = nir_get_io_offset_src(intr)->ssa;
 
    for (unsigned i = 0; i < intr->num_components; i++) {
       nir_intrinsic_instr *chan_intr =
@@ -113,7 +113,7 @@ lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
          nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
 
       /* increment offset per component */
-      nir_ssa_def *offset = nir_iadd_imm(b, base_offset, i * (intr->dest.ssa.bit_size / 8));
+      nir_def *offset = nir_iadd_imm(b, base_offset, i * (intr->dest.ssa.bit_size / 8));
       *nir_get_io_offset_src(chan_intr) = nir_src_for_ssa(offset);
 
       nir_builder_instr_insert(b, &chan_intr->instr);
@@ -121,8 +121,8 @@ lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
       loads[i] = &chan_intr->dest.ssa;
    }
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                            nir_vec(b, loads, intr->num_components));
+   nir_def_rewrite_uses(&intr->dest.ssa,
+                        nir_vec(b, loads, intr->num_components));
    nir_instr_remove(&intr->instr);
 }
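
   The per-component offset math above strides by bit_size/8 bytes per
   channel from the shared base offset; a minimal byte-buffer model of the
   same addressing (hypothetical helper, 32-bit channels assumed for
   brevity):

      #include <stdint.h>
      #include <string.h>

      static void load_vec_scalarized(const uint8_t *buf, uint32_t base_offset,
                                      unsigned num_components, uint32_t *out)
      {
         const unsigned bit_size = 32; /* assumed channel width */
         for (unsigned i = 0; i < num_components; i++) {
            /* increment offset per component, as in the pass */
            uint32_t off = base_offset + i * (bit_size / 8);
            memcpy(&out[i], buf + off, bit_size / 8);
         }
      }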
 
@@ -131,7 +131,7 @@ lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *value = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+   nir_def *value = nir_ssa_for_src(b, intr->src[0], intr->num_components);
 
    for (unsigned i = 0; i < intr->num_components; i++) {
       if (!(nir_intrinsic_write_mask(intr) & (1 << i)))
@@ -181,7 +181,7 @@ lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
          nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
       if (newc + newi > 3) {
          nir_src *src = nir_get_io_offset_src(chan_intr);
-         nir_ssa_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
+         nir_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
          nir_src new_src = nir_src_for_ssa(offset);
          nir_src_copy(src, &new_src, &chan_intr->instr);
       }
@@ -197,8 +197,8 @@ lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *value = nir_ssa_for_src(b, intr->src[0], intr->num_components);
-   nir_ssa_def *base_offset = nir_get_io_offset_src(intr)->ssa;
+   nir_def *value = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+   nir_def *base_offset = nir_get_io_offset_src(intr)->ssa;
 
    /* iterate wrmask instead of num_components to handle split components */
    u_foreach_bit(i, nir_intrinsic_write_mask(intr)) {
@@ -223,7 +223,7 @@ lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
          nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
 
       /* increment offset per component */
-      nir_ssa_def *offset = nir_iadd_imm(b, base_offset, i * (value->bit_size / 8));
+      nir_def *offset = nir_iadd_imm(b, base_offset, i * (value->bit_size / 8));
       *nir_get_io_offset_src(chan_intr) = nir_src_for_ssa(offset);
 
       nir_builder_instr_insert(b, &chan_intr->instr);
@@ -357,7 +357,7 @@ lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
+   nir_def *loads[NIR_MAX_VEC_COMPONENTS];
 
    nir_variable **chan_vars;
    if (var->data.mode == nir_var_shader_in) {
@@ -400,8 +400,8 @@ lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
       loads[i] = &chan_intr->dest.ssa;
    }
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                            nir_vec(b, loads, intr->num_components));
+   nir_def_rewrite_uses(&intr->dest.ssa,
+                        nir_vec(b, loads, intr->num_components));
 
    /* Remove the old load intrinsic */
    nir_instr_remove(&intr->instr);
@@ -414,7 +414,7 @@ lower_store_output_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *value = nir_ssa_for_src(b, intr->src[1], intr->num_components);
+   nir_def *value = nir_ssa_for_src(b, intr->src[1], intr->num_components);
 
    nir_variable **chan_vars = get_channel_variables(split_outputs, var);
    for (unsigned i = 0; i < intr->num_components; i++) {
index e53cb4e..607af0d 100644
@@ -244,8 +244,8 @@ fixup_interpolation_instr(struct lower_io_state *state,
     * load from it. We can reuse the original deref, since it points to the
     * correct part of the temporary.
     */
-   nir_ssa_def *load = nir_load_deref(b, nir_src_as_deref(interp->src[0]));
-   nir_ssa_def_rewrite_uses(&interp->dest.ssa, load);
+   nir_def *load = nir_load_deref(b, nir_src_as_deref(interp->src[0]));
+   nir_def_rewrite_uses(&interp->dest.ssa, load);
    nir_instr_remove(&interp->instr);
 
    nir_deref_path_finish(&interp_path);
index 2da3ed4..dc8b572 100644
@@ -335,16 +335,16 @@ build_array_deref_of_new_var(nir_builder *b, nir_variable *new_var,
    return nir_build_deref_follower(b, parent, leader);
 }
 
-static nir_ssa_def *
-build_array_index(nir_builder *b, nir_deref_instr *deref, nir_ssa_def *base,
+static nir_def *
+build_array_index(nir_builder *b, nir_deref_instr *deref, nir_def *base,
                   bool vs_in, bool per_vertex)
 {
    switch (deref->deref_type) {
    case nir_deref_type_var:
       return base;
    case nir_deref_type_array: {
-      nir_ssa_def *index = nir_i2iN(b, deref->arr.index.ssa,
-                                    deref->dest.ssa.bit_size);
+      nir_def *index = nir_i2iN(b, deref->arr.index.ssa,
+                                deref->dest.ssa.bit_size);
 
       if (nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var &&
           per_vertex)
@@ -375,7 +375,7 @@ build_array_deref_of_new_var_flat(nir_shader *shader,
       nir_deref_instr *p = path.path[1];
       nir_deref_path_finish(&path);
 
-      nir_ssa_def *index = p->arr.index.ssa;
+      nir_def *index = p->arr.index.ssa;
       deref = nir_build_deref_array(b, deref, index);
    }
 
@@ -513,11 +513,11 @@ nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
 
             b.cursor = nir_after_instr(&intrin->instr);
 
-            nir_ssa_def *new_vec = nir_channels(&b, &intrin->dest.ssa,
-                                                vec4_comp_mask >> new_frac);
-            nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
-                                           new_vec,
-                                           new_vec->parent_instr);
+            nir_def *new_vec = nir_channels(&b, &intrin->dest.ssa,
+                                            vec4_comp_mask >> new_frac);
+            nir_def_rewrite_uses_after(&intrin->dest.ssa,
+                                       new_vec,
+                                       new_vec->parent_instr);
 
             progress = true;
             break;
@@ -559,20 +559,20 @@ nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
 
             nir_component_mask_t old_wrmask = nir_intrinsic_write_mask(intrin);
 
-            nir_ssa_def *old_value = intrin->src[1].ssa;
-            nir_ssa_scalar comps[4];
+            nir_def *old_value = intrin->src[1].ssa;
+            nir_scalar comps[4];
             for (unsigned c = 0; c < intrin->num_components; c++) {
                if (new_frac + c >= old_frac &&
                    (old_wrmask & 1 << (new_frac + c - old_frac))) {
                   comps[c] = nir_get_ssa_scalar(old_value,
                                                 new_frac + c - old_frac);
                } else {
-                  comps[c] = nir_get_ssa_scalar(nir_ssa_undef(&b, old_value->num_components,
-                                                              old_value->bit_size),
+                  comps[c] = nir_get_ssa_scalar(nir_undef(&b, old_value->num_components,
+                                                          old_value->bit_size),
                                                 0);
                }
             }
-            nir_ssa_def *new_value = nir_vec_scalars(&b, comps, intrin->num_components);
+            nir_def *new_value = nir_vec_scalars(&b, comps, intrin->num_components);
             nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
                                   nir_src_for_ssa(new_value));
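
   The channel remap in the loop above can be modeled in plain C
   (UNDEF_MARKER stands in for nir_undef; the names are hypothetical):

      #include <stdint.h>

      #define UNDEF_MARKER 0u /* stand-in for an undefined channel */

      static void remap_store_channels(const uint32_t *old_value,
                                       unsigned old_frac, unsigned new_frac,
                                       unsigned old_wrmask,
                                       unsigned num_components, uint32_t *comps)
      {
         for (unsigned c = 0; c < num_components; c++) {
            /* Channel c of the new store reads channel new_frac + c - old_frac
             * of the old value, but only where the old write mask covered it. */
            if (new_frac + c >= old_frac &&
                (old_wrmask & (1u << (new_frac + c - old_frac))))
               comps[c] = old_value[new_frac + c - old_frac];
            else
               comps[c] = UNDEF_MARKER;
         }
      }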
 
@@ -649,7 +649,7 @@ nir_vectorize_tess_levels_impl(nir_function_impl *impl)
          unsigned vec_size = glsl_get_vector_elements(var->type);
 
          b.cursor = nir_before_instr(instr);
-         nir_ssa_def *new_deref = &nir_build_deref_var(&b, var)->dest.ssa;
+         nir_def *new_deref = &nir_build_deref_var(&b, var)->dest.ssa;
          nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(new_deref));
 
          nir_deref_instr_remove_if_unused(deref);
@@ -661,9 +661,9 @@ nir_vectorize_tess_levels_impl(nir_function_impl *impl)
             if (intrin->intrinsic == nir_intrinsic_load_deref) {
                /* Return undef from out of bounds loads. */
                b.cursor = nir_after_instr(instr);
-               nir_ssa_def *val = &intrin->dest.ssa;
-               nir_ssa_def *u = nir_ssa_undef(&b, val->num_components, val->bit_size);
-               nir_ssa_def_rewrite_uses(val, u);
+               nir_def *val = &intrin->dest.ssa;
+               nir_def *u = nir_undef(&b, val->num_components, val->bit_size);
+               nir_def_rewrite_uses(val, u);
             }
 
             /* Finally, remove the out of bounds access. */
@@ -674,15 +674,15 @@ nir_vectorize_tess_levels_impl(nir_function_impl *impl)
 
          if (intrin->intrinsic == nir_intrinsic_store_deref) {
             nir_intrinsic_set_write_mask(intrin, 1 << index);
-            nir_ssa_def *new_val = nir_ssa_undef(&b, intrin->num_components, 32);
+            nir_def *new_val = nir_undef(&b, intrin->num_components, 32);
             new_val = nir_vector_insert_imm(&b, new_val, intrin->src[1].ssa, index);
             nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(new_val));
          } else {
             b.cursor = nir_after_instr(instr);
-            nir_ssa_def *val = &intrin->dest.ssa;
+            nir_def *val = &intrin->dest.ssa;
             val->num_components = intrin->num_components;
-            nir_ssa_def *comp = nir_channel(&b, val, index);
-            nir_ssa_def_rewrite_uses_after(val, comp, comp->parent_instr);
+            nir_def *comp = nir_channel(&b, val, index);
+            nir_def_rewrite_uses_after(val, comp, comp->parent_instr);
          }
 
          progress = true;
index 64b955f..53dd3bd 100644
@@ -56,15 +56,15 @@ nir_lower_load_and_store_is_helper(nir_builder *b, nir_instr *instr, void *data)
    }
    case nir_intrinsic_demote_if: {
       b->cursor = nir_before_instr(instr);
-      nir_ssa_def *current_is_helper = nir_load_deref(b, is_helper_deref);
-      nir_ssa_def *updated_is_helper = nir_ior(b, current_is_helper, intrin->src[0].ssa);
+      nir_def *current_is_helper = nir_load_deref(b, is_helper_deref);
+      nir_def *updated_is_helper = nir_ior(b, current_is_helper, intrin->src[0].ssa);
       nir_store_deref(b, is_helper_deref, updated_is_helper, 1);
       return true;
    }
    case nir_intrinsic_is_helper_invocation: {
       b->cursor = nir_before_instr(instr);
-      nir_ssa_def *is_helper = nir_load_deref(b, is_helper_deref);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, is_helper);
+      nir_def *is_helper = nir_load_deref(b, is_helper_deref);
+      nir_def_rewrite_uses(&intrin->dest.ssa, is_helper);
       nir_instr_remove_v(instr);
       return true;
    }
@@ -109,7 +109,7 @@ nir_lower_is_helper_invocation(nir_shader *shader)
                                                        glsl_bool_type(),
                                                        "gl_IsHelperInvocationEXT");
 
-   nir_ssa_def *started_as_helper = shader->options->lower_helper_invocation ? nir_build_lowered_load_helper_invocation(&b) : nir_load_helper_invocation(&b, 1);
+   nir_def *started_as_helper = shader->options->lower_helper_invocation ? nir_build_lowered_load_helper_invocation(&b) : nir_load_helper_invocation(&b, 1);
 
    nir_deref_instr *is_helper_deref = nir_build_deref_var(&b, is_helper);
    nir_store_deref(&b, is_helper_deref, started_as_helper, 1);
index 6e3f7d8..080022c 100644
@@ -44,7 +44,7 @@ lower_load_const_instr_scalar(nir_load_const_instr *lower)
    nir_builder b = nir_builder_at(nir_before_instr(&lower->instr));
 
    /* Emit the individual loads. */
-   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
+   nir_def *loads[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < lower->def.num_components; i++) {
       nir_load_const_instr *load_comp =
          nir_load_const_instr_create(b.shader, 1, lower->def.bit_size);
@@ -54,10 +54,10 @@ lower_load_const_instr_scalar(nir_load_const_instr *lower)
    }
 
    /* Batch things back together into a vector. */
-   nir_ssa_def *vec = nir_vec(&b, loads, lower->def.num_components);
+   nir_def *vec = nir_vec(&b, loads, lower->def.num_components);
 
    /* Replace the old load with a reference to our reconstructed vector. */
-   nir_ssa_def_rewrite_uses(&lower->def, vec);
+   nir_def_rewrite_uses(&lower->def, vec);
    nir_instr_remove(&lower->instr);
    return true;
 }
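
The rewrite-uses idiom in this hunk recurs across nearly every file in the commit. As a minimal sketch of the post-rename shape of a lowering pass (the pass name and the doubling are hypothetical; the helper names are the ones this diff uses):

/* Hypothetical pass body: build a replacement value at the old
 * instruction's position, redirect every user of the old nir_def to
 * it, then delete the original instruction.
 */
static bool
lower_example_instr(nir_builder *b, nir_alu_instr *alu)
{
   b->cursor = nir_before_instr(&alu->instr);
   nir_def *src = nir_ssa_for_alu_src(b, alu, 0);
   nir_def *replacement = nir_fmul_imm(b, src, 2.0);

   nir_def_rewrite_uses(&alu->dest.dest.ssa, replacement);
   nir_instr_remove(&alu->instr);
   return true;
}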
index 4010e30..799cdbf 100644
@@ -97,7 +97,7 @@ derefs_equal(const void *void_a, const void *void_b)
    unreachable("We should have hit a variable dereference");
 }
 
-static nir_ssa_def *
+static nir_def *
 get_reg_for_deref(nir_deref_instr *deref, struct locals_to_regs_state *state)
 {
    uint32_t hash = hash_deref(deref);
@@ -122,9 +122,9 @@ get_reg_for_deref(nir_deref_instr *deref, struct locals_to_regs_state *state)
    if (bit_size == 1)
       bit_size = state->bool_bitsize;
 
-   nir_ssa_def *reg = nir_decl_reg(&state->builder,
-                                   glsl_get_vector_elements(deref->type),
-                                   bit_size, array_size > 1 ? array_size : 0);
+   nir_def *reg = nir_decl_reg(&state->builder,
+                               glsl_get_vector_elements(deref->type),
+                               bit_size, array_size > 1 ? array_size : 0);
 
    _mesa_hash_table_insert_pre_hashed(state->regs_table, hash, deref, reg);
 
@@ -132,8 +132,8 @@ get_reg_for_deref(nir_deref_instr *deref, struct locals_to_regs_state *state)
 }
 
 struct reg_location {
-   nir_ssa_def *reg;
-   nir_ssa_def *indirect;
+   nir_def *reg;
+   nir_def *indirect;
    unsigned base_offset;
 };
 
@@ -143,7 +143,7 @@ get_deref_reg_location(nir_deref_instr *deref,
 {
    nir_builder *b = &state->builder;
 
-   nir_ssa_def *reg = get_reg_for_deref(deref, state);
+   nir_def *reg = get_reg_for_deref(deref, state);
    nir_intrinsic_instr *decl = nir_instr_as_intrinsic(reg->parent_instr);
 
    /* It is possible for a user to create a shader that has an array with a
@@ -154,7 +154,7 @@ get_deref_reg_location(nir_deref_instr *deref,
    if (nir_intrinsic_num_array_elems(decl) == 0)
       return (struct reg_location){ .reg = reg };
 
-   nir_ssa_def *indirect = NULL;
+   nir_def *indirect = NULL;
    unsigned base_offset = 0;
 
    unsigned inner_array_size = 1;
@@ -172,15 +172,15 @@ get_deref_reg_location(nir_deref_instr *deref,
             base_offset = 0;
          }
 
-         nir_ssa_def *index = nir_i2iN(b, nir_ssa_for_src(b, d->arr.index, 1), 32);
-         nir_ssa_def *offset = nir_imul_imm(b, index, inner_array_size);
+         nir_def *index = nir_i2iN(b, nir_ssa_for_src(b, d->arr.index, 1), 32);
+         nir_def *offset = nir_imul_imm(b, index, inner_array_size);
 
          /* Avoid emitting iadd with 0, which is otherwise common, since this
           * pass runs late enough that nothing will clean it up.
           */
-         nir_ssa_scalar scal = nir_get_ssa_scalar(indirect, 0);
-         if (nir_ssa_scalar_is_const(scal))
-            indirect = nir_iadd_imm(b, offset, nir_ssa_scalar_as_uint(scal));
+         nir_scalar scal = nir_get_ssa_scalar(indirect, 0);
+         if (nir_scalar_is_const(scal))
+            indirect = nir_iadd_imm(b, offset, nir_scalar_as_uint(scal));
          else
             indirect = nir_iadd(b, offset, indirect);
       }
@@ -217,7 +217,7 @@ lower_locals_to_regs_block(nir_block *block,
          struct reg_location loc = get_deref_reg_location(deref, state);
          nir_intrinsic_instr *decl = nir_reg_get_decl(loc.reg);
 
-         nir_ssa_def *value;
+         nir_def *value;
          unsigned num_array_elems = nir_intrinsic_num_array_elems(decl);
          unsigned num_components = nir_intrinsic_num_components(decl);
          unsigned bit_size = nir_intrinsic_bit_size(decl);
@@ -234,7 +234,7 @@ lower_locals_to_regs_block(nir_block *block,
                                        loc.reg, .base = loc.base_offset);
          }
 
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, value);
+         nir_def_rewrite_uses(&intrin->dest.ssa, value);
          nir_instr_remove(&intrin->instr);
          state->progress = true;
          break;
@@ -250,7 +250,7 @@ lower_locals_to_regs_block(nir_block *block,
          struct reg_location loc = get_deref_reg_location(deref, state);
          nir_intrinsic_instr *decl = nir_reg_get_decl(loc.reg);
 
-         nir_ssa_def *val = intrin->src[1].ssa;
+         nir_def *val = intrin->src[1].ssa;
          unsigned num_array_elems = nir_intrinsic_num_array_elems(decl);
          unsigned write_mask = nir_intrinsic_write_mask(intrin);
 
index 23fc4aa..44771a4 100644
@@ -164,7 +164,7 @@ nir_lower_mediump_io(nir_shader *nir, nir_variable_mode modes,
             continue;
 
          nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
-         nir_ssa_def *(*convert)(nir_builder *, nir_ssa_def *);
+         nir_def *(*convert)(nir_builder *, nir_def *);
          bool is_varying = !(nir->info.stage == MESA_SHADER_VERTEX &&
                              mode == nir_var_shader_in) &&
                            !(nir->info.stage == MESA_SHADER_FRAGMENT &&
@@ -203,7 +203,7 @@ nir_lower_mediump_io(nir_shader *nir, nir_variable_mode modes,
              * gl_FragDepth, as GLSL ES declares it highp and so hardware such
              * as Adreno a6xx doesn't expect a half-float output for it.
              */
-            nir_ssa_def *val = intr->src[0].ssa;
+            nir_def *val = intr->src[0].ssa;
             bool is_fragdepth = (nir->info.stage == MESA_SHADER_FRAGMENT &&
                                  sem.location == FRAG_RESULT_DEPTH);
             if (!sem.medium_precision &&
@@ -242,9 +242,9 @@ nir_lower_mediump_io(nir_shader *nir, nir_variable_mode modes,
             b.cursor = nir_after_instr(&intr->instr);
             intr->dest.ssa.bit_size = 16;
             nir_intrinsic_set_dest_type(intr, (type & ~32) | 16);
-            nir_ssa_def *dst = convert(&b, &intr->dest.ssa);
-            nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, dst,
-                                           dst->parent_instr);
+            nir_def *dst = convert(&b, &intr->dest.ssa);
+            nir_def_rewrite_uses_after(&intr->dest.ssa, dst,
+                                       dst->parent_instr);
          }
 
          if (use_16bit_slots && is_varying &&
@@ -462,7 +462,7 @@ nir_lower_mediump_vars_impl(nir_function_impl *impl, nir_variable_mode modes,
                intrin->dest.ssa.bit_size = 16;
 
                b.cursor = nir_after_instr(&intrin->instr);
-               nir_ssa_def *replace = NULL;
+               nir_def *replace = NULL;
                switch (glsl_get_base_type(deref->type)) {
                case GLSL_TYPE_FLOAT16:
                   replace = nir_f2f32(&b, &intrin->dest.ssa);
@@ -477,15 +477,15 @@ nir_lower_mediump_vars_impl(nir_function_impl *impl, nir_variable_mode modes,
                   unreachable("Invalid 16-bit type");
                }
 
-               nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
-                                              replace,
-                                              replace->parent_instr);
+               nir_def_rewrite_uses_after(&intrin->dest.ssa,
+                                          replace,
+                                          replace->parent_instr);
                progress = true;
                break;
             }
 
             case nir_intrinsic_store_deref: {
-               nir_ssa_def *data = intrin->src[1].ssa;
+               nir_def *data = intrin->src[1].ssa;
                if (data->bit_size != 32)
                   break;
 
@@ -494,7 +494,7 @@ nir_lower_mediump_vars_impl(nir_function_impl *impl, nir_variable_mode modes,
                   break;
 
                b.cursor = nir_before_instr(&intrin->instr);
-               nir_ssa_def *replace = NULL;
+               nir_def *replace = NULL;
                switch (glsl_get_base_type(deref->type)) {
                case GLSL_TYPE_FLOAT16:
                   replace = nir_f2fmp(&b, data);
@@ -696,7 +696,7 @@ nir_legalize_16bit_sampler_srcs(nir_shader *nir,
             /* Fix the bit size. */
             bool is_sint = nir_tex_instr_src_type(tex, i) == nir_type_int;
             bool is_uint = nir_tex_instr_src_type(tex, i) == nir_type_uint;
-            nir_ssa_def *(*convert)(nir_builder *, nir_ssa_def *);
+            nir_def *(*convert)(nir_builder *, nir_def *);
 
             switch (bit_size) {
             case 16:
@@ -713,7 +713,7 @@ nir_legalize_16bit_sampler_srcs(nir_shader *nir,
             }
 
             b.cursor = nir_before_instr(&tex->instr);
-            nir_ssa_def *conv =
+            nir_def *conv =
                convert(&b, nir_ssa_for_src(&b, tex->src[i].src,
                                            tex->src[i].src.ssa->num_components));
             nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[i].src, conv);
@@ -733,30 +733,30 @@ nir_legalize_16bit_sampler_srcs(nir_shader *nir,
 }
 
 static bool
-const_is_f16(nir_ssa_scalar scalar)
+const_is_f16(nir_scalar scalar)
 {
-   double value = nir_ssa_scalar_as_float(scalar);
+   double value = nir_scalar_as_float(scalar);
    uint16_t fp16_val = _mesa_float_to_half(value);
    bool is_denorm = (fp16_val & 0x7fff) != 0 && (fp16_val & 0x7fff) <= 0x3ff;
    return value == _mesa_half_to_float(fp16_val) && !is_denorm;
 }
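
For the denorm guard above, a short worked example (values chosen for illustration):

/* 2^-15 = 3.0517578125e-5 round-trips exactly through fp16 as the
 * denormal encoding 0x0200, yet const_is_f16 still rejects it, since
 * hardware that flushes fp16 denorms would read it back as 0.0.
 * 2^-14 = 6.103515625e-5 is the smallest fp16 normal (0x0400) and
 * passes both checks, so it may be folded to 16 bits.
 */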
 
 static bool
-const_is_u16(nir_ssa_scalar scalar)
+const_is_u16(nir_scalar scalar)
 {
-   uint64_t value = nir_ssa_scalar_as_uint(scalar);
+   uint64_t value = nir_scalar_as_uint(scalar);
    return value == (uint16_t)value;
 }
 
 static bool
-const_is_i16(nir_ssa_scalar scalar)
+const_is_i16(nir_scalar scalar)
 {
-   int64_t value = nir_ssa_scalar_as_int(scalar);
+   int64_t value = nir_scalar_as_int(scalar);
    return value == (int16_t)value;
 }
 
 static bool
-can_fold_16bit_src(nir_ssa_def *ssa, nir_alu_type src_type, bool sext_matters)
+can_fold_16bit_src(nir_def *ssa, nir_alu_type src_type, bool sext_matters)
 {
    bool fold_f16 = src_type == nir_type_float32;
    bool fold_u16 = src_type == nir_type_uint32 && sext_matters;
@@ -765,10 +765,10 @@ can_fold_16bit_src(nir_ssa_def *ssa, nir_alu_type src_type, bool sext_matters)
 
    bool can_fold = fold_f16 || fold_u16 || fold_i16 || fold_i16_u16;
    for (unsigned i = 0; can_fold && i < ssa->num_components; i++) {
-      nir_ssa_scalar comp = nir_ssa_scalar_resolved(ssa, i);
-      if (nir_ssa_scalar_is_undef(comp))
+      nir_scalar comp = nir_scalar_resolved(ssa, i);
+      if (nir_scalar_is_undef(comp))
          continue;
-      else if (nir_ssa_scalar_is_const(comp)) {
+      else if (nir_scalar_is_const(comp)) {
          if (fold_f16)
             can_fold &= const_is_f16(comp);
          else if (fold_u16)
@@ -798,26 +798,26 @@ fold_16bit_src(nir_builder *b, nir_instr *instr, nir_src *src, nir_alu_type src_
 {
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_scalar new_comps[NIR_MAX_VEC_COMPONENTS];
+   nir_scalar new_comps[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < src->ssa->num_components; i++) {
-      nir_ssa_scalar comp = nir_ssa_scalar_resolved(src->ssa, i);
+      nir_scalar comp = nir_scalar_resolved(src->ssa, i);
 
-      if (nir_ssa_scalar_is_undef(comp))
-         new_comps[i] = nir_get_ssa_scalar(nir_ssa_undef(b, 1, 16), 0);
-      else if (nir_ssa_scalar_is_const(comp)) {
-         nir_ssa_def *constant;
+      if (nir_scalar_is_undef(comp))
+         new_comps[i] = nir_get_ssa_scalar(nir_undef(b, 1, 16), 0);
+      else if (nir_scalar_is_const(comp)) {
+         nir_def *constant;
          if (src_type == nir_type_float32)
-            constant = nir_imm_float16(b, nir_ssa_scalar_as_float(comp));
+            constant = nir_imm_float16(b, nir_scalar_as_float(comp));
          else
-            constant = nir_imm_intN_t(b, nir_ssa_scalar_as_uint(comp), 16);
+            constant = nir_imm_intN_t(b, nir_scalar_as_uint(comp), 16);
          new_comps[i] = nir_get_ssa_scalar(constant, 0);
       } else {
          /* conversion instruction */
-         new_comps[i] = nir_ssa_scalar_chase_alu_src(comp, 0);
+         new_comps[i] = nir_scalar_chase_alu_src(comp, 0);
       }
    }
 
-   nir_ssa_def *new_vec = nir_vec_scalars(b, new_comps, src->ssa->num_components);
+   nir_def *new_vec = nir_vec_scalars(b, new_comps, src->ssa->num_components);
 
    nir_instr_rewrite_src_ssa(instr, src, new_vec);
 }
@@ -841,7 +841,7 @@ fold_16bit_store_data(nir_builder *b, nir_intrinsic_instr *instr)
 }
 
 static bool
-fold_16bit_destination(nir_ssa_def *ssa, nir_alu_type dest_type,
+fold_16bit_destination(nir_def *ssa, nir_alu_type dest_type,
                        unsigned exec_mode, nir_rounding_mode rdm)
 {
    bool is_f32_to_f16 = dest_type == nir_type_float32;
index cdc40d4..b8a4f7a 100644
@@ -28,9 +28,9 @@
 
 static nir_intrinsic_instr *
 dup_mem_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
-                  nir_ssa_def *offset,
+                  nir_def *offset,
                   unsigned align_mul, unsigned align_offset,
-                  nir_ssa_def *data,
+                  nir_def *data,
                   unsigned num_components, unsigned bit_size)
 {
    const nir_intrinsic_info *info = &nir_intrinsic_infos[intrin->intrinsic];
@@ -81,7 +81,7 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
    const uint32_t whole_align = nir_intrinsic_align(intrin);
    nir_src *offset_src = nir_get_io_offset_src(intrin);
    const bool offset_is_const = nir_src_is_const(*offset_src);
-   nir_ssa_def *offset = offset_src->ssa;
+   nir_def *offset = offset_src->ssa;
 
    nir_mem_access_size_align requested =
       mem_access_size_align_cb(intrin->intrinsic, bytes_read,
@@ -98,7 +98,7 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
    /* Otherwise, we have to break it into chunks.  We could end up with as
     * many as 32 chunks if we're loading a u64vec16 as individual dwords.
     */
-   nir_ssa_def *chunks[32];
+   nir_def *chunks[32];
    unsigned num_chunks = 0;
    unsigned chunk_start = 0;
    while (chunk_start < bytes_read) {
@@ -121,8 +121,8 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
          assert(requested.bit_size >= requested.align * 8);
 
          uint64_t align_mask = requested.align - 1;
-         nir_ssa_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
-         nir_ssa_def *pad = nir_iand_imm(b, chunk_offset, align_mask);
+         nir_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
+         nir_def *pad = nir_iand_imm(b, chunk_offset, align_mask);
          chunk_offset = nir_iand_imm(b, chunk_offset, ~align_mask);
 
          nir_intrinsic_instr *load =
@@ -135,15 +135,15 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
             requested.num_components * requested.bit_size / 8;
          chunk_bytes = MIN2(bytes_left, requested_bytes - max_pad);
 
-         nir_ssa_def *shift = nir_imul_imm(b, pad, 8);
-         nir_ssa_def *shifted = nir_ushr(b, &load->dest.ssa, shift);
+         nir_def *shift = nir_imul_imm(b, pad, 8);
+         nir_def *shifted = nir_ushr(b, &load->dest.ssa, shift);
 
          if (load->dest.ssa.num_components > 1) {
-            nir_ssa_def *rev_shift =
+            nir_def *rev_shift =
                nir_isub_imm(b, load->dest.ssa.bit_size, shift);
-            nir_ssa_def *rev_shifted = nir_ishl(b, &load->dest.ssa, rev_shift);
+            nir_def *rev_shifted = nir_ishl(b, &load->dest.ssa, rev_shift);
 
-            nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+            nir_def *comps[NIR_MAX_VEC_COMPONENTS];
             for (unsigned i = 1; i < load->dest.ssa.num_components; i++)
                comps[i - 1] = nir_channel(b, rev_shifted, i);
 
@@ -170,7 +170,7 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
       } else if (chunk_align_offset % requested.align) {
          /* In this case, we know how much to adjust the offset */
          uint32_t delta = chunk_align_offset % requested.align;
-         nir_ssa_def *load_offset =
+         nir_def *load_offset =
             nir_iadd_imm(b, offset, chunk_start - (int)delta);
 
          const uint32_t load_align_offset =
@@ -192,7 +192,7 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
          /* There's no guarantee that chunk_num_components is a valid NIR
           * vector size, so just loop one chunk component at a time
           */
-         nir_ssa_def *chunk_data = &load->dest.ssa;
+         nir_def *chunk_data = &load->dest.ssa;
          for (unsigned i = 0; i < chunk_num_components; i++) {
             assert(num_chunks < ARRAY_SIZE(chunks));
             chunks[num_chunks++] =
@@ -201,7 +201,7 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
                                 1, chunk_bit_size);
          }
       } else {
-         nir_ssa_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
+         nir_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
          nir_intrinsic_instr *load =
             dup_mem_intrinsic(b, intrin, chunk_offset,
                               align_mul, chunk_align_offset, NULL,
@@ -215,9 +215,9 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
       chunk_start += chunk_bytes;
    }
 
-   nir_ssa_def *result = nir_extract_bits(b, chunks, num_chunks, 0,
-                                          num_components, bit_size);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, result);
+   nir_def *result = nir_extract_bits(b, chunks, num_chunks, 0,
+                                      num_components, bit_size);
+   nir_def_rewrite_uses(&intrin->dest.ssa, result);
    nir_instr_remove(&intrin->instr);
 
    return true;
@@ -228,7 +228,7 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
                 nir_lower_mem_access_bit_sizes_cb mem_access_size_align_cb,
                 const void *cb_data, bool allow_unaligned_stores_as_atomics)
 {
-   nir_ssa_def *value = intrin->src[0].ssa;
+   nir_def *value = intrin->src[0].ssa;
 
    assert(intrin->num_components == value->num_components);
    const unsigned bit_size = value->bit_size;
@@ -240,7 +240,7 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
    const uint32_t whole_align = nir_intrinsic_align(intrin);
    nir_src *offset_src = nir_get_io_offset_src(intrin);
    const bool offset_is_const = nir_src_is_const(*offset_src);
-   nir_ssa_def *offset = offset_src->ssa;
+   nir_def *offset = offset_src->ssa;
 
    nir_component_mask_t writemask = nir_intrinsic_write_mask(intrin);
    assert(writemask < (1 << num_components));
@@ -307,8 +307,8 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
          };
 
          uint64_t align_mask = requested.align - 1;
-         nir_ssa_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
-         nir_ssa_def *pad = chunk_align < 4 ? nir_iand_imm(b, chunk_offset, align_mask) : nir_imm_intN_t(b, 0, chunk_offset->bit_size);
+         nir_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
+         nir_def *pad = chunk_align < 4 ? nir_iand_imm(b, chunk_offset, align_mask) : nir_imm_intN_t(b, 0, chunk_offset->bit_size);
          chunk_offset = nir_iand_imm(b, chunk_offset, ~align_mask);
 
          unsigned max_pad = chunk_align < requested.align ? requested.align - chunk_align : 0;
@@ -317,7 +317,7 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
          chunk_bytes = MIN2(max_chunk_bytes, requested_bytes - max_pad);
          unsigned chunk_bits = chunk_bytes * 8;
 
-         nir_ssa_def *chunk_value = value;
+         nir_def *chunk_value = value;
          /* The one special case where nir_extract_bits cannot get a scalar by asking for
           * 1 component of chunk_bits.
           */
@@ -326,13 +326,13 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
             chunk_bits = 32;
          }
 
-         nir_ssa_def *data = nir_u2u32(b,
-                                       nir_extract_bits(b, &chunk_value, 1, chunk_start * 8,
-                                                        1, chunk_bits));
-         nir_ssa_def *iand_mask = nir_imm_int(b, (1 << chunk_bits) - 1);
+         nir_def *data = nir_u2u32(b,
+                                   nir_extract_bits(b, &chunk_value, 1, chunk_start * 8,
+                                                    1, chunk_bits));
+         nir_def *iand_mask = nir_imm_int(b, (1 << chunk_bits) - 1);
 
          if (chunk_align < requested.align) {
-            nir_ssa_def *shift = nir_imul_imm(b, pad, 8);
+            nir_def *shift = nir_imul_imm(b, pad, 8);
             data = nir_ishl(b, data, shift);
             iand_mask = nir_inot(b, nir_ishl(b, iand_mask, shift));
          }
@@ -364,11 +364,11 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
             unreachable("Unsupported unaligned store");
          }
       } else {
-         nir_ssa_def *packed = nir_extract_bits(b, &value, 1, chunk_start * 8,
-                                                requested.num_components,
-                                                requested.bit_size);
+         nir_def *packed = nir_extract_bits(b, &value, 1, chunk_start * 8,
+                                            requested.num_components,
+                                            requested.bit_size);
 
-         nir_ssa_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
+         nir_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
          dup_mem_intrinsic(b, intrin, chunk_offset,
                            align_mul, chunk_align_offset, packed,
                            requested.num_components, requested.bit_size);
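
The pad/shift arithmetic used by these chunked loads and stores is easier to see at scalar size. A plain-C sketch of the same trick for one byte, assuming little-endian 32-bit memory (this helper is illustrative, not part of the pass):

#include <stdint.h>

/* Read a byte at an arbitrary offset using only aligned 32-bit loads:
 * round the offset down to its containing word, then shift the
 * unwanted low "pad" bytes away, mirroring chunk_offset/pad above.
 */
static uint8_t
load_byte_via_aligned_word(const uint32_t *mem, uint32_t byte_offset)
{
   uint32_t pad = byte_offset & 3;       /* bytes below the word start */
   uint32_t word = mem[byte_offset / 4]; /* aligned word load */
   return (uint8_t)(word >> (pad * 8));
}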
index c5addb8..ecf2cac 100644
@@ -49,9 +49,9 @@ copy_type_for_byte_size(unsigned size)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 memcpy_load_deref_elem(nir_builder *b, nir_deref_instr *parent,
-                       nir_ssa_def *index)
+                       nir_def *index)
 {
    nir_deref_instr *deref;
 
@@ -62,17 +62,17 @@ memcpy_load_deref_elem(nir_builder *b, nir_deref_instr *parent,
    return nir_load_deref(b, deref);
 }
 
-static nir_ssa_def *
+static nir_def *
 memcpy_load_deref_elem_imm(nir_builder *b, nir_deref_instr *parent,
                            uint64_t index)
 {
-   nir_ssa_def *idx = nir_imm_intN_t(b, index, parent->dest.ssa.bit_size);
+   nir_def *idx = nir_imm_intN_t(b, index, parent->dest.ssa.bit_size);
    return memcpy_load_deref_elem(b, parent, idx);
 }
 
 static void
 memcpy_store_deref_elem(nir_builder *b, nir_deref_instr *parent,
-                        nir_ssa_def *index, nir_ssa_def *value)
+                        nir_def *index, nir_def *value)
 {
    nir_deref_instr *deref;
 
@@ -84,9 +84,9 @@ memcpy_store_deref_elem(nir_builder *b, nir_deref_instr *parent,
 
 static void
 memcpy_store_deref_elem_imm(nir_builder *b, nir_deref_instr *parent,
-                            uint64_t index, nir_ssa_def *value)
+                            uint64_t index, nir_def *value)
 {
-   nir_ssa_def *idx = nir_imm_intN_t(b, index, parent->dest.ssa.bit_size);
+   nir_def *idx = nir_imm_intN_t(b, index, parent->dest.ssa.bit_size);
    memcpy_store_deref_elem(b, parent, idx, value);
 }
 
@@ -134,14 +134,14 @@ lower_memcpy_impl(nir_function_impl *impl)
                                        copy_type, copy_size);
 
                uint64_t index = offset / copy_size;
-               nir_ssa_def *value =
+               nir_def *value =
                   memcpy_load_deref_elem_imm(&b, copy_src, index);
                memcpy_store_deref_elem_imm(&b, copy_dst, index, value);
                offset += copy_size;
             }
          } else {
             found_non_const_memcpy = true;
-            nir_ssa_def *size = cpy->src[2].ssa;
+            nir_def *size = cpy->src[2].ssa;
 
            /* In this case, we don't have any idea what the size is, so we
             * emit a loop that copies one byte at a time.
@@ -158,14 +158,14 @@ lower_memcpy_impl(nir_function_impl *impl)
             nir_store_var(&b, i, nir_imm_intN_t(&b, 0, size->bit_size), ~0);
             nir_push_loop(&b);
             {
-               nir_ssa_def *index = nir_load_var(&b, i);
+               nir_def *index = nir_load_var(&b, i);
                nir_push_if(&b, nir_uge(&b, index, size));
                {
                   nir_jump(&b, nir_jump_break);
                }
                nir_pop_if(&b, NULL);
 
-               nir_ssa_def *value =
+               nir_def *value =
                   memcpy_load_deref_elem(&b, copy_src, index);
                memcpy_store_deref_elem(&b, copy_dst, index, value);
                nir_store_var(&b, i, nir_iadd_imm(&b, index, 1), ~0);
index 15faeb6..d24acdb 100644
@@ -248,13 +248,13 @@ nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
 
    nir_loop *loop = nir_push_loop(&b);
 
-   nir_ssa_def *loop_index = nir_load_deref(&b, loop_index_deref);
-   nir_ssa_def *cmp = nir_ige_imm(&b, loop_index, view_count);
+   nir_def *loop_index = nir_load_deref(&b, loop_index_deref);
+   nir_def *cmp = nir_ige_imm(&b, loop_index, view_count);
    nir_if *loop_check = nir_push_if(&b, cmp);
    nir_jump(&b, nir_jump_break);
    nir_pop_if(&b, loop_check);
 
-   nir_ssa_def *view_index =
+   nir_def *view_index =
       nir_load_deref(&b, nir_build_deref_array(&b, view_index_deref, loop_index));
    nir_deref_instr *pos_deref =
       nir_build_deref_array(&b, nir_build_deref_var(&b, pos_var), loop_index);
@@ -275,7 +275,7 @@ nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
 
          switch (intrin->intrinsic) {
          case nir_intrinsic_load_view_index: {
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, view_index);
+            nir_def_rewrite_uses(&intrin->dest.ssa, view_index);
             break;
          }
 
index d7e8263..81dece3 100644
@@ -26,9 +26,9 @@
 
 struct nu_handle {
    nir_src *src;
-   nir_ssa_def *handle;
+   nir_def *handle;
    nir_deref_instr *parent_deref;
-   nir_ssa_def *first;
+   nir_def *first;
 };
 
 static bool
@@ -63,7 +63,7 @@ nu_handle_init(struct nu_handle *h, nir_src *src)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 nu_handle_compare(const nir_lower_non_uniform_access_options *options,
                   nir_builder *b, struct nu_handle *handle)
 {
@@ -72,14 +72,14 @@ nu_handle_compare(const nir_lower_non_uniform_access_options *options,
       channel_mask = options->callback(handle->src, options->callback_data);
    channel_mask &= nir_component_mask(handle->handle->num_components);
 
-   nir_ssa_def *channels[NIR_MAX_VEC_COMPONENTS];
+   nir_def *channels[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < handle->handle->num_components; i++)
       channels[i] = nir_channel(b, handle->handle, i);
 
    handle->first = handle->handle;
-   nir_ssa_def *equal_first = nir_imm_true(b);
+   nir_def *equal_first = nir_imm_true(b);
    u_foreach_bit(i, channel_mask) {
-      nir_ssa_def *first = nir_read_first_invocation(b, channels[i]);
+      nir_def *first = nir_read_first_invocation(b, channels[i]);
       handle->first = nir_vector_insert_imm(b, handle->first, first, i);
 
       equal_first = nir_iand(b, equal_first, nir_ieq(b, first, channels[i]));
@@ -143,14 +143,14 @@ lower_non_uniform_tex_access(const nir_lower_non_uniform_access_options *options
 
    nir_push_loop(b);
 
-   nir_ssa_def *all_equal_first = nir_imm_true(b);
+   nir_def *all_equal_first = nir_imm_true(b);
    for (unsigned i = 0; i < num_handles; i++) {
       if (i && handles[i].handle == handles[0].handle) {
          handles[i].first = handles[0].first;
          continue;
       }
 
-      nir_ssa_def *equal_first = nu_handle_compare(options, b, &handles[i]);
+      nir_def *equal_first = nu_handle_compare(options, b, &handles[i]);
       all_equal_first = nir_iand(b, all_equal_first, equal_first);
    }
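
The loop being assembled here is the usual waterfall for legalizing divergent resource handles; in scalar pseudocode (a sketch only; the tail of the loop falls outside this hunk):

/* do {
 *    first = readFirstInvocation(handle);
 *    if (handle == first) {
 *       <run the access with the now-uniform handle>
 *       break;   // taken per-invocation as its handle comes up
 *    }
 * } while (true);
 */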
 
index e0e37db..adabc14 100644
  * unpackInt2x32(foo) -> vec2(unpackInt2x32_x(foo), unpackInt2x32_y(foo))
  */
 
-static nir_ssa_def *
-lower_pack_64_from_32(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_pack_64_from_32(nir_builder *b, nir_def *src)
 {
    return nir_pack_64_2x32_split(b, nir_channel(b, src, 0),
                                  nir_channel(b, src, 1));
 }
 
-static nir_ssa_def *
-lower_unpack_64_to_32(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_unpack_64_to_32(nir_builder *b, nir_def *src)
 {
    return nir_vec2(b, nir_unpack_64_2x32_split_x(b, src),
                    nir_unpack_64_2x32_split_y(b, src));
 }
 
-static nir_ssa_def *
-lower_pack_32_from_16(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_pack_32_from_16(nir_builder *b, nir_def *src)
 {
    return nir_pack_32_2x16_split(b, nir_channel(b, src, 0),
                                  nir_channel(b, src, 1));
 }
 
-static nir_ssa_def *
-lower_unpack_32_to_16(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_unpack_32_to_16(nir_builder *b, nir_def *src)
 {
    return nir_vec2(b, nir_unpack_32_2x16_split_x(b, src),
                    nir_unpack_32_2x16_split_y(b, src));
 }
 
-static nir_ssa_def *
-lower_pack_64_from_16(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_pack_64_from_16(nir_builder *b, nir_def *src)
 {
-   nir_ssa_def *xy = nir_pack_32_2x16_split(b, nir_channel(b, src, 0),
-                                            nir_channel(b, src, 1));
+   nir_def *xy = nir_pack_32_2x16_split(b, nir_channel(b, src, 0),
+                                        nir_channel(b, src, 1));
 
-   nir_ssa_def *zw = nir_pack_32_2x16_split(b, nir_channel(b, src, 2),
-                                            nir_channel(b, src, 3));
+   nir_def *zw = nir_pack_32_2x16_split(b, nir_channel(b, src, 2),
+                                        nir_channel(b, src, 3));
 
    return nir_pack_64_2x32_split(b, xy, zw);
 }
 
-static nir_ssa_def *
-lower_unpack_64_to_16(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_unpack_64_to_16(nir_builder *b, nir_def *src)
 {
-   nir_ssa_def *xy = nir_unpack_64_2x32_split_x(b, src);
-   nir_ssa_def *zw = nir_unpack_64_2x32_split_y(b, src);
+   nir_def *xy = nir_unpack_64_2x32_split_x(b, src);
+   nir_def *zw = nir_unpack_64_2x32_split_y(b, src);
 
    return nir_vec4(b, nir_unpack_32_2x16_split_x(b, xy),
                    nir_unpack_32_2x16_split_y(b, xy),
@@ -86,8 +86,8 @@ lower_unpack_64_to_16(nir_builder *b, nir_ssa_def *src)
                    nir_unpack_32_2x16_split_y(b, zw));
 }
 
-static nir_ssa_def *
-lower_pack_32_from_8(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+lower_pack_32_from_8(nir_builder *b, nir_def *src)
 {
    return nir_pack_32_4x8_split(b, nir_channel(b, src, 0),
                                 nir_channel(b, src, 1),
@@ -114,8 +114,8 @@ lower_pack_instr(nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_before_instr(&alu_instr->instr);
 
-   nir_ssa_def *src = nir_ssa_for_alu_src(b, alu_instr, 0);
-   nir_ssa_def *dest;
+   nir_def *src = nir_ssa_for_alu_src(b, alu_instr, 0);
+   nir_def *dest;
 
    switch (alu_instr->op) {
    case nir_op_pack_64_2x32:
@@ -142,7 +142,7 @@ lower_pack_instr(nir_builder *b, nir_instr *instr, void *data)
    default:
       unreachable("Impossible opcode");
    }
-   nir_ssa_def_rewrite_uses(&alu_instr->dest.dest.ssa, dest);
+   nir_def_rewrite_uses(&alu_instr->dest.dest.ssa, dest);
    nir_instr_remove(&alu_instr->instr);
 
    return true;
index 3d9053d..3ba490b 100644
@@ -30,7 +30,7 @@ lower_impl(nir_function_impl *impl)
    nir_shader *shader = impl->function->shader;
    nir_builder b;
    nir_variable *in, *out;
-   nir_ssa_def *def;
+   nir_def *def;
 
    b = nir_builder_at(nir_before_cf_list(&impl->body));
 
@@ -50,7 +50,7 @@ lower_impl(nir_function_impl *impl)
       load_sem.location = VERT_ATTRIB_EDGEFLAG;
       load_sem.num_slots = 1;
 
-      nir_ssa_def *load =
+      nir_def *load =
          nir_load_input(&b, 1, 32, nir_imm_int(&b, 0),
                         .base = shader->num_inputs++,
                         .component = 0,
index 4eb59f9..01c6e24 100644
@@ -73,7 +73,7 @@ nir_lower_patch_vertices(nir_shader *nir,
 
                b.cursor = nir_before_instr(&intr->instr);
 
-               nir_ssa_def *val = NULL;
+               nir_def *val = NULL;
                if (static_count) {
                   val = nir_imm_int(&b, static_count);
                } else {
@@ -84,8 +84,8 @@ nir_lower_patch_vertices(nir_shader *nir,
                }
 
                progress = true;
-               nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                                        val);
+               nir_def_rewrite_uses(&intr->dest.ssa,
+                                    val);
                nir_instr_remove(instr);
             }
          }
index 356dd95..1cd116d 100644
@@ -234,8 +234,8 @@ lower_phis_to_scalar_block(nir_block *block,
 
       nir_instr_insert_after(&last_phi->instr, &vec->instr);
 
-      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
-                               &vec->dest.dest.ssa);
+      nir_def_rewrite_uses(&phi->dest.ssa,
+                           &vec->dest.dest.ssa);
 
       nir_instr_remove(&phi->instr);
       exec_list_push_tail(&state->dead_instrs, &phi->instr.node);
index 26a425e..259ccc3 100644
@@ -35,7 +35,7 @@ typedef struct {
    nir_variable *pntc_transform;
 } lower_pntc_ytransform_state;
 
-static nir_ssa_def *
+static nir_def *
 get_pntc_transform(lower_pntc_ytransform_state *state)
 {
    if (state->pntc_transform == NULL) {
@@ -60,21 +60,21 @@ lower_load_pointcoord(lower_pntc_ytransform_state *state,
    nir_builder *b = &state->b;
    b->cursor = nir_after_instr(&intr->instr);
 
-   nir_ssa_def *pntc = &intr->dest.ssa;
-   nir_ssa_def *transform = get_pntc_transform(state);
-   nir_ssa_def *y = nir_channel(b, pntc, 1);
+   nir_def *pntc = &intr->dest.ssa;
+   nir_def *transform = get_pntc_transform(state);
+   nir_def *y = nir_channel(b, pntc, 1);
    /* The offset is 1 if we're flipping, 0 otherwise. */
-   nir_ssa_def *offset = nir_channel(b, transform, 1);
+   nir_def *offset = nir_channel(b, transform, 1);
    /* Flip the sign of y if we're flipping. */
-   nir_ssa_def *scaled = nir_fmul(b, y, nir_channel(b, transform, 0));
+   nir_def *scaled = nir_fmul(b, y, nir_channel(b, transform, 0));
 
    /* Reassemble the vector. */
-   nir_ssa_def *flipped_pntc = nir_vec2(b,
-                                        nir_channel(b, pntc, 0),
-                                        nir_fadd(b, offset, scaled));
+   nir_def *flipped_pntc = nir_vec2(b,
+                                    nir_channel(b, pntc, 0),
+                                    nir_fadd(b, offset, scaled));
 
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, flipped_pntc,
-                                  flipped_pntc->parent_instr);
+   nir_def_rewrite_uses_after(&intr->dest.ssa, flipped_pntc,
+                              flipped_pntc->parent_instr);
 }
 
 static void
index f795725..56b9e21 100644
@@ -52,7 +52,7 @@ lower_point_size_instr(nir_builder *b, nir_instr *instr, void *data)
    b->cursor = nir_before_instr(instr);
 
    assert(intr->src[1].ssa->num_components == 1);
-   nir_ssa_def *psiz = intr->src[1].ssa;
+   nir_def *psiz = intr->src[1].ssa;
 
    if (minmax[0] > 0.0f)
       psiz = nir_fmax(b, psiz, nir_imm_float(b, minmax[0]));
index 3f76c41..56a280f 100644
@@ -57,7 +57,7 @@ lower_impl(nir_function_impl *impl,
 
    if (!out) {
       b.cursor = nir_before_cf_list(&impl->body);
-      nir_ssa_def *load = nir_load_var(&b, in);
+      nir_def *load = nir_load_var(&b, in);
       load = nir_fclamp(&b, nir_channel(&b, load, 0), nir_channel(&b, load, 1), nir_channel(&b, load, 2));
       nir_store_var(&b, new_out, load, 0x1);
    } else {
@@ -70,7 +70,7 @@ lower_impl(nir_function_impl *impl,
                   nir_variable *var = nir_intrinsic_get_var(intr, 0);
                   if (var == out) {
                      b.cursor = nir_after_instr(instr);
-                     nir_ssa_def *load = nir_load_var(&b, in);
+                     nir_def *load = nir_load_var(&b, in);
                      load = nir_fclamp(&b, nir_channel(&b, load, 0), nir_channel(&b, load, 1), nir_channel(&b, load, 2));
                      nir_store_var(&b, new_out ? new_out : out, load, 0x1);
                      found = true;
@@ -81,7 +81,7 @@ lower_impl(nir_function_impl *impl,
       }
       if (!found) {
          b.cursor = nir_before_cf_list(&impl->body);
-         nir_ssa_def *load = nir_load_var(&b, in);
+         nir_def *load = nir_load_var(&b, in);
          load = nir_fclamp(&b, nir_channel(&b, load, 0), nir_channel(&b, load, 1), nir_channel(&b, load, 2));
          nir_store_var(&b, new_out, load, 0x1);
       }
index 6411fb3..946e2de 100644
@@ -63,33 +63,33 @@ lower_point_smooth(nir_builder *b, nir_instr *instr, UNUSED void *_state)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *coord = nir_load_point_coord_maybe_flipped(b);
+   nir_def *coord = nir_load_point_coord_maybe_flipped(b);
 
    /* point_size = 1.0 / dFdx(gl_PointCoord.x); */
-   nir_ssa_def *point_size = nir_frcp(b, nir_fddx(b, nir_channel(b, coord, 0)));
+   nir_def *point_size = nir_frcp(b, nir_fddx(b, nir_channel(b, coord, 0)));
 
    /* radius = point_size * 0.5 */
-   nir_ssa_def *radius = nir_fmul_imm(b, point_size, 0.5);
+   nir_def *radius = nir_fmul_imm(b, point_size, 0.5);
 
    /**
     * Compute the distance of the point from the centre:
     * distance = √((x - 0.5)^2 + (y - 0.5)^2)
     */
-   nir_ssa_def *distance = nir_fast_distance(b, coord,
-                                             nir_imm_vec2(b, 0.5, 0.5));
+   nir_def *distance = nir_fast_distance(b, coord,
+                                         nir_imm_vec2(b, 0.5, 0.5));
    distance = nir_fmul(b, distance, point_size);
 
    /* alpha = min(max(radius - distance, 0.0), 1.0) */
-   nir_ssa_def *coverage = nir_fsat(b, nir_fsub(b, radius, distance));
+   nir_def *coverage = nir_fsat(b, nir_fsub(b, radius, distance));
 
    /* Discard fragments that are not covered by the point */
    nir_discard_if(b, nir_feq_imm(b, coverage, 0.0f));
 
    /* Write out the fragment color * vec4(1, 1, 1, coverage) */
-   nir_ssa_def *one = nir_imm_float(b, 1.0f);
-   nir_ssa_def *new_val = nir_fmul(b, nir_vec4(b, one, one, one, coverage),
-                                   intr->src[out_src_idx].ssa);
+   nir_def *one = nir_imm_float(b, 1.0f);
+   nir_def *new_val = nir_fmul(b, nir_vec4(b, one, one, one, coverage),
+                               intr->src[out_src_idx].ssa);
    nir_instr_rewrite_src(instr, &intr->src[out_src_idx], nir_src_for_ssa(new_val));
 
    return true;
index 5f89a4a..4f51ac5 100644
@@ -51,11 +51,11 @@ lower_polylinesmooth(nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *res1, *res2;
+   nir_def *res1, *res2;
 
    nir_if *if_enabled = nir_push_if(b, nir_load_poly_line_smooth_enabled(b));
    {
-      nir_ssa_def *coverage = nir_load_sample_mask_in(b);
+      nir_def *coverage = nir_load_sample_mask_in(b);
 
       /* coverage = (coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
       coverage = nir_bit_count(b, coverage);
@@ -63,7 +63,7 @@ lower_polylinesmooth(nir_builder *b, nir_instr *instr, void *data)
       coverage = nir_fmul_imm(b, coverage, 1.0 / *num_smooth_aa_sample);
 
      /* Write out the fragment color * vec4(1, 1, 1, alpha) */
-      nir_ssa_def *one = nir_imm_float(b, 1.0f);
+      nir_def *one = nir_imm_float(b, 1.0f);
       res1 = nir_fmul(b, nir_vec4(b, one, one, one, coverage), intr->src[0].ssa);
    }
    nir_push_else(b, if_enabled);
@@ -72,7 +72,7 @@ lower_polylinesmooth(nir_builder *b, nir_instr *instr, void *data)
    }
    nir_pop_if(b, if_enabled);
 
-   nir_ssa_def *new_dest = nir_if_phi(b, res1, res2);
+   nir_def *new_dest = nir_if_phi(b, res1, res2);
 
    nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(new_dest));
    return true;
index 1a6047a..65ea6ed 100644
@@ -38,7 +38,7 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options)
    if (prntf->intrinsic != nir_intrinsic_printf)
       return false;
 
-   nir_ssa_def *fmt_str_id = prntf->src[0].ssa;
+   nir_def *fmt_str_id = prntf->src[0].ssa;
    nir_deref_instr *args = nir_src_as_deref(prntf->src[1]);
    assert(args->deref_type == nir_deref_type_var);
 
@@ -48,7 +48,7 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options)
     * overflowed, return -1, otherwise, store the arguments and return 0.
     */
    b->cursor = nir_before_instr(&prntf->instr);
-   nir_ssa_def *buffer_addr = nir_load_printf_buffer_address(b, ptr_bit_size);
+   nir_def *buffer_addr = nir_load_printf_buffer_address(b, ptr_bit_size);
    nir_deref_instr *buffer =
       nir_build_deref_cast(b, buffer_addr, nir_var_mem_global,
                            glsl_array_type(glsl_uint8_t_type(), 0, 4), 0);
@@ -66,7 +66,7 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options)
                                   nir_var_mem_global,
                                   glsl_uint_type(), 0);
    counter->cast.align_mul = 4;
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_deref_atomic(b, 32, &counter->dest.ssa,
                        nir_imm_int(b, fmt_str_id_size + args_size),
                        .atomic_op = nir_atomic_op_iadd);
@@ -78,10 +78,10 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options)
       buffer_size - args_size - fmt_str_id_size - counter_size;
    nir_push_if(b, nir_ilt_imm(b, offset, max_valid_offset));
 
-   nir_ssa_def *printf_succ_val = nir_imm_int(b, 0);
+   nir_def *printf_succ_val = nir_imm_int(b, 0);
 
    /* Write the format string ID */
-   nir_ssa_def *fmt_str_id_offset =
+   nir_def *fmt_str_id_offset =
       nir_i2iN(b, offset, ptr_bit_size);
    nir_deref_instr *fmt_str_id_deref =
       nir_build_deref_array(b, buffer, fmt_str_id_offset);
@@ -94,7 +94,7 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options)
    /* Write the format args */
    for (unsigned i = 0; i < glsl_get_length(args->type); ++i) {
       nir_deref_instr *arg_deref = nir_build_deref_struct(b, args, i);
-      nir_ssa_def *arg = nir_load_deref(b, arg_deref);
+      nir_def *arg = nir_load_deref(b, arg_deref);
       const struct glsl_type *arg_type = arg_deref->type;
 
       /* Clang does promotion of arguments to their "native" size. That means
@@ -110,7 +110,7 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options)
       }
 
       unsigned field_offset = glsl_get_struct_field_offset(args->type, i);
-      nir_ssa_def *arg_offset =
+      nir_def *arg_offset =
          nir_i2iN(b, nir_iadd_imm(b, offset, fmt_str_id_size + field_offset),
                   ptr_bit_size);
       nir_deref_instr *dst_arg_deref =
@@ -123,11 +123,11 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options)
    }
 
    nir_push_else(b, NULL);
-   nir_ssa_def *printf_fail_val = nir_imm_int(b, -1);
+   nir_def *printf_fail_val = nir_imm_int(b, -1);
    nir_pop_if(b, NULL);
 
-   nir_ssa_def *ret_val = nir_if_phi(b, printf_succ_val, printf_fail_val);
-   nir_ssa_def_rewrite_uses(&prntf->dest.ssa, ret_val);
+   nir_def *ret_val = nir_if_phi(b, printf_succ_val, printf_fail_val);
+   nir_def_rewrite_uses(&prntf->dest.ssa, ret_val);
    nir_instr_remove(&prntf->instr);
 
    return true;
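
Putting the atomics and offsets above together, the buffer layout this lowering assumes can be sketched as follows (field names are invented; the counter's placement at the front is an inference from the counter_size slack in max_valid_offset):

/* u32 counter;                 atomically advanced by
 *                              fmt_str_id_size + args_size per printf
 * struct {
 *    u32  fmt_str_id;          written at buffer[offset]
 *    char args[args_size];     packed per-field at struct offsets
 * } records[];
 *
 * On overflow the record is skipped and the intrinsic returns -1,
 * otherwise 0 (the printf_succ_val/printf_fail_val phi above).
 */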
index 465c528..6bd3aa0 100644
@@ -137,12 +137,12 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
 
    switch (intrin->intrinsic) {
    case nir_intrinsic_image_deref_load: {
-      nir_ssa_def *coord =
+      nir_def *coord =
          nir_trim_vector(b, intrin->src[1].ssa, coord_components);
       tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
       tex->coord_components = coord_components;
 
-      nir_ssa_def *lod = intrin->src[3].ssa;
+      nir_def *lod = intrin->src[3].ssa;
       tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
 
       assert(num_srcs == 3);
@@ -153,7 +153,7 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
    }
 
    case nir_intrinsic_image_deref_size: {
-      nir_ssa_def *lod = intrin->src[1].ssa;
+      nir_def *lod = intrin->src[1].ssa;
       tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
 
       assert(num_srcs == 2);
@@ -169,10 +169,10 @@ lower_readonly_image_instr_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
 
    nir_builder_instr_insert(b, &tex->instr);
 
-   nir_ssa_def *res = nir_trim_vector(b, &tex->dest.ssa,
-                                      intrin->dest.ssa.num_components);
+   nir_def *res = nir_trim_vector(b, &tex->dest.ssa,
+                                  intrin->dest.ssa.num_components);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, res);
+   nir_def_rewrite_uses(&intrin->dest.ssa, res);
    nir_instr_remove(&intrin->instr);
 
    return true;
index bc449ea..980b8a6 100644
@@ -57,19 +57,19 @@ static void
 rewrite_load(nir_intrinsic_instr *load, struct regs_to_ssa_state *state)
 {
    nir_block *block = load->instr.block;
-   nir_ssa_def *reg = load->src[0].ssa;
+   nir_def *reg = load->src[0].ssa;
 
    struct nir_phi_builder_value *value = state->values[reg->index];
    if (!value)
       return;
 
    nir_intrinsic_instr *decl = nir_instr_as_intrinsic(reg->parent_instr);
-   nir_ssa_def *def = nir_phi_builder_value_get_block_def(value, block);
+   nir_def *def = nir_phi_builder_value_get_block_def(value, block);
 
-   nir_ssa_def_rewrite_uses(&load->dest.ssa, def);
+   nir_def_rewrite_uses(&load->dest.ssa, def);
    nir_instr_remove(&load->instr);
 
-   if (nir_ssa_def_is_unused(&decl->dest.ssa))
+   if (nir_def_is_unused(&decl->dest.ssa))
       nir_instr_remove(&decl->instr);
 }
 
@@ -77,8 +77,8 @@ static void
 rewrite_store(nir_intrinsic_instr *store, struct regs_to_ssa_state *state)
 {
    nir_block *block = store->instr.block;
-   nir_ssa_def *new_value = store->src[0].ssa;
-   nir_ssa_def *reg = store->src[1].ssa;
+   nir_def *new_value = store->src[0].ssa;
+   nir_def *reg = store->src[1].ssa;
 
    struct nir_phi_builder_value *value = state->values[reg->index];
    if (!value)
@@ -90,10 +90,10 @@ rewrite_store(nir_intrinsic_instr *store, struct regs_to_ssa_state *state)
 
    /* Implement write masks by combining together the old/new values */
    if (write_mask != BITFIELD_MASK(num_components)) {
-      nir_ssa_def *old_value =
+      nir_def *old_value =
          nir_phi_builder_value_get_block_def(value, block);
 
-      nir_ssa_def *channels[NIR_MAX_VEC_COMPONENTS] = { NULL };
+      nir_def *channels[NIR_MAX_VEC_COMPONENTS] = { NULL };
       state->b.cursor = nir_before_instr(&store->instr);
 
       for (unsigned i = 0; i < num_components; ++i) {
@@ -109,7 +109,7 @@ rewrite_store(nir_intrinsic_instr *store, struct regs_to_ssa_state *state)
    nir_phi_builder_value_set_block_def(value, block, new_value);
    nir_instr_remove(&store->instr);
 
-   if (nir_ssa_def_is_unused(&decl->dest.ssa))
+   if (nir_def_is_unused(&decl->dest.ssa))
       nir_instr_remove(&decl->instr);
 }
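
The body of the merge loop falls outside the hunk above; what it plausibly does, under the new names, is the standard per-channel select (a reconstruction, not copied from the file):

/* Keep the freshly stored value where the write mask covers the
 * channel, fall back to the register's previous contents elsewhere,
 * then rebuild the vector.
 */
for (unsigned i = 0; i < num_components; ++i) {
   channels[i] = (write_mask & BITFIELD_BIT(i))
                    ? nir_channel(&state->b, new_value, i)
                    : nir_channel(&state->b, old_value, i);
}
new_value = nir_vec(&state->b, channels, num_components);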
 
index 6571d38..26394e8 100644
 
 static void
 rewrite_offset(nir_builder *b, nir_intrinsic_instr *instr,
-               uint32_t type_sz, uint32_t offset_src, nir_ssa_def *size)
+               uint32_t type_sz, uint32_t offset_src, nir_def *size)
 {
   /* Compute the maximum offset being accessed and, if it is out of bounds,
     * rewrite it to 0 to ensure the access is within bounds.
     */
    const uint32_t access_size = instr->num_components * type_sz;
-   nir_ssa_def *max_access_offset =
+   nir_def *max_access_offset =
       nir_iadd_imm(b, instr->src[offset_src].ssa, access_size - 1);
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_bcsel(b, nir_uge(b, max_access_offset, size), nir_imm_int(b, 0),
                 instr->src[offset_src].ssa);
 
@@ -31,10 +31,10 @@ rewrite_offset(nir_builder *b, nir_intrinsic_instr *instr,
  * intrinsic produces a destination, it will be zero in the invalid case.
  */
 static void
-wrap_in_if(nir_builder *b, nir_intrinsic_instr *instr, nir_ssa_def *valid)
+wrap_in_if(nir_builder *b, nir_intrinsic_instr *instr, nir_def *valid)
 {
    bool has_dest = nir_intrinsic_infos[instr->intrinsic].has_dest;
-   nir_ssa_def *res, *zero;
+   nir_def *res, *zero;
 
    if (has_dest) {
       zero = nir_imm_zero(b, instr->dest.ssa.num_components,
@@ -52,7 +52,7 @@ wrap_in_if(nir_builder *b, nir_intrinsic_instr *instr, nir_ssa_def *valid)
    nir_pop_if(b, NULL);
 
    if (has_dest)
-      nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_if_phi(b, res, zero));
+      nir_def_rewrite_uses(&instr->dest.ssa, nir_if_phi(b, res, zero));
 
    /* We've cloned and wrapped, so drop original instruction */
    nir_instr_remove(&instr->instr);
@@ -64,8 +64,8 @@ lower_buffer_load(nir_builder *b,
                   const nir_lower_robust_access_options *opts)
 {
    uint32_t type_sz = nir_dest_bit_size(instr->dest) / 8;
-   nir_ssa_def *size;
-   nir_ssa_def *index = instr->src[0].ssa;
+   nir_def *size;
+   nir_def *index = instr->src[0].ssa;
 
    if (instr->intrinsic == nir_intrinsic_load_ubo) {
       size = nir_get_ubo_size(b, 32, index);
@@ -125,21 +125,21 @@ lower_image(nir_builder *b,
 
    uint32_t num_coords = nir_image_intrinsic_coord_components(instr);
    bool is_array = nir_intrinsic_image_array(instr);
-   nir_ssa_def *coord = instr->src[1].ssa;
+   nir_def *coord = instr->src[1].ssa;
 
    /* Get image size. imageSize for cubes returns the size of a single face. */
    unsigned size_components = num_coords;
    if (dim == GLSL_SAMPLER_DIM_CUBE && !is_array)
       size_components -= 1;
 
-   nir_ssa_def *size =
+   nir_def *size =
       nir_image_size(b, size_components, 32,
                      instr->src[0].ssa, nir_imm_int(b, 0),
                      .image_array = is_array, .image_dim = dim);
 
    if (dim == GLSL_SAMPLER_DIM_CUBE) {
-      nir_ssa_def *z = is_array ? nir_imul_imm(b, nir_channel(b, size, 2), 6)
-                                : nir_imm_int(b, 6);
+      nir_def *z = is_array ? nir_imul_imm(b, nir_channel(b, size, 2), 6)
+                            : nir_imm_int(b, 6);
 
       size = nir_vec3(b, nir_channel(b, size, 0), nir_channel(b, size, 1), z);
    }
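
A short worked example for the cube handling above (numbers illustrative):

/* For a cube array with 4 layers, imageSize() reports a single
 * face's (w, h, 4), but a robust bound on the z coordinate must
 * span every face, so the pass rescales the third component to
 * 4 * 6 = 24.  For a non-array cube the bound is simply 6.
 */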
index f1f37f6..32de01f 100644
@@ -30,7 +30,7 @@ static void
 lower_tex_src_to_offset(nir_builder *b,
                         nir_tex_instr *instr, unsigned src_idx)
 {
-   nir_ssa_def *index = NULL;
+   nir_def *index = NULL;
    unsigned base_index = 0;
    unsigned array_elements = 1;
    nir_tex_src *src = &instr->src[src_idx];
index 9348d9e..ac0a623 100644
@@ -40,7 +40,7 @@ lower_load_store(nir_builder *b,
    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
    nir_variable *var = nir_deref_instr_get_variable(deref);
 
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_iadd_imm(b, nir_build_deref_offset(b, deref, size_align),
                    var->data.location);
 
@@ -49,16 +49,16 @@ lower_load_store(nir_builder *b,
 
    if (intrin->intrinsic == nir_intrinsic_load_deref) {
       unsigned bit_size = intrin->dest.ssa.bit_size;
-      nir_ssa_def *value = nir_load_scratch(
+      nir_def *value = nir_load_scratch(
          b, intrin->num_components, bit_size == 1 ? 32 : bit_size, offset, .align_mul = align);
       if (bit_size == 1)
          value = nir_b2b1(b, value);
 
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, value);
+      nir_def_rewrite_uses(&intrin->dest.ssa, value);
    } else {
       assert(intrin->intrinsic == nir_intrinsic_store_deref);
 
-      nir_ssa_def *value = intrin->src[1].ssa;
+      nir_def *value = intrin->src[1].ssa;
       if (value->bit_size == 1)
          value = nir_b2b32(b, value);
 
index 655e42e..610497f 100644
@@ -114,7 +114,7 @@ src_is_in_bitset(nir_src *src, void *_set)
 }
 
 static void
-add_ssa_def_to_bitset(nir_ssa_def *def, struct sized_bitset *set)
+add_ssa_def_to_bitset(nir_def *def, struct sized_bitset *set)
 {
    if (def->index >= set->size)
       return;
@@ -212,7 +212,7 @@ can_remat_instr(nir_instr *instr, struct sized_bitset *remat)
 }
 
 static bool
-can_remat_ssa_def(nir_ssa_def *def, struct sized_bitset *remat)
+can_remat_ssa_def(nir_def *def, struct sized_bitset *remat)
 {
    return can_remat_instr(def->parent_instr, remat);
 }
@@ -248,7 +248,7 @@ compare_instr_indexes(const void *_inst1, const void *_inst2)
 }
 
 static bool
-can_remat_chain_ssa_def(nir_ssa_def *def, struct sized_bitset *remat, struct util_dynarray *buf)
+can_remat_chain_ssa_def(nir_def *def, struct sized_bitset *remat, struct util_dynarray *buf)
 {
    assert(util_dynarray_num_elements(buf, nir_instr *) == 0);
 
@@ -283,7 +283,7 @@ can_remat_chain_ssa_def(nir_ssa_def *def, struct sized_bitset *remat, struct uti
    memcpy(potential_remat.set, remat->set, BITSET_WORDS(remat->size) * sizeof(BITSET_WORD));
 
    util_dynarray_foreach(buf, nir_instr *, instr_ptr) {
-      nir_ssa_def *instr_ssa_def = nir_instr_ssa_def(*instr_ptr);
+      nir_def *instr_ssa_def = nir_instr_ssa_def(*instr_ptr);
 
       /* If already in the potential rematerializable, nothing to do. */
       if (BITSET_TEST(potential_remat.set, instr_ssa_def->index))
@@ -308,23 +308,23 @@ fail:
    return false;
 }
 
-static nir_ssa_def *
-remat_ssa_def(nir_builder *b, nir_ssa_def *def, struct hash_table *remap_table)
+static nir_def *
+remat_ssa_def(nir_builder *b, nir_def *def, struct hash_table *remap_table)
 {
    nir_instr *clone = nir_instr_clone_deep(b->shader, def->parent_instr, remap_table);
    nir_builder_instr_insert(b, clone);
    return nir_instr_ssa_def(clone);
 }
 
-static nir_ssa_def *
+static nir_def *
 remat_chain_ssa_def(nir_builder *b, struct util_dynarray *buf,
-                    struct sized_bitset *remat, nir_ssa_def ***fill_defs,
+                    struct sized_bitset *remat, nir_def ***fill_defs,
                     unsigned call_idx, struct hash_table *remap_table)
 {
-   nir_ssa_def *last_def = NULL;
+   nir_def *last_def = NULL;
 
    util_dynarray_foreach(buf, nir_instr *, instr_ptr) {
-      nir_ssa_def *instr_ssa_def = nir_instr_ssa_def(*instr_ptr);
+      nir_def *instr_ssa_def = nir_instr_ssa_def(*instr_ptr);
       unsigned ssa_index = instr_ssa_def->index;
 
       if (fill_defs[ssa_index] != NULL &&
@@ -332,11 +332,11 @@ remat_chain_ssa_def(nir_builder *b, struct util_dynarray *buf,
          continue;
 
       /* Clone the instruction we want to rematerialize */
-      nir_ssa_def *clone_ssa_def = remat_ssa_def(b, instr_ssa_def, remap_table);
+      nir_def *clone_ssa_def = remat_ssa_def(b, instr_ssa_def, remap_table);
 
       if (fill_defs[ssa_index] == NULL) {
          fill_defs[ssa_index] =
-            rzalloc_array(fill_defs, nir_ssa_def *, remat->size);
+            rzalloc_array(fill_defs, nir_def *, remat->size);
       }
 
       /* Add the new ssa_def to the list fill_defs and flag it as
@@ -357,7 +357,7 @@ struct pbv_array {
 };
 
 static struct nir_phi_builder_value *
-get_phi_builder_value_for_def(nir_ssa_def *def,
+get_phi_builder_value_for_def(nir_def *def,
                               struct pbv_array *pbv_arr)
 {
    if (def->index >= pbv_arr->len)
@@ -366,7 +366,7 @@ get_phi_builder_value_for_def(nir_ssa_def *def,
    return pbv_arr->arr[def->index];
 }
 
-static nir_ssa_def *
+static nir_def *
 get_phi_builder_def_for_src(nir_src *src, struct pbv_array *pbv_arr,
                             nir_block *block)
 {
@@ -390,14 +390,14 @@ rewrite_instr_src_from_phi_builder(nir_src *src, void *_pbv_arr)
       block = src->parent_instr->block;
    }
 
-   nir_ssa_def *new_def = get_phi_builder_def_for_src(src, _pbv_arr, block);
+   nir_def *new_def = get_phi_builder_def_for_src(src, _pbv_arr, block);
    if (new_def != NULL)
       nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(new_def));
    return true;
 }
 
-static nir_ssa_def *
-spill_fill(nir_builder *before, nir_builder *after, nir_ssa_def *def,
+static nir_def *
+spill_fill(nir_builder *before, nir_builder *after, nir_def *def,
            unsigned value_id, unsigned call_idx,
            unsigned offset, unsigned stack_alignment)
 {
@@ -462,14 +462,14 @@ spill_ssa_defs_and_lower_shader_calls(nir_shader *shader, uint32_t num_calls,
    struct sized_bitset trivial_remat = bitset_create(mem_ctx, num_ssa_defs);
 
    /* Array of all live SSA defs which are spill candidates */
-   nir_ssa_def **spill_defs =
-      rzalloc_array(mem_ctx, nir_ssa_def *, num_ssa_defs);
+   nir_def **spill_defs =
+      rzalloc_array(mem_ctx, nir_def *, num_ssa_defs);
 
    /* For each spill candidate, an array of every time it's defined by a fill,
     * indexed by call instruction index.
     */
-   nir_ssa_def ***fill_defs =
-      rzalloc_array(mem_ctx, nir_ssa_def **, num_ssa_defs);
+   nir_def ***fill_defs =
+      rzalloc_array(mem_ctx, nir_def **, num_ssa_defs);
 
    /* For each call instruction, the liveness set at the call */
    const BITSET_WORD **call_live =
@@ -522,7 +522,7 @@ spill_ssa_defs_and_lower_shader_calls(nir_shader *shader, uint32_t num_calls,
 
       nir_foreach_block(block, impl) {
          nir_foreach_instr(instr, block) {
-            nir_ssa_def *def = nir_instr_ssa_def(instr);
+            nir_def *def = nir_instr_ssa_def(instr);
             if (def == NULL)
                continue;
 
@@ -559,7 +559,7 @@ spill_ssa_defs_and_lower_shader_calls(nir_shader *shader, uint32_t num_calls,
    unsigned max_scratch_size = shader->scratch_size;
    nir_foreach_block(block, impl) {
       nir_foreach_instr_safe(instr, block) {
-         nir_ssa_def *def = nir_instr_ssa_def(instr);
+         nir_def *def = nir_instr_ssa_def(instr);
          if (def != NULL) {
             if (can_remat_ssa_def(def, &trivial_remat)) {
                add_ssa_def_to_bitset(def, &trivial_remat);
@@ -605,7 +605,7 @@ spill_ssa_defs_and_lower_shader_calls(nir_shader *shader, uint32_t num_calls,
                assert(index < num_ssa_defs);
 
                def = spill_defs[index];
-               nir_ssa_def *original_def = def, *new_def;
+               nir_def *original_def = def, *new_def;
                if (can_remat_ssa_def(def, &remat)) {
                   /* If this SSA def is re-materializable or based on other
                    * things we've already spilled, re-materialize it rather
@@ -648,7 +648,7 @@ spill_ssa_defs_and_lower_shader_calls(nir_shader *shader, uint32_t num_calls,
                 */
                if (fill_defs[index] == NULL) {
                   fill_defs[index] =
-                     rzalloc_array(fill_defs, nir_ssa_def *, num_calls);
+                     rzalloc_array(fill_defs, nir_def *, num_calls);
                }
                fill_defs[index][call_idx] = new_def;
                _mesa_hash_table_insert(remap_table, original_def, new_def);
@@ -717,7 +717,7 @@ spill_ssa_defs_and_lower_shader_calls(nir_shader *shader, uint32_t num_calls,
       if (fill_defs[index] == NULL)
          continue;
 
-      nir_ssa_def *def = spill_defs[index];
+      nir_def *def = spill_defs[index];
 
       memset(def_blocks, 0, block_words * sizeof(BITSET_WORD));
       BITSET_SET(def_blocks, def->parent_instr->block->index);
@@ -735,7 +735,7 @@ spill_ssa_defs_and_lower_shader_calls(nir_shader *shader, uint32_t num_calls,
     */
    nir_foreach_block(block, impl) {
       nir_foreach_instr_safe(instr, block) {
-         nir_ssa_def *def = nir_instr_ssa_def(instr);
+         nir_def *def = nir_instr_ssa_def(instr);
          if (def != NULL) {
             struct nir_phi_builder_value *pbv =
                get_phi_builder_value_for_def(def, &pbv_arr);
@@ -775,7 +775,7 @@ spill_ssa_defs_and_lower_shader_calls(nir_shader *shader, uint32_t num_calls,
 
       nir_if *following_if = nir_block_get_following_if(block);
       if (following_if) {
-         nir_ssa_def *new_def =
+         nir_def *new_def =
             get_phi_builder_def_for_src(&following_if->condition,
                                         &pbv_arr, block);
          if (new_def != NULL)
@@ -829,7 +829,7 @@ find_resume_instr(nir_function_impl *impl, unsigned call_idx)
 static bool
 duplicate_loop_bodies(nir_function_impl *impl, nir_instr *resume_instr)
 {
-   nir_ssa_def *resume_reg = NULL;
+   nir_def *resume_reg = NULL;
    for (nir_cf_node *node = resume_instr->block->cf_node.parent;
         node->type != nir_cf_node_function; node = node->parent) {
       if (node->type != nir_cf_node_loop)
@@ -911,7 +911,7 @@ rewrite_phis_to_pred(nir_block *block, nir_block *pred)
       nir_foreach_phi_src(phi_src, phi) {
          if (phi_src->pred == pred) {
             found = true;
-            nir_ssa_def_rewrite_uses(&phi->dest.ssa, phi_src->src.ssa);
+            nir_def_rewrite_uses(&phi->dest.ssa, phi_src->src.ssa);
             break;
          }
       }
@@ -1044,7 +1044,7 @@ flatten_resume_if_ladder(nir_builder *b,
                nir_instr_insert(b->cursor, instr);
                b->cursor = nir_after_instr(instr);
 
-               nir_ssa_def *def = nir_instr_ssa_def(instr);
+               nir_def *def = nir_instr_ssa_def(instr);
                BITSET_SET(remat->set, def->index);
             }
          }
@@ -1256,7 +1256,7 @@ lower_resume(nir_shader *shader, int call_idx)
       NIR_PASS_V(shader, nir_lower_reg_intrinsics_to_ssa);
    }
 
-   /* Re-index nir_ssa_def::index.  We don't care about actual liveness in
+   /* Re-index nir_def::index.  We don't care about actual liveness in
     * this pass but, so we can use the same helpers as the spilling pass, we
     * need to make sure that live_index is something sane.  It's used
     * constantly for determining if an SSA value has been added since the
@@ -1342,12 +1342,12 @@ lower_stack_instr_to_scratch(struct nir_builder *b, nir_instr *instr, void *data
    switch (stack->intrinsic) {
    case nir_intrinsic_load_stack: {
       b->cursor = nir_instr_remove(instr);
-      nir_ssa_def *data, *old_data = nir_instr_ssa_def(instr);
+      nir_def *data, *old_data = nir_instr_ssa_def(instr);
 
       if (state->address_format == nir_address_format_64bit_global) {
-         nir_ssa_def *addr = nir_iadd_imm(b,
-                                          nir_load_scratch_base_ptr(b, 1, 64, 1),
-                                          nir_intrinsic_base(stack));
+         nir_def *addr = nir_iadd_imm(b,
+                                      nir_load_scratch_base_ptr(b, 1, 64, 1),
+                                      nir_intrinsic_base(stack));
          data = nir_load_global(b, addr,
                                 nir_intrinsic_align_mul(stack),
                                 stack->dest.ssa.num_components,
@@ -1360,18 +1360,18 @@ lower_stack_instr_to_scratch(struct nir_builder *b, nir_instr *instr, void *data
                                  nir_imm_int(b, nir_intrinsic_base(stack)),
                                  .align_mul = nir_intrinsic_align_mul(stack));
       }
-      nir_ssa_def_rewrite_uses(old_data, data);
+      nir_def_rewrite_uses(old_data, data);
       break;
    }
 
    case nir_intrinsic_store_stack: {
       b->cursor = nir_instr_remove(instr);
-      nir_ssa_def *data = stack->src[0].ssa;
+      nir_def *data = stack->src[0].ssa;
 
       if (state->address_format == nir_address_format_64bit_global) {
-         nir_ssa_def *addr = nir_iadd_imm(b,
-                                          nir_load_scratch_base_ptr(b, 1, 64, 1),
-                                          nir_intrinsic_base(stack));
+         nir_def *addr = nir_iadd_imm(b,
+                                      nir_load_scratch_base_ptr(b, 1, 64, 1),
+                                      nir_intrinsic_base(stack));
          nir_store_global(b, addr,
                           nir_intrinsic_align_mul(stack),
                           data,
@@ -1484,7 +1484,7 @@ nir_opt_trim_stack_values(nir_shader *shader)
          const unsigned value_id = nir_intrinsic_value_id(intrin);
 
          const unsigned mask =
-            nir_ssa_def_components_read(nir_instr_ssa_def(instr));
+            nir_def_components_read(nir_instr_ssa_def(instr));
          add_use_mask(value_id_to_mask, value_id, mask);
       }
    }
@@ -1519,7 +1519,7 @@ nir_opt_trim_stack_values(nir_shader *shader)
 
          nir_builder b = nir_builder_at(nir_before_instr(instr));
 
-         nir_ssa_def *value = nir_channels(&b, intrin->src[0].ssa, read_mask);
+         nir_def *value = nir_channels(&b, intrin->src[0].ssa, read_mask);
          nir_instr_rewrite_src_ssa(instr, &intrin->src[0], value);
 
          intrin->num_components = util_bitcount(read_mask);
@@ -1555,7 +1555,7 @@ nir_opt_trim_stack_values(nir_shader *shader)
          u_foreach_bit(idx, read_mask)
             swiz_map[idx] = swiz_count++;
 
-         nir_ssa_def *def = nir_instr_ssa_def(instr);
+         nir_def *def = nir_instr_ssa_def(instr);
 
          nir_foreach_use_safe(use_src, def) {
             if (use_src->parent_instr->type == nir_instr_type_alu) {
@@ -1652,7 +1652,7 @@ nir_opt_sort_and_pack_stack(nir_shader *shader,
                continue;
 
             const unsigned value_id = nir_intrinsic_value_id(intrin);
-            nir_ssa_def *def = nir_instr_ssa_def(instr);
+            nir_def *def = nir_instr_ssa_def(instr);
 
             assert(_mesa_hash_table_u64_search(value_id_to_item,
                                                value_id) == NULL);
@@ -1747,7 +1747,7 @@ nir_block_loop_depth(nir_block *block)
 
 /* Find the last block dominating all the uses of a SSA value. */
 static nir_block *
-find_last_dominant_use_block(nir_function_impl *impl, nir_ssa_def *value)
+find_last_dominant_use_block(nir_function_impl *impl, nir_def *value)
 {
    nir_block *old_block = value->parent_instr->block;
    unsigned old_block_loop_depth = nir_block_loop_depth(old_block);
@@ -1817,7 +1817,7 @@ nir_opt_stack_loads(nir_shader *shader)
             if (intrin->intrinsic != nir_intrinsic_load_stack)
                continue;
 
-            nir_ssa_def *value = &intrin->dest.ssa;
+            nir_def *value = &intrin->dest.ssa;
             nir_block *new_block = find_last_dominant_use_block(impl, value);
             if (new_block == block)
                continue;
@@ -1864,7 +1864,7 @@ split_stack_components_instr(struct nir_builder *b, nir_instr *instr, void *data
    b->cursor = nir_before_instr(instr);
 
    if (intrin->intrinsic == nir_intrinsic_load_stack) {
-      nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS] = {
+      nir_def *components[NIR_MAX_VEC_COMPONENTS] = {
          0,
       };
       for (unsigned c = 0; c < intrin->dest.ssa.num_components; c++) {
@@ -1876,9 +1876,9 @@ split_stack_components_instr(struct nir_builder *b, nir_instr *instr, void *data
                                         .align_mul = nir_intrinsic_align_mul(intrin));
       }
 
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                               nir_vec(b, components,
-                                       intrin->dest.ssa.num_components));
+      nir_def_rewrite_uses(&intrin->dest.ssa,
+                           nir_vec(b, components,
+                                   intrin->dest.ssa.num_components));
    } else {
       assert(intrin->intrinsic == nir_intrinsic_store_stack);
       for (unsigned c = 0; c < intrin->src[0].ssa->num_components; c++) {
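
For context, how the helpers above compose: a value that is live across a
shader call is rematerialized when that is cheap and spilled to the stack
otherwise. A minimal sketch using this file's names (new_def, the before/after
builders, and the index/offset arguments stand in for the pass's real
bookkeeping):

   if (can_remat_ssa_def(def, &remat)) {
      /* Cheap to recompute: clone the defining instruction instead. */
      new_def = remat_ssa_def(b, def, remap_table);
   } else {
      /* Otherwise store it before the call and reload it after. */
      new_def = spill_fill(before, after, def, value_id, call_idx,
                           offset, stack_alignment);
   }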
index 41899f4..c87624e 100644
@@ -34,7 +34,7 @@ lower_single_sampled_instr(nir_builder *b,
 
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-   nir_ssa_def *lowered;
+   nir_def *lowered;
    switch (intrin->intrinsic) {
    case nir_intrinsic_load_sample_id:
       b->cursor = nir_before_instr(instr);
@@ -83,7 +83,7 @@ lower_single_sampled_instr(nir_builder *b,
       return false;
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, lowered);
+   nir_def_rewrite_uses(&intrin->dest.ssa, lowered);
    nir_instr_remove(instr);
    return true;
 }
index be313fd..17b3c85 100644
@@ -59,7 +59,7 @@ lower_ssbo_op(nir_intrinsic_op op)
 /* Like SSBO property sysvals, though SSBO index may be indirect. C.f.
  * nir_load_system_value */
 
-static inline nir_ssa_def *
+static inline nir_def *
 nir_load_ssbo_prop(nir_builder *b, nir_intrinsic_op op,
                    nir_src *idx, unsigned bitsize)
 {
@@ -74,7 +74,7 @@ nir_load_ssbo_prop(nir_builder *b, nir_intrinsic_op op,
 #define nir_ssbo_prop(b, prop, index, bitsize) \
    nir_load_ssbo_prop(b, nir_intrinsic_##prop, index, bitsize)
 
-static nir_ssa_def *
+static nir_def *
 lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)
 {
    nir_intrinsic_op op = lower_ssbo_op(intr->intrinsic);
@@ -88,9 +88,9 @@ lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)
 
    nir_src index = intr->src[is_store ? 1 : 0];
    nir_src *offset_src = nir_get_io_offset_src(intr);
-   nir_ssa_def *offset = nir_ssa_for_src(b, *offset_src, 1);
+   nir_def *offset = nir_ssa_for_src(b, *offset_src, 1);
 
-   nir_ssa_def *address =
+   nir_def *address =
       nir_iadd(b,
                nir_ssbo_prop(b, load_ssbo_address, &index, 64),
                nir_u2u64(b, offset));
@@ -167,11 +167,11 @@ nir_lower_ssbo(nir_shader *shader)
             b.cursor = nir_before_instr(instr);
 
             nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-            nir_ssa_def *replace = lower_ssbo_instr(&b, intr);
+            nir_def *replace = lower_ssbo_instr(&b, intr);
 
             if (replace) {
-               nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                                        replace);
+               nir_def_rewrite_uses(&intr->dest.ssa,
+                                    replace);
             }
 
             nir_instr_remove(instr);
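
For context, the address math at the heart of lower_ssbo_instr above, as a
minimal sketch (assuming a builder b, a nir_src index naming the SSBO binding,
and a 32-bit offset def): the SSBO address sysval yields a 64-bit base pointer,
and the byte offset is widened and added to it.

   nir_def *base = nir_load_ssbo_prop(b, nir_intrinsic_load_ssbo_address,
                                      &index, 64);
   nir_def *address = nir_iadd(b, base, nir_u2u64(b, offset));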
index 0d146b4..0e86c38 100644
@@ -33,7 +33,7 @@ static nir_intrinsic_instr *
 lower_subgroups_64bit_split_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
                                       unsigned int component)
 {
-   nir_ssa_def *comp;
+   nir_def *comp;
    if (component == 0)
       comp = nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa);
    else
@@ -52,7 +52,7 @@ lower_subgroups_64bit_split_intrinsic(nir_builder *b, nir_intrinsic_instr *intri
    return intr;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_subgroup_op_to_32bit(nir_builder *b, nir_intrinsic_instr *intrin)
 {
    assert(intrin->src[0].ssa->bit_size == 64);
@@ -61,8 +61,8 @@ lower_subgroup_op_to_32bit(nir_builder *b, nir_intrinsic_instr *intrin)
    return nir_pack_64_2x32_split(b, &intr_x->dest.ssa, &intr_y->dest.ssa);
 }
 
-static nir_ssa_def *
-ballot_type_to_uint(nir_builder *b, nir_ssa_def *value,
+static nir_def *
+ballot_type_to_uint(nir_builder *b, nir_def *value,
                     const nir_lower_subgroups_options *options)
 {
    /* Only the new-style SPIR-V subgroup instructions take a ballot result as
@@ -74,8 +74,8 @@ ballot_type_to_uint(nir_builder *b, nir_ssa_def *value,
                            options->ballot_bit_size);
 }
 
-static nir_ssa_def *
-uint_to_ballot_type(nir_builder *b, nir_ssa_def *value,
+static nir_def *
+uint_to_ballot_type(nir_builder *b, nir_def *value,
                     unsigned num_components, unsigned bit_size)
 {
    assert(util_is_power_of_two_nonzero(num_components));
@@ -103,16 +103,16 @@ uint_to_ballot_type(nir_builder *b, nir_ssa_def *value,
    return value;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
                             bool lower_to_32bit)
 {
    /* This is safe to call on scalar things but it would be silly */
    assert(intrin->dest.ssa.num_components > 1);
 
-   nir_ssa_def *value = nir_ssa_for_src(b, intrin->src[0],
-                                        intrin->num_components);
-   nir_ssa_def *reads[NIR_MAX_VEC_COMPONENTS];
+   nir_def *value = nir_ssa_for_src(b, intrin->src[0],
+                                    intrin->num_components);
+   nir_def *reads[NIR_MAX_VEC_COMPONENTS];
 
    for (unsigned i = 0; i < intrin->num_components; i++) {
       nir_intrinsic_instr *chan_intrin =
@@ -143,12 +143,12 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
    return nir_vec(b, reads, intrin->num_components);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_vote_eq_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin)
 {
-   nir_ssa_def *value = intrin->src[0].ssa;
+   nir_def *value = intrin->src[0].ssa;
 
-   nir_ssa_def *result = NULL;
+   nir_def *result = NULL;
    for (unsigned i = 0; i < intrin->num_components; i++) {
       nir_intrinsic_instr *chan_intrin =
          nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
@@ -168,17 +168,17 @@ lower_vote_eq_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin)
    return result;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_vote_eq(nir_builder *b, nir_intrinsic_instr *intrin)
 {
-   nir_ssa_def *value = intrin->src[0].ssa;
+   nir_def *value = intrin->src[0].ssa;
 
    /* We have to implicitly lower to scalar */
-   nir_ssa_def *all_eq = NULL;
+   nir_def *all_eq = NULL;
    for (unsigned i = 0; i < intrin->num_components; i++) {
-      nir_ssa_def *rfi = nir_read_first_invocation(b, nir_channel(b, value, i));
+      nir_def *rfi = nir_read_first_invocation(b, nir_channel(b, value, i));
 
-      nir_ssa_def *is_eq;
+      nir_def *is_eq;
       if (intrin->intrinsic == nir_intrinsic_vote_feq) {
          is_eq = nir_feq(b, rfi, nir_channel(b, value, i));
       } else {
@@ -195,7 +195,7 @@ lower_vote_eq(nir_builder *b, nir_intrinsic_instr *intrin)
    return nir_vote_all(b, 1, all_eq);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_shuffle_to_swizzle(nir_builder *b, nir_intrinsic_instr *intrin,
                          const nir_lower_subgroups_options *options)
 {
@@ -225,20 +225,20 @@ lower_shuffle_to_swizzle(nir_builder *b, nir_intrinsic_instr *intrin,
 
 /* Lowers "specialized" shuffles to a generic nir_intrinsic_shuffle. */
 
-static nir_ssa_def *
+static nir_def *
 lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
                  const nir_lower_subgroups_options *options)
 {
    if (intrin->intrinsic == nir_intrinsic_shuffle_xor &&
        options->lower_shuffle_to_swizzle_amd &&
        nir_src_is_const(intrin->src[1])) {
-      nir_ssa_def *result =
+      nir_def *result =
          lower_shuffle_to_swizzle(b, intrin, options);
       if (result)
          return result;
    }
 
-   nir_ssa_def *index = nir_load_subgroup_invocation(b);
+   nir_def *index = nir_load_subgroup_invocation(b);
    bool is_shuffle = false;
    switch (intrin->intrinsic) {
    case nir_intrinsic_shuffle_xor:
@@ -276,11 +276,11 @@ lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
       index = nir_ixor(b, index, nir_imm_int(b, 0x3));
       break;
    case nir_intrinsic_rotate: {
-      nir_ssa_def *delta = intrin->src[1].ssa;
-      nir_ssa_def *local_id = nir_load_subgroup_invocation(b);
+      nir_def *delta = intrin->src[1].ssa;
+      nir_def *local_id = nir_load_subgroup_invocation(b);
       const unsigned cluster_size = nir_intrinsic_cluster_size(intrin);
 
-      nir_ssa_def *rotation_group_mask =
+      nir_def *rotation_group_mask =
          cluster_size > 0 ? nir_imm_int(b, (int)(cluster_size - 1)) : nir_iadd_imm(b, nir_load_subgroup_size(b), -1);
 
       index = nir_iand(b, nir_iadd(b, local_id, delta),
@@ -316,7 +316,7 @@ lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
 }
 
 static const struct glsl_type *
-glsl_type_for_ssa(nir_ssa_def *def)
+glsl_type_for_ssa(nir_def *def)
 {
    const struct glsl_type *comp_type = def->bit_size == 1 ? glsl_bool_type() : glsl_uintN_t_type(def->bit_size);
    return glsl_replace_vector_type(comp_type, def->num_components);
@@ -324,11 +324,11 @@ glsl_type_for_ssa(nir_ssa_def *def)
 
 /* Lower nir_intrinsic_shuffle to a waterfall loop + nir_read_invocation.
  */
-static nir_ssa_def *
+static nir_def *
 lower_shuffle(nir_builder *b, nir_intrinsic_instr *intrin)
 {
-   nir_ssa_def *val = intrin->src[0].ssa;
-   nir_ssa_def *id = intrin->src[1].ssa;
+   nir_def *val = intrin->src[0].ssa;
+   nir_def *id = intrin->src[1].ssa;
 
    /* The loop is something like:
     *
@@ -362,16 +362,16 @@ lower_shuffle(nir_builder *b, nir_intrinsic_instr *intrin)
     * loop over always-inactive invocations.
     */
 
-   nir_ssa_def *subgroup_id = nir_load_subgroup_invocation(b);
+   nir_def *subgroup_id = nir_load_subgroup_invocation(b);
 
    nir_variable *result =
       nir_local_variable_create(b->impl, glsl_type_for_ssa(val), "result");
 
    nir_loop *loop = nir_push_loop(b);
    {
-      nir_ssa_def *first_id = nir_read_first_invocation(b, subgroup_id);
-      nir_ssa_def *first_val = nir_read_first_invocation(b, val);
-      nir_ssa_def *first_result =
+      nir_def *first_id = nir_read_first_invocation(b, subgroup_id);
+      nir_def *first_val = nir_read_first_invocation(b, val);
+      nir_def *first_result =
          nir_read_invocation(b, val, nir_read_first_invocation(b, id));
 
       nir_if *nif = nir_push_if(b, nir_ieq(b, id, first_id));
@@ -407,15 +407,15 @@ lower_subgroups_filter(const nir_instr *instr, const void *_options)
  * then shifted left by "shift". Only particular values for "val" are
  * supported, see below.
  */
-static nir_ssa_def *
-build_ballot_imm_ishl(nir_builder *b, int64_t val, nir_ssa_def *shift,
+static nir_def *
+build_ballot_imm_ishl(nir_builder *b, int64_t val, nir_def *shift,
                       const nir_lower_subgroups_options *options)
 {
    /* This only works if all the high bits are the same as bit 1. */
    assert((val >> 2) == (val & 0x2 ? -1 : 0));
 
    /* First compute the result assuming one ballot component. */
-   nir_ssa_def *result =
+   nir_def *result =
       nir_ishl(b, nir_imm_intN_t(b, val, options->ballot_bit_size), shift);
 
    if (options->ballot_components == 1)
@@ -438,12 +438,12 @@ build_ballot_imm_ishl(nir_builder *b, int64_t val, nir_ssa_def *shift,
    nir_const_value min_shift[4];
    for (unsigned i = 0; i < options->ballot_components; i++)
       min_shift[i] = nir_const_value_for_int(i * options->ballot_bit_size, 32);
-   nir_ssa_def *min_shift_val = nir_build_imm(b, options->ballot_components, 32, min_shift);
+   nir_def *min_shift_val = nir_build_imm(b, options->ballot_components, 32, min_shift);
 
    nir_const_value max_shift[4];
    for (unsigned i = 0; i < options->ballot_components; i++)
       max_shift[i] = nir_const_value_for_int((i + 1) * options->ballot_bit_size, 32);
-   nir_ssa_def *max_shift_val = nir_build_imm(b, options->ballot_components, 32, max_shift);
+   nir_def *max_shift_val = nir_build_imm(b, options->ballot_components, 32, max_shift);
 
    return nir_bcsel(b, nir_ult(b, shift, max_shift_val),
                     nir_bcsel(b, nir_ult(b, shift, min_shift_val),
@@ -452,29 +452,29 @@ build_ballot_imm_ishl(nir_builder *b, int64_t val, nir_ssa_def *shift,
                     nir_imm_intN_t(b, 0, result->bit_size));
 }
 
-static nir_ssa_def *
+static nir_def *
 build_subgroup_eq_mask(nir_builder *b,
                        const nir_lower_subgroups_options *options)
 {
-   nir_ssa_def *subgroup_idx = nir_load_subgroup_invocation(b);
+   nir_def *subgroup_idx = nir_load_subgroup_invocation(b);
 
    return build_ballot_imm_ishl(b, 1, subgroup_idx, options);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_subgroup_ge_mask(nir_builder *b,
                        const nir_lower_subgroups_options *options)
 {
-   nir_ssa_def *subgroup_idx = nir_load_subgroup_invocation(b);
+   nir_def *subgroup_idx = nir_load_subgroup_invocation(b);
 
    return build_ballot_imm_ishl(b, ~0ull, subgroup_idx, options);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_subgroup_gt_mask(nir_builder *b,
                        const nir_lower_subgroups_options *options)
 {
-   nir_ssa_def *subgroup_idx = nir_load_subgroup_invocation(b);
+   nir_def *subgroup_idx = nir_load_subgroup_invocation(b);
 
    return build_ballot_imm_ishl(b, ~1ull, subgroup_idx, options);
 }
@@ -484,14 +484,14 @@ build_subgroup_gt_mask(nir_builder *b,
  * above the subgroup size for the masks, but gt_mask and ge_mask make them 1
  * so we have to "and" with this mask.
  */
-static nir_ssa_def *
+static nir_def *
 build_subgroup_mask(nir_builder *b,
                     const nir_lower_subgroups_options *options)
 {
-   nir_ssa_def *subgroup_size = nir_load_subgroup_size(b);
+   nir_def *subgroup_size = nir_load_subgroup_size(b);
 
    /* First compute the result assuming one ballot component. */
-   nir_ssa_def *result =
+   nir_def *result =
       nir_ushr(b, nir_imm_intN_t(b, ~0ull, options->ballot_bit_size),
                nir_isub_imm(b, options->ballot_bit_size,
                             subgroup_size));
@@ -519,32 +519,32 @@ build_subgroup_mask(nir_builder *b,
    nir_const_value min_idx[4];
    for (unsigned i = 0; i < options->ballot_components; i++)
       min_idx[i] = nir_const_value_for_int(i * options->ballot_bit_size, 32);
-   nir_ssa_def *min_idx_val = nir_build_imm(b, options->ballot_components, 32, min_idx);
+   nir_def *min_idx_val = nir_build_imm(b, options->ballot_components, 32, min_idx);
 
-   nir_ssa_def *result_extended =
+   nir_def *result_extended =
       nir_pad_vector_imm_int(b, result, ~0ull, options->ballot_components);
 
    return nir_bcsel(b, nir_ult(b, min_idx_val, subgroup_size),
                     result_extended, nir_imm_intN_t(b, 0, options->ballot_bit_size));
 }
 
-static nir_ssa_def *
-vec_bit_count(nir_builder *b, nir_ssa_def *value)
+static nir_def *
+vec_bit_count(nir_builder *b, nir_def *value)
 {
-   nir_ssa_def *vec_result = nir_bit_count(b, value);
-   nir_ssa_def *result = nir_channel(b, vec_result, 0);
+   nir_def *vec_result = nir_bit_count(b, value);
+   nir_def *result = nir_channel(b, vec_result, 0);
    for (unsigned i = 1; i < value->num_components; i++)
       result = nir_iadd(b, result, nir_channel(b, vec_result, i));
    return result;
 }
 
-static nir_ssa_def *
-vec_find_lsb(nir_builder *b, nir_ssa_def *value)
+static nir_def *
+vec_find_lsb(nir_builder *b, nir_def *value)
 {
-   nir_ssa_def *vec_result = nir_find_lsb(b, value);
-   nir_ssa_def *result = nir_imm_int(b, -1);
+   nir_def *vec_result = nir_find_lsb(b, value);
+   nir_def *result = nir_imm_int(b, -1);
    for (int i = value->num_components - 1; i >= 0; i--) {
-      nir_ssa_def *channel = nir_channel(b, vec_result, i);
+      nir_def *channel = nir_channel(b, vec_result, i);
       /* result = channel >= 0 ? (i * bitsize + channel) : result */
       result = nir_bcsel(b, nir_ige_imm(b, channel, 0),
                          nir_iadd_imm(b, channel, i * value->bit_size),
@@ -553,13 +553,13 @@ vec_find_lsb(nir_builder *b, nir_ssa_def *value)
    return result;
 }
 
-static nir_ssa_def *
-vec_find_msb(nir_builder *b, nir_ssa_def *value)
+static nir_def *
+vec_find_msb(nir_builder *b, nir_def *value)
 {
-   nir_ssa_def *vec_result = nir_ufind_msb(b, value);
-   nir_ssa_def *result = nir_imm_int(b, -1);
+   nir_def *vec_result = nir_ufind_msb(b, value);
+   nir_def *result = nir_imm_int(b, -1);
    for (unsigned i = 0; i < value->num_components; i++) {
-      nir_ssa_def *channel = nir_channel(b, vec_result, i);
+      nir_def *channel = nir_channel(b, vec_result, i);
       /* result = channel >= 0 ? (i * bitsize + channel) : result */
       result = nir_bcsel(b, nir_ige_imm(b, channel, 0),
                          nir_iadd_imm(b, channel, i * value->bit_size),
@@ -568,14 +568,14 @@ vec_find_msb(nir_builder *b, nir_ssa_def *value)
    return result;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
                              const nir_lower_subgroups_options *options)
 {
    if (!options->lower_quad_broadcast_dynamic_to_const)
       return lower_to_shuffle(b, intrin, options);
 
-   nir_ssa_def *dst = NULL;
+   nir_def *dst = NULL;
 
    for (unsigned i = 0; i < 4; ++i) {
       nir_intrinsic_instr *qbcst =
@@ -588,7 +588,7 @@ lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
                         intrin->dest.ssa.num_components,
                         intrin->dest.ssa.bit_size);
 
-      nir_ssa_def *qbcst_dst = NULL;
+      nir_def *qbcst_dst = NULL;
 
       if (options->lower_to_scalar && qbcst->num_components > 1) {
          qbcst_dst = lower_subgroup_op_to_scalar(b, qbcst, false);
@@ -607,7 +607,7 @@ lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
    return dst;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_read_invocation_to_cond(nir_builder *b, nir_intrinsic_instr *intrin)
 {
    return nir_read_invocation_cond_ir3(b, intrin->dest.ssa.bit_size,
@@ -616,7 +616,7 @@ lower_read_invocation_to_cond(nir_builder *b, nir_intrinsic_instr *intrin)
                                                nir_load_subgroup_invocation(b)));
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
 {
    const nir_lower_subgroups_options *options = _options;
@@ -668,7 +668,7 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
       if (!options->lower_subgroup_masks)
          return NULL;
 
-      nir_ssa_def *val;
+      nir_def *val;
       switch (intrin->intrinsic) {
       case nir_intrinsic_load_subgroup_eq_mask:
          val = build_subgroup_eq_mask(b, options);
@@ -701,7 +701,7 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
           intrin->dest.ssa.bit_size == options->ballot_bit_size)
          return NULL;
 
-      nir_ssa_def *ballot =
+      nir_def *ballot =
          nir_ballot(b, options->ballot_components, options->ballot_bit_size,
                     intrin->src[0].ssa);
 
@@ -714,8 +714,8 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
    case nir_intrinsic_ballot_bit_count_reduce:
    case nir_intrinsic_ballot_find_lsb:
    case nir_intrinsic_ballot_find_msb: {
-      nir_ssa_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
-                                                 options);
+      nir_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
+                                             options);
 
       if (intrin->intrinsic != nir_intrinsic_ballot_bitfield_extract &&
           intrin->intrinsic != nir_intrinsic_ballot_find_lsb) {
@@ -743,7 +743,7 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
 
       switch (intrin->intrinsic) {
       case nir_intrinsic_ballot_bitfield_extract: {
-         nir_ssa_def *idx = intrin->src[1].ssa;
+         nir_def *idx = intrin->src[1].ssa;
          if (int_val->num_components > 1) {
             /* idx will be truncated by nir_ushr, so we just need to select
              * the right component using the bits of idx that are truncated in
@@ -769,10 +769,10 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
 
    case nir_intrinsic_ballot_bit_count_exclusive:
    case nir_intrinsic_ballot_bit_count_inclusive: {
-      nir_ssa_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
-                                                 options);
+      nir_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
+                                             options);
       if (options->lower_ballot_bit_count_to_mbcnt_amd) {
-         nir_ssa_def *acc;
+         nir_def *acc;
          if (intrin->intrinsic == nir_intrinsic_ballot_bit_count_exclusive) {
             acc = nir_imm_int(b, 0);
          } else {
@@ -782,7 +782,7 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
          return nir_mbcnt_amd(b, int_val, acc);
       }
 
-      nir_ssa_def *mask;
+      nir_def *mask;
       if (intrin->intrinsic == nir_intrinsic_ballot_bit_count_inclusive) {
          mask = nir_inot(b, build_subgroup_gt_mask(b, options));
       } else {
@@ -832,7 +832,7 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
       break;
 
    case nir_intrinsic_reduce: {
-      nir_ssa_def *ret = NULL;
+      nir_def *ret = NULL;
       /* A cluster size greater than the subgroup size is implementation-defined */
       if (options->subgroup_size &&
           nir_intrinsic_cluster_size(intrin) >= options->subgroup_size) {
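
For context, the three subgroup masks built above differ only in the
immediate that build_ballot_imm_ishl shifts by the invocation index. A
minimal sketch reusing this file's helper (the builder b and the options
struct are assumed):

   nir_def *id = nir_load_subgroup_invocation(b);
   nir_def *eq = build_ballot_imm_ishl(b, 1, id, options);     /* only my bit    */
   nir_def *ge = build_ballot_imm_ishl(b, ~0ull, id, options); /* my bit and up  */
   nir_def *gt = build_ballot_imm_ishl(b, ~1ull, id, options); /* strictly above */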
index 6517257..2186e52 100644
@@ -39,7 +39,7 @@ struct lower_sysval_state {
    struct set *lower_once_list;
 };
 
-static nir_ssa_def *
+static nir_def *
 sanitize_32bit_sysval(nir_builder *b, nir_intrinsic_instr *intrin)
 {
    const unsigned bit_size = intrin->dest.ssa.bit_size;
@@ -50,11 +50,11 @@ sanitize_32bit_sysval(nir_builder *b, nir_intrinsic_instr *intrin)
    return nir_u2uN(b, &intrin->dest.ssa, bit_size);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_global_group_size(nir_builder *b, unsigned bit_size)
 {
-   nir_ssa_def *group_size = nir_load_workgroup_size(b);
-   nir_ssa_def *num_workgroups = nir_load_num_workgroups(b, bit_size);
+   nir_def *group_size = nir_load_workgroup_size(b);
+   nir_def *num_workgroups = nir_load_num_workgroups(b, bit_size);
    return nir_imul(b, nir_u2uN(b, group_size, bit_size),
                    num_workgroups);
 }
@@ -65,7 +65,7 @@ lower_system_value_filter(const nir_instr *instr, const void *_state)
    return instr->type == nir_instr_type_intrinsic;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
 {
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
@@ -149,7 +149,7 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
       if (!nir_deref_mode_is(deref, nir_var_system_value))
          return NULL;
 
-      nir_ssa_def *column = NULL;
+      nir_def *column = NULL;
       if (deref->deref_type != nir_deref_type_var) {
         /* The only system values that aren't plain variables are
           * gl_SampleMask which is always an array of one element and a
@@ -164,11 +164,11 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
          switch (deref->var->data.location) {
          case SYSTEM_VALUE_TESS_LEVEL_INNER:
          case SYSTEM_VALUE_TESS_LEVEL_OUTER: {
-            nir_ssa_def *index = nir_ssa_for_src(b, arr_deref->arr.index, 1);
-            nir_ssa_def *sysval = (deref->var->data.location ==
-                                   SYSTEM_VALUE_TESS_LEVEL_INNER)
-                                     ? nir_load_tess_level_inner(b)
-                                     : nir_load_tess_level_outer(b);
+            nir_def *index = nir_ssa_for_src(b, arr_deref->arr.index, 1);
+            nir_def *sysval = (deref->var->data.location ==
+                               SYSTEM_VALUE_TESS_LEVEL_INNER)
+                                 ? nir_load_tess_level_inner(b)
+                                 : nir_load_tess_level_outer(b);
             return nir_vector_extract(b, sysval, index);
          }
 
@@ -290,7 +290,7 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
          ASSERTED unsigned num_rows = glsl_get_vector_elements(var->type);
          assert(num_rows == intrin->dest.ssa.num_components);
 
-         nir_ssa_def *cols[4];
+         nir_def *cols[4];
          for (unsigned i = 0; i < num_cols; i++) {
             cols[i] = nir_load_system_value(b, sysval_op, i,
                                             intrin->dest.ssa.num_components,
@@ -303,7 +303,7 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
          ASSERTED const struct glsl_type *elem_type = glsl_get_array_element(var->type);
          assert(glsl_get_components(elem_type) == intrin->dest.ssa.num_components);
 
-         nir_ssa_def *elems[4];
+         nir_def *elems[4];
          assert(ARRAY_SIZE(elems) >= num_elems);
          for (unsigned i = 0; i < num_elems; i++) {
             elems[i] = nir_load_system_value(b, sysval_op, i,
@@ -323,10 +323,10 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
    }
 }
 
-nir_ssa_def *
+nir_def *
 nir_build_lowered_load_helper_invocation(nir_builder *b)
 {
-   nir_ssa_def *tmp;
+   nir_def *tmp;
    tmp = nir_ishl(b, nir_imm_int(b, 1),
                   nir_load_sample_id_no_per_sample(b));
    tmp = nir_iand(b, nir_load_sample_mask_in(b), tmp);
@@ -353,9 +353,9 @@ nir_lower_system_values(nir_shader *shader)
    return progress;
 }
 
-static nir_ssa_def *
-id_to_index_no_umod_slow(nir_builder *b, nir_ssa_def *index,
-                         nir_ssa_def *size_x, nir_ssa_def *size_y,
+static nir_def *
+id_to_index_no_umod_slow(nir_builder *b, nir_def *index,
+                         nir_def *size_x, nir_def *size_y,
                          unsigned bit_size)
 {
    /* We lower ID to Index with the following formula:
@@ -369,23 +369,23 @@ id_to_index_no_umod_slow(nir_builder *b, nir_ssa_def *index,
     * not compile time known or not a power of two.
     */
 
-   nir_ssa_def *size_x_y = nir_imul(b, size_x, size_y);
-   nir_ssa_def *id_z = nir_udiv(b, index, size_x_y);
-   nir_ssa_def *z_portion = nir_imul(b, id_z, size_x_y);
-   nir_ssa_def *id_y = nir_udiv(b, nir_isub(b, index, z_portion), size_x);
-   nir_ssa_def *y_portion = nir_imul(b, id_y, size_x);
-   nir_ssa_def *id_x = nir_isub(b, index, nir_iadd(b, z_portion, y_portion));
+   nir_def *size_x_y = nir_imul(b, size_x, size_y);
+   nir_def *id_z = nir_udiv(b, index, size_x_y);
+   nir_def *z_portion = nir_imul(b, id_z, size_x_y);
+   nir_def *id_y = nir_udiv(b, nir_isub(b, index, z_portion), size_x);
+   nir_def *y_portion = nir_imul(b, id_y, size_x);
+   nir_def *id_x = nir_isub(b, index, nir_iadd(b, z_portion, y_portion));
 
    return nir_u2uN(b, nir_vec3(b, id_x, id_y, id_z), bit_size);
 }
 
-static nir_ssa_def *
-lower_id_to_index_no_umod(nir_builder *b, nir_ssa_def *index,
-                          nir_ssa_def *size, unsigned bit_size,
+static nir_def *
+lower_id_to_index_no_umod(nir_builder *b, nir_def *index,
+                          nir_def *size, unsigned bit_size,
                           const uint16_t *size_imm,
                           bool shortcut_1d)
 {
-   nir_ssa_def *size_x, *size_y;
+   nir_def *size_x, *size_y;
 
    if (size_imm[0] > 0)
       size_x = nir_imm_int(b, size_imm[0]);
@@ -404,14 +404,14 @@ lower_id_to_index_no_umod(nir_builder *b, nir_ssa_def *index,
        *    id = id_to_index_no_umod_slow
        */
 
-      nir_ssa_def *size_z = nir_channel(b, size, 2);
-      nir_ssa_def *cond = nir_ieq(b, nir_iadd(b, size_y, size_z), nir_imm_int(b, 2));
+      nir_def *size_z = nir_channel(b, size, 2);
+      nir_def *cond = nir_ieq(b, nir_iadd(b, size_y, size_z), nir_imm_int(b, 2));
 
-      nir_ssa_def *val1, *val2;
+      nir_def *val1, *val2;
       nir_if *if_opt = nir_push_if(b, cond);
       if_opt->control = nir_selection_control_dont_flatten;
       {
-         nir_ssa_def *zero = nir_imm_int(b, 0);
+         nir_def *zero = nir_imm_int(b, 0);
          val1 = nir_u2uN(b, nir_vec3(b, index, zero, zero), bit_size);
       }
       nir_push_else(b, if_opt);
@@ -426,8 +426,8 @@ lower_id_to_index_no_umod(nir_builder *b, nir_ssa_def *index,
    }
 }
 
-static nir_ssa_def *
-lower_id_to_index(nir_builder *b, nir_ssa_def *index, nir_ssa_def *size,
+static nir_def *
+lower_id_to_index(nir_builder *b, nir_def *index, nir_def *size,
                   unsigned bit_size)
 {
    /* We lower gl_LocalInvocationID to gl_LocalInvocationIndex based
@@ -446,12 +446,12 @@ lower_id_to_index(nir_builder *b, nir_ssa_def *index, nir_ssa_def *size,
     * 64-bit arithmetic.
     */
 
-   nir_ssa_def *size_x = nir_channel(b, size, 0);
-   nir_ssa_def *size_y = nir_channel(b, size, 1);
+   nir_def *size_x = nir_channel(b, size, 0);
+   nir_def *size_y = nir_channel(b, size, 1);
 
-   nir_ssa_def *id_x = nir_umod(b, index, size_x);
-   nir_ssa_def *id_y = nir_umod(b, nir_udiv(b, index, size_x), size_y);
-   nir_ssa_def *id_z = nir_udiv(b, index, nir_imul(b, size_x, size_y));
+   nir_def *id_x = nir_umod(b, index, size_x);
+   nir_def *id_y = nir_umod(b, nir_udiv(b, index, size_x), size_y);
+   nir_def *id_z = nir_udiv(b, index, nir_imul(b, size_x, size_y));
 
    return nir_u2uN(b, nir_vec3(b, id_x, id_y, id_z), bit_size);
 }
@@ -462,8 +462,8 @@ lower_compute_system_value_filter(const nir_instr *instr, const void *_state)
    return instr->type == nir_instr_type_intrinsic;
 }
 
-static nir_ssa_def *
-try_lower_id_to_index_1d(nir_builder *b, nir_ssa_def *index, const uint16_t *size)
+static nir_def *
+try_lower_id_to_index_1d(nir_builder *b, nir_def *index, const uint16_t *size)
 {
    /* size_x = 1, size_y = 1, therefore Z = local index */
    if (size[0] == 1 && size[1] == 1)
@@ -480,7 +480,7 @@ try_lower_id_to_index_1d(nir_builder *b, nir_ssa_def *index, const uint16_t *siz
    return NULL;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_compute_system_value_instr(nir_builder *b,
                                  nir_instr *instr, void *_state)
 {
@@ -501,7 +501,7 @@ lower_compute_system_value_instr(nir_builder *b,
        */
       if (b->shader->options->lower_cs_local_id_to_index ||
           (options && options->lower_cs_local_id_to_index)) {
-         nir_ssa_def *local_index = nir_load_local_invocation_index(b);
+         nir_def *local_index = nir_load_local_invocation_index(b);
 
          if (!b->shader->info.workgroup_size_variable) {
             /* Shortcut for 1 dimensional workgroups:
@@ -510,26 +510,26 @@ lower_compute_system_value_instr(nir_builder *b,
              * this way we don't leave behind extra ALU instrs.
              */
 
-            nir_ssa_def *val = try_lower_id_to_index_1d(b, local_index,
-                                                        b->shader->info.workgroup_size);
+            nir_def *val = try_lower_id_to_index_1d(b, local_index,
+                                                    b->shader->info.workgroup_size);
             if (val)
                return val;
          }
 
-         nir_ssa_def *local_size = nir_load_workgroup_size(b);
+         nir_def *local_size = nir_load_workgroup_size(b);
          return lower_id_to_index(b, local_index, local_size, bit_size);
       }
       if (options && options->shuffle_local_ids_for_quad_derivatives &&
           b->shader->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS &&
           _mesa_set_search(state->lower_once_list, instr) == NULL) {
-         nir_ssa_def *ids = nir_load_local_invocation_id(b);
+         nir_def *ids = nir_load_local_invocation_id(b);
          _mesa_set_add(state->lower_once_list, ids->parent_instr);
 
-         nir_ssa_def *x = nir_channel(b, ids, 0);
-         nir_ssa_def *y = nir_channel(b, ids, 1);
-         nir_ssa_def *z = nir_channel(b, ids, 2);
+         nir_def *x = nir_channel(b, ids, 0);
+         nir_def *y = nir_channel(b, ids, 1);
+         nir_def *z = nir_channel(b, ids, 2);
          unsigned size_x = b->shader->info.workgroup_size[0];
-         nir_ssa_def *size_x_imm;
+         nir_def *size_x_imm;
 
          if (b->shader->info.workgroup_size_variable)
             size_x_imm = nir_channel(b, nir_load_workgroup_size(b), 0);
@@ -568,20 +568,20 @@ lower_compute_system_value_instr(nir_builder *b,
           * The 2D result is: (x,y) = (i % w, i / w)
           */
 
-         nir_ssa_def *one = nir_imm_int(b, 1);
-         nir_ssa_def *inv_one = nir_imm_int(b, ~1);
-         nir_ssa_def *x_bit0 = nir_iand(b, x, one);
-         nir_ssa_def *y_bit0 = nir_iand(b, y, one);
-         nir_ssa_def *x_bits_1n = nir_iand(b, x, inv_one);
-         nir_ssa_def *y_bits_1n = nir_iand(b, y, inv_one);
-         nir_ssa_def *bits_01 = nir_ior(b, x_bit0, nir_ishl(b, y_bit0, one));
-         nir_ssa_def *bits_01x = nir_ior(b, bits_01,
-                                         nir_ishl(b, x_bits_1n, one));
-         nir_ssa_def *i;
+         nir_def *one = nir_imm_int(b, 1);
+         nir_def *inv_one = nir_imm_int(b, ~1);
+         nir_def *x_bit0 = nir_iand(b, x, one);
+         nir_def *y_bit0 = nir_iand(b, y, one);
+         nir_def *x_bits_1n = nir_iand(b, x, inv_one);
+         nir_def *y_bits_1n = nir_iand(b, y, inv_one);
+         nir_def *bits_01 = nir_ior(b, x_bit0, nir_ishl(b, y_bit0, one));
+         nir_def *bits_01x = nir_ior(b, bits_01,
+                                     nir_ishl(b, x_bits_1n, one));
+         nir_def *i;
 
          if (!b->shader->info.workgroup_size_variable &&
              util_is_power_of_two_nonzero(size_x)) {
-            nir_ssa_def *log2_size_x = nir_imm_int(b, util_logbase2(size_x));
+            nir_def *log2_size_x = nir_imm_int(b, util_logbase2(size_x));
             i = nir_ior(b, bits_01x, nir_ishl(b, y_bits_1n, log2_size_x));
          } else {
             i = nir_iadd(b, bits_01x, nir_imul(b, y_bits_1n, size_x_imm));
@@ -602,7 +602,7 @@ lower_compute_system_value_instr(nir_builder *b,
       is_zero |= b->shader->info.workgroup_size[1] == 1 ? 0x2 : 0x0;
       is_zero |= b->shader->info.workgroup_size[2] == 1 ? 0x4 : 0x0;
       if (!b->shader->info.workgroup_size_variable && is_zero) {
-         nir_ssa_scalar defs[3];
+         nir_scalar defs[3];
          for (unsigned i = 0; i < 3; i++) {
             defs[i] = is_zero & (1 << i) ? nir_get_ssa_scalar(nir_imm_zero(b, 1, 32), 0) : nir_get_ssa_scalar(&intrin->dest.ssa, i);
          }
@@ -624,16 +624,16 @@ lower_compute_system_value_instr(nir_builder *b,
           *    gl_WorkGroupSize.y + gl_LocalInvocationID.y *
           *    gl_WorkGroupSize.x + gl_LocalInvocationID.x"
           */
-         nir_ssa_def *local_id = nir_load_local_invocation_id(b);
-         nir_ssa_def *local_size = nir_load_workgroup_size(b);
-         nir_ssa_def *size_x = nir_channel(b, local_size, 0);
-         nir_ssa_def *size_y = nir_channel(b, local_size, 1);
+         nir_def *local_id = nir_load_local_invocation_id(b);
+         nir_def *local_size = nir_load_workgroup_size(b);
+         nir_def *size_x = nir_channel(b, local_size, 0);
+         nir_def *size_y = nir_channel(b, local_size, 1);
 
          /* Because no hardware supports a local workgroup size greater than
           * about 1K, this calculation can be done in 32-bit and can save some
           * 64-bit arithmetic.
           */
-         nir_ssa_def *index;
+         nir_def *index;
          index = nir_imul(b, nir_channel(b, local_id, 2),
                           nir_imul(b, size_x, size_y));
          index = nir_iadd(b, index,
@@ -665,9 +665,9 @@ lower_compute_system_value_instr(nir_builder *b,
    case nir_intrinsic_load_global_invocation_id_zero_base: {
       if ((options && options->has_base_workgroup_id) ||
           !b->shader->options->has_cs_global_id) {
-         nir_ssa_def *group_size = nir_load_workgroup_size(b);
-         nir_ssa_def *group_id = nir_load_workgroup_id(b, bit_size);
-         nir_ssa_def *local_id = nir_load_local_invocation_id(b);
+         nir_def *group_size = nir_load_workgroup_size(b);
+         nir_def *group_id = nir_load_workgroup_id(b, bit_size);
+         nir_def *local_id = nir_load_local_invocation_id(b);
 
          return nir_iadd(b, nir_imul(b, group_id, nir_u2uN(b, group_size, bit_size)),
                          nir_u2uN(b, local_id, bit_size));
@@ -690,12 +690,12 @@ lower_compute_system_value_instr(nir_builder *b,
    case nir_intrinsic_load_global_invocation_index: {
       /* OpenCL's global_linear_id explicitly removes the global offset before computing this */
       assert(b->shader->info.stage == MESA_SHADER_KERNEL);
-      nir_ssa_def *global_base_id = nir_load_base_global_invocation_id(b, bit_size);
-      nir_ssa_def *global_id = nir_isub(b, nir_load_global_invocation_id(b, bit_size), global_base_id);
-      nir_ssa_def *global_size = build_global_group_size(b, bit_size);
+      nir_def *global_base_id = nir_load_base_global_invocation_id(b, bit_size);
+      nir_def *global_id = nir_isub(b, nir_load_global_invocation_id(b, bit_size), global_base_id);
+      nir_def *global_size = build_global_group_size(b, bit_size);
 
       /* index = id.x + ((id.y + (id.z * size.y)) * size.x) */
-      nir_ssa_def *index;
+      nir_def *index;
       index = nir_imul(b, nir_channel(b, global_id, 2),
                        nir_channel(b, global_size, 1));
       index = nir_iadd(b, nir_channel(b, global_id, 1), index);
@@ -709,9 +709,9 @@ lower_compute_system_value_instr(nir_builder *b,
          return nir_iadd(b, nir_u2uN(b, nir_load_workgroup_id_zero_base(b), bit_size),
                          nir_load_base_workgroup_id(b, bit_size));
       else if (options && options->lower_workgroup_id_to_index) {
-         nir_ssa_def *wg_idx = nir_load_workgroup_index(b);
+         nir_def *wg_idx = nir_load_workgroup_index(b);
 
-         nir_ssa_def *val =
+         nir_def *val =
             try_lower_id_to_index_1d(b, wg_idx, options->num_workgroups);
          if (val)
             return val;
@@ -740,7 +740,7 @@ lower_compute_system_value_instr(nir_builder *b,
 
       b->cursor = nir_after_instr(instr);
 
-      nir_ssa_def *num_wgs = &intrin->dest.ssa;
+      nir_def *num_wgs = &intrin->dest.ssa;
       for (unsigned i = 0; i < 3; ++i) {
          if (num_wgs_imm[i])
             num_wgs = nir_vector_insert_imm(b, num_wgs, nir_imm_int(b, num_wgs_imm[i]), i);
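
For context, the ID/index conversions in this file are plain mixed-radix
arithmetic. A small host-side check of the formulas used by lower_id_to_index
above (the helper name and the sample numbers are illustrative only):

   #include <assert.h>

   /* Round-trip check: linear index -> (x, y, z) -> linear index. */
   static void check_id_index_roundtrip(unsigned w, unsigned h, unsigned idx)
   {
      unsigned x = idx % w;       /* id_x = umod(index, size_x)               */
      unsigned y = (idx / w) % h; /* id_y = umod(udiv(index, size_x), size_y) */
      unsigned z = idx / (w * h); /* id_z = udiv(index, size_x * size_y)      */
      assert((z * h + y) * w + x == idx);
   }

   /* For example, w = 8, h = 4, idx = 27 gives (x, y, z) = (3, 3, 0). */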
index 2ca041b..9c3ff55 100644
@@ -55,17 +55,17 @@ lower_nv_task_output(nir_builder *b,
    switch (intrin->intrinsic) {
    case nir_intrinsic_load_output: {
       b->cursor = nir_after_instr(instr);
-      nir_ssa_def *load =
+      nir_def *load =
          nir_load_shared(b, 1, 32, nir_imm_int(b, 0),
                          .base = s->task_count_shared_addr);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, load);
+      nir_def_rewrite_uses(&intrin->dest.ssa, load);
       nir_instr_remove(instr);
       return true;
    }
 
    case nir_intrinsic_store_output: {
       b->cursor = nir_after_instr(instr);
-      nir_ssa_def *store_val = intrin->src[0].ssa;
+      nir_def *store_val = intrin->src[0].ssa;
       nir_store_shared(b, store_val, nir_imm_int(b, 0),
                        .base = s->task_count_shared_addr);
       nir_instr_remove(instr);
@@ -86,7 +86,7 @@ append_launch_mesh_workgroups_to_nv_task(nir_builder *b,
     * shader doesn't write the TASK_COUNT output.
     */
    b->cursor = nir_before_cf_list(&b->impl->body);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
    nir_store_shared(b, zero, zero, .base = s->task_count_shared_addr);
 
    nir_barrier(b,
@@ -106,7 +106,7 @@ append_launch_mesh_workgroups_to_nv_task(nir_builder *b,
                .memory_semantics = NIR_MEMORY_ACQUIRE,
                .memory_modes = nir_var_mem_shared);
 
-   nir_ssa_def *task_count =
+   nir_def *task_count =
       nir_load_shared(b, 1, 32, zero, .base = s->task_count_shared_addr);
 
    /* NV_mesh_shader doesn't offer to choose which task_payload variable
@@ -114,8 +114,8 @@ append_launch_mesh_workgroups_to_nv_task(nir_builder *b,
     */
    uint32_t range = b->shader->info.task_payload_size;
 
-   nir_ssa_def *one = nir_imm_int(b, 1);
-   nir_ssa_def *dispatch_3d = nir_vec3(b, task_count, one, one);
+   nir_def *one = nir_imm_int(b, 1);
+   nir_def *dispatch_3d = nir_vec3(b, task_count, one, one);
    nir_launch_mesh_workgroups(b, dispatch_3d, .base = 0, .range = range);
 }
 
@@ -186,14 +186,14 @@ lower_task_payload_to_shared(nir_builder *b,
 static void
 copy_shared_to_payload(nir_builder *b,
                        unsigned num_components,
-                       nir_ssa_def *addr,
+                       nir_def *addr,
                        unsigned shared_base,
                        unsigned off)
 {
    /* Read from shared memory. */
-   nir_ssa_def *copy = nir_load_shared(b, num_components, 32, addr,
-                                       .align_mul = 16,
-                                       .base = shared_base + off);
+   nir_def *copy = nir_load_shared(b, num_components, 32, addr,
+                                   .align_mul = 16,
+                                   .base = shared_base + off);
 
    /* Write to task payload memory. */
    nir_store_task_payload(b, copy, addr, .base = off);
@@ -223,8 +223,8 @@ emit_shared_to_payload_copy(nir_builder *b,
                    4);
    const unsigned base_shared_addr = s->payload_shared_addr + payload_addr;
 
-   nir_ssa_def *invocation_index = nir_load_local_invocation_index(b);
-   nir_ssa_def *addr = nir_imul_imm(b, invocation_index, vec4size);
+   nir_def *invocation_index = nir_load_local_invocation_index(b);
+   nir_def *addr = nir_imul_imm(b, invocation_index, vec4size);
 
    /* Wait for all previous shared stores to finish.
     * This is necessary because we placed the payload in shared memory.
@@ -256,7 +256,7 @@ emit_shared_to_payload_copy(nir_builder *b,
    if (remaining_vec4_copies > 0) {
       assert(remaining_vec4_copies < invocations);
 
-      nir_ssa_def *cmp = nir_ilt_imm(b, invocation_index, remaining_vec4_copies);
+      nir_def *cmp = nir_ilt_imm(b, invocation_index, remaining_vec4_copies);
       nir_if *if_stmt = nir_push_if(b, cmp);
       {
          copy_shared_to_payload(b, vec4size / 4, addr, base_shared_addr, off);
@@ -268,7 +268,7 @@ emit_shared_to_payload_copy(nir_builder *b,
    /* Copy the last few dwords not forming full vec4. */
    if (remaining_dwords > 0) {
       assert(remaining_dwords < 4);
-      nir_ssa_def *cmp = nir_ieq_imm(b, invocation_index, 0);
+      nir_def *cmp = nir_ieq_imm(b, invocation_index, 0);
       nir_if *if_stmt = nir_push_if(b, cmp);
       {
          copy_shared_to_payload(b, remaining_dwords, addr, base_shared_addr, off);
index 36a2a13..a215a35 100644
@@ -19,10 +19,10 @@ lower_tess_coord_z(nir_builder *b, nir_instr *instr, void *state)
       return false;
 
    b->cursor = nir_instr_remove(instr);
-   nir_ssa_def *xy = nir_load_tess_coord_xy(b);
-   nir_ssa_def *x = nir_channel(b, xy, 0);
-   nir_ssa_def *y = nir_channel(b, xy, 1);
-   nir_ssa_def *z;
+   nir_def *xy = nir_load_tess_coord_xy(b);
+   nir_def *x = nir_channel(b, xy, 0);
+   nir_def *y = nir_channel(b, xy, 1);
+   nir_def *z;
 
    bool *triangles = state;
    if (*triangles)
@@ -30,7 +30,7 @@ lower_tess_coord_z(nir_builder *b, nir_instr *instr, void *state)
    else
       z = nir_imm_float(b, 0.0f);
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_vec3(b, x, y, z));
+   nir_def_rewrite_uses(&intr->dest.ssa, nir_vec3(b, x, y, z));
    return true;
 }
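(The hunks above elide the triangle branch of the select. For reference, a
minimal sketch of the full lowering as it would read after the rename,
relying on the standard barycentric identity for triangle domains; the
exact expression in the source may differ.

   /* For triangle domains gl_TessCoord is barycentric, so z = 1 - x - y;
    * quad and isoline domains pin z to 0.
    */
   if (*triangles)
      z = nir_fsub(b, nir_fsub(b, nir_imm_float(b, 1.0f), x), y);
   else
      z = nir_imm_float(b, 0.0f);)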
 
index 378bcff..9691333 100644
@@ -99,12 +99,12 @@ static const float bt2020_full_range_csc_offsets[3] = {
 static bool
 project_src(nir_builder *b, nir_tex_instr *tex)
 {
-   nir_ssa_def *proj = nir_steal_tex_src(tex, nir_tex_src_projector);
+   nir_def *proj = nir_steal_tex_src(tex, nir_tex_src_projector);
    if (!proj)
       return false;
 
    b->cursor = nir_before_instr(&tex->instr);
-   nir_ssa_def *inv_proj = nir_frcp(b, proj);
+   nir_def *inv_proj = nir_frcp(b, proj);
 
    /* Walk through the sources projecting the arguments. */
    for (unsigned i = 0; i < tex->num_srcs; i++) {
@@ -115,9 +115,9 @@ project_src(nir_builder *b, nir_tex_instr *tex)
       default:
          continue;
       }
-      nir_ssa_def *unprojected =
+      nir_def *unprojected =
          nir_ssa_for_src(b, tex->src[i].src, nir_tex_instr_src_size(tex, i));
-      nir_ssa_def *projected = nir_fmul(b, unprojected, inv_proj);
+      nir_def *projected = nir_fmul(b, unprojected, inv_proj);
 
       /* Array indices don't get projected, so make a new vector with the
        * coordinate's array index untouched.
@@ -159,29 +159,29 @@ project_src(nir_builder *b, nir_tex_instr *tex)
 static bool
 lower_offset(nir_builder *b, nir_tex_instr *tex)
 {
-   nir_ssa_def *offset = nir_steal_tex_src(tex, nir_tex_src_offset);
+   nir_def *offset = nir_steal_tex_src(tex, nir_tex_src_offset);
    if (!offset)
       return false;
 
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
    assert(coord_index >= 0);
 
-   nir_ssa_def *coord = tex->src[coord_index].src.ssa;
+   nir_def *coord = tex->src[coord_index].src.ssa;
 
    b->cursor = nir_before_instr(&tex->instr);
 
-   nir_ssa_def *offset_coord;
+   nir_def *offset_coord;
    if (nir_tex_instr_src_type(tex, coord_index) == nir_type_float) {
       if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
          offset_coord = nir_fadd(b, coord, nir_i2f32(b, offset));
       } else {
-         nir_ssa_def *scale = NULL;
+         nir_def *scale = NULL;
 
          if (b->shader->options->has_texture_scaling) {
-            nir_ssa_def *idx = nir_imm_int(b, tex->texture_index);
+            nir_def *idx = nir_imm_int(b, tex->texture_index);
             scale = nir_load_texture_scale(b, 32, idx);
          } else {
-            nir_ssa_def *txs = nir_i2f32(b, nir_get_texture_size(b, tex));
+            nir_def *txs = nir_i2f32(b, nir_get_texture_size(b, tex));
             scale = nir_frcp(b, txs);
          }
 
@@ -222,12 +222,12 @@ lower_rect(nir_builder *b, nir_tex_instr *tex)
     */
    tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
 
-   nir_ssa_def *txs = nir_i2f32(b, nir_get_texture_size(b, tex));
-   nir_ssa_def *scale = nir_frcp(b, txs);
+   nir_def *txs = nir_i2f32(b, nir_get_texture_size(b, tex));
+   nir_def *scale = nir_frcp(b, txs);
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 
    if (coord_index != -1) {
-      nir_ssa_def *coords =
+      nir_def *coords =
          nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
       nir_instr_rewrite_src(&tex->instr,
                             &tex->src[coord_index].src,
@@ -240,12 +240,12 @@ lower_rect_tex_scale(nir_builder *b, nir_tex_instr *tex)
 {
    b->cursor = nir_before_instr(&tex->instr);
 
-   nir_ssa_def *idx = nir_imm_int(b, tex->texture_index);
-   nir_ssa_def *scale = nir_load_texture_scale(b, 32, idx);
+   nir_def *idx = nir_imm_int(b, tex->texture_index);
+   nir_def *scale = nir_load_texture_scale(b, 32, idx);
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 
    if (coord_index != -1) {
-      nir_ssa_def *coords =
+      nir_def *coords =
          nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
       nir_instr_rewrite_src(&tex->instr,
                             &tex->src[coord_index].src,
@@ -254,7 +254,7 @@ lower_rect_tex_scale(nir_builder *b, nir_tex_instr *tex)
 }
 
 static void
-lower_lod(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *lod)
+lower_lod(nir_builder *b, nir_tex_instr *tex, nir_def *lod)
 {
    assert(tex->op == nir_texop_tex || tex->op == nir_texop_txb);
    assert(nir_tex_instr_src_index(tex, nir_tex_src_lod) < 0);
@@ -262,12 +262,12 @@ lower_lod(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *lod)
    assert(nir_tex_instr_src_index(tex, nir_tex_src_ddy) < 0);
 
    /* If we have a bias, add it in */
-   nir_ssa_def *bias = nir_steal_tex_src(tex, nir_tex_src_bias);
+   nir_def *bias = nir_steal_tex_src(tex, nir_tex_src_bias);
    if (bias)
       lod = nir_fadd(b, lod, bias);
 
    /* If we have a minimum LOD, clamp LOD accordingly */
-   nir_ssa_def *min_lod = nir_steal_tex_src(tex, nir_tex_src_min_lod);
+   nir_def *min_lod = nir_steal_tex_src(tex, nir_tex_src_min_lod);
    if (min_lod)
       lod = nir_fmax(b, lod, min_lod);
 
@@ -288,7 +288,7 @@ lower_zero_lod(nir_builder *b, nir_tex_instr *tex)
    b->cursor = nir_before_instr(&tex->instr);
 
    if (tex->op == nir_texop_lod) {
-      nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_imm_int(b, 0));
+      nir_def_rewrite_uses(&tex->dest.ssa, nir_imm_int(b, 0));
       nir_instr_remove(&tex->instr);
       return;
    }
@@ -296,7 +296,7 @@ lower_zero_lod(nir_builder *b, nir_tex_instr *tex)
    lower_lod(b, tex, nir_imm_int(b, 0));
 }
 
-static nir_ssa_def *
+static nir_def *
 sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
              const nir_lower_tex_options *options)
 {
@@ -336,8 +336,8 @@ sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
 
 static void
 convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
-                   nir_ssa_def *y, nir_ssa_def *u, nir_ssa_def *v,
-                   nir_ssa_def *a,
+                   nir_def *y, nir_def *u, nir_def *v,
+                   nir_def *a,
                    const nir_lower_tex_options *options,
                    unsigned texture_index)
 {
@@ -371,7 +371,7 @@ convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
 
    unsigned bit_size = nir_dest_bit_size(tex->dest);
 
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_vec4(b,
                nir_imm_floatN_t(b, offset_vals[0], a->bit_size),
                nir_imm_floatN_t(b, offset_vals[1], a->bit_size),
@@ -380,14 +380,14 @@ convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
 
    offset = nir_f2fN(b, offset, bit_size);
 
-   nir_ssa_def *m0 = nir_f2fN(b, nir_build_imm(b, 4, 32, m->v[0]), bit_size);
-   nir_ssa_def *m1 = nir_f2fN(b, nir_build_imm(b, 4, 32, m->v[1]), bit_size);
-   nir_ssa_def *m2 = nir_f2fN(b, nir_build_imm(b, 4, 32, m->v[2]), bit_size);
+   nir_def *m0 = nir_f2fN(b, nir_build_imm(b, 4, 32, m->v[0]), bit_size);
+   nir_def *m1 = nir_f2fN(b, nir_build_imm(b, 4, 32, m->v[1]), bit_size);
+   nir_def *m2 = nir_f2fN(b, nir_build_imm(b, 4, 32, m->v[2]), bit_size);
 
-   nir_ssa_def *result =
+   nir_def *result =
       nir_ffma(b, y, m0, nir_ffma(b, u, m1, nir_ffma(b, v, m2, offset)));
 
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, result);
+   nir_def_rewrite_uses(&tex->dest.ssa, result);
 }
 
 static void
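(For reference, the nested FFMA chain built above evaluates the affine
colorspace conversion by columns, with the constant offset folded into the
innermost multiply-add; m and offset_vals come from the CSC tables earlier
in this file, outside this excerpt:

   result = y * m[0] + u * m[1] + v * m[2] + offset)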
@@ -397,8 +397,8 @@ lower_y_uv_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *y = sample_plane(b, tex, 0, options);
-   nir_ssa_def *uv = sample_plane(b, tex, 1, options);
+   nir_def *y = sample_plane(b, tex, 0, options);
+   nir_def *uv = sample_plane(b, tex, 1, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, y, 0),
@@ -416,8 +416,8 @@ lower_y_vu_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *y = sample_plane(b, tex, 0, options);
-   nir_ssa_def *vu = sample_plane(b, tex, 1, options);
+   nir_def *y = sample_plane(b, tex, 0, options);
+   nir_def *vu = sample_plane(b, tex, 1, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, y, 0),
@@ -435,9 +435,9 @@ lower_y_u_v_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *y = sample_plane(b, tex, 0, options);
-   nir_ssa_def *u = sample_plane(b, tex, 1, options);
-   nir_ssa_def *v = sample_plane(b, tex, 2, options);
+   nir_def *y = sample_plane(b, tex, 0, options);
+   nir_def *u = sample_plane(b, tex, 1, options);
+   nir_def *v = sample_plane(b, tex, 2, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, y, 0),
@@ -455,8 +455,8 @@ lower_yx_xuxv_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *y = sample_plane(b, tex, 0, options);
-   nir_ssa_def *xuxv = sample_plane(b, tex, 1, options);
+   nir_def *y = sample_plane(b, tex, 0, options);
+   nir_def *xuxv = sample_plane(b, tex, 1, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, y, 0),
@@ -474,8 +474,8 @@ lower_yx_xvxu_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *y = sample_plane(b, tex, 0, options);
-   nir_ssa_def *xvxu = sample_plane(b, tex, 1, options);
+   nir_def *y = sample_plane(b, tex, 0, options);
+   nir_def *xvxu = sample_plane(b, tex, 1, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, y, 0),
@@ -493,8 +493,8 @@ lower_xy_uxvx_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *y = sample_plane(b, tex, 0, options);
-   nir_ssa_def *uxvx = sample_plane(b, tex, 1, options);
+   nir_def *y = sample_plane(b, tex, 0, options);
+   nir_def *uxvx = sample_plane(b, tex, 1, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, y, 1),
@@ -512,8 +512,8 @@ lower_xy_vxux_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *y = sample_plane(b, tex, 0, options);
-   nir_ssa_def *vxux = sample_plane(b, tex, 1, options);
+   nir_def *y = sample_plane(b, tex, 0, options);
+   nir_def *vxux = sample_plane(b, tex, 1, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, y, 1),
@@ -531,7 +531,7 @@ lower_ayuv_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *ayuv = sample_plane(b, tex, 0, options);
+   nir_def *ayuv = sample_plane(b, tex, 0, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, ayuv, 2),
@@ -549,7 +549,7 @@ lower_y41x_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *y41x = sample_plane(b, tex, 0, options);
+   nir_def *y41x = sample_plane(b, tex, 0, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, y41x, 1),
@@ -567,7 +567,7 @@ lower_xyuv_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *xyuv = sample_plane(b, tex, 0, options);
+   nir_def *xyuv = sample_plane(b, tex, 0, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, xyuv, 2),
@@ -585,7 +585,7 @@ lower_yuv_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *yuv = sample_plane(b, tex, 0, options);
+   nir_def *yuv = sample_plane(b, tex, 0, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, yuv, 0),
@@ -603,7 +603,7 @@ lower_yu_yv_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *yuv = sample_plane(b, tex, 0, options);
+   nir_def *yuv = sample_plane(b, tex, 0, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, yuv, 1),
@@ -621,7 +621,7 @@ lower_yv_yu_external(nir_builder *b, nir_tex_instr *tex,
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *yuv = sample_plane(b, tex, 0, options);
+   nir_def *yuv = sample_plane(b, tex, 0, options);
 
    convert_yuv_to_rgb(b, tex,
                       nir_channel(b, yuv, 2),
@@ -637,7 +637,7 @@ lower_yv_yu_external(nir_builder *b, nir_tex_instr *tex,
  * computed from the gradients.
  */
 static void
-replace_gradient_with_lod(nir_builder *b, nir_ssa_def *lod, nir_tex_instr *tex)
+replace_gradient_with_lod(nir_builder *b, nir_def *lod, nir_tex_instr *tex)
 {
    assert(tex->op == nir_texop_txd);
 
@@ -645,7 +645,7 @@ replace_gradient_with_lod(nir_builder *b, nir_ssa_def *lod, nir_tex_instr *tex)
    nir_tex_instr_remove_src(tex, nir_tex_instr_src_index(tex, nir_tex_src_ddy));
 
    /* If we have a minimum LOD, clamp LOD accordingly */
-   nir_ssa_def *min_lod = nir_steal_tex_src(tex, nir_tex_src_min_lod);
+   nir_def *min_lod = nir_steal_tex_src(tex, nir_tex_src_min_lod);
    if (min_lod)
       lod = nir_fmax(b, lod, min_lod);
 
@@ -660,7 +660,7 @@ lower_gradient_cube_map(nir_builder *b, nir_tex_instr *tex)
    assert(tex->op == nir_texop_txd);
 
    /* Use textureSize() to get the width and height of LOD 0 */
-   nir_ssa_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
+   nir_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
 
    /* Cubemap texture lookups first generate a texture coordinate normalized
     * to [-1, 1] on the appropriate face. The appropriate face is determined
@@ -719,25 +719,25 @@ lower_gradient_cube_map(nir_builder *b, nir_tex_instr *tex)
     */
 
    /* coordinate */
-   nir_ssa_def *p =
+   nir_def *p =
       tex->src[nir_tex_instr_src_index(tex, nir_tex_src_coord)].src.ssa;
 
    /* unmodified dPdx, dPdy values */
-   nir_ssa_def *dPdx =
+   nir_def *dPdx =
       tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
-   nir_ssa_def *dPdy =
+   nir_def *dPdy =
       tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;
 
-   nir_ssa_def *abs_p = nir_fabs(b, p);
-   nir_ssa_def *abs_p_x = nir_channel(b, abs_p, 0);
-   nir_ssa_def *abs_p_y = nir_channel(b, abs_p, 1);
-   nir_ssa_def *abs_p_z = nir_channel(b, abs_p, 2);
+   nir_def *abs_p = nir_fabs(b, p);
+   nir_def *abs_p_x = nir_channel(b, abs_p, 0);
+   nir_def *abs_p_y = nir_channel(b, abs_p, 1);
+   nir_def *abs_p_z = nir_channel(b, abs_p, 2);
 
    /* 1. compute selector */
-   nir_ssa_def *Q, *dQdx, *dQdy;
+   nir_def *Q, *dQdx, *dQdy;
 
-   nir_ssa_def *cond_z = nir_fge(b, abs_p_z, nir_fmax(b, abs_p_x, abs_p_y));
-   nir_ssa_def *cond_y = nir_fge(b, abs_p_y, nir_fmax(b, abs_p_x, abs_p_z));
+   nir_def *cond_z = nir_fge(b, abs_p_z, nir_fmax(b, abs_p_x, abs_p_y));
+   nir_def *cond_y = nir_fge(b, abs_p_y, nir_fmax(b, abs_p_x, abs_p_z));
 
    unsigned yzx[3] = { 1, 2, 0 };
    unsigned xzy[3] = { 0, 2, 1 };
@@ -766,29 +766,29 @@ lower_gradient_cube_map(nir_builder *b, nir_tex_instr *tex)
     * dx = recip * ( dQdx.xy - (tmp * dQdx.z) );
     * dy = recip * ( dQdy.xy - (tmp * dQdy.z) );
     */
-   nir_ssa_def *rcp_Q_z = nir_frcp(b, nir_channel(b, Q, 2));
+   nir_def *rcp_Q_z = nir_frcp(b, nir_channel(b, Q, 2));
 
-   nir_ssa_def *Q_xy = nir_trim_vector(b, Q, 2);
-   nir_ssa_def *tmp = nir_fmul(b, Q_xy, rcp_Q_z);
+   nir_def *Q_xy = nir_trim_vector(b, Q, 2);
+   nir_def *tmp = nir_fmul(b, Q_xy, rcp_Q_z);
 
-   nir_ssa_def *dQdx_xy = nir_trim_vector(b, dQdx, 2);
-   nir_ssa_def *dQdx_z = nir_channel(b, dQdx, 2);
-   nir_ssa_def *dx =
+   nir_def *dQdx_xy = nir_trim_vector(b, dQdx, 2);
+   nir_def *dQdx_z = nir_channel(b, dQdx, 2);
+   nir_def *dx =
       nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdx_xy, nir_fmul(b, tmp, dQdx_z)));
 
-   nir_ssa_def *dQdy_xy = nir_trim_vector(b, dQdy, 2);
-   nir_ssa_def *dQdy_z = nir_channel(b, dQdy, 2);
-   nir_ssa_def *dy =
+   nir_def *dQdy_xy = nir_trim_vector(b, dQdy, 2);
+   nir_def *dQdy_z = nir_channel(b, dQdy, 2);
+   nir_def *dy =
       nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdy_xy, nir_fmul(b, tmp, dQdy_z)));
 
    /* M = max(dot(dx, dx), dot(dy, dy)); */
-   nir_ssa_def *M = nir_fmax(b, nir_fdot(b, dx, dx), nir_fdot(b, dy, dy));
+   nir_def *M = nir_fmax(b, nir_fdot(b, dx, dx), nir_fdot(b, dy, dy));
 
    /* size has textureSize() of LOD 0 */
-   nir_ssa_def *L = nir_channel(b, size, 0);
+   nir_def *L = nir_channel(b, size, 0);
 
    /* lod = -1.0 + 0.5 * log2(L * L * M); */
-   nir_ssa_def *lod =
+   nir_def *lod =
       nir_fadd(b,
                nir_imm_float(b, -1.0f),
                nir_fmul(b,
@@ -825,7 +825,7 @@ lower_gradient(nir_builder *b, nir_tex_instr *tex)
       break;
    }
 
-   nir_ssa_def *size =
+   nir_def *size =
       nir_channels(b, nir_i2f32(b, nir_get_texture_size(b, tex)),
                    component_mask);
 
@@ -833,15 +833,15 @@ lower_gradient(nir_builder *b, nir_tex_instr *tex)
     * gradients are s'(x,y), t'(x,y), and r'(x,y) from equation 3.19 in the
     * GL 3.0 spec; we want u'(x,y), which is w_t * s'(x,y).
     */
-   nir_ssa_def *ddx =
+   nir_def *ddx =
       tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
-   nir_ssa_def *ddy =
+   nir_def *ddy =
       tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;
 
-   nir_ssa_def *dPdx = nir_fmul(b, ddx, size);
-   nir_ssa_def *dPdy = nir_fmul(b, ddy, size);
+   nir_def *dPdx = nir_fmul(b, ddx, size);
+   nir_def *dPdy = nir_fmul(b, ddy, size);
 
-   nir_ssa_def *rho;
+   nir_def *rho;
    if (dPdx->num_components == 1) {
       rho = nir_fmax(b, nir_fabs(b, dPdx), nir_fabs(b, dPdy));
    } else {
@@ -851,7 +851,7 @@ lower_gradient(nir_builder *b, nir_tex_instr *tex)
    }
 
    /* lod = log2(rho).  We're ignoring GL state biases for now. */
-   nir_ssa_def *lod = nir_flog2(b, rho);
+   nir_def *lod = nir_flog2(b, rho);
 
    /* Replace the gradient instruction with an equivalent lod instruction */
    replace_gradient_with_lod(b, lod, tex);
@@ -881,8 +881,8 @@ lower_tex_to_txd(nir_builder *b, nir_tex_instr *tex)
    }
    int coord = nir_tex_instr_src_index(tex, nir_tex_src_coord);
    assert(coord >= 0);
-   nir_ssa_def *dfdx = nir_fddx(b, tex->src[coord].src.ssa);
-   nir_ssa_def *dfdy = nir_fddy(b, tex->src[coord].src.ssa);
+   nir_def *dfdx = nir_fddx(b, tex->src[coord].src.ssa);
+   nir_def *dfdy = nir_fddy(b, tex->src[coord].src.ssa);
    txd->src[tex->num_srcs] = nir_tex_src_for_ssa(nir_tex_src_ddx, dfdx);
    txd->src[tex->num_srcs + 1] = nir_tex_src_for_ssa(nir_tex_src_ddy, dfdy);
 
@@ -890,7 +890,7 @@ lower_tex_to_txd(nir_builder *b, nir_tex_instr *tex)
                      nir_dest_num_components(tex->dest),
                      nir_dest_bit_size(tex->dest));
    nir_builder_instr_insert(b, &txd->instr);
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txd->dest.ssa);
+   nir_def_rewrite_uses(&tex->dest.ssa, &txd->dest.ssa);
    nir_instr_remove(&tex->instr);
    return txd;
 }
@@ -919,7 +919,7 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)
          txl->src[i].src_type = tex->src[i].src_type;
       }
    }
-   nir_ssa_def *lod = nir_get_texture_lod(b, txl);
+   nir_def *lod = nir_get_texture_lod(b, txl);
 
    int bias_idx = nir_tex_instr_src_index(tex, nir_tex_src_bias);
    assert(bias_idx >= 0);
@@ -930,7 +930,7 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)
                      nir_dest_num_components(tex->dest),
                      nir_dest_bit_size(tex->dest));
    nir_builder_instr_insert(b, &txl->instr);
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
+   nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
    nir_instr_remove(&tex->instr);
    return txl;
 }
@@ -947,11 +947,11 @@ saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 
    if (coord_index != -1) {
-      nir_ssa_def *src =
+      nir_def *src =
          nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
 
       /* split src into components: */
-      nir_ssa_def *comp[4];
+      nir_def *comp[4];
 
       assume(tex->coord_components >= 1);
 
@@ -969,7 +969,7 @@ saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
                /* non-normalized texture coords, so clamp to texture
                 * size rather than [0.0, 1.0]
                 */
-               nir_ssa_def *txs = nir_i2f32(b, nir_get_texture_size(b, tex));
+               nir_def *txs = nir_i2f32(b, nir_get_texture_size(b, tex));
                comp[j] = nir_fmax(b, comp[j], nir_imm_float(b, 0.0));
                comp[j] = nir_fmin(b, comp[j], nir_channel(b, txs, j));
             } else {
@@ -988,7 +988,7 @@ saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
    return tex;
 }
 
-static nir_ssa_def *
+static nir_def *
 get_zero_or_one(nir_builder *b, nir_alu_type type, uint8_t swizzle_val)
 {
    nir_const_value v[4];
@@ -1015,10 +1015,10 @@ swizzle_tg4_broadcom(nir_builder *b, nir_tex_instr *tex)
 
    assert(nir_tex_instr_dest_size(tex) == 4);
    unsigned swiz[4] = { 2, 3, 1, 0 };
-   nir_ssa_def *swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
+   nir_def *swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
 
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, swizzled,
-                                  swizzled->parent_instr);
+   nir_def_rewrite_uses_after(&tex->dest.ssa, swizzled,
+                              swizzled->parent_instr);
 }
 
 static void
@@ -1026,7 +1026,7 @@ swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
 {
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *swizzled;
+   nir_def *swizzled;
    if (tex->op == nir_texop_tg4) {
       if (swizzle[tex->component] < 4) {
          /* This one's easy */
@@ -1043,7 +1043,7 @@ swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
          /* We have no 0s or 1s, just emit a swizzling MOV */
          swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
       } else {
-         nir_ssa_scalar srcs[4];
+         nir_scalar srcs[4];
          for (unsigned i = 0; i < 4; i++) {
             if (swizzle[i] < 4) {
                srcs[i] = nir_get_ssa_scalar(&tex->dest.ssa, swizzle[i]);
@@ -1055,8 +1055,8 @@ swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
       }
    }
 
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, swizzled,
-                                  swizzled->parent_instr);
+   nir_def_rewrite_uses_after(&tex->dest.ssa, swizzled,
+                              swizzled->parent_instr);
 }
 
 static void
@@ -1067,18 +1067,18 @@ linearize_srgb_result(nir_builder *b, nir_tex_instr *tex)
 
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *rgb =
+   nir_def *rgb =
       nir_format_srgb_to_linear(b, nir_trim_vector(b, &tex->dest.ssa, 3));
 
    /* alpha is untouched: */
-   nir_ssa_def *result = nir_vec4(b,
-                                  nir_channel(b, rgb, 0),
-                                  nir_channel(b, rgb, 1),
-                                  nir_channel(b, rgb, 2),
-                                  nir_channel(b, &tex->dest.ssa, 3));
-
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, result,
-                                  result->parent_instr);
+   nir_def *result = nir_vec4(b,
+                              nir_channel(b, rgb, 0),
+                              nir_channel(b, rgb, 1),
+                              nir_channel(b, rgb, 2),
+                              nir_channel(b, &tex->dest.ssa, 3));
+
+   nir_def_rewrite_uses_after(&tex->dest.ssa, result,
+                              result->parent_instr);
 }
 
 /**
@@ -1094,7 +1094,7 @@ static bool
 lower_tex_packing(nir_builder *b, nir_tex_instr *tex,
                   const nir_lower_tex_options *options)
 {
-   nir_ssa_def *color = &tex->dest.ssa;
+   nir_def *color = &tex->dest.ssa;
 
    b->cursor = nir_after_instr(&tex->instr);
 
@@ -1117,15 +1117,15 @@ lower_tex_packing(nir_builder *b, nir_tex_instr *tex,
             color = nir_unpack_half_2x16_split_x(b, nir_channel(b, color, 0));
             break;
          case 2: {
-            nir_ssa_def *rg = nir_channel(b, color, 0);
+            nir_def *rg = nir_channel(b, color, 0);
             color = nir_vec2(b,
                              nir_unpack_half_2x16_split_x(b, rg),
                              nir_unpack_half_2x16_split_y(b, rg));
             break;
          }
          case 4: {
-            nir_ssa_def *rg = nir_channel(b, color, 0);
-            nir_ssa_def *ba = nir_channel(b, color, 1);
+            nir_def *rg = nir_channel(b, color, 0);
+            nir_def *ba = nir_channel(b, color, 1);
             color = nir_vec4(b,
                              nir_unpack_half_2x16_split_x(b, rg),
                              nir_unpack_half_2x16_split_y(b, rg),
@@ -1158,8 +1158,8 @@ lower_tex_packing(nir_builder *b, nir_tex_instr *tex,
       break;
    }
 
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, color,
-                                  color->parent_instr);
+   nir_def_rewrite_uses_after(&tex->dest.ssa, color,
+                              color->parent_instr);
    return true;
 }
 
@@ -1191,8 +1191,8 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
 
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_scalar dest[5] = { 0 };
-   nir_ssa_def *residency = NULL;
+   nir_scalar dest[5] = { 0 };
+   nir_def *residency = NULL;
    for (unsigned i = 0; i < 4; ++i) {
       nir_tex_instr *tex_copy = nir_tex_instr_create(b->shader, tex->num_srcs + 1);
       tex_copy->op = tex->op;
@@ -1214,8 +1214,8 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
          tex_copy->src[j].src_type = tex->src[j].src_type;
       }
 
-      nir_ssa_def *offset = nir_imm_ivec2(b, tex->tg4_offsets[i][0],
-                                          tex->tg4_offsets[i][1]);
+      nir_def *offset = nir_imm_ivec2(b, tex->tg4_offsets[i][0],
+                                      tex->tg4_offsets[i][1]);
       nir_tex_src src = nir_tex_src_for_ssa(nir_tex_src_offset, offset);
       tex_copy->src[tex_copy->num_srcs - 1] = src;
 
@@ -1226,7 +1226,7 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
 
       dest[i] = nir_get_ssa_scalar(&tex_copy->dest.ssa, 3);
       if (tex->is_sparse) {
-         nir_ssa_def *code = nir_channel(b, &tex_copy->dest.ssa, 4);
+         nir_def *code = nir_channel(b, &tex_copy->dest.ssa, 4);
          if (residency)
             residency = nir_sparse_residency_code_and(b, residency, code);
          else
@@ -1235,8 +1235,8 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
    }
    dest[4] = nir_get_ssa_scalar(residency, 0);
 
-   nir_ssa_def *res = nir_vec_scalars(b, dest, tex->dest.ssa.num_components);
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, res);
+   nir_def *res = nir_vec_scalars(b, dest, tex->dest.ssa.num_components);
+   nir_def_rewrite_uses(&tex->dest.ssa, res);
    nir_instr_remove(&tex->instr);
 
    return true;
@@ -1254,7 +1254,7 @@ nir_lower_txs_lod(nir_builder *b, nir_tex_instr *tex)
    unsigned dest_size = nir_tex_instr_dest_size(tex);
 
    b->cursor = nir_before_instr(&tex->instr);
-   nir_ssa_def *lod = nir_ssa_for_src(b, tex->src[lod_idx].src, 1);
+   nir_def *lod = nir_ssa_for_src(b, tex->src[lod_idx].src, 1);
 
    /* Replace the non-0-LOD in the initial TXS operation by a 0-LOD. */
    nir_instr_rewrite_src(&tex->instr, &tex->src[lod_idx].src,
@@ -1265,15 +1265,15 @@ nir_lower_txs_lod(nir_builder *b, nir_tex_instr *tex)
     * which should return 0, not 1.
     */
    b->cursor = nir_after_instr(&tex->instr);
-   nir_ssa_def *minified = nir_imin(b, &tex->dest.ssa,
-                                    nir_imax(b, nir_ushr(b, &tex->dest.ssa, lod),
-                                             nir_imm_int(b, 1)));
+   nir_def *minified = nir_imin(b, &tex->dest.ssa,
+                                nir_imax(b, nir_ushr(b, &tex->dest.ssa, lod),
+                                         nir_imm_int(b, 1)));
 
    /* Make sure the component encoding the array size (if any) is not
     * minified.
     */
    if (tex->is_array) {
-      nir_ssa_def *comp[3];
+      nir_def *comp[3];
 
       assert(dest_size <= ARRAY_SIZE(comp));
       for (unsigned i = 0; i < dest_size - 1; i++)
@@ -1283,8 +1283,8 @@ nir_lower_txs_lod(nir_builder *b, nir_tex_instr *tex)
       minified = nir_vec(b, comp, dest_size);
    }
 
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, minified,
-                                  minified->parent_instr);
+   nir_def_rewrite_uses_after(&tex->dest.ssa, minified,
+                              minified->parent_instr);
    return true;
 }
 
@@ -1297,13 +1297,13 @@ nir_lower_txs_cube_array(nir_builder *b, nir_tex_instr *tex)
    b->cursor = nir_after_instr(&tex->instr);
 
    assert(tex->dest.ssa.num_components == 3);
-   nir_ssa_def *size = &tex->dest.ssa;
+   nir_def *size = &tex->dest.ssa;
    size = nir_vec3(b, nir_channel(b, size, 1),
                    nir_channel(b, size, 1),
                    nir_idiv(b, nir_channel(b, size, 2),
                             nir_imm_int(b, 6)));
 
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, size, size->parent_instr);
+   nir_def_rewrite_uses_after(&tex->dest.ssa, size, size->parent_instr);
 }
 
 /* Adjust the sample index according to AMD FMASK (fragment mask).
@@ -1358,8 +1358,8 @@ nir_lower_ms_txf_to_fragment_fetch(nir_builder *b, nir_tex_instr *tex)
    int ms_index = nir_tex_instr_src_index(tex, nir_tex_src_ms_index);
    assert(ms_index >= 0);
    nir_src sample = tex->src[ms_index].src;
-   nir_ssa_def *new_sample = nir_ubfe(b, &fmask_fetch->dest.ssa,
-                                      nir_ishl_imm(b, sample.ssa, 2), nir_imm_int(b, 3));
+   nir_def *new_sample = nir_ubfe(b, &fmask_fetch->dest.ssa,
+                                  nir_ishl_imm(b, sample.ssa, 2), nir_imm_int(b, 3));
 
    /* Update instruction. */
    tex->op = nir_texop_fragment_fetch_amd;
@@ -1377,7 +1377,7 @@ nir_lower_samples_identical_to_fragment_fetch(nir_builder *b, nir_tex_instr *tex
    nir_ssa_dest_init(&fmask_fetch->instr, &fmask_fetch->dest, 1, 32);
    nir_builder_instr_insert(b, &fmask_fetch->instr);
 
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_ieq_imm(b, &fmask_fetch->dest.ssa, 0));
+   nir_def_rewrite_uses(&tex->dest.ssa, nir_ieq_imm(b, &fmask_fetch->dest.ssa, 0));
    nir_instr_remove_v(&tex->instr);
 }
 
@@ -1389,28 +1389,28 @@ nir_lower_lod_zero_width(nir_builder *b, nir_tex_instr *tex)
 
    b->cursor = nir_after_instr(&tex->instr);
 
-   nir_ssa_def *is_zero = nir_imm_true(b);
+   nir_def *is_zero = nir_imm_true(b);
    for (unsigned i = 0; i < tex->coord_components; i++) {
-      nir_ssa_def *coord = nir_channel(b, tex->src[coord_index].src.ssa, i);
+      nir_def *coord = nir_channel(b, tex->src[coord_index].src.ssa, i);
 
       /* Compute the sum of the absolute values of derivatives. */
-      nir_ssa_def *dfdx = nir_fddx(b, coord);
-      nir_ssa_def *dfdy = nir_fddy(b, coord);
-      nir_ssa_def *fwidth = nir_fadd(b, nir_fabs(b, dfdx), nir_fabs(b, dfdy));
+      nir_def *dfdx = nir_fddx(b, coord);
+      nir_def *dfdy = nir_fddy(b, coord);
+      nir_def *fwidth = nir_fadd(b, nir_fabs(b, dfdx), nir_fabs(b, dfdy));
 
       /* Check if the sum is 0. */
       is_zero = nir_iand(b, is_zero, nir_feq_imm(b, fwidth, 0.0));
    }
 
    /* Replace the raw LOD by -FLT_MAX if the sum is 0 for all coordinates. */
-   nir_ssa_def *adjusted_lod =
+   nir_def *adjusted_lod =
       nir_bcsel(b, is_zero, nir_imm_float(b, -FLT_MAX),
                 nir_channel(b, &tex->dest.ssa, 1));
 
-   nir_ssa_def *def =
+   nir_def *def =
       nir_vec2(b, nir_channel(b, &tex->dest.ssa, 0), adjusted_lod);
 
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, def, def->parent_instr);
+   nir_def_rewrite_uses_after(&tex->dest.ssa, def, def->parent_instr);
 }
 
 static bool
@@ -1436,7 +1436,7 @@ lower_index_to_offset(nir_builder *b, nir_tex_instr *tex)
       if ((*index) == 0)
          continue;
 
-      nir_ssa_def *sum = nir_iadd_imm(b, tex->src[i].src.ssa, *index);
+      nir_def *sum = nir_iadd_imm(b, tex->src[i].src.ssa, *index);
       nir_instr_rewrite_src(&tex->instr, &tex->src[i].src,
                             nir_src_for_ssa(sum));
       *index = 0;
index 48030af..b3c54d7 100644
@@ -71,7 +71,7 @@ typedef struct {
    nir_lower_tex_shadow_swizzle *tex_swizzles;
 } sampler_state;
 
-static nir_ssa_def *
+static nir_def *
 nir_lower_tex_shadow_impl(nir_builder *b, nir_instr *instr, void *options)
 
 {
@@ -99,24 +99,24 @@ nir_lower_tex_shadow_impl(nir_builder *b, nir_instr *instr, void *options)
    /* NIR expects a vec4 result from the above texture instructions */
    nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
 
-   nir_ssa_def *tex_r = nir_channel(b, &tex->dest.ssa, 0);
-   nir_ssa_def *cmp = tex->src[comp_index].src.ssa;
+   nir_def *tex_r = nir_channel(b, &tex->dest.ssa, 0);
+   nir_def *cmp = tex->src[comp_index].src.ssa;
 
    int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
    if (proj_index >= 0)
       cmp = nir_fmul(b, cmp, nir_frcp(b, tex->src[proj_index].src.ssa));
 
-   nir_ssa_def *result =
+   nir_def *result =
       nir_compare_func(b,
                        sampler_binding < state->n_states ? state->compare_func[sampler_binding] : COMPARE_FUNC_ALWAYS,
                        cmp, tex_r);
 
    result = nir_b2f32(b, result);
-   nir_ssa_def *one = nir_imm_float(b, 1.0);
-   nir_ssa_def *zero = nir_imm_float(b, 0.0);
+   nir_def *one = nir_imm_float(b, 1.0);
+   nir_def *zero = nir_imm_float(b, 0.0);
 
-   nir_ssa_def *lookup[6] = { result, NULL, NULL, NULL, zero, one };
-   nir_ssa_def *r[4] = { result, result, result, result };
+   nir_def *lookup[6] = { result, NULL, NULL, NULL, zero, one };
+   nir_def *r[4] = { result, result, result, result };
 
    if (sampler_binding < state->n_states) {
       r[0] = lookup[state->tex_swizzles[sampler_binding].swizzle_r];
index 849d2b9..7b54bcb 100644
@@ -28,7 +28,7 @@
 #include "nir_builder.h"
 #include "nir_deref.h"
 
-static nir_ssa_def *
+static nir_def *
 get_io_index(nir_builder *b, nir_deref_instr *deref)
 {
    nir_deref_path path;
@@ -38,13 +38,13 @@ get_io_index(nir_builder *b, nir_deref_instr *deref)
    nir_deref_instr **p = &path.path[1];
 
    /* Just emit code and let constant-folding go to town */
-   nir_ssa_def *offset = nir_imm_int(b, 0);
+   nir_def *offset = nir_imm_int(b, 0);
 
    for (; *p; p++) {
       if ((*p)->deref_type == nir_deref_type_array) {
          unsigned size = glsl_get_length((*p)->type);
 
-         nir_ssa_def *mul =
+         nir_def *mul =
             nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
 
          offset = nir_iadd(b, offset, mul);
@@ -65,7 +65,7 @@ nir_lower_texcoord_replace_impl(nir_function_impl *impl,
 {
    nir_builder b = nir_builder_at(nir_before_cf_list(&impl->body));
 
-   nir_ssa_def *new_coord;
+   nir_def *new_coord;
    if (point_coord_is_sysval) {
       new_coord = nir_load_system_value(&b, nir_intrinsic_load_point_coord,
                                         0, 2, 32);
@@ -81,9 +81,9 @@ nir_lower_texcoord_replace_impl(nir_function_impl *impl,
    /* point-coord is two-component, need to add two implicit ones in case of
     * projective texturing etc.
     */
-   nir_ssa_def *zero = nir_imm_zero(&b, 1, new_coord->bit_size);
-   nir_ssa_def *one = nir_imm_floatN_t(&b, 1.0, new_coord->bit_size);
-   nir_ssa_def *y = nir_channel(&b, new_coord, 1);
+   nir_def *zero = nir_imm_zero(&b, 1, new_coord->bit_size);
+   nir_def *one = nir_imm_floatN_t(&b, 1.0, new_coord->bit_size);
+   nir_def *y = nir_channel(&b, new_coord, 1);
    if (yinvert)
       y = nir_fsub_imm(&b, 1.0, y);
    new_coord = nir_vec4(&b, nir_channel(&b, new_coord, 0),
@@ -108,18 +108,18 @@ nir_lower_texcoord_replace_impl(nir_function_impl *impl,
 
          b.cursor = nir_after_instr(instr);
          nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
-         nir_ssa_def *index = get_io_index(&b, deref);
-         nir_ssa_def *mask =
+         nir_def *index = get_io_index(&b, deref);
+         nir_def *mask =
             nir_ishl(&b, nir_imm_int(&b, 1),
                      nir_iadd_imm(&b, index, base));
 
-         nir_ssa_def *cond = nir_test_mask(&b, mask, coord_replace);
-         nir_ssa_def *result = nir_bcsel(&b, cond, new_coord,
-                                         &intrin->dest.ssa);
+         nir_def *cond = nir_test_mask(&b, mask, coord_replace);
+         nir_def *result = nir_bcsel(&b, cond, new_coord,
+                                     &intrin->dest.ssa);
 
-         nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
-                                        result,
-                                        result->parent_instr);
+         nir_def_rewrite_uses_after(&intrin->dest.ssa,
+                                    result,
+                                    result->parent_instr);
       }
    }
 
index 6a43974..c5d1ea1 100644
@@ -13,13 +13,13 @@ struct opts {
    bool point_coord_is_sysval;
 };
 
-static nir_ssa_def *
-nir_channel_or_undef(nir_builder *b, nir_ssa_def *def, signed int channel)
+static nir_def *
+nir_channel_or_undef(nir_builder *b, nir_def *def, signed int channel)
 {
    if (channel >= 0 && channel < def->num_components)
       return nir_channel(b, def, channel);
    else
-      return nir_ssa_undef(b, def->bit_size, 1);
+      return nir_undef(b, def->bit_size, 1);
 }
 
 static bool
@@ -49,14 +49,14 @@ pass(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *channels[4] = {
+   nir_def *channels[4] = {
       NULL, NULL,
       nir_imm_float(b, 0.0),
       nir_imm_float(b, 1.0)
    };
 
    if (opts->point_coord_is_sysval) {
-      nir_ssa_def *pntc = nir_load_point_coord(b);
+      nir_def *pntc = nir_load_point_coord(b);
 
       b->cursor = nir_after_instr(instr);
       channels[0] = nir_channel(b, pntc, 0);
@@ -65,16 +65,16 @@ pass(nir_builder *b, nir_instr *instr, void *data)
       sem.location = VARYING_SLOT_PNTC;
       nir_instr_rewrite_src_ssa(instr, offset, nir_imm_int(b, 0));
       nir_intrinsic_set_io_semantics(intr, sem);
-      nir_ssa_def *raw = &intr->dest.ssa;
+      nir_def *raw = &intr->dest.ssa;
 
       b->cursor = nir_after_instr(instr);
       channels[0] = nir_channel_or_undef(b, raw, 0 - component);
       channels[1] = nir_channel_or_undef(b, raw, 1 - component);
    }
 
-   nir_ssa_def *res = nir_vec(b, &channels[component], intr->num_components);
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, res,
-                                  res->parent_instr);
+   nir_def *res = nir_vec(b, &channels[component], intr->num_components);
+   nir_def_rewrite_uses_after(&intr->dest.ssa, res,
+                              res->parent_instr);
    return true;
 }
 
index eed09f2..cfd47bc 100644
@@ -58,7 +58,7 @@ create_input(nir_shader *shader, gl_varying_slot slot,
    return var;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_input(nir_builder *b, nir_variable *in)
 {
    return nir_load_input(b, 4, 32, nir_imm_int(b, 0),
@@ -143,7 +143,7 @@ nir_lower_two_sided_color_instr(nir_builder *b, nir_instr *instr, void *data)
    /* gl_FrontFace is a boolean but the intrinsic constructor creates
     * 32-bit value by default.
     */
-   nir_ssa_def *face;
+   nir_def *face;
    if (state->face_sysval)
       face = nir_load_front_face(b, 1);
    else {
@@ -153,7 +153,7 @@ nir_lower_two_sided_color_instr(nir_builder *b, nir_instr *instr, void *data)
       face = nir_load_var(b, var);
    }
 
-   nir_ssa_def *front, *back;
+   nir_def *front, *back;
    if (intr->intrinsic == nir_intrinsic_load_deref) {
       front = nir_load_var(b, state->colors[idx].front);
       back = nir_load_var(b, state->colors[idx].back);
@@ -161,9 +161,9 @@ nir_lower_two_sided_color_instr(nir_builder *b, nir_instr *instr, void *data)
       front = load_input(b, state->colors[idx].front);
       back = load_input(b, state->colors[idx].back);
    }
-   nir_ssa_def *color = nir_bcsel(b, face, front, back);
+   nir_def *color = nir_bcsel(b, face, front, back);
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, color);
+   nir_def_rewrite_uses(&intr->dest.ssa, color);
 
    return true;
 }
index e51f94e..36330e8 100644
@@ -73,22 +73,22 @@ nir_lower_ubo_vec4_filter(const nir_instr *instr, const void *data)
 }
 
 static nir_intrinsic_instr *
-create_load(nir_builder *b, nir_ssa_def *block, nir_ssa_def *offset,
+create_load(nir_builder *b, nir_def *block, nir_def *offset,
             unsigned bit_size, unsigned num_components)
 {
-   nir_ssa_def *def = nir_load_ubo_vec4(b, num_components, bit_size, block, offset);
+   nir_def *def = nir_load_ubo_vec4(b, num_components, bit_size, block, offset);
    return nir_instr_as_intrinsic(def->parent_instr);
 }
 
-static nir_ssa_def *
+static nir_def *
 nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
 {
    b->cursor = nir_before_instr(instr);
 
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
 
-   nir_ssa_def *byte_offset = nir_ssa_for_src(b, intr->src[1], 1);
-   nir_ssa_def *vec4_offset = nir_ushr_imm(b, byte_offset, 4);
+   nir_def *byte_offset = nir_ssa_for_src(b, intr->src[1], 1);
+   nir_def *vec4_offset = nir_ushr_imm(b, byte_offset, 4);
 
    unsigned align_mul = nir_intrinsic_align_mul(intr);
    unsigned align_offset = nir_intrinsic_align_offset(intr);
@@ -115,7 +115,7 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
 
    nir_intrinsic_set_access(load, nir_intrinsic_access(intr));
 
-   nir_ssa_def *result = &load->dest.ssa;
+   nir_def *result = &load->dest.ssa;
 
    int align_chan_offset = align_offset / chan_size_bytes;
    if (aligned_mul) {
@@ -127,7 +127,7 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
       /* If we're loading a single component, that component alone won't
        * straddle a vec4 boundary so we can do this with a single UBO load.
        */
-      nir_ssa_def *component =
+      nir_def *component =
          nir_iand_imm(b,
                       nir_udiv_imm(b, byte_offset, chan_size_bytes),
                       chans_per_vec4 - 1);
@@ -149,18 +149,18 @@ nir_lower_ubo_vec4_lower(nir_builder *b, nir_instr *instr, void *data)
       /* General fallback case: Per-result-channel bcsel-based extraction
        * from two separate vec4 loads.
        */
-      nir_ssa_def *next_vec4_offset = nir_iadd_imm(b, vec4_offset, 1);
+      nir_def *next_vec4_offset = nir_iadd_imm(b, vec4_offset, 1);
       nir_intrinsic_instr *next_load = create_load(b, intr->src[0].ssa, next_vec4_offset,
                                                    intr->dest.ssa.bit_size,
                                                    num_components);
 
-      nir_ssa_def *channels[NIR_MAX_VEC_COMPONENTS];
+      nir_def *channels[NIR_MAX_VEC_COMPONENTS];
       for (unsigned i = 0; i < intr->num_components; i++) {
-         nir_ssa_def *chan_byte_offset = nir_iadd_imm(b, byte_offset, i * chan_size_bytes);
+         nir_def *chan_byte_offset = nir_iadd_imm(b, byte_offset, i * chan_size_bytes);
 
-         nir_ssa_def *chan_vec4_offset = nir_ushr_imm(b, chan_byte_offset, 4);
+         nir_def *chan_vec4_offset = nir_ushr_imm(b, chan_byte_offset, 4);
 
-         nir_ssa_def *component =
+         nir_def *component =
             nir_iand_imm(b,
                          nir_udiv_imm(b, chan_byte_offset, chan_size_bytes),
                          chans_per_vec4 - 1);
index 93e5617..0cd41fc 100644
@@ -46,11 +46,11 @@ lower_undef_instr_to_zero(nir_builder *b, nir_instr *instr, UNUSED void *_state)
    if (instr->type != nir_instr_type_ssa_undef)
       return false;
 
-   nir_ssa_undef_instr *und = nir_instr_as_ssa_undef(instr);
+   nir_undef_instr *und = nir_instr_as_ssa_undef(instr);
    b->cursor = nir_instr_remove(&und->instr);
-   nir_ssa_def *zero = nir_imm_zero(b, und->def.num_components,
-                                    und->def.bit_size);
-   nir_ssa_def_rewrite_uses(&und->def, zero);
+   nir_def *zero = nir_imm_zero(b, und->def.num_components,
+                                und->def.bit_size);
+   nir_def_rewrite_uses(&und->def, zero);
    return true;
 }
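(The callback above has the standard instructions-pass shape
(nir_builder *, nir_instr *, void *). A hedged sketch of the wrapper that
would drive it; the real entry point is outside this excerpt, so take the
function name as illustrative.

   bool
   nir_lower_undef_to_zero(nir_shader *shader)
   {
      /* Visit every instruction, replacing each ssa_undef with an
       * all-zero constant via the callback above.
       */
      return nir_shader_instructions_pass(shader, lower_undef_instr_to_zero,
                                          nir_metadata_block_index |
                                             nir_metadata_dominance,
                                          NULL);
   })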
 
index f38bd37..ba99c0e 100644
@@ -58,19 +58,19 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
    /* Increase all UBO binding points by 1. */
    if (intr->intrinsic == nir_intrinsic_load_ubo &&
        !b->shader->info.first_ubo_is_default_ubo) {
-      nir_ssa_def *old_idx = nir_ssa_for_src(b, intr->src[0], 1);
-      nir_ssa_def *new_idx = nir_iadd_imm(b, old_idx, 1);
+      nir_def *old_idx = nir_ssa_for_src(b, intr->src[0], 1);
+      nir_def *new_idx = nir_iadd_imm(b, old_idx, 1);
       nir_instr_rewrite_src(&intr->instr, &intr->src[0],
                             nir_src_for_ssa(new_idx));
       return true;
    }
 
    if (intr->intrinsic == nir_intrinsic_load_uniform) {
-      nir_ssa_def *ubo_idx = nir_imm_int(b, 0);
-      nir_ssa_def *uniform_offset = nir_ssa_for_src(b, intr->src[0], 1);
+      nir_def *ubo_idx = nir_imm_int(b, 0);
+      nir_def *uniform_offset = nir_ssa_for_src(b, intr->src[0], 1);
 
       assert(intr->dest.ssa.bit_size >= 8);
-      nir_ssa_def *load_result;
+      nir_def *load_result;
       if (state->load_vec4) {
          /* Don't ask us to generate load_vec4 when you've packed your uniforms
           * as dwords instead of vec4s.
@@ -109,7 +109,7 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
          nir_intrinsic_set_range_base(load, nir_intrinsic_base(intr) * multiplier);
          nir_intrinsic_set_range(load, nir_intrinsic_range(intr) * multiplier);
       }
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, load_result);
+      nir_def_rewrite_uses(&intr->dest.ssa, load_result);
 
       nir_instr_remove(&intr->instr);
       return true;
index cf9ae5f..6417fff 100644
@@ -161,13 +161,13 @@ nir_zero_initialize_shared_memory(nir_shader *shader,
 
    nir_variable *it = nir_local_variable_create(b.impl, glsl_uint_type(),
                                                 "zero_init_iterator");
-   nir_ssa_def *local_index = nir_load_local_invocation_index(&b);
-   nir_ssa_def *first_offset = nir_imul_imm(&b, local_index, chunk_size);
+   nir_def *local_index = nir_load_local_invocation_index(&b);
+   nir_def *first_offset = nir_imul_imm(&b, local_index, chunk_size);
    nir_store_var(&b, it, first_offset, 0x1);
 
    nir_loop *loop = nir_push_loop(&b);
    {
-      nir_ssa_def *offset = nir_load_var(&b, it);
+      nir_def *offset = nir_load_var(&b, it);
 
       nir_push_if(&b, nir_uge_imm(&b, offset, shared_size));
       {
@@ -179,7 +179,7 @@ nir_zero_initialize_shared_memory(nir_shader *shader,
                        .align_mul = chunk_size,
                        .write_mask = ((1 << chunk_comps) - 1));
 
-      nir_ssa_def *new_offset = nir_iadd_imm(&b, offset, chunk_size * local_count);
+      nir_def *new_offset = nir_iadd_imm(&b, offset, chunk_size * local_count);
       nir_store_var(&b, it, new_offset, 0x1);
    }
    nir_pop_loop(&b, loop);
index 3852ea9..3da12b7 100644
@@ -411,15 +411,15 @@ register_load_instr(nir_intrinsic_instr *load_instr,
     * expect any array derefs at all after vars_to_ssa.
     */
    if (node == UNDEF_NODE) {
-      nir_ssa_undef_instr *undef =
-         nir_ssa_undef_instr_create(state->shader,
-                                    load_instr->num_components,
-                                    load_instr->dest.ssa.bit_size);
+      nir_undef_instr *undef =
+         nir_undef_instr_create(state->shader,
+                                load_instr->num_components,
+                                load_instr->dest.ssa.bit_size);
 
       nir_instr_insert_before(&load_instr->instr, &undef->instr);
       nir_instr_remove(&load_instr->instr);
 
-      nir_ssa_def_rewrite_uses(&load_instr->dest.ssa, &undef->def);
+      nir_def_rewrite_uses(&load_instr->dest.ssa, &undef->def);
       return true;
    }
 
@@ -609,8 +609,8 @@ rename_variables(struct lower_variables_state *state)
             nir_instr_insert_before(&intrin->instr, &mov->instr);
             nir_instr_remove(&intrin->instr);
 
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                     &mov->dest.dest.ssa);
+            nir_def_rewrite_uses(&intrin->dest.ssa,
+                                 &mov->dest.dest.ssa);
             break;
          }
 
@@ -626,7 +626,7 @@ rename_variables(struct lower_variables_state *state)
             /* Should have been removed before rename_variables(). */
             assert(node != UNDEF_NODE);
 
-            nir_ssa_def *value = intrin->src[1].ssa;
+            nir_def *value = intrin->src[1].ssa;
 
             if (!node->lower_to_ssa)
                continue;
@@ -634,7 +634,7 @@ rename_variables(struct lower_variables_state *state)
             assert(intrin->num_components ==
                    glsl_get_vector_elements(node->type));
 
-            nir_ssa_def *new_def;
+            nir_def *new_def;
             b.cursor = nir_before_instr(&intrin->instr);
 
             unsigned wrmask = nir_intrinsic_write_mask(intrin);
@@ -650,13 +650,13 @@ rename_variables(struct lower_variables_state *state)
                new_def = nir_swizzle(&b, value, swiz,
                                      intrin->num_components);
             } else {
-               nir_ssa_def *old_def =
+               nir_def *old_def =
                   nir_phi_builder_value_get_block_def(node->pb_value, block);
                /* For writemasked store_var intrinsics, we combine the newly
                 * written values with the existing contents of unwritten
                 * channels, creating a new SSA value for the whole vector.
                 */
-               nir_ssa_scalar srcs[NIR_MAX_VEC_COMPONENTS];
+               nir_scalar srcs[NIR_MAX_VEC_COMPONENTS];
                for (unsigned i = 0; i < intrin->num_components; i++) {
                   if (wrmask & (1 << i)) {
                      srcs[i] = nir_get_ssa_scalar(value, i);
index 9e84dd4..eb5cfc3 100644
@@ -58,10 +58,10 @@ lower_vec3_to_vec4_instr(nir_builder *b, nir_instr *instr, void *data)
          intrin->dest.ssa.num_components = 4;
 
          b->cursor = nir_after_instr(&intrin->instr);
-         nir_ssa_def *vec3 = nir_trim_vector(b, &intrin->dest.ssa, 3);
-         nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
-                                        vec3,
-                                        vec3->parent_instr);
+         nir_def *vec3 = nir_trim_vector(b, &intrin->dest.ssa, 3);
+         nir_def_rewrite_uses_after(&intrin->dest.ssa,
+                                    vec3,
+                                    vec3->parent_instr);
          return true;
       }
 
@@ -73,7 +73,7 @@ lower_vec3_to_vec4_instr(nir_builder *b, nir_instr *instr, void *data)
          if (!nir_deref_mode_is_in_set(deref, modes))
             break;
 
-         nir_ssa_def *data = intrin->src[1].ssa;
+         nir_def *data = intrin->src[1].ssa;
 
          b->cursor = nir_before_instr(&intrin->instr);
          unsigned swiz[] = { 0, 1, 2, 2 };
index d820bd2..51c3728 100644
@@ -26,11 +26,11 @@ struct data {
  * which ones have been processed.
  */
 static unsigned
-insert_store(nir_builder *b, nir_ssa_def *reg, nir_alu_instr *vec,
+insert_store(nir_builder *b, nir_def *reg, nir_alu_instr *vec,
              unsigned start_idx)
 {
    assert(start_idx < nir_op_infos[vec->op].num_inputs);
-   nir_ssa_def *src = vec->src[start_idx].src.ssa;
+   nir_def *src = vec->src[start_idx].src.ssa;
 
    unsigned num_components = nir_dest_num_components(vec->dest.dest);
    assert(num_components == nir_op_infos[vec->op].num_inputs);
@@ -69,7 +69,7 @@ has_replicated_dest(nir_alu_instr *alu)
  * can then call insert_mov as normal.
  */
 static unsigned
-try_coalesce(nir_builder *b, nir_ssa_def *reg, nir_alu_instr *vec,
+try_coalesce(nir_builder *b, nir_def *reg, nir_alu_instr *vec,
              unsigned start_idx, struct data *data)
 {
    assert(start_idx < nir_op_infos[vec->op].num_inputs);
@@ -213,8 +213,8 @@ lower(nir_builder *b, nir_instr *instr, void *data_)
 
    if (need_reg) {
       /* We'll replace with a register. Declare one for the purpose. */
-      nir_ssa_def *reg = nir_decl_reg(b, num_components,
-                                      nir_dest_bit_size(vec->dest.dest), 0);
+      nir_def *reg = nir_decl_reg(b, num_components,
+                                  nir_dest_bit_size(vec->dest.dest), 0);
 
       unsigned finished_write_mask = 0;
       for (unsigned i = 0; i < num_components; i++) {
@@ -237,9 +237,9 @@ lower(nir_builder *b, nir_instr *instr, void *data_)
       }
 
       b->cursor = nir_before_instr(instr);
-      nir_ssa_def *swizzled = nir_swizzle(b, vec->src[0].src.ssa, swiz,
-                                          num_components);
-      nir_ssa_def_rewrite_uses(&vec->dest.dest.ssa, swizzled);
+      nir_def *swizzled = nir_swizzle(b, vec->src[0].src.ssa, swiz,
+                                      num_components);
+      nir_def_rewrite_uses(&vec->dest.dest.ssa, swizzled);
    }
 
    nir_instr_remove(&vec->instr);
index 31b2bf5..292235d 100644
@@ -59,19 +59,19 @@ lower_viewport_transform_instr(nir_builder *b, nir_instr *instr,
    b->cursor = nir_before_instr(instr);
 
    /* Grab the source and viewport */
-   nir_ssa_def *input_point = nir_ssa_for_src(b, intr->src[1], 4);
-   nir_ssa_def *scale = nir_load_viewport_scale(b);
-   nir_ssa_def *offset = nir_load_viewport_offset(b);
+   nir_def *input_point = nir_ssa_for_src(b, intr->src[1], 4);
+   nir_def *scale = nir_load_viewport_scale(b);
+   nir_def *offset = nir_load_viewport_offset(b);
 
    /* World space to normalised device coordinates to screen space */
 
-   nir_ssa_def *w_recip = nir_frcp(b, nir_channel(b, input_point, 3));
+   nir_def *w_recip = nir_frcp(b, nir_channel(b, input_point, 3));
 
-   nir_ssa_def *ndc_point = nir_fmul(b, nir_trim_vector(b, input_point, 3),
-                                     w_recip);
+   nir_def *ndc_point = nir_fmul(b, nir_trim_vector(b, input_point, 3),
+                                 w_recip);
 
-   nir_ssa_def *screen = nir_fadd(b, nir_fmul(b, ndc_point, scale),
-                                  offset);
+   nir_def *screen = nir_fadd(b, nir_fmul(b, ndc_point, scale),
+                              offset);
 
    /* gl_Position will be written out in screenspace xyz, with w set to
     * the reciprocal we computed earlier. The transformed w component is
@@ -80,11 +80,11 @@ lower_viewport_transform_instr(nir_builder *b, nir_instr *instr,
     * used in depth clipping computations
     */
 
-   nir_ssa_def *screen_space = nir_vec4(b,
-                                        nir_channel(b, screen, 0),
-                                        nir_channel(b, screen, 1),
-                                        nir_channel(b, screen, 2),
-                                        w_recip);
+   nir_def *screen_space = nir_vec4(b,
+                                    nir_channel(b, screen, 0),
+                                    nir_channel(b, screen, 1),
+                                    nir_channel(b, screen, 2),
+                                    w_recip);
 
    nir_instr_rewrite_src(instr, &intr->src[1],
                          nir_src_for_ssa(screen_space));
index b80221b..1726425 100644
 static void
 update_fragcoord(nir_builder *b, nir_intrinsic_instr *intr)
 {
-   nir_ssa_def *wpos = &intr->dest.ssa;
+   nir_def *wpos = &intr->dest.ssa;
 
    b->cursor = nir_after_instr(&intr->instr);
 
-   nir_ssa_def *spos = nir_load_sample_pos_or_center(b);
+   nir_def *spos = nir_load_sample_pos_or_center(b);
 
    wpos = nir_fadd(b, wpos,
                    nir_vec4(b,
@@ -60,8 +60,8 @@ update_fragcoord(nir_builder *b, nir_intrinsic_instr *intr)
                             nir_imm_float(b, 0.0f),
                             nir_imm_float(b, 0.0f)));
 
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, wpos,
-                                  wpos->parent_instr);
+   nir_def_rewrite_uses_after(&intr->dest.ssa, wpos,
+                              wpos->parent_instr);
 }
 
 static bool
index 05a347f..2de93dc 100644
@@ -41,7 +41,7 @@ typedef struct {
    nir_variable *transform;
 } lower_wpos_ytransform_state;
 
-static nir_ssa_def *
+static nir_def *
 get_transform(lower_wpos_ytransform_state *state)
 {
    if (state->transform == NULL) {
@@ -60,8 +60,8 @@ get_transform(lower_wpos_ytransform_state *state)
 }
 
 /* NIR equiv of TGSI CMP instruction: */
-static nir_ssa_def *
-nir_cmp(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1, nir_ssa_def *src2)
+static nir_def *
+nir_cmp(nir_builder *b, nir_def *src0, nir_def *src1, nir_def *src2)
 {
    return nir_bcsel(b, nir_flt_imm(b, src0, 0.0), src1, src2);
 }
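
For reference, the per-component TGSI CMP semantics this helper reproduces, as a plain C sketch:

   /* dst = (src0 < 0.0) ? src1 : src2, applied per component */
   float tgsi_cmp(float src0, float src1, float src2)
   {
      return src0 < 0.0f ? src1 : src2;
   }
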
@@ -73,7 +73,7 @@ emit_wpos_adjustment(lower_wpos_ytransform_state *state,
                      float adjX, float adjY[2])
 {
    nir_builder *b = &state->b;
-   nir_ssa_def *wpostrans, *wpos_temp, *wpos_temp_y, *wpos_input;
+   nir_def *wpostrans, *wpos_temp, *wpos_temp_y, *wpos_input;
 
    wpos_input = &intr->dest.ssa;
 
@@ -89,7 +89,7 @@ emit_wpos_adjustment(lower_wpos_ytransform_state *state,
           * or not, which is determined by testing against the inversion
           * state variable used below, which will be either +1 or -1.
           */
-         nir_ssa_def *adj_temp;
+         nir_def *adj_temp;
 
          adj_temp = nir_cmp(b,
                             nir_channel(b, wpostrans, invert ? 2 : 0),
@@ -128,9 +128,9 @@ emit_wpos_adjustment(lower_wpos_ytransform_state *state,
                         nir_channel(b, wpos_temp, 2),
                         nir_channel(b, wpos_temp, 3));
 
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
-                                  wpos_temp,
-                                  wpos_temp->parent_instr);
+   nir_def_rewrite_uses_after(&intr->dest.ssa,
+                              wpos_temp,
+                              wpos_temp->parent_instr);
 }
 
 static void
@@ -226,7 +226,7 @@ static void
 lower_fddy(lower_wpos_ytransform_state *state, nir_alu_instr *fddy)
 {
    nir_builder *b = &state->b;
-   nir_ssa_def *p, *pt, *trans;
+   nir_def *p, *pt, *trans;
 
    b->cursor = nir_before_instr(&fddy->instr);
 
@@ -254,8 +254,8 @@ lower_interp_deref_or_load_baryc_at_offset(lower_wpos_ytransform_state *state,
                                            unsigned offset_src)
 {
    nir_builder *b = &state->b;
-   nir_ssa_def *offset;
-   nir_ssa_def *flip_y;
+   nir_def *offset;
+   nir_def *flip_y;
 
    b->cursor = nir_before_instr(&intr->instr);
 
@@ -274,17 +274,17 @@ lower_load_sample_pos(lower_wpos_ytransform_state *state,
    nir_builder *b = &state->b;
    b->cursor = nir_after_instr(&intr->instr);
 
-   nir_ssa_def *pos = &intr->dest.ssa;
-   nir_ssa_def *scale = nir_channel(b, get_transform(state), 0);
-   nir_ssa_def *neg_scale = nir_channel(b, get_transform(state), 2);
+   nir_def *pos = &intr->dest.ssa;
+   nir_def *scale = nir_channel(b, get_transform(state), 0);
+   nir_def *neg_scale = nir_channel(b, get_transform(state), 2);
    /* Either y or 1-y for scale equal to 1 or -1 respectively. */
-   nir_ssa_def *flipped_y =
+   nir_def *flipped_y =
       nir_fadd(b, nir_fmax(b, neg_scale, nir_imm_float(b, 0.0)),
                nir_fmul(b, nir_channel(b, pos, 1), scale));
-   nir_ssa_def *flipped_pos = nir_vec2(b, nir_channel(b, pos, 0), flipped_y);
+   nir_def *flipped_pos = nir_vec2(b, nir_channel(b, pos, 0), flipped_y);
 
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, flipped_pos,
-                                  flipped_pos->parent_instr);
+   nir_def_rewrite_uses_after(&intr->dest.ssa, flipped_pos,
+                              flipped_pos->parent_instr);
 }
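
Substituting the two possible scale values shows where "y or 1-y" comes from; a worked expansion, assuming channel 2 of the transform holds the negated scale:

   /* flipped_y = max(neg_scale, 0) + y * scale
    *   scale = +1, neg_scale = -1:  max(-1, 0) + y  =  y      (no flip)
    *   scale = -1, neg_scale = +1:  max(+1, 0) - y  =  1 - y  (flipped)
    */
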
 
 static bool
index 5485b67..e57ef49 100644
@@ -110,8 +110,8 @@ split_wrmask(nir_builder *b, nir_intrinsic_instr *intr)
       unsigned first_component = ffs(wrmask) - 1;
       unsigned length = ffs(~(wrmask >> first_component)) - 1;
 
-      nir_ssa_def *value = nir_ssa_for_src(b, intr->src[value_idx], num_comp);
-      nir_ssa_def *offset = nir_ssa_for_src(b, intr->src[offset_idx], 1);
+      nir_def *value = nir_ssa_for_src(b, intr->src[value_idx], num_comp);
+      nir_def *offset = nir_ssa_for_src(b, intr->src[offset_idx], 1);
 
       /* swizzle out the consecutive components that we'll store
        * in this iteration:
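
The elided loop body stores one run of consecutive set bits per iteration; a worked run of the ffs() arithmetic above for a hypothetical wrmask of 0b1101:

   /* pass 1: first_component = ffs(0b1101) - 1 = 0
    *         length = ffs(~(0b1101 >> 0)) - 1 = 1   -> store .x
    * pass 2: wrmask becomes 0b1100, first_component = 2
    *         length = ffs(~(0b1100 >> 2)) - 1 = 2   -> store .zw
    */
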
index 66672e9..4868b8b 100644
@@ -30,7 +30,7 @@ nir_alu_src_type(const nir_alu_instr *instr, unsigned src)
           nir_src_bit_size(instr->src[src].src);
 }
 
-static nir_ssa_scalar
+static nir_scalar
 nir_alu_arg(const nir_alu_instr *alu, unsigned arg, unsigned comp)
 {
    const nir_alu_src *src = &alu->src[arg];
@@ -45,7 +45,7 @@ nir_alu_arg(const nir_alu_instr *alu, unsigned arg, unsigned comp)
  * Tests are in mod_analysis_tests.cpp.
  */
 bool
-nir_mod_analysis(nir_ssa_scalar val, nir_alu_type val_type, unsigned div, unsigned *mod)
+nir_mod_analysis(nir_scalar val, nir_alu_type val_type, unsigned div, unsigned *mod)
 {
    if (div == 1) {
       *mod = 0;
index 09fc679..371388a 100644
@@ -46,7 +46,7 @@
  * considered to *not* dominate the instruction that defines it.
  */
 static bool
-ssa_def_dominates_instr(nir_ssa_def *def, nir_instr *instr)
+ssa_def_dominates_instr(nir_def *def, nir_instr *instr)
 {
    if (instr->index <= def->parent_instr->index) {
       return false;
index ad61df2..2a675d2 100644
@@ -44,13 +44,13 @@ normalize_cubemap_coords(nir_builder *b, nir_instr *instr, void *data)
    if (idx < 0)
       return false;
 
-   nir_ssa_def *orig_coord =
+   nir_def *orig_coord =
       nir_ssa_for_src(b, tex->src[idx].src, nir_tex_instr_src_size(tex, idx));
    assert(orig_coord->num_components >= 3);
 
-   nir_ssa_def *orig_xyz = nir_trim_vector(b, orig_coord, 3);
-   nir_ssa_def *norm = nir_fmax_abs_vec_comp(b, orig_xyz);
-   nir_ssa_def *normalized = nir_fmul(b, orig_coord, nir_frcp(b, norm));
+   nir_def *orig_xyz = nir_trim_vector(b, orig_coord, 3);
+   nir_def *norm = nir_fmax_abs_vec_comp(b, orig_xyz);
+   nir_def *normalized = nir_fmul(b, orig_coord, nir_frcp(b, norm));
 
    /* Array indices don't have to be normalized, so make a new vector
     * with the coordinate's array index untouched.
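
As a worked example of the normalization above: for coord = (0.5, -2.0, 1.0), norm = max(|x|, |y|, |z|) = 2.0 and the coordinate becomes (0.25, -1.0, 0.5), putting the major axis exactly on the ±1 cube face.
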
index 13d5b1d..eebf872 100644
@@ -49,7 +49,7 @@ struct access_state {
 };
 
 static void
-gather_buffer_access(struct access_state *state, nir_ssa_def *def, bool read, bool write)
+gather_buffer_access(struct access_state *state, nir_def *def, bool read, bool write)
 {
    state->buffers_read |= read;
    state->buffers_written |= write;
index d015cbe..1a05634 100644
@@ -123,7 +123,7 @@ combine_stores(struct combine_stores_state *state,
    /* Build a new vec, to be used as source for the combined store.  As it
     * gets built, remove previous stores that are not needed anymore.
     */
-   nir_ssa_scalar comps[NIR_MAX_VEC_COMPONENTS] = { 0 };
+   nir_scalar comps[NIR_MAX_VEC_COMPONENTS] = { 0 };
    unsigned num_components = glsl_get_vector_elements(combo->dst->type);
    unsigned bit_size = combo->latest->src[1].ssa->bit_size;
    for (unsigned i = 0; i < num_components; i++) {
@@ -141,11 +141,11 @@ combine_stores(struct combine_stores_state *state,
          if (--store->instr.pass_flags == 0 && store != combo->latest)
             nir_instr_remove(&store->instr);
       } else {
-         comps[i] = nir_get_ssa_scalar(nir_ssa_undef(&state->b, 1, bit_size), 0);
+         comps[i] = nir_get_ssa_scalar(nir_undef(&state->b, 1, bit_size), 0);
       }
    }
    assert(combo->latest->instr.pass_flags == 0);
-   nir_ssa_def *vec = nir_vec_scalars(&state->b, comps, num_components);
+   nir_def *vec = nir_vec_scalars(&state->b, comps, num_components);
 
    /* Fix the latest store with the combined information. */
    nir_intrinsic_instr *store = combo->latest;
index cb4479e..a5e14e7 100644
@@ -179,19 +179,19 @@ rewrite_compare_instruction(nir_builder *bld, nir_alu_instr *orig_cmp,
     * zero_on_left is false, the resulting compare instruction is (fcmp,
     * (fadd, x, y), 0.0) and x = a and y = -b.
     */
-   nir_ssa_def *const a = nir_ssa_for_alu_src(bld, orig_cmp, 0);
-   nir_ssa_def *const b = nir_ssa_for_alu_src(bld, orig_cmp, 1);
+   nir_def *const a = nir_ssa_for_alu_src(bld, orig_cmp, 0);
+   nir_def *const b = nir_ssa_for_alu_src(bld, orig_cmp, 1);
 
-   nir_ssa_def *const fadd = zero_on_left
-                                ? nir_fadd(bld, b, nir_fneg(bld, a))
-                                : nir_fadd(bld, a, nir_fneg(bld, b));
+   nir_def *const fadd = zero_on_left
+                            ? nir_fadd(bld, b, nir_fneg(bld, a))
+                            : nir_fadd(bld, a, nir_fneg(bld, b));
 
-   nir_ssa_def *const zero =
+   nir_def *const zero =
       nir_imm_floatN_t(bld, 0.0, orig_add->dest.dest.ssa.bit_size);
 
-   nir_ssa_def *const cmp = zero_on_left
-                               ? nir_build_alu(bld, orig_cmp->op, zero, fadd, NULL, NULL)
-                               : nir_build_alu(bld, orig_cmp->op, fadd, zero, NULL, NULL);
+   nir_def *const cmp = zero_on_left
+                           ? nir_build_alu(bld, orig_cmp->op, zero, fadd, NULL, NULL)
+                           : nir_build_alu(bld, orig_cmp->op, fadd, zero, NULL, NULL);
 
    /* Generating extra moves of the results is the easy way to make sure the
     * writemasks match the original instructions.  Later optimization passes
@@ -214,10 +214,10 @@ rewrite_compare_instruction(nir_builder *bld, nir_alu_instr *orig_cmp,
 
    nir_builder_instr_insert(bld, &mov_cmp->instr);
 
-   nir_ssa_def_rewrite_uses(&orig_cmp->dest.dest.ssa,
-                            &mov_cmp->dest.dest.ssa);
-   nir_ssa_def_rewrite_uses(&orig_add->dest.dest.ssa,
-                            &mov_add->dest.dest.ssa);
+   nir_def_rewrite_uses(&orig_cmp->dest.dest.ssa,
+                        &mov_cmp->dest.dest.ssa);
+   nir_def_rewrite_uses(&orig_add->dest.dest.ssa,
+                        &mov_add->dest.dest.ssa);
 
    /* We know these have no more uses because we just rewrote them all, so we
     * can remove them.
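
The two shapes the rewrite produces, spelled out:

   /* zero_on_left:  cmp(a, b)  ->  cmp(0.0, b + (-a))
    * otherwise:     cmp(a, b)  ->  cmp(a + (-b), 0.0)
    * i.e. the pass relies on a OP b == (a - b) OP 0 for the float
    * comparisons it targets.
    */
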
index e714019..5066249 100644
@@ -80,7 +80,7 @@ nir_opt_conditional_discard_block(nir_builder *b, nir_block *block)
 
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
    nir_intrinsic_op op = intrin->intrinsic;
-   nir_ssa_def *cond = if_stmt->condition.ssa;
+   nir_def *cond = if_stmt->condition.ssa;
    b->cursor = nir_before_cf_node(prev_node);
 
    switch (intrin->intrinsic) {
index 7f854dc..3d2aefc 100644
@@ -84,10 +84,10 @@ try_fold_alu(nir_builder *b, nir_alu_instr *alu)
                          b->shader->info.float_controls_execution_mode);
 
    b->cursor = nir_before_instr(&alu->instr);
-   nir_ssa_def *imm = nir_build_imm(b, alu->dest.dest.ssa.num_components,
-                                    alu->dest.dest.ssa.bit_size,
-                                    dest);
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, imm);
+   nir_def *imm = nir_build_imm(b, alu->dest.dest.ssa.num_components,
+                                alu->dest.dest.ssa.bit_size,
+                                dest);
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, imm);
    nir_instr_remove(&alu->instr);
    nir_instr_free(&alu->instr);
 
@@ -208,9 +208,9 @@ try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
       nir_const_value *v = const_value_for_deref(deref);
       if (v) {
          b->cursor = nir_before_instr(&intrin->instr);
-         nir_ssa_def *val = nir_build_imm(b, intrin->dest.ssa.num_components,
-                                          intrin->dest.ssa.bit_size, v);
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, val);
+         nir_def *val = nir_build_imm(b, intrin->dest.ssa.num_components,
+                                      intrin->dest.ssa.bit_size, v);
+         nir_def_rewrite_uses(&intrin->dest.ssa, val);
          nir_instr_remove(&intrin->instr);
          return true;
       }
@@ -231,10 +231,10 @@ try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
       assert(base + range <= b->shader->constant_data_size);
 
       b->cursor = nir_before_instr(&intrin->instr);
-      nir_ssa_def *val;
+      nir_def *val;
       if (offset >= range) {
-         val = nir_ssa_undef(b, intrin->dest.ssa.num_components,
-                             intrin->dest.ssa.bit_size);
+         val = nir_undef(b, intrin->dest.ssa.num_components,
+                         intrin->dest.ssa.bit_size);
       } else {
          nir_const_value imm[NIR_MAX_VEC_COMPONENTS];
          memset(imm, 0, sizeof(imm));
@@ -249,7 +249,7 @@ try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
          val = nir_build_imm(b, intrin->dest.ssa.num_components,
                              intrin->dest.ssa.bit_size, imm);
       }
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, val);
+      nir_def_rewrite_uses(&intrin->dest.ssa, val);
       nir_instr_remove(&intrin->instr);
       return true;
    }
@@ -273,8 +273,8 @@ try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
        * the data is constant.
        */
       if (nir_src_is_const(intrin->src[0])) {
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                  intrin->src[0].ssa);
+         nir_def_rewrite_uses(&intrin->dest.ssa,
+                              intrin->src[0].ssa);
          nir_instr_remove(&intrin->instr);
          return true;
       }
@@ -284,8 +284,8 @@ try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    case nir_intrinsic_vote_ieq:
       if (nir_src_is_const(intrin->src[0])) {
          b->cursor = nir_before_instr(&intrin->instr);
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                  nir_imm_true(b));
+         nir_def_rewrite_uses(&intrin->dest.ssa,
+                              nir_imm_true(b));
          nir_instr_remove(&intrin->instr);
          return true;
       }
index 6ece986..c5e8d0c 100644
@@ -82,7 +82,7 @@ struct value {
    bool is_ssa;
    union {
       struct {
-         nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS];
+         nir_def *def[NIR_MAX_VEC_COMPONENTS];
          uint8_t component[NIR_MAX_VEC_COMPONENTS];
       } ssa;
       nir_deref_and_path deref;
@@ -90,7 +90,7 @@ struct value {
 };
 
 static void
-value_set_ssa_components(struct value *value, nir_ssa_def *def,
+value_set_ssa_components(struct value *value, nir_def *def,
                          unsigned num_components)
 {
    value->is_ssa = true;
@@ -655,8 +655,8 @@ load_element_from_ssa_entry_value(struct copy_prop_var_state *state,
 
    assert(entry->src.ssa.component[index] <
           entry->src.ssa.def[index]->num_components);
-   nir_ssa_def *def = nir_channel(b, entry->src.ssa.def[index],
-                                  entry->src.ssa.component[index]);
+   nir_def *def = nir_channel(b, entry->src.ssa.def[index],
+                              entry->src.ssa.component[index]);
 
    *value = (struct value){
       .is_ssa = true,
@@ -727,7 +727,7 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
 
    if (available != (1 << num_components) - 1 &&
        intrin->intrinsic == nir_intrinsic_load_deref &&
-       (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
+       (available & nir_def_components_read(&intrin->dest.ssa)) == 0) {
       /* If none of the components read are available as SSA values, then we
        * should just bail.  Otherwise, we would end up replacing the uses of
       * the load_deref with a vecN() that just gathers up its components.
@@ -737,11 +737,11 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
 
    b->cursor = nir_after_instr(&intrin->instr);
 
-   nir_ssa_def *load_def =
+   nir_def *load_def =
       intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
 
    bool keep_intrin = false;
-   nir_ssa_scalar comps[NIR_MAX_VEC_COMPONENTS];
+   nir_scalar comps[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < num_components; i++) {
       if (value->ssa.def[i]) {
          comps[i] = nir_get_ssa_scalar(value->ssa.def[i], value->ssa.component[i]);
@@ -759,7 +759,7 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
       }
    }
 
-   nir_ssa_def *vec = nir_vec_scalars(b, comps, num_components);
+   nir_def *vec = nir_vec_scalars(b, comps, num_components);
    value_set_ssa_components(value, vec, num_components);
 
    if (!keep_intrin) {
@@ -1072,8 +1072,8 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
             /* Loading from an invalid index yields an undef */
             if (vec_index >= vec_comps) {
                b->cursor = nir_instr_remove(instr);
-               nir_ssa_def *u = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa, u);
+               nir_def *u = nir_undef(b, 1, intrin->dest.ssa.bit_size);
+               nir_def_rewrite_uses(&intrin->dest.ssa, u);
                state->progress = true;
                break;
             }
@@ -1097,12 +1097,12 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
                    * We need to be careful when rewriting uses so we don't
                    * rewrite the vecN itself.
                    */
-                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
-                                                 value.ssa.def[0],
-                                                 value.ssa.def[0]->parent_instr);
+                  nir_def_rewrite_uses_after(&intrin->dest.ssa,
+                                             value.ssa.def[0],
+                                             value.ssa.def[0]->parent_instr);
                } else {
-                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                           value.ssa.def[0]);
+                  nir_def_rewrite_uses(&intrin->dest.ssa,
+                                       value.ssa.def[0]);
                }
             } else {
                /* We're turning it into a load of a different variable */
index 8bc2f3e..bf7e732 100644
@@ -69,8 +69,8 @@ rewrite_to_vec(nir_alu_instr *mov, nir_alu_instr *vec)
    for (unsigned i = 0; i < num_comp; i++)
       new_vec->src[i] = vec->src[mov->src[0].swizzle[i]];
 
-   nir_ssa_def *new = nir_builder_alu_instr_finish_and_insert(&b, new_vec);
-   nir_ssa_def_rewrite_uses(&mov->dest.dest.ssa, new);
+   nir_def *new = nir_builder_alu_instr_finish_and_insert(&b, new_vec);
+   nir_def_rewrite_uses(&mov->dest.dest.ssa, new);
 
    /* If we remove "mov" and it's the next instruction in the
     * nir_foreach_instr_safe() loop, then we would end copy-propagation early. */
@@ -81,7 +81,7 @@ rewrite_to_vec(nir_alu_instr *mov, nir_alu_instr *vec)
 static bool
 copy_propagate_alu(nir_alu_src *src, nir_alu_instr *copy)
 {
-   nir_ssa_def *def = NULL;
+   nir_def *def = NULL;
    nir_alu_instr *user = nir_instr_as_alu(src->src.parent_instr);
    unsigned src_idx = src - user->src;
    assert(src_idx < nir_op_infos[user->op].num_inputs);
@@ -115,7 +115,7 @@ copy_propagate(nir_src *src, nir_alu_instr *copy)
    if (!is_swizzleless_move(copy))
       return false;
 
-   nir_src_rewrite_ssa(src, copy->src[0].src.ssa);
+   nir_src_rewrite(src, copy->src[0].src.ssa);
 
    return true;
 }
@@ -140,7 +140,7 @@ copy_prop_instr(nir_instr *instr)
          progress |= copy_propagate(src, mov);
    }
 
-   if (progress && nir_ssa_def_is_unused(&mov->dest.dest.ssa))
+   if (progress && nir_def_is_unused(&mov->dest.dest.ssa))
       nir_instr_remove(&mov->instr);
 
    return progress;
index 5f000ed..118c677 100644
@@ -85,7 +85,7 @@ is_live(BITSET_WORD *defs_live, nir_instr *instr)
       return BITSET_TEST(defs_live, lc->def.index);
    }
    case nir_instr_type_ssa_undef: {
-      nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);
+      nir_undef_instr *undef = nir_instr_as_ssa_undef(instr);
       return BITSET_TEST(defs_live, undef->def.index);
    }
    case nir_instr_type_parallel_copy: {
index ab7e891..ed61542 100644
@@ -100,7 +100,7 @@ opt_constant_if(nir_if *if_stmt, bool condition)
        */
       nir_block *after = nir_cf_node_as_block(nir_cf_node_next(&if_stmt->cf_node));
       nir_foreach_phi_safe(phi, after) {
-         nir_ssa_def *def = NULL;
+         nir_def *def = NULL;
          nir_foreach_phi_src(phi_src, phi) {
             if (phi_src->pred != last_block)
                continue;
@@ -109,7 +109,7 @@ opt_constant_if(nir_if *if_stmt, bool condition)
          }
 
          assert(def);
-         nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);
+         nir_def_rewrite_uses(&phi->dest.ssa, def);
          nir_instr_remove(&phi->instr);
       }
    }
@@ -125,7 +125,7 @@ opt_constant_if(nir_if *if_stmt, bool condition)
 }
 
 static bool
-def_only_used_in_cf_node(nir_ssa_def *def, void *_node)
+def_only_used_in_cf_node(nir_def *def, void *_node)
 {
    nir_cf_node *node = _node;
    assert(node->type == nir_cf_node_loop || node->type == nir_cf_node_if);
index aacf653..7b74ba2 100644
@@ -32,9 +32,9 @@
  */
 
 static bool
-ssa_def_is_source_depth(nir_ssa_def *def)
+ssa_def_is_source_depth(nir_def *def)
 {
-   nir_ssa_scalar scalar = nir_ssa_scalar_resolved(def, 0);
+   nir_scalar scalar = nir_scalar_resolved(def, 0);
    nir_instr *instr = scalar.def->parent_instr;
    if (instr->type != nir_instr_type_intrinsic)
       return false;
index 90553d4..c142f60 100644
@@ -625,7 +625,7 @@ gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);
  * as close to the LCA as possible while trying to stay out of loops.
  */
 static bool
-gcm_schedule_late_def(nir_ssa_def *def, void *void_state)
+gcm_schedule_late_def(nir_def *def, void *void_state)
 {
    struct gcm_state *state = void_state;
 
@@ -733,18 +733,18 @@ gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
 }
 
 static bool
-gcm_replace_def_with_undef(nir_ssa_def *def, void *void_state)
+gcm_replace_def_with_undef(nir_def *def, void *void_state)
 {
    struct gcm_state *state = void_state;
 
-   if (nir_ssa_def_is_unused(def))
+   if (nir_def_is_unused(def))
       return true;
 
-   nir_ssa_undef_instr *undef =
-      nir_ssa_undef_instr_create(state->impl->function->shader,
-                                 def->num_components, def->bit_size);
+   nir_undef_instr *undef =
+      nir_undef_instr_create(state->impl->function->shader,
+                             def->num_components, def->bit_size);
    nir_instr_insert(nir_before_cf_list(&state->impl->body), &undef->instr);
-   nir_ssa_def_rewrite_uses(def, &undef->def);
+   nir_def_rewrite_uses(def, &undef->def);
 
    return true;
 }
index 1e546df..06c4c17 100644
@@ -26,8 +26,8 @@
 #include "nir.h"
 #include "nir_builder.h"
 
-static nir_ssa_def *
-build_udiv(nir_builder *b, nir_ssa_def *n, uint64_t d)
+static nir_def *
+build_udiv(nir_builder *b, nir_def *n, uint64_t d)
 {
    if (d == 0) {
       return nir_imm_intN_t(b, 0, n->bit_size);
@@ -49,8 +49,8 @@ build_udiv(nir_builder *b, nir_ssa_def *n, uint64_t d)
    }
 }
 
-static nir_ssa_def *
-build_umod(nir_builder *b, nir_ssa_def *n, uint64_t d)
+static nir_def *
+build_umod(nir_builder *b, nir_def *n, uint64_t d)
 {
    if (d == 0) {
       return nir_imm_intN_t(b, 0, n->bit_size);
@@ -61,8 +61,8 @@ build_umod(nir_builder *b, nir_ssa_def *n, uint64_t d)
    }
 }
 
-static nir_ssa_def *
-build_idiv(nir_builder *b, nir_ssa_def *n, int64_t d)
+static nir_def *
+build_idiv(nir_builder *b, nir_def *n, int64_t d)
 {
    int64_t int_min = u_intN_min(n->bit_size);
    if (d == int_min)
@@ -77,15 +77,15 @@ build_idiv(nir_builder *b, nir_ssa_def *n, int64_t d)
    } else if (d == -1) {
       return nir_ineg(b, n);
    } else if (util_is_power_of_two_or_zero64(abs_d)) {
-      nir_ssa_def *uq = nir_ushr_imm(b, nir_iabs(b, n), util_logbase2_64(abs_d));
-      nir_ssa_def *n_neg = nir_ilt_imm(b, n, 0);
-      nir_ssa_def *neg = d < 0 ? nir_inot(b, n_neg) : n_neg;
+      nir_def *uq = nir_ushr_imm(b, nir_iabs(b, n), util_logbase2_64(abs_d));
+      nir_def *n_neg = nir_ilt_imm(b, n, 0);
+      nir_def *neg = d < 0 ? nir_inot(b, n_neg) : n_neg;
       return nir_bcsel(b, neg, nir_ineg(b, uq), uq);
    } else {
       struct util_fast_sdiv_info m =
          util_compute_fast_sdiv_info(d, n->bit_size);
 
-      nir_ssa_def *res =
+      nir_def *res =
          nir_imul_high(b, n, nir_imm_intN_t(b, m.multiplier, n->bit_size));
       if (d > 0 && m.multiplier < 0)
          res = nir_iadd(b, res, n);
@@ -99,8 +99,8 @@ build_idiv(nir_builder *b, nir_ssa_def *n, int64_t d)
    }
 }
 
-static nir_ssa_def *
-build_irem(nir_builder *b, nir_ssa_def *n, int64_t d)
+static nir_def *
+build_irem(nir_builder *b, nir_def *n, int64_t d)
 {
    int64_t int_min = u_intN_min(n->bit_size);
    if (d == 0) {
@@ -110,8 +110,8 @@ build_irem(nir_builder *b, nir_ssa_def *n, int64_t d)
    } else {
       d = d < 0 ? -d : d;
       if (util_is_power_of_two_or_zero64(d)) {
-         nir_ssa_def *tmp = nir_bcsel(b, nir_ilt_imm(b, n, 0),
-                                      nir_iadd_imm(b, n, d - 1), n);
+         nir_def *tmp = nir_bcsel(b, nir_ilt_imm(b, n, 0),
+                                  nir_iadd_imm(b, n, d - 1), n);
          return nir_isub(b, n, nir_iand_imm(b, tmp, -d));
       } else {
          return nir_isub(b, n, nir_imul_imm(b, build_idiv(b, n, d), d));
@@ -119,28 +119,28 @@ build_irem(nir_builder *b, nir_ssa_def *n, int64_t d)
    }
 }
 
-static nir_ssa_def *
-build_imod(nir_builder *b, nir_ssa_def *n, int64_t d)
+static nir_def *
+build_imod(nir_builder *b, nir_def *n, int64_t d)
 {
    int64_t int_min = u_intN_min(n->bit_size);
    if (d == 0) {
       return nir_imm_intN_t(b, 0, n->bit_size);
    } else if (d == int_min) {
-      nir_ssa_def *int_min_def = nir_imm_intN_t(b, int_min, n->bit_size);
-      nir_ssa_def *is_neg_not_int_min = nir_ult(b, int_min_def, n);
-      nir_ssa_def *is_zero = nir_ieq_imm(b, n, 0);
+      nir_def *int_min_def = nir_imm_intN_t(b, int_min, n->bit_size);
+      nir_def *is_neg_not_int_min = nir_ult(b, int_min_def, n);
+      nir_def *is_zero = nir_ieq_imm(b, n, 0);
       return nir_bcsel(b, nir_ior(b, is_neg_not_int_min, is_zero), n, nir_iadd(b, int_min_def, n));
    } else if (d > 0 && util_is_power_of_two_or_zero64(d)) {
       return nir_iand_imm(b, n, d - 1);
    } else if (d < 0 && util_is_power_of_two_or_zero64(-d)) {
-      nir_ssa_def *d_def = nir_imm_intN_t(b, d, n->bit_size);
-      nir_ssa_def *res = nir_ior(b, n, d_def);
+      nir_def *d_def = nir_imm_intN_t(b, d, n->bit_size);
+      nir_def *res = nir_ior(b, n, d_def);
       return nir_bcsel(b, nir_ieq(b, res, d_def), nir_imm_intN_t(b, 0, n->bit_size), res);
    } else {
-      nir_ssa_def *rem = build_irem(b, n, d);
-      nir_ssa_def *zero = nir_imm_intN_t(b, 0, n->bit_size);
-      nir_ssa_def *sign_same = d < 0 ? nir_ilt(b, n, zero) : nir_ige(b, n, zero);
-      nir_ssa_def *rem_zero = nir_ieq(b, rem, zero);
+      nir_def *rem = build_irem(b, n, d);
+      nir_def *zero = nir_imm_intN_t(b, 0, n->bit_size);
+      nir_def *sign_same = d < 0 ? nir_ilt(b, n, zero) : nir_ige(b, n, zero);
+      nir_def *rem_zero = nir_ieq(b, rem, zero);
       return nir_bcsel(b, nir_ior(b, rem_zero, sign_same), rem, nir_iadd_imm(b, rem, d));
    }
 }
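
The power-of-two fast paths are easy to check by hand; a worked example for n = -7, d = 4, following C-style truncation for idiv/irem and flooring semantics for imod:

   /* idiv: uq = iabs(-7) >> log2(4) = 1; n < 0, d > 0, so
    *       result = -uq = -1                  (-7 / 4 == -1)
    * irem: tmp = -7 + (4 - 1) = -4;
    *       -7 - (tmp & -4) = -7 - (-4) = -3   (-7 % 4 == -3)
    * imod: -7 & (4 - 1) = 1                   (mod(-7, 4) == 1)
    */
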
@@ -171,11 +171,11 @@ nir_opt_idiv_const_instr(nir_builder *b, nir_instr *instr, void *user_data)
 
    b->cursor = nir_before_instr(&alu->instr);
 
-   nir_ssa_def *q[NIR_MAX_VEC_COMPONENTS];
+   nir_def *q[NIR_MAX_VEC_COMPONENTS];
    for (unsigned comp = 0; comp < alu->dest.dest.ssa.num_components; comp++) {
       /* Get the numerator for the channel */
-      nir_ssa_def *n = nir_channel(b, alu->src[0].src.ssa,
-                                   alu->src[0].swizzle[comp]);
+      nir_def *n = nir_channel(b, alu->src[0].src.ssa,
+                               alu->src[0].swizzle[comp]);
 
       /* Get the denominator for the channel */
       int64_t d = nir_src_comp_as_int(alu->src[1].src,
@@ -212,8 +212,8 @@ nir_opt_idiv_const_instr(nir_builder *b, nir_instr *instr, void *user_data)
       }
    }
 
-   nir_ssa_def *qvec = nir_vec(b, q, alu->dest.dest.ssa.num_components);
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, qvec);
+   nir_def *qvec = nir_vec(b, q, alu->dest.dest.ssa.num_components);
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, qvec);
    nir_instr_remove(&alu->instr);
 
    return true;
index d65a4d0..32ae526 100644
@@ -27,9 +27,9 @@
 #include "nir_control_flow.h"
 #include "nir_loop_analyze.h"
 
-static nir_ssa_def *clone_alu_and_replace_src_defs(nir_builder *b,
-                                                   const nir_alu_instr *alu,
-                                                   nir_ssa_def **src_defs);
+static nir_def *clone_alu_and_replace_src_defs(nir_builder *b,
+                                               const nir_alu_instr *alu,
+                                               nir_def **src_defs);
 
 /**
  * Gets the single block that jumps back to the loop header. Already assumes
@@ -158,7 +158,7 @@ opt_peel_loop_initial_if(nir_loop *loop)
 
    nir_if *nif = nir_cf_node_as_if(if_node);
 
-   nir_ssa_def *cond = nif->condition.ssa;
+   nir_def *cond = nif->condition.ssa;
    if (cond->parent_instr->type != nir_instr_type_phi)
       return false;
 
@@ -408,8 +408,8 @@ opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
       bool all_non_phi_exist_in_prev_block = true;
       bool is_prev_result_undef = true;
       bool is_prev_result_const = true;
-      nir_ssa_def *prev_srcs[8];     // FINISHME: Array size?
-      nir_ssa_def *continue_srcs[8]; // FINISHME: Array size?
+      nir_def *prev_srcs[8];     // FINISHME: Array size?
+      nir_def *continue_srcs[8]; // FINISHME: Array size?
 
       for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
          nir_instr *const src_instr = alu->src[i].src.ssa->parent_instr;
@@ -479,7 +479,7 @@ opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
 
       /* Split ALU of Phi */
       b->cursor = nir_after_block(prev_block);
-      nir_ssa_def *prev_value = clone_alu_and_replace_src_defs(b, alu, prev_srcs);
+      nir_def *prev_value = clone_alu_and_replace_src_defs(b, alu, prev_srcs);
 
       /* Make a copy of the original ALU instruction.  Replace the sources
        * of the new instruction that read a phi with an undef source from
@@ -489,7 +489,7 @@ opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
        */
       b->cursor = nir_after_block_before_jump(continue_block);
 
-      nir_ssa_def *const alu_copy =
+      nir_def *const alu_copy =
          clone_alu_and_replace_src_defs(b, alu, continue_srcs);
 
       /* Make a new phi node that selects a value from prev_block and the
@@ -508,8 +508,8 @@ opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
       /* Modify all readers of the original ALU instruction to read the
        * result of the phi.
        */
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
-                               &phi->dest.ssa);
+      nir_def_rewrite_uses(&alu->dest.dest.ssa,
+                           &phi->dest.ssa);
 
       /* Since the original ALU instruction no longer has any readers, just
        * remove it.
@@ -673,8 +673,8 @@ opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
       /* Modify all readers of the bcsel instruction to read the result of
        * the phi.
        */
-      nir_ssa_def_rewrite_uses(&bcsel->dest.dest.ssa,
-                               &phi->dest.ssa);
+      nir_def_rewrite_uses(&bcsel->dest.dest.ssa,
+                           &phi->dest.ssa);
 
       /* Since the original bcsel instruction no longer has any readers,
        * just remove it.
@@ -873,7 +873,7 @@ opt_if_simplification(nir_builder *b, nir_if *nif)
    /* Insert the inverted instruction and rewrite the condition. */
    b->cursor = nir_after_instr(&alu_instr->instr);
 
-   nir_ssa_def *new_condition =
+   nir_def *new_condition =
       nir_inot(b, &alu_instr->dest.dest.ssa);
 
    nir_if_rewrite_condition(nif, nir_src_for_ssa(new_condition));
@@ -914,7 +914,7 @@ opt_if_phi_is_condition(nir_builder *b, nir_if *nif)
    /* Grab pointers to the last then/else blocks for looking in the phis. */
    nir_block *then_block = nir_if_last_then_block(nif);
    ASSERTED nir_block *else_block = nir_if_last_else_block(nif);
-   nir_ssa_def *cond = nif->condition.ssa;
+   nir_def *cond = nif->condition.ssa;
    bool progress = false;
 
    nir_block *after_if_block = nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));
@@ -934,23 +934,23 @@ opt_if_phi_is_condition(nir_builder *b, nir_if *nif)
          assert(src->pred == then_block || src->pred == else_block);
          enum opt_bool *pred_val = src->pred == then_block ? &then_val : &else_val;
 
-         nir_ssa_scalar val = nir_ssa_scalar_resolved(src->src.ssa, 0);
-         if (!nir_ssa_scalar_is_const(val))
+         nir_scalar val = nir_scalar_resolved(src->src.ssa, 0);
+         if (!nir_scalar_is_const(val))
             break;
 
-         if (nir_ssa_scalar_as_int(val) == -1)
+         if (nir_scalar_as_int(val) == -1)
             *pred_val = T;
-         else if (nir_ssa_scalar_as_uint(val) == 0)
+         else if (nir_scalar_as_uint(val) == 0)
             *pred_val = F;
          else
             break;
       }
       if (then_val == T && else_val == F) {
-         nir_ssa_def_rewrite_uses(&phi->dest.ssa, cond);
+         nir_def_rewrite_uses(&phi->dest.ssa, cond);
          progress = true;
       } else if (then_val == F && else_val == T) {
          b->cursor = nir_before_cf_node(&nif->cf_node);
-         nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_inot(b, cond));
+         nir_def_rewrite_uses(&phi->dest.ssa, nir_inot(b, cond));
          progress = true;
       }
    }
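
The pattern this loop matches and its replacement, sketched:

   /* if (cond) { ... } else { ... }
    * x = phi(then: true,  else: false)   ->   x = cond
    * x = phi(then: false, else: true)    ->   x = inot(cond)
    */
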
@@ -1177,9 +1177,9 @@ evaluate_if_condition(nir_if *nif, nir_cursor cursor, bool *value)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 clone_alu_and_replace_src_defs(nir_builder *b, const nir_alu_instr *alu,
-                               nir_ssa_def **src_defs)
+                               nir_def **src_defs)
 {
    nir_alu_instr *nalu = nir_alu_instr_create(b->shader, alu->op);
    nalu->exact = alu->exact;
@@ -1256,7 +1256,7 @@ propagate_condition_eval(nir_builder *b, nir_if *nif, nir_src *use_src,
    if (!evaluate_if_condition(nif, b->cursor, &bool_value))
       return false;
 
-   nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS] = { 0 };
+   nir_def *def[NIR_MAX_VEC_COMPONENTS] = { 0 };
    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
       if (alu->src[i].src.ssa == use_src->ssa) {
          def[i] = nir_imm_bool(b, bool_value);
@@ -1265,7 +1265,7 @@ propagate_condition_eval(nir_builder *b, nir_if *nif, nir_src *use_src,
       }
    }
 
-   nir_ssa_def *nalu = clone_alu_and_replace_src_defs(b, alu, def);
+   nir_def *nalu = clone_alu_and_replace_src_defs(b, alu, def);
 
    /* Rewrite use to use new alu instruction */
    nir_src new_src = nir_src_for_ssa(nalu);
@@ -1343,14 +1343,14 @@ opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
 
 static bool
 rewrite_comp_uses_within_if(nir_builder *b, nir_if *nif, bool invert,
-                            nir_ssa_scalar scalar, nir_ssa_scalar new_scalar)
+                            nir_scalar scalar, nir_scalar new_scalar)
 {
    bool progress = false;
 
    nir_block *first = invert ? nir_if_first_else_block(nif) : nir_if_first_then_block(nif);
    nir_block *last = invert ? nir_if_last_else_block(nif) : nir_if_last_then_block(nif);
 
-   nir_ssa_def *new_ssa = NULL;
+   nir_def *new_ssa = NULL;
    nir_foreach_use_safe(use, scalar.def) {
       if (use->parent_instr->block->index < first->index ||
           use->parent_instr->block->index > last->index)
@@ -1371,7 +1371,7 @@ rewrite_comp_uses_within_if(nir_builder *b, nir_if *nif, bool invert,
          b->cursor = nir_before_cf_node(&nif->cf_node);
          new_ssa = nir_channel(b, new_scalar.def, new_scalar.comp);
          if (scalar.def->num_components > 1) {
-            nir_ssa_def *vec = nir_ssa_undef(b, scalar.def->num_components, scalar.def->bit_size);
+            nir_def *vec = nir_undef(b, scalar.def->num_components, scalar.def->bit_size);
             new_ssa = nir_vector_insert_imm(b, vec, new_ssa, scalar.comp);
          }
       }
@@ -1399,17 +1399,17 @@ rewrite_comp_uses_within_if(nir_builder *b, nir_if *nif, bool invert,
  *        use(d)
  */
 static bool
-opt_if_rewrite_uniform_uses(nir_builder *b, nir_if *nif, nir_ssa_scalar cond, bool accept_ine)
+opt_if_rewrite_uniform_uses(nir_builder *b, nir_if *nif, nir_scalar cond, bool accept_ine)
 {
    bool progress = false;
 
-   if (!nir_ssa_scalar_is_alu(cond))
+   if (!nir_scalar_is_alu(cond))
       return false;
 
-   nir_op op = nir_ssa_scalar_alu_op(cond);
+   nir_op op = nir_scalar_alu_op(cond);
    if (op == nir_op_iand) {
-      progress |= opt_if_rewrite_uniform_uses(b, nif, nir_ssa_scalar_chase_alu_src(cond, 0), false);
-      progress |= opt_if_rewrite_uniform_uses(b, nif, nir_ssa_scalar_chase_alu_src(cond, 1), false);
+      progress |= opt_if_rewrite_uniform_uses(b, nif, nir_scalar_chase_alu_src(cond, 0), false);
+      progress |= opt_if_rewrite_uniform_uses(b, nif, nir_scalar_chase_alu_src(cond, 1), false);
       return progress;
    }
 
@@ -1417,8 +1417,8 @@ opt_if_rewrite_uniform_uses(nir_builder *b, nir_if *nif, nir_ssa_scalar cond, bo
       return false;
 
    for (unsigned i = 0; i < 2; i++) {
-      nir_ssa_scalar src_uni = nir_ssa_scalar_chase_alu_src(cond, i);
-      nir_ssa_scalar src_div = nir_ssa_scalar_chase_alu_src(cond, !i);
+      nir_scalar src_uni = nir_scalar_chase_alu_src(cond, i);
+      nir_scalar src_div = nir_scalar_chase_alu_src(cond, !i);
 
       if (src_uni.def->parent_instr->type == nir_instr_type_load_const && src_div.def != src_uni.def)
          return rewrite_comp_uses_within_if(b, nif, op == nir_op_ine, src_div, src_uni);
@@ -1431,8 +1431,8 @@ opt_if_rewrite_uniform_uses(nir_builder *b, nir_if *nif, nir_ssa_scalar cond, bo
           (intrin->intrinsic != nir_intrinsic_reduce || nir_intrinsic_cluster_size(intrin)))
          continue;
 
-      nir_ssa_scalar intrin_src = { intrin->src[0].ssa, src_uni.comp };
-      nir_ssa_scalar resolved_intrin_src = nir_ssa_scalar_resolved(intrin_src.def, intrin_src.comp);
+      nir_scalar intrin_src = { intrin->src[0].ssa, src_uni.comp };
+      nir_scalar resolved_intrin_src = nir_scalar_resolved(intrin_src.def, intrin_src.comp);
 
       if (resolved_intrin_src.comp != src_div.comp || resolved_intrin_src.def != src_div.def)
          continue;
@@ -1646,7 +1646,7 @@ opt_if_safe_cf_list(nir_builder *b, struct exec_list *cf_list)
          progress |= opt_if_safe_cf_list(b, &nif->then_list);
          progress |= opt_if_safe_cf_list(b, &nif->else_list);
          progress |= opt_if_evaluate_condition_use(b, nif);
-         nir_ssa_scalar cond = nir_ssa_scalar_resolved(nif->condition.ssa, 0);
+         nir_scalar cond = nir_scalar_resolved(nif->condition.ssa, 0);
          progress |= opt_if_rewrite_uniform_uses(b, nif, cond, true);
          break;
       }
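
The shape opt_if_rewrite_uniform_uses targets, sketched: inside the then-branch of an ieq between a divergent value and a uniform one (a load_const, or a subgroup broadcast of the same value), uses of the divergent value can read the uniform one instead, since the branch guarantees equality there:

   /* if (a == b) {      // b uniform: load_const or broadcast of a
    *    use(a);    ->      use(b);
    * }
    */
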
index e1e8ab9..d004d2c 100644
@@ -29,7 +29,7 @@
  */
 
 static bool
-src_is_single_use_shuffle(nir_src src, nir_ssa_def **data, nir_ssa_def **index)
+src_is_single_use_shuffle(nir_src src, nir_def **data, nir_def **index)
 {
    nir_intrinsic_instr *shuffle = nir_src_as_intrinsic(src);
    if (shuffle == NULL || shuffle->intrinsic != nir_intrinsic_shuffle)
@@ -42,7 +42,7 @@ src_is_single_use_shuffle(nir_src src, nir_ssa_def **data, nir_ssa_def **index)
    if (!list_is_singular(&shuffle->dest.ssa.uses))
       return false;
 
-   if (nir_ssa_def_used_by_if(&shuffle->dest.ssa))
+   if (nir_def_used_by_if(&shuffle->dest.ssa))
       return false;
 
    *data = shuffle->src[0].ssa;
@@ -51,7 +51,7 @@ src_is_single_use_shuffle(nir_src src, nir_ssa_def **data, nir_ssa_def **index)
    return true;
 }
 
-static nir_ssa_def *
+static nir_def *
 try_opt_bcsel_of_shuffle(nir_builder *b, nir_alu_instr *alu,
                          bool block_has_discard)
 {
@@ -68,13 +68,13 @@ try_opt_bcsel_of_shuffle(nir_builder *b, nir_alu_instr *alu,
    if (!nir_alu_src_is_trivial_ssa(alu, 0))
       return NULL;
 
-   nir_ssa_def *data1, *index1;
+   nir_def *data1, *index1;
    if (!nir_alu_src_is_trivial_ssa(alu, 1) ||
        alu->src[1].src.ssa->parent_instr->block != alu->instr.block ||
        !src_is_single_use_shuffle(alu->src[1].src, &data1, &index1))
       return NULL;
 
-   nir_ssa_def *data2, *index2;
+   nir_def *data2, *index2;
    if (!nir_alu_src_is_trivial_ssa(alu, 2) ||
        alu->src[2].src.ssa->parent_instr->block != alu->instr.block ||
        !src_is_single_use_shuffle(alu->src[2].src, &data2, &index2))
@@ -83,8 +83,8 @@ try_opt_bcsel_of_shuffle(nir_builder *b, nir_alu_instr *alu,
    if (data1 != data2)
       return NULL;
 
-   nir_ssa_def *index = nir_bcsel(b, alu->src[0].src.ssa, index1, index2);
-   nir_ssa_def *shuffle = nir_shuffle(b, data1, index);
+   nir_def *index = nir_bcsel(b, alu->src[0].src.ssa, index1, index2);
+   nir_def *shuffle = nir_shuffle(b, data1, index);
 
    return shuffle;
 }
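
The rewrite this helper performs, sketched:

   /* bcsel(c, shuffle(x, i1), shuffle(x, i2))
    *    ->  shuffle(x, bcsel(c, i1, i2))
    * One shuffle instead of two; only legal because both shuffles read
    * the same data vector, have no other users, and the block contains
    * no discard (all checked above).
    */
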
@@ -128,7 +128,7 @@ src_is_alu(nir_op op, nir_src src, nir_src srcs[2])
    return true;
 }
 
-static nir_ssa_def *
+static nir_def *
 try_opt_quad_vote(nir_builder *b, nir_alu_instr *alu, bool block_has_discard)
 {
    if (block_has_discard)
@@ -215,7 +215,7 @@ static bool
 opt_intrinsics_alu(nir_builder *b, nir_alu_instr *alu,
                    bool block_has_discard, const struct nir_shader_compiler_options *options)
 {
-   nir_ssa_def *replacement = NULL;
+   nir_def *replacement = NULL;
 
    switch (alu->op) {
    case nir_op_bcsel:
@@ -231,8 +231,8 @@ opt_intrinsics_alu(nir_builder *b, nir_alu_instr *alu,
    }
 
    if (replacement) {
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
-                               replacement);
+      nir_def_rewrite_uses(&alu->dest.dest.ssa,
+                           replacement);
       nir_instr_remove(&alu->instr);
       return true;
    } else {
@@ -268,9 +268,9 @@ try_opt_exclusive_scan_to_inclusive(nir_intrinsic_instr *intrin)
 
       assert(src_index < 2 && nir_op_infos[alu->op].num_inputs == 2);
 
-      nir_ssa_scalar scan_scalar = nir_ssa_scalar_resolved(intrin->src[0].ssa, 0);
-      nir_ssa_scalar op_scalar = nir_ssa_scalar_resolved(alu->src[!src_index].src.ssa,
-                                                         alu->src[!src_index].swizzle[0]);
+      nir_scalar scan_scalar = nir_scalar_resolved(intrin->src[0].ssa, 0);
+      nir_scalar op_scalar = nir_scalar_resolved(alu->src[!src_index].src.ssa,
+                                                 alu->src[!src_index].swizzle[0]);
 
       if (scan_scalar.def != op_scalar.def || scan_scalar.comp != op_scalar.comp)
          return false;
@@ -282,7 +282,7 @@ try_opt_exclusive_scan_to_inclusive(nir_intrinsic_instr *intrin)
    nir_foreach_use_including_if_safe(src, &intrin->dest.ssa) {
       /* Remove alu. */
       nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, &intrin->dest.ssa);
+      nir_def_rewrite_uses(&alu->dest.dest.ssa, &intrin->dest.ssa);
       nir_instr_remove(&alu->instr);
    }
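
The transform attempted here, sketched: folding the lane's own value into an exclusive scan yields, by definition, the inclusive scan:

   /* y = exclusive_scan(op, x);
    * z = op(y, x);         ->   z = inclusive_scan(op, x);
    */
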
 
@@ -317,13 +317,13 @@ opt_intrinsics_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
                if (!const_val || const_val->i32 != 0)
                   continue;
 
-               nir_ssa_def *new_expr = nir_load_helper_invocation(b, 1);
+               nir_def *new_expr = nir_load_helper_invocation(b, 1);
 
                if (alu->op == nir_op_ine)
                   new_expr = nir_inot(b, new_expr);
 
-               nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
-                                        new_expr);
+               nir_def_rewrite_uses(&alu->dest.dest.ssa,
+                                    new_expr);
                nir_instr_remove(&alu->instr);
                progress = true;
             }
index d79455b..6041608 100644
@@ -64,7 +64,7 @@ var_info_cmp(const void *_a, const void *_b)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 build_constant_load(nir_builder *b, nir_deref_instr *deref,
                     glsl_type_size_align_func size_align)
 {
@@ -80,8 +80,8 @@ build_constant_load(nir_builder *b, nir_deref_instr *deref,
    UNUSED unsigned deref_size, deref_align;
    size_align(deref->type, &deref_size, &deref_align);
 
-   nir_ssa_def *src = nir_build_deref_offset(b, deref, size_align);
-   nir_ssa_def *load =
+   nir_def *src = nir_build_deref_offset(b, deref, size_align);
+   nir_def *load =
       nir_load_constant(b, num_components, bit_size, src,
                         .base = var->data.location,
                         .range = var_size,
@@ -360,9 +360,9 @@ nir_opt_large_constants(nir_shader *shader,
             struct var_info *info = &var_infos[var->index];
             if (info->is_constant) {
                b.cursor = nir_after_instr(&intrin->instr);
-               nir_ssa_def *val = build_constant_load(&b, deref, size_align);
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                        val);
+               nir_def *val = build_constant_load(&b, deref, size_align);
+               nir_def_rewrite_uses(&intrin->dest.ssa,
+                                    val);
                nir_instr_remove(&intrin->instr);
                nir_deref_instr_remove_if_unused(deref);
             }
index 5cf8358..2f58187 100644
@@ -118,10 +118,10 @@ get_info(nir_intrinsic_op op)
  * "resource" or "var" may be NULL.
  */
 struct entry_key {
-   nir_ssa_def *resource;
+   nir_def *resource;
    nir_variable *var;
    unsigned offset_def_count;
-   nir_ssa_scalar *offset_defs;
+   nir_scalar *offset_defs;
    uint64_t *offset_defs_mul;
 };
 
@@ -239,18 +239,18 @@ get_bit_size(struct entry *entry)
  * sources is a constant, update "def" to be the non-constant source, fill "c"
  * with the constant and return true. */
 static bool
-parse_alu(nir_ssa_scalar *def, nir_op op, uint64_t *c)
+parse_alu(nir_scalar *def, nir_op op, uint64_t *c)
 {
-   if (!nir_ssa_scalar_is_alu(*def) || nir_ssa_scalar_alu_op(*def) != op)
+   if (!nir_scalar_is_alu(*def) || nir_scalar_alu_op(*def) != op)
       return false;
 
-   nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(*def, 0);
-   nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(*def, 1);
-   if (op != nir_op_ishl && nir_ssa_scalar_is_const(src0)) {
-      *c = nir_ssa_scalar_as_uint(src0);
+   nir_scalar src0 = nir_scalar_chase_alu_src(*def, 0);
+   nir_scalar src1 = nir_scalar_chase_alu_src(*def, 1);
+   if (op != nir_op_ishl && nir_scalar_is_const(src0)) {
+      *c = nir_scalar_as_uint(src0);
       *def = src1;
-   } else if (nir_ssa_scalar_is_const(src1)) {
-      *c = nir_ssa_scalar_as_uint(src1);
+   } else if (nir_scalar_is_const(src1)) {
+      *c = nir_scalar_as_uint(src1);
       *def = src0;
    } else {
       return false;
@@ -260,10 +260,10 @@ parse_alu(nir_ssa_scalar *def, nir_op op, uint64_t *c)
 
 /* Parses an offset expression such as "a * 16 + 4" and "(a * 16 + 4) * 64 + 32". */
 static void
-parse_offset(nir_ssa_scalar *base, uint64_t *base_mul, uint64_t *offset)
+parse_offset(nir_scalar *base, uint64_t *base_mul, uint64_t *offset)
 {
-   if (nir_ssa_scalar_is_const(*base)) {
-      *offset = nir_ssa_scalar_as_uint(*base);
+   if (nir_scalar_is_const(*base)) {
+      *offset = nir_scalar_as_uint(*base);
       base->def = NULL;
       return;
    }
@@ -284,8 +284,8 @@ parse_offset(nir_ssa_scalar *base, uint64_t *base_mul, uint64_t *offset)
       progress |= parse_alu(base, nir_op_iadd, &add2);
       add += add2 * mul;
 
-      if (nir_ssa_scalar_is_alu(*base) && nir_ssa_scalar_alu_op(*base) == nir_op_mov) {
-         *base = nir_ssa_scalar_chase_alu_src(*base, 0);
+      if (nir_scalar_is_alu(*base) && nir_scalar_alu_op(*base) == nir_op_mov) {
+         *base = nir_scalar_chase_alu_src(*base, 0);
          progress = true;
       }
    } while (progress);
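
A worked run of parse_offset() on the second expression from the comment above, "(a * 16 + 4) * 64 + 32":

   /* inner level:  base = a, mul = 16, add = 4
    * outer level:  mul  = 16 * 64 = 1024
    *               add  = 4 * 64 + 32 = 288
    * result: *base_mul = 1024, *offset = 288, base chases down to a
    */
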
@@ -309,8 +309,8 @@ type_scalar_size_bytes(const struct glsl_type *type)
 }
 
 static unsigned
-add_to_entry_key(nir_ssa_scalar *offset_defs, uint64_t *offset_defs_mul,
-                 unsigned offset_def_count, nir_ssa_scalar def, uint64_t mul)
+add_to_entry_key(nir_scalar *offset_defs, uint64_t *offset_defs_mul,
+                 unsigned offset_def_count, nir_scalar def, uint64_t mul)
 {
    mul = util_mask_sign_extend(mul, def.def->bit_size);
 
@@ -318,7 +318,7 @@ add_to_entry_key(nir_ssa_scalar *offset_defs, uint64_t *offset_defs_mul,
       if (i == offset_def_count || def.def->index > offset_defs[i].def->index) {
          /* insert before i */
          memmove(offset_defs + i + 1, offset_defs + i,
-                 (offset_def_count - i) * sizeof(nir_ssa_scalar));
+                 (offset_def_count - i) * sizeof(nir_scalar));
          memmove(offset_defs_mul + i + 1, offset_defs_mul + i,
                  (offset_def_count - i) * sizeof(uint64_t));
          offset_defs[i] = def;
@@ -345,12 +345,12 @@ create_entry_key_from_deref(void *mem_ctx,
    while (path->path[path_len])
       path_len++;
 
-   nir_ssa_scalar offset_defs_stack[32];
+   nir_scalar offset_defs_stack[32];
    uint64_t offset_defs_mul_stack[32];
-   nir_ssa_scalar *offset_defs = offset_defs_stack;
+   nir_scalar *offset_defs = offset_defs_stack;
    uint64_t *offset_defs_mul = offset_defs_mul_stack;
    if (path_len > 32) {
-      offset_defs = malloc(path_len * sizeof(nir_ssa_scalar));
+      offset_defs = malloc(path_len * sizeof(nir_scalar));
       offset_defs_mul = malloc(path_len * sizeof(uint64_t));
    }
    unsigned offset_def_count = 0;
@@ -373,10 +373,10 @@ create_entry_key_from_deref(void *mem_ctx,
       case nir_deref_type_array:
       case nir_deref_type_ptr_as_array: {
          assert(parent);
-         nir_ssa_def *index = deref->arr.index.ssa;
+         nir_def *index = deref->arr.index.ssa;
          uint32_t stride = nir_deref_instr_array_stride(deref);
 
-         nir_ssa_scalar base = { .def = index, .comp = 0 };
+         nir_scalar base = { .def = index, .comp = 0 };
          uint64_t offset = 0, base_mul = 1;
          parse_offset(&base, &base_mul, &offset);
          offset = util_mask_sign_extend(offset, index->bit_size);
@@ -406,9 +406,9 @@ create_entry_key_from_deref(void *mem_ctx,
    }
 
    key->offset_def_count = offset_def_count;
-   key->offset_defs = ralloc_array(mem_ctx, nir_ssa_scalar, offset_def_count);
+   key->offset_defs = ralloc_array(mem_ctx, nir_scalar, offset_def_count);
    key->offset_defs_mul = ralloc_array(mem_ctx, uint64_t, offset_def_count);
-   memcpy(key->offset_defs, offset_defs, offset_def_count * sizeof(nir_ssa_scalar));
+   memcpy(key->offset_defs, offset_defs, offset_def_count * sizeof(nir_scalar));
    memcpy(key->offset_defs_mul, offset_defs_mul, offset_def_count * sizeof(uint64_t));
 
    if (offset_defs != offset_defs_stack)
@@ -421,7 +421,7 @@ create_entry_key_from_deref(void *mem_ctx,
 
 static unsigned
 parse_entry_key_from_offset(struct entry_key *key, unsigned size, unsigned left,
-                            nir_ssa_scalar base, uint64_t base_mul, uint64_t *offset)
+                            nir_scalar base, uint64_t base_mul, uint64_t *offset)
 {
    uint64_t new_mul;
    uint64_t new_offset;
@@ -436,9 +436,9 @@ parse_entry_key_from_offset(struct entry_key *key, unsigned size, unsigned left,
    assert(left >= 1);
 
    if (left >= 2) {
-      if (nir_ssa_scalar_is_alu(base) && nir_ssa_scalar_alu_op(base) == nir_op_iadd) {
-         nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(base, 0);
-         nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(base, 1);
+      if (nir_scalar_is_alu(base) && nir_scalar_alu_op(base) == nir_op_iadd) {
+         nir_scalar src0 = nir_scalar_chase_alu_src(base, 0);
+         nir_scalar src1 = nir_scalar_chase_alu_src(base, 1);
          unsigned amount = parse_entry_key_from_offset(key, size, left - 1, src0, base_mul, offset);
          amount += parse_entry_key_from_offset(key, size + amount, left - amount, src1, base_mul, offset);
          return amount;
@@ -449,23 +449,23 @@ parse_entry_key_from_offset(struct entry_key *key, unsigned size, unsigned left,
 }
 
 static struct entry_key *
-create_entry_key_from_offset(void *mem_ctx, nir_ssa_def *base, uint64_t base_mul, uint64_t *offset)
+create_entry_key_from_offset(void *mem_ctx, nir_def *base, uint64_t base_mul, uint64_t *offset)
 {
    struct entry_key *key = ralloc(mem_ctx, struct entry_key);
    key->resource = NULL;
    key->var = NULL;
    if (base) {
-      nir_ssa_scalar offset_defs[32];
+      nir_scalar offset_defs[32];
       uint64_t offset_defs_mul[32];
       key->offset_defs = offset_defs;
       key->offset_defs_mul = offset_defs_mul;
 
-      nir_ssa_scalar scalar = { .def = base, .comp = 0 };
+      nir_scalar scalar = { .def = base, .comp = 0 };
       key->offset_def_count = parse_entry_key_from_offset(key, 0, 32, scalar, base_mul, offset);
 
-      key->offset_defs = ralloc_array(mem_ctx, nir_ssa_scalar, key->offset_def_count);
+      key->offset_defs = ralloc_array(mem_ctx, nir_scalar, key->offset_def_count);
       key->offset_defs_mul = ralloc_array(mem_ctx, uint64_t, key->offset_def_count);
-      memcpy(key->offset_defs, offset_defs, key->offset_def_count * sizeof(nir_ssa_scalar));
+      memcpy(key->offset_defs, offset_defs, key->offset_def_count * sizeof(nir_scalar));
       memcpy(key->offset_defs_mul, offset_defs_mul, key->offset_def_count * sizeof(uint64_t));
    } else {
       key->offset_def_count = 0;
@@ -542,7 +542,7 @@ create_entry(struct vectorize_ctx *ctx,
       entry->key = create_entry_key_from_deref(entry, ctx, &path, &entry->offset);
       nir_deref_path_finish(&path);
    } else {
-      nir_ssa_def *base = entry->info->base_src >= 0 ? intrin->src[entry->info->base_src].ssa : NULL;
+      nir_def *base = entry->info->base_src >= 0 ? intrin->src[entry->info->base_src].ssa : NULL;
       uint64_t offset = 0;
       if (nir_intrinsic_has_base(intrin))
          offset += nir_intrinsic_base(intrin);
@@ -655,8 +655,8 @@ subtract_deref(nir_builder *b, nir_deref_instr *deref, int64_t offset)
        nir_src_is_const(deref->arr.index) &&
        offset % nir_deref_instr_array_stride(deref) == 0) {
       unsigned stride = nir_deref_instr_array_stride(deref);
-      nir_ssa_def *index = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index) - offset / stride,
-                                          deref->dest.ssa.bit_size);
+      nir_def *index = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index) - offset / stride,
+                                      deref->dest.ssa.bit_size);
       return nir_build_deref_ptr_as_array(b, nir_deref_instr_parent(deref), index);
    }
 
@@ -686,7 +686,7 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
    unsigned high_bit_size = get_bit_size(high);
    bool low_bool = low->intrin->dest.ssa.bit_size == 1;
    bool high_bool = high->intrin->dest.ssa.bit_size == 1;
-   nir_ssa_def *data = &first->intrin->dest.ssa;
+   nir_def *data = &first->intrin->dest.ssa;
 
    b->cursor = nir_after_instr(first->instr);
 
@@ -694,9 +694,9 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
    data->num_components = new_num_components;
    data->bit_size = new_bit_size;
 
-   nir_ssa_def *low_def = nir_extract_bits(
+   nir_def *low_def = nir_extract_bits(
       b, &data, 1, 0, low->intrin->num_components, low_bit_size);
-   nir_ssa_def *high_def = nir_extract_bits(
+   nir_def *high_def = nir_extract_bits(
       b, &data, 1, high_start, high->intrin->num_components, high_bit_size);
 
    /* convert booleans */
@@ -705,13 +705,13 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
 
    /* update uses */
    if (first == low) {
-      nir_ssa_def_rewrite_uses_after(&low->intrin->dest.ssa, low_def,
-                                     high_def->parent_instr);
-      nir_ssa_def_rewrite_uses(&high->intrin->dest.ssa, high_def);
+      nir_def_rewrite_uses_after(&low->intrin->dest.ssa, low_def,
+                                 high_def->parent_instr);
+      nir_def_rewrite_uses(&high->intrin->dest.ssa, high_def);
    } else {
-      nir_ssa_def_rewrite_uses(&low->intrin->dest.ssa, low_def);
-      nir_ssa_def_rewrite_uses_after(&high->intrin->dest.ssa, high_def,
-                                     high_def->parent_instr);
+      nir_def_rewrite_uses(&low->intrin->dest.ssa, low_def);
+      nir_def_rewrite_uses_after(&high->intrin->dest.ssa, high_def,
+                                 high_def->parent_instr);
    }
 
    /* update the intrinsic */
@@ -726,7 +726,7 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
        * nir_opt_algebraic() turns them into "i * 16 + 16" */
       b->cursor = nir_before_instr(first->instr);
 
-      nir_ssa_def *new_base = first->intrin->src[info->base_src].ssa;
+      nir_def *new_base = first->intrin->src[info->base_src].ssa;
       new_base = nir_iadd_imm(b, new_base, -(int)(high_start / 8u));
 
       nir_instr_rewrite_src(first->instr, &first->intrin->src[info->base_src],
@@ -792,13 +792,13 @@ vectorize_stores(nir_builder *b, struct vectorize_ctx *ctx,
    uint32_t write_mask = low_write_mask | high_write_mask;
 
    /* convert booleans */
-   nir_ssa_def *low_val = low->intrin->src[low->info->value_src].ssa;
-   nir_ssa_def *high_val = high->intrin->src[high->info->value_src].ssa;
+   nir_def *low_val = low->intrin->src[low->info->value_src].ssa;
+   nir_def *high_val = high->intrin->src[high->info->value_src].ssa;
    low_val = low_val->bit_size == 1 ? nir_b2iN(b, low_val, 32) : low_val;
    high_val = high_val->bit_size == 1 ? nir_b2iN(b, high_val, 32) : high_val;
 
    /* combine the data */
-   nir_ssa_def *data_channels[NIR_MAX_VEC_COMPONENTS];
+   nir_def *data_channels[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < new_num_components; i++) {
       bool set_low = low_write_mask & (1 << i);
       bool set_high = high_write_mask & (1 << i);
@@ -811,10 +811,10 @@ vectorize_stores(nir_builder *b, struct vectorize_ctx *ctx,
          unsigned offset = i * new_bit_size - high_start;
          data_channels[i] = nir_extract_bits(b, &high_val, 1, offset, 1, new_bit_size);
       } else {
-         data_channels[i] = nir_ssa_undef(b, 1, new_bit_size);
+         data_channels[i] = nir_undef(b, 1, new_bit_size);
       }
    }
-   nir_ssa_def *data = nir_vec(b, data_channels, new_num_components);
+   nir_def *data = nir_vec(b, data_channels, new_num_components);
 
    /* update the intrinsic */
    nir_intrinsic_set_write_mask(second->intrin, write_mask);
@@ -1183,24 +1183,24 @@ try_vectorize_shared2(struct vectorize_ctx *ctx,
    /* vectorize the accesses */
    nir_builder b = nir_builder_at(nir_after_instr(first->is_store ? second->instr : first->instr));
 
-   nir_ssa_def *offset = first->intrin->src[first->is_store].ssa;
+   nir_def *offset = first->intrin->src[first->is_store].ssa;
    offset = nir_iadd_imm(&b, offset, nir_intrinsic_base(first->intrin));
    if (first != low)
       offset = nir_iadd_imm(&b, offset, -(int)diff);
 
    if (first->is_store) {
-      nir_ssa_def *low_val = low->intrin->src[low->info->value_src].ssa;
-      nir_ssa_def *high_val = high->intrin->src[high->info->value_src].ssa;
-      nir_ssa_def *val = nir_vec2(&b, nir_bitcast_vector(&b, low_val, low_size * 8u),
-                                  nir_bitcast_vector(&b, high_val, low_size * 8u));
+      nir_def *low_val = low->intrin->src[low->info->value_src].ssa;
+      nir_def *high_val = high->intrin->src[high->info->value_src].ssa;
+      nir_def *val = nir_vec2(&b, nir_bitcast_vector(&b, low_val, low_size * 8u),
+                              nir_bitcast_vector(&b, high_val, low_size * 8u));
       nir_store_shared2_amd(&b, val, offset, .offset1 = diff / stride, .st64 = st64);
    } else {
-      nir_ssa_def *new_def = nir_load_shared2_amd(&b, low_size * 8u, offset, .offset1 = diff / stride,
-                                                  .st64 = st64);
-      nir_ssa_def_rewrite_uses(&low->intrin->dest.ssa,
-                               nir_bitcast_vector(&b, nir_channel(&b, new_def, 0), low_bit_size));
-      nir_ssa_def_rewrite_uses(&high->intrin->dest.ssa,
-                               nir_bitcast_vector(&b, nir_channel(&b, new_def, 1), high_bit_size));
+      nir_def *new_def = nir_load_shared2_amd(&b, low_size * 8u, offset, .offset1 = diff / stride,
+                                              .st64 = st64);
+      nir_def_rewrite_uses(&low->intrin->dest.ssa,
+                           nir_bitcast_vector(&b, nir_channel(&b, new_def, 0), low_bit_size));
+      nir_def_rewrite_uses(&high->intrin->dest.ssa,
+                           nir_bitcast_vector(&b, nir_channel(&b, new_def, 1), high_bit_size));
    }
 
    nir_instr_remove(first->instr);
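
The hunks above are typical of the whole change: only spellings move, from nir_ssa_def to nir_def and from the nir_ssa_def_rewrite_* helpers to nir_def_rewrite_*. As a minimal sketch of the renamed pattern outside the diff (the helper and its zero-replacement behavior are hypothetical; only the type and function names come from this commit):

   #include "nir_builder.h"

   /* Hypothetical helper: replace every use of `def` with a zero of the
    * same shape.  Before this commit the same code would have spelled
    * these nir_ssa_def and nir_ssa_def_rewrite_uses. */
   static void
   replace_with_zero(nir_builder *b, nir_def *def)
   {
      b->cursor = nir_before_instr(def->parent_instr);
      nir_def *zero = nir_imm_zero(b, def->num_components, def->bit_size);
      nir_def_rewrite_uses(def, zero);
   }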
index a035c68..6db8305 100644
@@ -677,11 +677,11 @@ remove_out_of_bounds_induction_use(nir_shader *shader, nir_loop *loop,
             if (is_access_out_of_bounds(term, nir_src_as_deref(intrin->src[0]),
                                         trip_count)) {
                if (intrin->intrinsic == nir_intrinsic_load_deref) {
-                  nir_ssa_def *undef =
-                     nir_ssa_undef(&b, intrin->dest.ssa.num_components,
-                                   intrin->dest.ssa.bit_size);
-                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                           undef);
+                  nir_def *undef =
+                     nir_undef(&b, intrin->dest.ssa.num_components,
+                               intrin->dest.ssa.bit_size);
+                  nir_def_rewrite_uses(&intrin->dest.ssa,
+                                       undef);
                } else {
                   nir_instr_remove(instr);
                   continue;
index d765855..aa73fa4 100644
@@ -149,7 +149,7 @@ try_lower_memcpy(nir_builder *b, nir_intrinsic_instr *cpy,
        glsl_get_explicit_size(dst->type, false) == size &&
        glsl_get_explicit_size(src->type, false) == size) {
       b->cursor = nir_instr_remove(&cpy->instr);
-      nir_ssa_def *data =
+      nir_def *data =
          nir_load_deref_with_access(b, src, nir_intrinsic_src_access(cpy));
       data = nir_bitcast_vector(b, data, glsl_get_bit_size(dst->type));
       assert(data->num_components == glsl_get_vector_elements(dst->type));
index bc00015..f55d68b 100644
@@ -75,7 +75,7 @@ nir_opt_move_block(nir_block *block, nir_move_options options)
          continue;
 
       /* Check all users in this block which is the first */
-      const nir_ssa_def *def = nir_instr_ssa_def(instr);
+      const nir_def *def = nir_instr_ssa_def(instr);
       nir_instr *first_user = instr == if_cond_instr ? NULL : last_instr;
       nir_foreach_use(use, def) {
          nir_instr *parent = use->parent_instr;
index 478a478..1feb2fd 100644
@@ -34,19 +34,19 @@ typedef struct
    const nir_opt_offsets_options *options;
 } opt_offsets_state;
 
-static nir_ssa_scalar
-try_extract_const_addition(nir_builder *b, nir_ssa_scalar val, opt_offsets_state *state, unsigned *out_const, uint32_t max)
+static nir_scalar
+try_extract_const_addition(nir_builder *b, nir_scalar val, opt_offsets_state *state, unsigned *out_const, uint32_t max)
 {
-   val = nir_ssa_scalar_chase_movs(val);
+   val = nir_scalar_chase_movs(val);
 
-   if (!nir_ssa_scalar_is_alu(val))
+   if (!nir_scalar_is_alu(val))
       return val;
 
    nir_alu_instr *alu = nir_instr_as_alu(val.def->parent_instr);
    if (alu->op != nir_op_iadd)
       return val;
 
-   nir_ssa_scalar src[2] = {
+   nir_scalar src[2] = {
       { alu->src[0].src.ssa, alu->src[0].swizzle[val.comp] },
       { alu->src[1].src.ssa, alu->src[1].swizzle[val.comp] },
    };
@@ -74,9 +74,9 @@ try_extract_const_addition(nir_builder *b, nir_ssa_scalar val, opt_offsets_state
    }
 
    for (unsigned i = 0; i < 2; ++i) {
-      src[i] = nir_ssa_scalar_chase_movs(src[i]);
-      if (nir_ssa_scalar_is_const(src[i])) {
-         uint32_t offset = nir_ssa_scalar_as_uint(src[i]);
+      src[i] = nir_scalar_chase_movs(src[i]);
+      if (nir_scalar_is_const(src[i])) {
+         uint32_t offset = nir_scalar_as_uint(src[i]);
          if (offset + *out_const <= max) {
             *out_const += offset;
             return try_extract_const_addition(b, src[1 - i], state, out_const, max);
@@ -91,7 +91,7 @@ try_extract_const_addition(nir_builder *b, nir_ssa_scalar val, opt_offsets_state
       return val;
 
    b->cursor = nir_before_instr(&alu->instr);
-   nir_ssa_def *r =
+   nir_def *r =
       nir_iadd(b, nir_channel(b, src[0].def, src[0].comp),
                nir_channel(b, src[1].def, src[1].comp));
    return nir_get_ssa_scalar(r, 0);
@@ -111,14 +111,14 @@ try_fold_load_store(nir_builder *b,
 
    unsigned off_const = nir_intrinsic_base(intrin);
    nir_src *off_src = &intrin->src[offset_src_idx];
-   nir_ssa_def *replace_src = NULL;
+   nir_def *replace_src = NULL;
 
    if (off_src->ssa->bit_size != 32)
       return false;
 
    if (!nir_src_is_const(*off_src)) {
       uint32_t add_offset = 0;
-      nir_ssa_scalar val = { .def = off_src->ssa, .comp = 0 };
+      nir_scalar val = { .def = off_src->ssa, .comp = 0 };
       val = try_extract_const_addition(b, val, state, &add_offset, max - off_const);
       if (add_offset == 0)
          return false;
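
The nir_ssa_scalar to nir_scalar rename above covers the scalar-chasing helpers as well. A short sketch of how they compose after the rename (hypothetical helper; the chase/is_const/as_uint calls are the renamed entry points this pass uses):

   #include "nir.h"

   /* Hypothetical: look through mov/vec copies and report a scalar's
    * constant value, if it has one. */
   static bool
   scalar_as_const_uint(nir_scalar s, uint32_t *out)
   {
      s = nir_scalar_chase_movs(s);
      if (!nir_scalar_is_const(s))
         return false;
      *out = nir_scalar_as_uint(s);
      return true;
   }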
index c34fa27..0ab92d0 100644
@@ -349,8 +349,8 @@ nir_opt_collapse_if(nir_if *if_stmt, nir_shader *shader, unsigned limit,
 
    /* combine the conditions */
    struct nir_builder b = nir_builder_at(nir_before_cf_node(&if_stmt->cf_node));
-   nir_ssa_def *cond = nir_iand(&b, if_stmt->condition.ssa,
-                                parent_if->condition.ssa);
+   nir_def *cond = nir_iand(&b, if_stmt->condition.ssa,
+                            parent_if->condition.ssa);
    nir_if_rewrite_condition(if_stmt, nir_src_for_ssa(cond));
 
    /* move the whole inner if before the parent if */
@@ -460,8 +460,8 @@ nir_opt_peephole_select_block(nir_block *block, nir_shader *shader,
       nir_ssa_dest_init(&sel->instr, &sel->dest.dest,
                         phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
 
-      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
-                               &sel->dest.dest.ssa);
+      nir_def_rewrite_uses(&phi->dest.ssa,
+                           &sel->dest.dest.ssa);
 
       nir_instr_insert_before(&phi->instr, &sel->instr);
       nir_instr_remove(&phi->instr);
index 67e9653..689a678 100644
@@ -238,8 +238,8 @@ try_move_narrowing_dst(nir_builder *b, nir_phi_instr *phi)
    nir_foreach_phi_src(src, phi) {
       /* insert new conversion instr in block of original phi src: */
       b->cursor = nir_after_instr_and_phis(src->src.ssa->parent_instr);
-      nir_ssa_def *old_src = src->src.ssa;
-      nir_ssa_def *new_src = nir_build_alu(b, op, old_src, NULL, NULL, NULL);
+      nir_def *old_src = src->src.ssa;
+      nir_def *new_src = nir_build_alu(b, op, old_src, NULL, NULL, NULL);
 
       /* and add corresponding phi_src to the new_phi: */
       nir_phi_instr_add_src(new_phi, src->pred, nir_src_for_ssa(new_src));
@@ -256,7 +256,7 @@ try_move_narrowing_dst(nir_builder *b, nir_phi_instr *phi)
       nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
       alu->op = nir_op_mov;
    }
-   nir_ssa_def_rewrite_uses(&phi->dest.ssa, &new_phi->dest.ssa);
+   nir_def_rewrite_uses(&phi->dest.ssa, &new_phi->dest.ssa);
 
    /* And finally insert the new phi after all sources are in place: */
    b->cursor = nir_after_instr(&phi->instr);
@@ -380,7 +380,7 @@ try_move_widening_src(nir_builder *b, nir_phi_instr *phi)
    /* Remove the widening conversions from the phi sources: */
    nir_foreach_phi_src(src, phi) {
       nir_instr *instr = src->src.ssa->parent_instr;
-      nir_ssa_def *new_src;
+      nir_def *new_src;
 
       b->cursor = nir_after_instr(instr);
 
@@ -419,9 +419,9 @@ try_move_widening_src(nir_builder *b, nir_phi_instr *phi)
     * and re-write the original phi's uses
     */
    b->cursor = nir_after_instr_and_phis(&new_phi->instr);
-   nir_ssa_def *def = nir_build_alu(b, op, &new_phi->dest.ssa, NULL, NULL, NULL);
+   nir_def *def = nir_build_alu(b, op, &new_phi->dest.ssa, NULL, NULL, NULL);
 
-   nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);
+   nir_def_rewrite_uses(&phi->dest.ssa, def);
 
    return true;
 }
index 281377e..725acb0 100644
@@ -74,7 +74,7 @@ typedef struct {
    /* Per-definition array of states */
    def_state *states;
 
-   nir_ssa_def *def;
+   nir_def *def;
 
    const nir_opt_preamble_options *options;
 } opt_preamble_ctx;
@@ -355,7 +355,7 @@ nir_opt_preamble(nir_shader *shader, const nir_opt_preamble_options *options,
    /* Step 1: Calculate can_move */
    nir_foreach_block(block, impl) {
       nir_foreach_instr(instr, block) {
-         nir_ssa_def *def = nir_instr_ssa_def(instr);
+         nir_def *def = nir_instr_ssa_def(instr);
          if (!def)
             continue;
 
@@ -381,7 +381,7 @@ nir_opt_preamble(nir_shader *shader, const nir_opt_preamble_options *options,
    unsigned num_candidates = 0;
    nir_foreach_block_reverse(block, impl) {
       nir_foreach_instr_reverse(instr, block) {
-         nir_ssa_def *def = nir_instr_ssa_def(instr);
+         nir_def *def = nir_instr_ssa_def(instr);
          if (!def)
             continue;
 
@@ -394,7 +394,7 @@ nir_opt_preamble(nir_shader *shader, const nir_opt_preamble_options *options,
          state->candidate = false;
          state->must_stay = false;
          nir_foreach_use(use, def) {
-            nir_ssa_def *use_def = nir_instr_ssa_def(use->parent_instr);
+            nir_def *use_def = nir_instr_ssa_def(use->parent_instr);
             if (!use_def || !ctx.states[use_def->index].can_move ||
                 ctx.states[use_def->index].must_stay) {
                if (is_candidate)
@@ -441,7 +441,7 @@ nir_opt_preamble(nir_shader *shader, const nir_opt_preamble_options *options,
     */
    nir_foreach_block(block, impl) {
       nir_foreach_instr(instr, block) {
-         nir_ssa_def *def = nir_instr_ssa_def(instr);
+         nir_def *def = nir_instr_ssa_def(instr);
          if (!def)
             continue;
 
@@ -527,7 +527,7 @@ nir_opt_preamble(nir_shader *shader, const nir_opt_preamble_options *options,
 
    nir_foreach_block(block, impl) {
       nir_foreach_instr(instr, block) {
-         nir_ssa_def *def = nir_instr_ssa_def(instr);
+         nir_def *def = nir_instr_ssa_def(instr);
          if (!def)
             continue;
 
@@ -549,7 +549,7 @@ nir_opt_preamble(nir_shader *shader, const nir_opt_preamble_options *options,
                 */
                b->cursor = nir_before_instr(clone);
 
-               nir_ssa_def *zero =
+               nir_def *zero =
                   nir_imm_zero(b, tex->coord_components - tex->is_array, 32);
                nir_tex_instr_add_src(tex, nir_tex_src_ddx, nir_src_for_ssa(zero));
                nir_tex_instr_add_src(tex, nir_tex_src_ddy, nir_src_for_ssa(zero));
@@ -560,7 +560,7 @@ nir_opt_preamble(nir_shader *shader, const nir_opt_preamble_options *options,
          }
 
          if (state->replace) {
-            nir_ssa_def *clone_def = nir_instr_ssa_def(clone);
+            nir_def *clone_def = nir_instr_ssa_def(clone);
             nir_store_preamble(b, clone_def, .base = state->offset);
          }
       }
@@ -571,7 +571,7 @@ nir_opt_preamble(nir_shader *shader, const nir_opt_preamble_options *options,
 
    nir_foreach_block(block, impl) {
       nir_foreach_instr_safe(instr, block) {
-         nir_ssa_def *def = nir_instr_ssa_def(instr);
+         nir_def *def = nir_instr_ssa_def(instr);
          if (!def)
             continue;
 
@@ -581,11 +581,11 @@ nir_opt_preamble(nir_shader *shader, const nir_opt_preamble_options *options,
 
          b->cursor = nir_before_instr(instr);
 
-         nir_ssa_def *new_def =
+         nir_def *new_def =
             nir_load_preamble(b, def->num_components, def->bit_size,
                               .base = state->offset);
 
-         nir_ssa_def_rewrite_uses(def, new_def);
+         nir_def_rewrite_uses(def, new_def);
          nir_instr_free_and_dce(instr);
       }
    }
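
For orientation, nir_opt_preamble's end state pairs a nir_store_preamble in the preamble shader with a nir_load_preamble at each former use site, and the final loop above performs that substitution. A minimal sketch of the replacement step under assumed context (a builder b, a hoisted def, and its assigned offset off come from the pass):

   b->cursor = nir_before_instr(def->parent_instr);
   nir_def *new_def =
      nir_load_preamble(b, def->num_components, def->bit_size,
                        .base = off);
   nir_def_rewrite_uses(def, new_def);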
index b1dd703..b21ecd6 100644
@@ -39,7 +39,7 @@ static void
 mark_query_read(struct set *queries,
                 nir_intrinsic_instr *intrin)
 {
-   nir_ssa_def *rq_def = intrin->src[0].ssa;
+   nir_def *rq_def = intrin->src[0].ssa;
 
    nir_variable *query;
    if (rq_def->parent_instr->type == nir_instr_type_intrinsic) {
index ed239ee..29567ed 100644
@@ -120,14 +120,14 @@ nir_opt_reassociate_bfi_instr(nir_builder *b,
    /* The extra nir_mov_alu are to handle swizzles that might be on the
     * original sources.
     */
-   nir_ssa_def *new_bfi = nir_bfi(b,
-                                  nir_mov_alu(b, bfiCD0->src[0], 1),
-                                  nir_mov_alu(b, bfiCD0->src[1], 1),
-                                  nir_iand(b,
-                                           nir_mov_alu(b, bfiABx->src[0], 1),
-                                           nir_mov_alu(b, bfiABx->src[1], 1)));
-
-   nir_ssa_def_rewrite_uses(&bfiABx->dest.dest.ssa, new_bfi);
+   nir_def *new_bfi = nir_bfi(b,
+                              nir_mov_alu(b, bfiCD0->src[0], 1),
+                              nir_mov_alu(b, bfiCD0->src[1], 1),
+                              nir_iand(b,
+                                       nir_mov_alu(b, bfiABx->src[0], 1),
+                                       nir_mov_alu(b, bfiABx->src[1], 1)));
+
+   nir_def_rewrite_uses(&bfiABx->dest.dest.ssa, new_bfi);
    return true;
 }
 
index 84de32f..c7406a2 100644
@@ -29,7 +29,7 @@
 #include "nir_builder.h"
 
 static nir_alu_instr *
-get_parent_mov(nir_ssa_def *ssa)
+get_parent_mov(nir_def *ssa)
 {
    if (ssa->parent_instr->type != nir_instr_type_alu)
       return NULL;
@@ -39,7 +39,7 @@ get_parent_mov(nir_ssa_def *ssa)
 }
 
 static bool
-matching_mov(nir_alu_instr *mov1, nir_ssa_def *ssa)
+matching_mov(nir_alu_instr *mov1, nir_def *ssa)
 {
    if (!mov1)
       return false;
@@ -69,7 +69,7 @@ remove_phis_block(nir_block *block, nir_builder *b)
    bool progress = false;
 
    nir_foreach_phi_safe(phi, block) {
-      nir_ssa_def *def = NULL;
+      nir_def *def = NULL;
       nir_alu_instr *mov = NULL;
       bool srcs_same = true;
 
@@ -109,8 +109,8 @@ remove_phis_block(nir_block *block, nir_builder *b)
          /* In this case, the phi had no sources. So turn it into an undef. */
 
          b->cursor = nir_after_phis(block);
-         def = nir_ssa_undef(b, phi->dest.ssa.num_components,
-                             phi->dest.ssa.bit_size);
+         def = nir_undef(b, phi->dest.ssa.num_components,
+                         phi->dest.ssa.bit_size);
       } else if (mov) {
          /* If the sources were all movs from the same source with the same
           * swizzle, then we can't just pick a random move because it may not
@@ -124,7 +124,7 @@ remove_phis_block(nir_block *block, nir_builder *b)
          def = nir_mov_alu(b, mov->src[0], def->num_components);
       }
 
-      nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);
+      nir_def_rewrite_uses(&phi->dest.ssa, def);
       nir_instr_remove(&phi->instr);
 
       progress = true;
index 6f1f1d3..7a0d2d7 100644
@@ -48,7 +48,7 @@ opt_shrink_vectors_image_store(nir_builder *b, nir_intrinsic_instr *instr)
    if (components >= instr->num_components)
       return false;
 
-   nir_ssa_def *data = nir_trim_vector(b, instr->src[3].ssa, components);
+   nir_def *data = nir_trim_vector(b, instr->src[3].ssa, components);
    nir_instr_rewrite_src(&instr->instr, &instr->src[3], nir_src_for_ssa(data));
    instr->num_components = components;
 
@@ -83,7 +83,7 @@ opt_shrink_store_instr(nir_builder *b, nir_intrinsic_instr *instr, bool shrink_i
    unsigned write_mask = nir_intrinsic_write_mask(instr);
    unsigned last_bit = util_last_bit(write_mask);
    if (last_bit < instr->num_components) {
-      nir_ssa_def *def = nir_trim_vector(b, instr->src[0].ssa, last_bit);
+      nir_def *def = nir_trim_vector(b, instr->src[0].ssa, last_bit);
       nir_instr_rewrite_src(&instr->instr,
                             &instr->src[0],
                             nir_src_for_ssa(def));
index ee1790f..17cee53 100644
@@ -59,7 +59,7 @@ round_up_components(unsigned n)
 }
 
 static bool
-shrink_dest_to_read_mask(nir_ssa_def *def)
+shrink_dest_to_read_mask(nir_def *def)
 {
    /* early out if there's nothing to do. */
    if (def->num_components == 1)
@@ -71,7 +71,7 @@ shrink_dest_to_read_mask(nir_ssa_def *def)
          return false;
    }
 
-   unsigned mask = nir_ssa_def_components_read(def);
+   unsigned mask = nir_def_components_read(def);
    int last_bit = util_last_bit(mask);
 
    /* If nothing was read, leave it up to DCE. */
@@ -93,7 +93,7 @@ shrink_dest_to_read_mask(nir_ssa_def *def)
 static bool
 shrink_intrinsic_to_non_sparse(nir_intrinsic_instr *instr)
 {
-   unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
+   unsigned mask = nir_def_components_read(&instr->dest.ssa);
    int last_bit = util_last_bit(mask);
 
    /* If the sparse component is used, do nothing. */
@@ -122,7 +122,7 @@ shrink_intrinsic_to_non_sparse(nir_intrinsic_instr *instr)
 }
 
 static void
-reswizzle_alu_uses(nir_ssa_def *def, uint8_t *reswizzle)
+reswizzle_alu_uses(nir_def *def, uint8_t *reswizzle)
 {
    nir_foreach_use(use_src, def) {
       /* all uses must be ALU instructions */
@@ -136,7 +136,7 @@ reswizzle_alu_uses(nir_ssa_def *def, uint8_t *reswizzle)
 }
 
 static bool
-is_only_used_by_alu(nir_ssa_def *def)
+is_only_used_by_alu(nir_def *def)
 {
    nir_foreach_use(use_src, def) {
       if (use_src->parent_instr->type != nir_instr_type_alu)
@@ -149,8 +149,8 @@ is_only_used_by_alu(nir_ssa_def *def)
 static bool
 opt_shrink_vector(nir_builder *b, nir_alu_instr *instr)
 {
-   nir_ssa_def *def = &instr->dest.dest.ssa;
-   unsigned mask = nir_ssa_def_components_read(def);
+   nir_def *def = &instr->dest.dest.ssa;
+   unsigned mask = nir_def_components_read(def);
 
    /* If nothing was read, leave it up to DCE. */
    if (mask == 0)
@@ -161,13 +161,13 @@ opt_shrink_vector(nir_builder *b, nir_alu_instr *instr)
       return false;
 
    uint8_t reswizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
-   nir_ssa_scalar srcs[NIR_MAX_VEC_COMPONENTS] = { 0 };
+   nir_scalar srcs[NIR_MAX_VEC_COMPONENTS] = { 0 };
    unsigned num_components = 0;
    for (unsigned i = 0; i < def->num_components; i++) {
       if (!((mask >> i) & 0x1))
          continue;
 
-      nir_ssa_scalar scalar = nir_get_ssa_scalar(instr->src[i].src.ssa, instr->src[i].swizzle[0]);
+      nir_scalar scalar = nir_get_ssa_scalar(instr->src[i].src.ssa, instr->src[i].swizzle[0]);
 
       /* Try reuse a component with the same value */
       unsigned j;
@@ -190,8 +190,8 @@ opt_shrink_vector(nir_builder *b, nir_alu_instr *instr)
       return false;
 
    /* create new vecN and replace uses */
-   nir_ssa_def *new_vec = nir_vec_scalars(b, srcs, num_components);
-   nir_ssa_def_rewrite_uses(def, new_vec);
+   nir_def *new_vec = nir_vec_scalars(b, srcs, num_components);
+   nir_def_rewrite_uses(def, new_vec);
    reswizzle_alu_uses(new_vec, reswizzle);
 
    return true;
@@ -200,7 +200,7 @@ opt_shrink_vector(nir_builder *b, nir_alu_instr *instr)
 static bool
 opt_shrink_vectors_alu(nir_builder *b, nir_alu_instr *instr)
 {
-   nir_ssa_def *def = &instr->dest.dest.ssa;
+   nir_def *def = &instr->dest.dest.ssa;
 
    /* Nothing to shrink */
    if (def->num_components == 1)
@@ -222,7 +222,7 @@ opt_shrink_vectors_alu(nir_builder *b, nir_alu_instr *instr)
    if (!is_only_used_by_alu(def))
       return false;
 
-   unsigned mask = nir_ssa_def_components_read(def);
+   unsigned mask = nir_def_components_read(def);
    /* return, if there is nothing to do */
    if (mask == 0)
       return false;
@@ -323,7 +323,7 @@ opt_shrink_vectors_tex(nir_builder *b, nir_tex_instr *tex)
    if (!tex->is_sparse)
       return false;
 
-   unsigned mask = nir_ssa_def_components_read(&tex->dest.ssa);
+   unsigned mask = nir_def_components_read(&tex->dest.ssa);
    int last_bit = util_last_bit(mask);
 
    /* If the sparse component is used, do nothing. */
@@ -339,7 +339,7 @@ opt_shrink_vectors_tex(nir_builder *b, nir_tex_instr *tex)
 static bool
 opt_shrink_vectors_load_const(nir_load_const_instr *instr)
 {
-   nir_ssa_def *def = &instr->def;
+   nir_def *def = &instr->def;
 
    /* early out if there's nothing to do. */
    if (def->num_components == 1)
@@ -349,7 +349,7 @@ opt_shrink_vectors_load_const(nir_load_const_instr *instr)
    if (!is_only_used_by_alu(def))
       return false;
 
-   unsigned mask = nir_ssa_def_components_read(def);
+   unsigned mask = nir_def_components_read(def);
 
    /* If nothing was read, leave it up to DCE. */
    if (!mask)
@@ -395,7 +395,7 @@ opt_shrink_vectors_load_const(nir_load_const_instr *instr)
 }
 
 static bool
-opt_shrink_vectors_ssa_undef(nir_ssa_undef_instr *instr)
+opt_shrink_vectors_ssa_undef(nir_undef_instr *instr)
 {
    return shrink_dest_to_read_mask(&instr->def);
 }
@@ -403,7 +403,7 @@ opt_shrink_vectors_ssa_undef(nir_ssa_undef_instr *instr)
 static bool
 opt_shrink_vectors_phi(nir_builder *b, nir_phi_instr *instr)
 {
-   nir_ssa_def *def = &instr->dest.ssa;
+   nir_def *def = &instr->dest.ssa;
 
    /* early out if there's nothing to do. */
    if (def->num_components == 1)
@@ -425,7 +425,7 @@ opt_shrink_vectors_phi(nir_builder *b, nir_phi_instr *instr)
       int src_idx = alu_src - &alu->src[0];
       nir_component_mask_t src_read_mask = nir_alu_instr_src_read_mask(alu, src_idx);
 
-      nir_ssa_def *alu_def = &alu->dest.dest.ssa;
+      nir_def *alu_def = &alu->dest.dest.ssa;
 
       /* We don't mark the channels used if the only reader is the original phi.
        * This can happen in the case of loops.
@@ -484,7 +484,7 @@ opt_shrink_vectors_phi(nir_builder *b, nir_phi_instr *instr)
 
       for (unsigned i = 0; i < num_components; i++)
          alu_src.swizzle[i] = src_reswizzle[i];
-      nir_ssa_def *mov = nir_mov_alu(b, alu_src, num_components);
+      nir_def *mov = nir_mov_alu(b, alu_src, num_components);
 
       nir_instr_rewrite_src_ssa(&instr->instr, &phi_src->src, mov);
    }
index 7fd50a8..da6bf4e 100644
@@ -136,7 +136,7 @@ adjust_block_for_loops(nir_block *use_block, nir_block *def_block,
  * the uses
  */
 static nir_block *
-get_preferred_block(nir_ssa_def *def, bool sink_out_of_loops)
+get_preferred_block(nir_def *def, bool sink_out_of_loops)
 {
    nir_block *lca = NULL;
 
@@ -211,7 +211,7 @@ nir_opt_sink(nir_shader *shader, nir_move_options options)
             if (!nir_can_move_instr(instr, options))
                continue;
 
-            nir_ssa_def *def = nir_instr_ssa_def(instr);
+            nir_def *def = nir_instr_ssa_def(instr);
 
             bool sink_out_of_loops =
                instr->type != nir_instr_type_intrinsic ||
index 1aec350..b65a004 100644
@@ -81,15 +81,15 @@ opt_undef_vecN(nir_builder *b, nir_alu_instr *alu)
    }
 
    b->cursor = nir_before_instr(&alu->instr);
-   nir_ssa_def *undef = nir_ssa_undef(b, alu->dest.dest.ssa.num_components,
-                                      nir_dest_bit_size(alu->dest.dest));
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, undef);
+   nir_def *undef = nir_undef(b, alu->dest.dest.ssa.num_components,
+                              nir_dest_bit_size(alu->dest.dest));
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, undef);
 
    return true;
 }
 
 static uint32_t
-nir_get_undef_mask(nir_ssa_def *def)
+nir_get_undef_mask(nir_def *def)
 {
    nir_instr *instr = def->parent_instr;
 
@@ -139,7 +139,7 @@ opt_undef_store(nir_intrinsic_instr *intrin)
       return false;
    }
 
-   nir_ssa_def *def = intrin->src[arg_index].ssa;
+   nir_def *def = intrin->src[arg_index].ssa;
 
    unsigned write_mask = nir_intrinsic_write_mask(intrin);
    unsigned undef_mask = nir_get_undef_mask(def);
@@ -171,8 +171,8 @@ opt_undef_pack(nir_builder *b, nir_alu_instr *alu)
    }
    unsigned num_components = nir_dest_num_components(alu->dest.dest);
    b->cursor = nir_before_instr(&alu->instr);
-   nir_ssa_def *def = nir_ssa_undef(b, num_components, 32);
-   nir_ssa_def_rewrite_uses_after(&alu->dest.dest.ssa, def, &alu->instr);
+   nir_def *def = nir_undef(b, num_components, 32);
+   nir_def_rewrite_uses_after(&alu->dest.dest.ssa, def, &alu->instr);
    nir_instr_remove(&alu->instr);
    return true;
 }
index 9ad2e6f..5d0abaa 100644
@@ -114,7 +114,7 @@ parse_atomic_op(nir_intrinsic_instr *intr, unsigned *offset_src,
 }
 
 static unsigned
-get_dim(nir_ssa_scalar scalar)
+get_dim(nir_scalar scalar)
 {
    if (!scalar.def->divergent)
       return 0;
@@ -131,11 +131,11 @@ get_dim(nir_ssa_scalar scalar)
          return 0x7;
       else if (intrin->intrinsic == nir_intrinsic_load_global_invocation_id)
          return 1 << scalar.comp;
-   } else if (nir_ssa_scalar_is_alu(scalar)) {
-      if (nir_ssa_scalar_alu_op(scalar) == nir_op_iadd ||
-          nir_ssa_scalar_alu_op(scalar) == nir_op_imul) {
-         nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
-         nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
+   } else if (nir_scalar_is_alu(scalar)) {
+      if (nir_scalar_alu_op(scalar) == nir_op_iadd ||
+          nir_scalar_alu_op(scalar) == nir_op_imul) {
+         nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
+         nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);
 
          unsigned src0_dim = get_dim(src0);
          if (!src0_dim && src0.def->divergent)
@@ -145,9 +145,9 @@ get_dim(nir_ssa_scalar scalar)
             return 0;
 
          return src0_dim | src1_dim;
-      } else if (nir_ssa_scalar_alu_op(scalar) == nir_op_ishl) {
-         nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
-         nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
+      } else if (nir_scalar_alu_op(scalar) == nir_op_ishl) {
+         nir_scalar src0 = nir_scalar_chase_alu_src(scalar, 0);
+         nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);
          return src1.def->divergent ? 0 : get_dim(src0);
       }
    }
@@ -159,17 +159,17 @@ get_dim(nir_ssa_scalar scalar)
  * uniform value.
  */
 static unsigned
-match_invocation_comparison(nir_ssa_scalar scalar)
+match_invocation_comparison(nir_scalar scalar)
 {
-   bool is_alu = nir_ssa_scalar_is_alu(scalar);
-   if (is_alu && nir_ssa_scalar_alu_op(scalar) == nir_op_iand) {
-      return match_invocation_comparison(nir_ssa_scalar_chase_alu_src(scalar, 0)) |
-             match_invocation_comparison(nir_ssa_scalar_chase_alu_src(scalar, 1));
-   } else if (is_alu && nir_ssa_scalar_alu_op(scalar) == nir_op_ieq) {
-      if (!nir_ssa_scalar_chase_alu_src(scalar, 0).def->divergent)
-         return get_dim(nir_ssa_scalar_chase_alu_src(scalar, 1));
-      if (!nir_ssa_scalar_chase_alu_src(scalar, 1).def->divergent)
-         return get_dim(nir_ssa_scalar_chase_alu_src(scalar, 0));
+   bool is_alu = nir_scalar_is_alu(scalar);
+   if (is_alu && nir_scalar_alu_op(scalar) == nir_op_iand) {
+      return match_invocation_comparison(nir_scalar_chase_alu_src(scalar, 0)) |
+             match_invocation_comparison(nir_scalar_chase_alu_src(scalar, 1));
+   } else if (is_alu && nir_scalar_alu_op(scalar) == nir_op_ieq) {
+      if (!nir_scalar_chase_alu_src(scalar, 0).def->divergent)
+         return get_dim(nir_scalar_chase_alu_src(scalar, 1));
+      if (!nir_scalar_chase_alu_src(scalar, 1).def->divergent)
+         return get_dim(nir_scalar_chase_alu_src(scalar, 0));
    } else if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
       if (intrin->intrinsic == nir_intrinsic_elect)
@@ -195,7 +195,7 @@ is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
          if (!within_then)
             continue;
 
-         nir_ssa_scalar cond = { nir_cf_node_as_if(cf)->condition.ssa, 0 };
+         nir_scalar cond = { nir_cf_node_as_if(cf)->condition.ssa, 0 };
          dims |= match_invocation_comparison(cond);
       }
    }
@@ -215,14 +215,14 @@ is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
 
 /* Perform a reduction and/or exclusive scan. */
 static void
-reduce_data(nir_builder *b, nir_op op, nir_ssa_def *data,
-            nir_ssa_def **reduce, nir_ssa_def **scan)
+reduce_data(nir_builder *b, nir_op op, nir_def *data,
+            nir_def **reduce, nir_def **scan)
 {
    if (scan) {
       *scan = nir_exclusive_scan(b, data, .reduction_op = op);
       if (reduce) {
-         nir_ssa_def *last_lane = nir_last_invocation(b);
-         nir_ssa_def *res = nir_build_alu(b, op, *scan, data, NULL, NULL);
+         nir_def *last_lane = nir_last_invocation(b);
+         nir_def *res = nir_build_alu(b, op, *scan, data, NULL, NULL);
          *reduce = nir_read_invocation(b, res, last_lane);
       }
    } else {
@@ -230,24 +230,24 @@ reduce_data(nir_builder *b, nir_op op, nir_ssa_def *data,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
 {
    unsigned offset_src = 0;
    unsigned data_src = 0;
    unsigned offset2_src = 0;
    nir_op op = parse_atomic_op(intrin, &offset_src, &data_src, &offset2_src);
-   nir_ssa_def *data = intrin->src[data_src].ssa;
+   nir_def *data = intrin->src[data_src].ssa;
 
    /* Separate uniform reduction and scan is faster than doing a combined scan+reduce */
    bool combined_scan_reduce = return_prev && data->divergent;
-   nir_ssa_def *reduce = NULL, *scan = NULL;
+   nir_def *reduce = NULL, *scan = NULL;
    reduce_data(b, op, data, &reduce, combined_scan_reduce ? &scan : NULL);
 
    nir_instr_rewrite_src(&intrin->instr, &intrin->src[data_src], nir_src_for_ssa(reduce));
    nir_update_instr_divergence(b->shader, &intrin->instr);
 
-   nir_ssa_def *cond = nir_elect(b, 1);
+   nir_def *cond = nir_elect(b, 1);
 
    nir_if *nif = nir_push_if(b, cond);
 
@@ -257,10 +257,10 @@ optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
    if (return_prev) {
       nir_push_else(b, nif);
 
-      nir_ssa_def *undef = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);
+      nir_def *undef = nir_undef(b, 1, intrin->dest.ssa.bit_size);
 
       nir_pop_if(b, nif);
-      nir_ssa_def *result = nir_if_phi(b, &intrin->dest.ssa, undef);
+      nir_def *result = nir_if_phi(b, &intrin->dest.ssa, undef);
       result = nir_read_first_invocation(b, result);
 
       if (!combined_scan_reduce)
@@ -278,23 +278,23 @@ optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin)
 {
    nir_if *helper_nif = NULL;
    if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
-      nir_ssa_def *helper = nir_is_helper_invocation(b, 1);
+      nir_def *helper = nir_is_helper_invocation(b, 1);
       helper_nif = nir_push_if(b, nir_inot(b, helper));
    }
 
    ASSERTED bool original_result_divergent = intrin->dest.ssa.divergent;
-   bool return_prev = !nir_ssa_def_is_unused(&intrin->dest.ssa);
+   bool return_prev = !nir_def_is_unused(&intrin->dest.ssa);
 
-   nir_ssa_def old_result = intrin->dest.ssa;
+   nir_def old_result = intrin->dest.ssa;
    list_replace(&intrin->dest.ssa.uses, &old_result.uses);
    nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1,
                      intrin->dest.ssa.bit_size);
 
-   nir_ssa_def *result = optimize_atomic(b, intrin, return_prev);
+   nir_def *result = optimize_atomic(b, intrin, return_prev);
 
    if (helper_nif) {
       nir_push_else(b, helper_nif);
-      nir_ssa_def *undef = result ? nir_ssa_undef(b, 1, result->bit_size) : NULL;
+      nir_def *undef = result ? nir_undef(b, 1, result->bit_size) : NULL;
       nir_pop_if(b, helper_nif);
       if (result)
          result = nir_if_phi(b, result, undef);
@@ -302,7 +302,7 @@ optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin)
 
    if (result) {
       assert(result->divergent == original_result_divergent);
-      nir_ssa_def_rewrite_uses(&old_result, result);
+      nir_def_rewrite_uses(&old_result, result);
    }
 }
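
The pass above reduces the per-lane atomic data to a single value, lets one elected lane perform the atomic, and broadcasts the result back. A trimmed sketch of that control-flow shape with the renamed builders (assumes a builder b; nir_imm_int stands in for the real atomic's result):

   nir_def *cond = nir_elect(b, 1);
   nir_if *nif = nir_push_if(b, cond);
   nir_def *then_val = nir_imm_int(b, 1);    /* stand-in for the atomic */
   nir_push_else(b, nif);
   nir_def *else_val = nir_undef(b, 1, 32);
   nir_pop_if(b, nif);
   nir_def *result = nir_if_phi(b, then_val, else_val);
   result = nir_read_first_invocation(b, result);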
 
index da6d28f..82ebb3c 100644 (file)
@@ -221,7 +221,7 @@ instr_try_combine(struct set *instr_set, nir_instr *instr1, nir_instr *instr2)
          for (unsigned j = 0; j < total_components; j++) {
             value[j].u64 = j < alu1_components ? c1[alu1->src[i].swizzle[j]].u64 : c2[alu2->src[i].swizzle[j - alu1_components]].u64;
          }
-         nir_ssa_def *def = nir_build_imm(&b, total_components, bit_size, value);
+         nir_def *def = nir_build_imm(&b, total_components, bit_size, value);
 
          new_alu->src[i].src = nir_src_for_ssa(def);
          for (unsigned j = 0; j < total_components; j++)
@@ -282,20 +282,20 @@ instr_try_combine(struct set *instr_set, nir_instr *instr1, nir_instr *instr2)
    /* update all other uses if there are any */
    unsigned swiz[NIR_MAX_VEC_COMPONENTS];
 
-   if (!nir_ssa_def_is_unused(&alu1->dest.dest.ssa)) {
+   if (!nir_def_is_unused(&alu1->dest.dest.ssa)) {
       for (unsigned i = 0; i < alu1_components; i++)
          swiz[i] = i;
-      nir_ssa_def *new_alu1 = nir_swizzle(&b, &new_alu->dest.dest.ssa, swiz,
-                                          alu1_components);
-      nir_ssa_def_rewrite_uses(&alu1->dest.dest.ssa, new_alu1);
+      nir_def *new_alu1 = nir_swizzle(&b, &new_alu->dest.dest.ssa, swiz,
+                                      alu1_components);
+      nir_def_rewrite_uses(&alu1->dest.dest.ssa, new_alu1);
    }
 
-   if (!nir_ssa_def_is_unused(&alu2->dest.dest.ssa)) {
+   if (!nir_def_is_unused(&alu2->dest.dest.ssa)) {
       for (unsigned i = 0; i < alu2_components; i++)
          swiz[i] = i + alu1_components;
-      nir_ssa_def *new_alu2 = nir_swizzle(&b, &new_alu->dest.dest.ssa, swiz,
-                                          alu2_components);
-      nir_ssa_def_rewrite_uses(&alu2->dest.dest.ssa, new_alu2);
+      nir_def *new_alu2 = nir_swizzle(&b, &new_alu->dest.dest.ssa, swiz,
+                                      alu2_components);
+      nir_def_rewrite_uses(&alu2->dest.dest.ssa, new_alu2);
    }
 
    nir_instr_remove(instr1);
index ff56842..4c37866 100644
@@ -106,7 +106,7 @@ copy_vars(nir_builder *b, nir_deref_instr *dst, nir_deref_instr *src)
          copy_vars(b, nir_build_deref_array_imm(b, dst, i), nir_build_deref_array_imm(b, src, i));
       }
    } else {
-      nir_ssa_def *load = nir_load_deref(b, src);
+      nir_def *load = nir_load_deref(b, src);
       nir_store_deref(b, dst, load, BITFIELD_MASK(load->num_components));
    }
 }
@@ -215,12 +215,12 @@ nir_create_passthrough_gs(const nir_shader_compiler_options *options,
    }
 
    nir_variable *edge_var = nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_EDGE);
-   nir_ssa_def *flat_interp_mask_def = nir_load_flat_mask(&b);
-   nir_ssa_def *last_pv_vert_def = nir_load_provoking_last(&b);
+   nir_def *flat_interp_mask_def = nir_load_flat_mask(&b);
+   nir_def *last_pv_vert_def = nir_load_provoking_last(&b);
    last_pv_vert_def = nir_ine_imm(&b, last_pv_vert_def, 0);
-   nir_ssa_def *start_vert_index = nir_imm_int(&b, start_vert);
-   nir_ssa_def *end_vert_index = nir_imm_int(&b, end_vert - 1);
-   nir_ssa_def *pv_vert_index = nir_bcsel(&b, last_pv_vert_def, end_vert_index, start_vert_index);
+   nir_def *start_vert_index = nir_imm_int(&b, start_vert);
+   nir_def *end_vert_index = nir_imm_int(&b, end_vert - 1);
+   nir_def *pv_vert_index = nir_bcsel(&b, last_pv_vert_def, end_vert_index, start_vert_index);
    for (unsigned i = start_vert; i < end_vert || needs_closing; i += vert_step) {
       int idx = i < end_vert ? i : start_vert;
       /* Copy inputs to outputs. */
@@ -229,7 +229,7 @@ nir_create_passthrough_gs(const nir_shader_compiler_options *options,
             continue;
          }
          /* no need to use copy_var to save a lower pass */
-         nir_ssa_def *index;
+         nir_def *index;
          if (in_vars[j]->data.location == VARYING_SLOT_POS || !handle_flat)
             index = nir_imm_int(&b, idx);
          else {
@@ -242,7 +242,7 @@ nir_create_passthrough_gs(const nir_shader_compiler_options *options,
       }
       nir_emit_vertex(&b, 0);
       if (emulate_edgeflags) {
-         nir_ssa_def *edge_value = nir_channel(&b, nir_load_array_var_imm(&b, edge_var, idx), 0);
+         nir_def *edge_value = nir_channel(&b, nir_load_array_var_imm(&b, edge_var, idx), 0);
          nir_if *edge_if = nir_push_if(&b, nir_fneu_imm(&b, edge_value, 1.0));
          nir_end_primitive(&b, 0);
          nir_pop_if(&b, edge_if);
index ec59418..baaf470 100644
@@ -50,7 +50,7 @@ nir_create_passthrough_tcs_impl(const nir_shader_compiler_options *options,
       nir_create_variable_with_location(b.shader, nir_var_shader_out,
                                         VARYING_SLOT_TESS_LEVEL_INNER, glsl_vec_type(2));
 
-   nir_ssa_def *inner = nir_load_var(&b, in_inner);
+   nir_def *inner = nir_load_var(&b, in_inner);
    nir_store_var(&b, out_inner, inner, 0x3);
 
    nir_variable *in_outer =
@@ -61,10 +61,10 @@ nir_create_passthrough_tcs_impl(const nir_shader_compiler_options *options,
       nir_create_variable_with_location(b.shader, nir_var_shader_out,
                                         VARYING_SLOT_TESS_LEVEL_OUTER, glsl_vec4_type());
 
-   nir_ssa_def *outer = nir_load_var(&b, in_outer);
+   nir_def *outer = nir_load_var(&b, in_outer);
    nir_store_var(&b, out_outer, outer, 0xf);
 
-   nir_ssa_def *id = nir_load_invocation_id(&b);
+   nir_def *id = nir_load_invocation_id(&b);
    for (unsigned i = 0; i < num_locations; i++) {
       const struct glsl_type *type;
       unsigned semantic = locations[i];
@@ -81,7 +81,7 @@ nir_create_passthrough_tcs_impl(const nir_shader_compiler_options *options,
                                                             semantic, type);
 
       /* no need to use copy_var to save a lower pass */
-      nir_ssa_def *value = nir_load_array_var(&b, in, id);
+      nir_def *value = nir_load_array_var(&b, in, id);
       nir_store_array_var(&b, out, id, value, 0xf);
    }
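
Stripped of the location bookkeeping, the passthrough TCS above (like the GS before it) leans on the same variable-copy idiom, which after the rename reads (assuming a builder b and matching vec4 in/out variables in_var and out_var):

   nir_def *v = nir_load_var(b, in_var);
   nir_store_var(b, out_var, v, 0xf);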
 
index af05635..b71c1a1 100644
@@ -43,7 +43,7 @@ struct nir_phi_builder {
    nir_block **W;
 };
 
-#define NEEDS_PHI ((nir_ssa_def *)(intptr_t)-1)
+#define NEEDS_PHI ((nir_def *)(intptr_t)-1)
 
 struct nir_phi_builder_value {
    struct exec_node node;
@@ -173,12 +173,12 @@ nir_phi_builder_add_value(struct nir_phi_builder *pb, unsigned num_components,
 
 void
 nir_phi_builder_value_set_block_def(struct nir_phi_builder_value *val,
-                                    nir_block *block, nir_ssa_def *def)
+                                    nir_block *block, nir_def *def)
 {
    _mesa_hash_table_insert(&val->ht, INDEX_TO_KEY(block->index), def);
 }
 
-nir_ssa_def *
+nir_def *
 nir_phi_builder_value_get_block_def(struct nir_phi_builder_value *val,
                                     nir_block *block)
 {
@@ -199,16 +199,16 @@ nir_phi_builder_value_get_block_def(struct nir_phi_builder_value *val,
    /* Exactly one of (he != NULL) and (dom == NULL) must be true. */
    assert((he != NULL) != (dom == NULL));
 
-   nir_ssa_def *def;
+   nir_def *def;
    if (dom == NULL) {
       /* No dominator means either that we crawled to the top without ever
        * finding a definition or that this block is unreachable.  In either
        * case, the value is undefined so we need an SSA undef.
        */
-      nir_ssa_undef_instr *undef =
-         nir_ssa_undef_instr_create(val->builder->shader,
-                                    val->num_components,
-                                    val->bit_size);
+      nir_undef_instr *undef =
+         nir_undef_instr_create(val->builder->shader,
+                                val->num_components,
+                                val->bit_size);
       nir_instr_insert(nir_before_cf_list(&val->builder->impl->body),
                        &undef->instr);
       def = &undef->def;
@@ -243,7 +243,7 @@ nir_phi_builder_value_get_block_def(struct nir_phi_builder_value *val,
        * phi node created by the case above or one passed to us through
        * nir_phi_builder_value_set_block_def().
        */
-      def = (struct nir_ssa_def *)he->data;
+      def = (struct nir_def *)he->data;
    }
 
    /* Walk the chain and stash the def in all of the applicable blocks.  We do
index c663d04..90fb383 100644
@@ -89,7 +89,7 @@ nir_phi_builder_add_value(struct nir_phi_builder *pb, unsigned num_components,
  */
 void
 nir_phi_builder_value_set_block_def(struct nir_phi_builder_value *val,
-                                    nir_block *block, nir_ssa_def *def);
+                                    nir_block *block, nir_def *def);
 
 /* Get the definition for the given value in the given block.
  *
@@ -97,13 +97,13 @@ nir_phi_builder_value_set_block_def(struct nir_phi_builder_value *val,
  * block.  If no definition is immediately available, it will crawl up the
  * dominance tree and insert phi nodes as needed until it finds one.  In the
  * case that no suitable definition is found, it will return the result of a
- * nir_ssa_undef_instr with the correct number of components.
+ * nir_undef_instr with the correct number of components.
  *
  * Because this function only uses the latest available information for any
  * given block, you must have already finished registering definitions for any
  * blocks that dominate the current block in order to get the correct result.
  */
-nir_ssa_def *
+nir_def *
 nir_phi_builder_value_get_block_def(struct nir_phi_builder_value *val,
                                     nir_block *block);
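
The comment block above is the full contract; for illustration, a hypothetical caller might look like this (the set/get calls are the API declared above, the create/add_value/finish plumbing is recalled from the same header and should be checked against it, and def_blocks marks which blocks write the value):

   #include "nir_phi_builder.h"

   static void
   thread_one_value(nir_function_impl *impl, BITSET_WORD *def_blocks)
   {
      struct nir_phi_builder *pb = nir_phi_builder_create(impl);
      struct nir_phi_builder_value *val =
         nir_phi_builder_add_value(pb, 1, 32, def_blocks);

      nir_foreach_block(block, impl) {
         /* after emitting a new def in this block:
          *    nir_phi_builder_value_set_block_def(val, block, def);
          * wherever the current value is needed: */
         nir_def *cur = nir_phi_builder_value_get_block_def(val, block);
         (void)cur;
      }

      nir_phi_builder_finish(pb);
   }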
 
index ae6f5f3..6f3de26 100644
@@ -55,7 +55,7 @@ typedef struct {
    /* an index used to make new non-conflicting names */
    unsigned index;
 
-   /* Used with nir_gather_ssa_types() to identify best representation
+   /* Used with nir_gather_types() to identify best representation
     * to print terse inline constant values together with SSA sources.
     * Updated per nir_function_impl being printed.
     */
@@ -117,7 +117,7 @@ count_digits(unsigned n)
 }
 
 static void
-print_ssa_def(nir_ssa_def *def, print_state *state)
+print_ssa_def(nir_def *def, print_state *state)
 {
    FILE *fp = state->fp;
 
@@ -386,7 +386,7 @@ print_load_const_instr(nir_load_const_instr *instr, print_state *state)
 }
 
 static void
-print_ssa_use(nir_ssa_def *def, print_state *state, nir_alu_type src_type)
+print_ssa_use(nir_def *def, print_state *state, nir_alu_type src_type)
 {
    FILE *fp = state->fp;
    fprintf(fp, "%%%u", def->index);
@@ -1790,7 +1790,7 @@ print_jump_instr(nir_jump_instr *instr, print_state *state)
 }
 
 static void
-print_ssa_undef_instr(nir_ssa_undef_instr *instr, print_state *state)
+print_ssa_undef_instr(nir_undef_instr *instr, print_state *state)
 {
    FILE *fp = state->fp;
    print_ssa_def(&instr->def, state);
@@ -2089,13 +2089,13 @@ print_function_impl(nir_function_impl *impl, print_state *state)
    }
 
    if (!NIR_DEBUG(PRINT_NO_INLINE_CONSTS)) {
-      /* Don't reindex the SSA as suggested by nir_gather_ssa_types() because
+      /* Don't reindex the SSA as suggested by nir_gather_types() because
        * nir_print don't modify the shader.  If needed, a limit for ssa_alloc
        * can be added.
        */
       state->float_types = calloc(BITSET_WORDS(impl->ssa_alloc), sizeof(BITSET_WORD));
       state->int_types = calloc(BITSET_WORDS(impl->ssa_alloc), sizeof(BITSET_WORD));
-      nir_gather_ssa_types(impl, state->float_types, state->int_types);
+      nir_gather_types(impl, state->float_types, state->int_types);
    }
 
    nir_foreach_function_temp_variable(var, impl) {
index ee6d0ab..cefdd83 100644
@@ -1393,7 +1393,7 @@ mul_clamp(uint32_t a, uint32_t b)
 
 /* recursively gather at most "buf_size" phi/bcsel sources */
 static unsigned
-search_phi_bcsel(nir_ssa_scalar scalar, nir_ssa_scalar *buf, unsigned buf_size, struct set *visited)
+search_phi_bcsel(nir_scalar scalar, nir_scalar *buf, unsigned buf_size, struct set *visited)
 {
    if (_mesa_set_search(visited, scalar.def))
       return 0;
@@ -1416,12 +1416,12 @@ search_phi_bcsel(nir_ssa_scalar scalar, nir_ssa_scalar *buf, unsigned buf_size,
       }
    }
 
-   if (nir_ssa_scalar_is_alu(scalar)) {
-      nir_op op = nir_ssa_scalar_alu_op(scalar);
+   if (nir_scalar_is_alu(scalar)) {
+      nir_op op = nir_scalar_alu_op(scalar);
 
       if ((op == nir_op_bcsel || op == nir_op_b32csel) && buf_size >= 2) {
-         nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
-         nir_ssa_scalar src2 = nir_ssa_scalar_chase_alu_src(scalar, 2);
+         nir_scalar src1 = nir_scalar_chase_alu_src(scalar, 1);
+         nir_scalar src2 = nir_scalar_chase_alu_src(scalar, 2);
 
          unsigned added = search_phi_bcsel(src1, buf, buf_size - 1, visited);
          buf_size -= added;
@@ -1497,11 +1497,11 @@ static const nir_unsigned_upper_bound_config default_ub_config = {
 
 struct uub_query {
    struct analysis_query head;
-   nir_ssa_scalar scalar;
+   nir_scalar scalar;
 };
 
 static void
-push_uub_query(struct analysis_state *state, nir_ssa_scalar scalar)
+push_uub_query(struct analysis_state *state, nir_scalar scalar)
 {
    struct uub_query *pushed_q = push_analysis_query(state, sizeof(struct uub_query));
    pushed_q->scalar = scalar;
@@ -1510,10 +1510,10 @@ push_uub_query(struct analysis_state *state, nir_ssa_scalar scalar)
 static uintptr_t
 get_uub_key(struct analysis_query *q)
 {
-   nir_ssa_scalar scalar = ((struct uub_query *)q)->scalar;
+   nir_scalar scalar = ((struct uub_query *)q)->scalar;
    /* keys can't be 0, so we have to add 1 to the index */
    unsigned shift_amount = ffs(NIR_MAX_VEC_COMPONENTS) - 1;
-   return nir_ssa_scalar_is_const(scalar)
+   return nir_scalar_is_const(scalar)
              ? 0
              : ((uintptr_t)(scalar.def->index + 1) << shift_amount) | scalar.comp;
 }
@@ -1698,7 +1698,7 @@ get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *re
 static void
 get_alu_uub(struct analysis_state *state, struct uub_query q, uint32_t *result, const uint32_t *src)
 {
-   nir_op op = nir_ssa_scalar_alu_op(q.scalar);
+   nir_op op = nir_scalar_alu_op(q.scalar);
 
    /* Early exit for unsupported ALU opcodes. */
    switch (op) {
@@ -1735,7 +1735,7 @@ get_alu_uub(struct analysis_state *state, struct uub_query q, uint32_t *result,
    case nir_op_u2u16:
    case nir_op_u2u32:
    case nir_op_f2u32:
-      if (nir_ssa_scalar_chase_alu_src(q.scalar, 0).def->bit_size > 32) {
+      if (nir_scalar_chase_alu_src(q.scalar, 0).def->bit_size > 32) {
          /* If src is >32 bits, return max */
          return;
       }
@@ -1746,7 +1746,7 @@ get_alu_uub(struct analysis_state *state, struct uub_query q, uint32_t *result,
 
    if (!q.head.pushed_queries) {
       for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
-         push_uub_query(state, nir_ssa_scalar_chase_alu_src(q.scalar, i));
+         push_uub_query(state, nir_scalar_chase_alu_src(q.scalar, i));
       return;
    }
 
@@ -1778,19 +1778,19 @@ get_alu_uub(struct analysis_state *state, struct uub_query q, uint32_t *result,
          *result = src[0] * src[1];
       break;
    case nir_op_ushr: {
-      nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(q.scalar, 1);
+      nir_scalar src1_scalar = nir_scalar_chase_alu_src(q.scalar, 1);
       uint32_t mask = q.scalar.def->bit_size - 1u;
-      if (nir_ssa_scalar_is_const(src1_scalar))
-         *result = src[0] >> (nir_ssa_scalar_as_uint(src1_scalar) & mask);
+      if (nir_scalar_is_const(src1_scalar))
+         *result = src[0] >> (nir_scalar_as_uint(src1_scalar) & mask);
       else
          *result = src[0];
       break;
    }
    case nir_op_ishr: {
-      nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(q.scalar, 1);
+      nir_scalar src1_scalar = nir_scalar_chase_alu_src(q.scalar, 1);
       uint32_t mask = q.scalar.def->bit_size - 1u;
-      if (src[0] <= 2147483647 && nir_ssa_scalar_is_const(src1_scalar))
-         *result = src[0] >> (nir_ssa_scalar_as_uint(src1_scalar) & mask);
+      if (src[0] <= 2147483647 && nir_scalar_is_const(src1_scalar))
+         *result = src[0] >> (nir_scalar_as_uint(src1_scalar) & mask);
       else
          *result = src[0];
       break;
@@ -1803,10 +1803,10 @@ get_alu_uub(struct analysis_state *state, struct uub_query q, uint32_t *result,
       *result = src[1] ? src[1] - 1 : 0;
       break;
    case nir_op_udiv: {
-      nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(q.scalar, 1);
-      if (nir_ssa_scalar_is_const(src1_scalar))
-         *result = nir_ssa_scalar_as_uint(src1_scalar)
-                      ? src[0] / nir_ssa_scalar_as_uint(src1_scalar)
+      nir_scalar src1_scalar = nir_scalar_chase_alu_src(q.scalar, 1);
+      if (nir_scalar_is_const(src1_scalar))
+         *result = nir_scalar_as_uint(src1_scalar)
+                      ? src[0] / nir_scalar_as_uint(src1_scalar)
                       : 0;
       else
          *result = src[0];
@@ -1820,10 +1820,10 @@ get_alu_uub(struct analysis_state *state, struct uub_query q, uint32_t *result,
       *result = bitmask(MIN2(src[2], q.scalar.def->bit_size));
       break;
    case nir_op_bfm: {
-      nir_ssa_scalar src1_scalar = nir_ssa_scalar_chase_alu_src(q.scalar, 1);
-      if (nir_ssa_scalar_is_const(src1_scalar)) {
+      nir_scalar src1_scalar = nir_scalar_chase_alu_src(q.scalar, 1);
+      if (nir_scalar_is_const(src1_scalar)) {
          uint32_t src0 = MIN2(src[0], 31);
-         uint32_t src1 = nir_ssa_scalar_as_uint(src1_scalar) & 0x1fu;
+         uint32_t src1 = nir_scalar_as_uint(src1_scalar) & 0x1fu;
          *result = bitmask(src0) << src1;
       } else {
          uint32_t src0 = MIN2(src[0], 31);
@@ -1906,7 +1906,7 @@ get_phi_uub(struct analysis_state *state, struct uub_query q, uint32_t *result,
       _mesa_hash_table_insert(state->range_ht, (void *)get_uub_key(&q.head), (void *)(uintptr_t)max);
 
       struct set *visited = _mesa_pointer_set_create(NULL);
-      nir_ssa_scalar *defs = alloca(sizeof(nir_ssa_scalar) * 64);
+      nir_scalar *defs = alloca(sizeof(nir_scalar) * 64);
       unsigned def_count = search_phi_bcsel(q.scalar, defs, 64, visited);
       _mesa_set_destroy(visited, NULL);
 
@@ -1925,11 +1925,11 @@ process_uub_query(struct analysis_state *state, struct analysis_query *aq, uint3
    struct uub_query q = *(struct uub_query *)aq;
 
    *result = bitmask(q.scalar.def->bit_size);
-   if (nir_ssa_scalar_is_const(q.scalar))
-      *result = nir_ssa_scalar_as_uint(q.scalar);
+   if (nir_scalar_is_const(q.scalar))
+      *result = nir_scalar_as_uint(q.scalar);
    else if (q.scalar.def->parent_instr->type == nir_instr_type_intrinsic)
       get_intrinsic_uub(state, q, result, src);
-   else if (nir_ssa_scalar_is_alu(q.scalar))
+   else if (nir_scalar_is_alu(q.scalar))
       get_alu_uub(state, q, result, src);
    else if (q.scalar.def->parent_instr->type == nir_instr_type_phi)
       get_phi_uub(state, q, result, src);
@@ -1937,7 +1937,7 @@ process_uub_query(struct analysis_state *state, struct analysis_query *aq, uint3
 
 uint32_t
 nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
-                         nir_ssa_scalar scalar,
+                         nir_scalar scalar,
                          const nir_unsigned_upper_bound_config *config)
 {
    if (!config)
@@ -1963,21 +1963,21 @@ nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
 
 bool
 nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
-                            nir_ssa_scalar ssa, unsigned const_val,
+                            nir_scalar ssa, unsigned const_val,
                             const nir_unsigned_upper_bound_config *config)
 {
-   if (nir_ssa_scalar_is_alu(ssa)) {
-      nir_op alu_op = nir_ssa_scalar_alu_op(ssa);
+   if (nir_scalar_is_alu(ssa)) {
+      nir_op alu_op = nir_scalar_alu_op(ssa);
 
       /* iadd(imul(a, #b), #c) */
       if (alu_op == nir_op_imul || alu_op == nir_op_ishl) {
-         nir_ssa_scalar mul_src0 = nir_ssa_scalar_chase_alu_src(ssa, 0);
-         nir_ssa_scalar mul_src1 = nir_ssa_scalar_chase_alu_src(ssa, 1);
+         nir_scalar mul_src0 = nir_scalar_chase_alu_src(ssa, 0);
+         nir_scalar mul_src1 = nir_scalar_chase_alu_src(ssa, 1);
          uint32_t stride = 1;
-         if (nir_ssa_scalar_is_const(mul_src0))
-            stride = nir_ssa_scalar_as_uint(mul_src0);
-         else if (nir_ssa_scalar_is_const(mul_src1))
-            stride = nir_ssa_scalar_as_uint(mul_src1);
+         if (nir_scalar_is_const(mul_src0))
+            stride = nir_scalar_as_uint(mul_src0);
+         else if (nir_scalar_is_const(mul_src1))
+            stride = nir_scalar_as_uint(mul_src1);
 
          if (alu_op == nir_op_ishl)
             stride = 1u << (stride % 32u);
@@ -1988,13 +1988,13 @@ nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
 
       /* iadd(iand(a, #b), #c) */
       if (alu_op == nir_op_iand) {
-         nir_ssa_scalar and_src0 = nir_ssa_scalar_chase_alu_src(ssa, 0);
-         nir_ssa_scalar and_src1 = nir_ssa_scalar_chase_alu_src(ssa, 1);
+         nir_scalar and_src0 = nir_scalar_chase_alu_src(ssa, 0);
+         nir_scalar and_src1 = nir_scalar_chase_alu_src(ssa, 1);
          uint32_t mask = 0xffffffff;
-         if (nir_ssa_scalar_is_const(and_src0))
-            mask = nir_ssa_scalar_as_uint(and_src0);
-         else if (nir_ssa_scalar_is_const(and_src1))
-            mask = nir_ssa_scalar_as_uint(and_src1);
+         if (nir_scalar_is_const(and_src0))
+            mask = nir_scalar_as_uint(and_src0);
+         else if (nir_scalar_is_const(and_src1))
+            mask = nir_scalar_as_uint(and_src1);
          if (mask == 0 || const_val < (1u << (ffs(mask) - 1)))
             return false;
       }
@@ -2005,7 +2005,7 @@ nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
 }
 
 static uint64_t
-ssa_def_bits_used(const nir_ssa_def *def, int recur)
+ssa_def_bits_used(const nir_def *def, int recur)
 {
    uint64_t bits_used = 0;
    uint64_t all_bits = BITFIELD64_MASK(def->bit_size);
@@ -2197,7 +2197,7 @@ ssa_def_bits_used(const nir_ssa_def *def, int recur)
 }
 
 uint64_t
-nir_ssa_def_bits_used(const nir_ssa_def *def)
+nir_def_bits_used(const nir_def *def)
 {
    return ssa_def_bits_used(def, 2);
 }
index dfb8f94..36ddfb5 100644
@@ -57,7 +57,7 @@ extern struct ssa_result_range
 nir_analyze_range(struct hash_table *range_ht,
                   const nir_alu_instr *instr, unsigned src);
 
-uint64_t nir_ssa_def_bits_used(const nir_ssa_def *def);
+uint64_t nir_def_bits_used(const nir_def *def);
 
 #ifdef __cplusplus
 }
index 31fdb67..6463a09 100644
@@ -67,7 +67,7 @@ get_src_block(nir_src *src)
 }
 
 static bool
-repair_ssa_def(nir_ssa_def *def, void *void_state)
+repair_ssa_def(nir_def *def, void *void_state)
 {
    struct repair_ssa_state *state = void_state;
 
@@ -103,7 +103,7 @@ repair_ssa_def(nir_ssa_def *def, void *void_state)
          continue;
       }
 
-      nir_ssa_def *block_def =
+      nir_def *block_def =
          nir_phi_builder_value_get_block_def(val, block);
       if (block_def == def)
          continue;
index 2be14b4..13c9c80 100644
@@ -38,26 +38,26 @@ nir_scale_fdiv_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
 
    b->cursor = nir_before_instr(&alu->instr);
 
-   nir_ssa_def *orig_a = nir_ssa_for_alu_src(b, alu, 0);
-   nir_ssa_def *orig_b = nir_ssa_for_alu_src(b, alu, 1);
-   nir_ssa_def *fabs = nir_fabs(b, orig_b);
-   nir_ssa_def *big = nir_fgt_imm(b, fabs, uif(0x7e800000));
-   nir_ssa_def *small = nir_flt_imm(b, fabs, uif(0x00800000));
+   nir_def *orig_a = nir_ssa_for_alu_src(b, alu, 0);
+   nir_def *orig_b = nir_ssa_for_alu_src(b, alu, 1);
+   nir_def *fabs = nir_fabs(b, orig_b);
+   nir_def *big = nir_fgt_imm(b, fabs, uif(0x7e800000));
+   nir_def *small = nir_flt_imm(b, fabs, uif(0x00800000));
 
-   nir_ssa_def *scaled_down_a = nir_fmul_imm(b, orig_a, 0.25);
-   nir_ssa_def *scaled_down_b = nir_fmul_imm(b, orig_b, 0.25);
-   nir_ssa_def *scaled_up_a = nir_fmul_imm(b, orig_a, 16777216.0);
-   nir_ssa_def *scaled_up_b = nir_fmul_imm(b, orig_b, 16777216.0);
+   nir_def *scaled_down_a = nir_fmul_imm(b, orig_a, 0.25);
+   nir_def *scaled_down_b = nir_fmul_imm(b, orig_b, 0.25);
+   nir_def *scaled_up_a = nir_fmul_imm(b, orig_a, 16777216.0);
+   nir_def *scaled_up_b = nir_fmul_imm(b, orig_b, 16777216.0);
 
-   nir_ssa_def *final_a =
+   nir_def *final_a =
       nir_bcsel(b, big, scaled_down_a,
                 (nir_bcsel(b, small, scaled_up_a, orig_a)));
-   nir_ssa_def *final_b =
+   nir_def *final_b =
       nir_bcsel(b, big, scaled_down_b,
                 (nir_bcsel(b, small, scaled_up_b, orig_b)));
 
-   nir_ssa_def *new_div = nir_fdiv(b, final_a, final_b);
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, new_div);
+   nir_def *new_div = nir_fdiv(b, final_a, final_b);
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, new_div);
 
    return true;
 }
index ddb50cd..8a00d03 100644
@@ -87,7 +87,7 @@ typedef struct {
 
    nir_shader *shader;
 
-   /* Mapping from nir_ssa_def * to a struct set of
+   /* Mapping from nir_def * to a struct set of
     * instructions remaining to be scheduled using the register.
     */
    struct hash_table *remaining_uses;
@@ -95,7 +95,7 @@ typedef struct {
    /* Map from nir_instr to nir_schedule_node * */
    struct hash_table *instr_map;
 
-   /* Set of nir_ssa_def * that have had any instruction scheduled on them. */
+   /* Set of nir_def * that have had any instruction scheduled on them. */
    struct set *live_values;
 
    /* An abstract approximation of the number of nir_scheduler_node->delay
@@ -160,7 +160,7 @@ nir_schedule_get_node(struct hash_table *instr_map, nir_instr *instr)
 
 static struct set *
 nir_schedule_scoreboard_get_reg(nir_schedule_scoreboard *scoreboard,
-                                nir_ssa_def *reg)
+                                nir_def *reg)
 {
    return _mesa_hash_table_search_data(scoreboard->remaining_uses, reg);
 }
@@ -172,14 +172,14 @@ nir_schedule_scoreboard_get_src(nir_schedule_scoreboard *scoreboard, nir_src *sr
 }
 
 static int
-nir_schedule_reg_pressure(nir_ssa_def *reg)
+nir_schedule_reg_pressure(nir_def *reg)
 {
    nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
    return nir_intrinsic_num_components(decl);
 }
 
 static int
-nir_schedule_def_pressure(nir_ssa_def *def)
+nir_schedule_def_pressure(nir_def *def)
 {
    return def->num_components;
 }
@@ -234,7 +234,7 @@ static void
 nir_schedule_load_reg_deps(nir_intrinsic_instr *load,
                            nir_deps_state *state)
 {
-   nir_ssa_def *reg = load->src[0].ssa;
+   nir_def *reg = load->src[0].ssa;
    (void)nir_reg_get_decl(reg);
 
    struct hash_entry *entry = _mesa_hash_table_search(state->reg_map, reg);
@@ -252,7 +252,7 @@ static void
 nir_schedule_store_reg_deps(nir_intrinsic_instr *store,
                             nir_deps_state *state)
 {
-   nir_ssa_def *reg = store->src[1].ssa;
+   nir_def *reg = store->src[1].ssa;
    (void)nir_reg_get_decl(reg);
 
    nir_schedule_node *dest_n =
@@ -269,7 +269,7 @@ nir_schedule_store_reg_deps(nir_intrinsic_instr *store,
 }
 
 static bool
-nir_schedule_ssa_deps(nir_ssa_def *def, void *in_state)
+nir_schedule_ssa_deps(nir_def *def, void *in_state)
 {
    nir_deps_state *state = in_state;
    struct hash_table *instr_map = state->scoreboard->instr_map;
@@ -538,7 +538,7 @@ nir_schedule_regs_freed_src_cb(nir_src *src, void *in_state)
 }
 
 static bool
-nir_schedule_regs_freed_def_cb(nir_ssa_def *def, void *in_state)
+nir_schedule_regs_freed_def_cb(nir_def *def, void *in_state)
 {
    nir_schedule_regs_freed_state *state = in_state;
 
@@ -557,7 +557,7 @@ nir_schedule_regs_freed_load_reg(nir_intrinsic_instr *load,
       nir_schedule_regs_freed_src_cb(&load->src[1], state);
 
    nir_schedule_scoreboard *scoreboard = state->scoreboard;
-   nir_ssa_def *reg = load->src[0].ssa;
+   nir_def *reg = load->src[0].ssa;
    struct set *remaining_uses = nir_schedule_scoreboard_get_reg(scoreboard, reg);
 
    if (remaining_uses->entries == 1 &&
@@ -579,7 +579,7 @@ nir_schedule_regs_freed_store_reg(nir_intrinsic_instr *store,
       nir_schedule_regs_freed_src_cb(&store->src[2], state);
 
    nir_schedule_scoreboard *scoreboard = state->scoreboard;
-   nir_ssa_def *reg = store->src[1].ssa;
+   nir_def *reg = store->src[1].ssa;
 
    /* Only the first def of a reg counts against register pressure. */
    if (!_mesa_set_search(scoreboard->live_values, reg))
@@ -930,7 +930,7 @@ nir_schedule_mark_src_scheduled(nir_src *src, void *state)
 }
 
 static bool
-nir_schedule_mark_def_scheduled(nir_ssa_def *def, void *state)
+nir_schedule_mark_def_scheduled(nir_def *def, void *state)
 {
    nir_schedule_scoreboard *scoreboard = state;
 
@@ -945,7 +945,7 @@ nir_schedule_mark_load_reg_scheduled(nir_intrinsic_instr *load,
                                      nir_schedule_scoreboard *scoreboard)
 {
    assert(nir_is_load_reg(load));
-   nir_ssa_def *reg = load->src[0].ssa;
+   nir_def *reg = load->src[0].ssa;
 
    if (load->intrinsic == nir_intrinsic_load_reg_indirect)
       nir_schedule_mark_src_scheduled(&load->src[1], scoreboard);
@@ -961,7 +961,7 @@ nir_schedule_mark_store_reg_scheduled(nir_intrinsic_instr *store,
                                       nir_schedule_scoreboard *scoreboard)
 {
    assert(nir_is_store_reg(store));
-   nir_ssa_def *reg = store->src[1].ssa;
+   nir_def *reg = store->src[1].ssa;
 
    nir_schedule_mark_src_scheduled(&store->src[0], scoreboard);
    if (store->intrinsic == nir_intrinsic_store_reg_indirect)
@@ -1167,7 +1167,7 @@ is_decl_reg(nir_instr *instr)
 }
 
 static bool
-nir_schedule_ssa_def_init_scoreboard(nir_ssa_def *def, void *state)
+nir_schedule_ssa_def_init_scoreboard(nir_def *def, void *state)
 {
    nir_schedule_scoreboard *scoreboard = state;
    struct set *def_uses = _mesa_pointer_set_create(scoreboard);
index 3c0b512..b8eae0e 100644
@@ -505,7 +505,7 @@ construct_value(nir_builder *build,
       const nir_search_constant *c = nir_search_value_as_constant(value);
       unsigned bit_size = replace_bitsize(value, search_bitsize, state);
 
-      nir_ssa_def *cval;
+      nir_def *cval;
       switch (c->type) {
       case nir_type_float:
          cval = nir_imm_floatN_t(build, c->data.d, bit_size);
@@ -619,7 +619,7 @@ add_uses_to_worklist(nir_instr *instr,
                      struct util_dynarray *states,
                      const struct per_op_table *pass_op_table)
 {
-   nir_ssa_def *def = nir_instr_ssa_def(instr);
+   nir_def *def = nir_instr_ssa_def(instr);
 
    nir_foreach_use_safe(use_src, def) {
       if (nir_algebraic_automaton(use_src->parent_instr, states, pass_op_table))
@@ -650,7 +650,7 @@ nir_algebraic_update_automaton(nir_instr *new_instr,
    nir_instr_worklist_destroy(automaton_worklist);
 }
 
-static nir_ssa_def *
+static nir_def *
 nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
                   struct hash_table *range_ht,
                   struct util_dynarray *states,
@@ -749,7 +749,7 @@ nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
    /* Note that NIR builder will elide the MOV if it's a no-op, which may
     * allow more work to be done in a single pass through algebraic.
     */
-   nir_ssa_def *ssa_val =
+   nir_def *ssa_val =
       nir_mov_alu(build, val, instr->dest.dest.ssa.num_components);
    if (ssa_val->index == util_dynarray_num_elements(states, uint16_t)) {
       util_dynarray_append(states, uint16_t, 0);
@@ -759,7 +759,7 @@ nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
    /* Rewrite the uses of the old SSA value to the new one, and recurse
     * through the uses updating the automaton's state.
     */
-   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, ssa_val);
+   nir_def_rewrite_uses(&instr->dest.dest.ssa, ssa_val);
    nir_algebraic_update_automaton(ssa_val->parent_instr, algebraic_worklist,
                                   states, table->pass_op_table);
 
index c30340f..03439a0 100644
@@ -408,7 +408,7 @@ is_used_once(const nir_alu_instr *instr)
 static inline bool
 is_used_by_if(const nir_alu_instr *instr)
 {
-   return nir_ssa_def_used_by_if(&instr->dest.dest.ssa);
+   return nir_def_used_by_if(&instr->dest.dest.ssa);
 }
 
 static inline bool
@@ -481,13 +481,13 @@ is_only_used_by_fadd(const nir_alu_instr *instr)
 static inline bool
 only_lower_8_bits_used(const nir_alu_instr *instr)
 {
-   return (nir_ssa_def_bits_used(&instr->dest.dest.ssa) & ~0xffull) == 0;
+   return (nir_def_bits_used(&instr->dest.dest.ssa) & ~0xffull) == 0;
 }
 
 static inline bool
 only_lower_16_bits_used(const nir_alu_instr *instr)
 {
-   return (nir_ssa_def_bits_used(&instr->dest.dest.ssa) & ~0xffffull) == 0;
+   return (nir_def_bits_used(&instr->dest.dest.ssa) & ~0xffffull) == 0;
 }
 
 /**
index 3630794..c1b4f9a 100644
@@ -32,7 +32,7 @@
 
 typedef struct {
    size_t blob_offset;
-   nir_ssa_def *src;
+   nir_def *src;
    nir_block *block;
 } write_phi_fixup;
 
@@ -1320,7 +1320,7 @@ read_load_const(read_ctx *ctx, union packed_instr header)
 }
 
 static void
-write_ssa_undef(write_ctx *ctx, const nir_ssa_undef_instr *undef)
+write_ssa_undef(write_ctx *ctx, const nir_undef_instr *undef)
 {
    assert(undef->def.num_components >= 1 && undef->def.num_components <= 16);
 
@@ -1335,12 +1335,12 @@ write_ssa_undef(write_ctx *ctx, const nir_ssa_undef_instr *undef)
    write_add_object(ctx, &undef->def);
 }
 
-static nir_ssa_undef_instr *
+static nir_undef_instr *
 read_ssa_undef(read_ctx *ctx, union packed_instr header)
 {
-   nir_ssa_undef_instr *undef =
-      nir_ssa_undef_instr_create(ctx->nir, header.undef.last_component + 1,
-                                 decode_bit_size_3bits(header.undef.bit_size));
+   nir_undef_instr *undef =
+      nir_undef_instr_create(ctx->nir, header.undef.last_component + 1,
+                             decode_bit_size_3bits(header.undef.bit_size));
 
    undef->def.divergent = false;
 
@@ -1510,7 +1510,7 @@ read_phi(read_ctx *ctx, nir_block *blk, union packed_instr header)
    nir_instr_insert_after_block(blk, &phi->instr);
 
    for (unsigned i = 0; i < header.phi.num_srcs; i++) {
-      nir_ssa_def *def = (nir_ssa_def *)(uintptr_t)blob_read_uint32(ctx->blob);
+      nir_def *def = (nir_def *)(uintptr_t)blob_read_uint32(ctx->blob);
       nir_block *pred = (nir_block *)(uintptr_t)blob_read_uint32(ctx->blob);
       nir_phi_src *src = nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(def));
 
index 973aa4e..5d997f0 100644
@@ -84,9 +84,9 @@ typedef struct {
    nir_variable *zw;
 } variable_pair;
 
-static nir_ssa_def *
-merge_to_vec3_or_vec4(nir_builder *b, nir_ssa_def *load1,
-                      nir_ssa_def *load2)
+static nir_def *
+merge_to_vec3_or_vec4(nir_builder *b, nir_def *load1,
+                      nir_def *load2)
 {
    assert(load2->num_components > 0 && load2->num_components < 3);
 
@@ -101,17 +101,17 @@ merge_to_vec3_or_vec4(nir_builder *b, nir_ssa_def *load1,
                       nir_channel(b, load2, 1));
 }
 
-static nir_ssa_def *
+static nir_def *
 get_linear_array_offset(nir_builder *b, nir_deref_instr *deref)
 {
    nir_deref_path path;
    nir_deref_path_init(&path, deref, NULL);
 
-   nir_ssa_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size);
+   nir_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size);
    for (nir_deref_instr **p = &path.path[1]; *p; p++) {
       switch ((*p)->deref_type) {
       case nir_deref_type_array: {
-         nir_ssa_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+         nir_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
          int stride = glsl_array_size((*p)->type);
          if (stride >= 0)
             offset = nir_iadd(b, offset, nir_amul_imm(b, index, stride));
@@ -163,9 +163,9 @@ get_var_pair(nir_builder *b, nir_variable *old_var,
    return new_var;
 }
 
-static nir_ssa_def *
+static nir_def *
 split_load_deref(nir_builder *b, nir_intrinsic_instr *intr,
-                 nir_ssa_def *offset, struct hash_table *split_vars)
+                 nir_def *offset, struct hash_table *split_vars)
 {
    nir_variable *old_var = nir_intrinsic_get_var(intr, 0);
    unsigned old_components = glsl_get_components(
@@ -181,15 +181,15 @@ split_load_deref(nir_builder *b, nir_intrinsic_instr *intr,
       deref2 = nir_build_deref_array(b, deref2, offset);
    }
 
-   nir_ssa_def *load1 = nir_build_load_deref(b, 2, 64, &deref1->dest.ssa, 0);
-   nir_ssa_def *load2 = nir_build_load_deref(b, old_components - 2, 64, &deref2->dest.ssa, 0);
+   nir_def *load1 = nir_build_load_deref(b, 2, 64, &deref1->dest.ssa, 0);
+   nir_def *load2 = nir_build_load_deref(b, old_components - 2, 64, &deref2->dest.ssa, 0);
 
    return merge_to_vec3_or_vec4(b, load1, load2);
 }
 
-static nir_ssa_def *
+static nir_def *
 split_store_deref(nir_builder *b, nir_intrinsic_instr *intr,
-                  nir_ssa_def *offset, struct hash_table *split_vars)
+                  nir_def *offset, struct hash_table *split_vars)
 {
    nir_variable *old_var = nir_intrinsic_get_var(intr, 0);
 
@@ -205,21 +205,21 @@ split_store_deref(nir_builder *b, nir_intrinsic_instr *intr,
 
    int write_mask_xy = nir_intrinsic_write_mask(intr) & 3;
    if (write_mask_xy) {
-      nir_ssa_def *src_xy = nir_trim_vector(b, intr->src[1].ssa, 2);
+      nir_def *src_xy = nir_trim_vector(b, intr->src[1].ssa, 2);
       nir_build_store_deref(b, &deref_xy->dest.ssa, src_xy, write_mask_xy);
    }
 
    int write_mask_zw = nir_intrinsic_write_mask(intr) & 0xc;
    if (write_mask_zw) {
-      nir_ssa_def *src_zw = nir_channels(b, intr->src[1].ssa,
-                                         nir_component_mask(intr->src[1].ssa->num_components) & 0xc);
+      nir_def *src_zw = nir_channels(b, intr->src[1].ssa,
+                                     nir_component_mask(intr->src[1].ssa->num_components) & 0xc);
       nir_build_store_deref(b, &deref_zw->dest.ssa, src_zw, write_mask_zw >> 2);
    }
 
    return NIR_LOWER_INSTR_PROGRESS_REPLACE;
 }
 
-static nir_ssa_def *
+static nir_def *
 split_phi(nir_builder *b, nir_phi_instr *phi)
 {
    nir_op vec_op = nir_op_vec(phi->dest.ssa.num_components);
@@ -247,8 +247,8 @@ split_phi(nir_builder *b, nir_phi_instr *phi)
          else
             b->cursor = nir_after_block(src->pred);
 
-         nir_ssa_def *new_src = nir_channels(b, src->src.ssa,
-                                             ((1 << num_comp[i]) - 1) << (2 * i));
+         nir_def *new_src = nir_channels(b, src->src.ssa,
+                                         ((1 << num_comp[i]) - 1) << (2 * i));
 
          nir_phi_instr_add_src(new_phi[i], src->pred, nir_src_for_ssa(new_src));
       }
@@ -259,7 +259,7 @@ split_phi(nir_builder *b, nir_phi_instr *phi)
    return merge_to_vec3_or_vec4(b, &new_phi[0]->dest.ssa, &new_phi[1]->dest.ssa);
 };
 
-static nir_ssa_def *
+static nir_def *
 nir_split_64bit_vec3_and_vec4_impl(nir_builder *b, nir_instr *instr, void *d)
 {
    struct hash_table *split_vars = (struct hash_table *)d;
index d761a51..f65ca2f 100644
@@ -149,8 +149,8 @@ rewrite_deref_instr(nir_builder *b, nir_instr *instr, void *cb_data)
    b->cursor = nir_before_instr(&deref->instr);
    nir_deref_instr *member_deref =
       build_member_deref(b, nir_deref_instr_parent(deref), member);
-   nir_ssa_def_rewrite_uses(&deref->dest.ssa,
-                            &member_deref->dest.ssa);
+   nir_def_rewrite_uses(&deref->dest.ssa,
+                        &member_deref->dest.ssa);
 
    /* The referenced variable is no longer valid, clean up the deref */
    nir_deref_instr_remove_if_unused(deref);
index 73d5c41..02a3776 100644
@@ -312,8 +312,8 @@ split_struct_derefs_impl(nir_function_impl *impl,
          }
 
          assert(new_deref->type == deref->type);
-         nir_ssa_def_rewrite_uses(&deref->dest.ssa,
-                                  &new_deref->dest.ssa);
+         nir_def_rewrite_uses(&deref->dest.ssa,
+                              &new_deref->dest.ssa);
          nir_deref_instr_remove_if_unused(deref);
       }
    }
@@ -833,11 +833,11 @@ split_array_access_impl(nir_function_impl *impl,
                 * garbage in the destination alone.
                 */
                if (intrin->intrinsic == nir_intrinsic_load_deref) {
-                  nir_ssa_def *u =
-                     nir_ssa_undef(&b, intrin->dest.ssa.num_components,
-                                   intrin->dest.ssa.bit_size);
-                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                           u);
+                  nir_def *u =
+                     nir_undef(&b, intrin->dest.ssa.num_components,
+                               intrin->dest.ssa.bit_size);
+                  nir_def_rewrite_uses(&intrin->dest.ssa,
+                                       u);
                }
                nir_instr_remove(&intrin->instr);
                for (unsigned i = 0; i < num_derefs; i++)
@@ -1242,7 +1242,7 @@ find_used_components_impl(nir_function_impl *impl,
          switch (intrin->intrinsic) {
          case nir_intrinsic_load_deref:
             mark_deref_used(nir_src_as_deref(intrin->src[0]),
-                            nir_ssa_def_components_read(&intrin->dest.ssa), 0,
+                            nir_def_components_read(&intrin->dest.ssa), 0,
                             NULL, var_usage_map, modes, mem_ctx);
             break;
 
@@ -1551,11 +1551,11 @@ shrink_vec_var_access_impl(nir_function_impl *impl,
 
             if (usage->comps_kept == 0 || vec_deref_is_oob(deref, usage)) {
                if (intrin->intrinsic == nir_intrinsic_load_deref) {
-                  nir_ssa_def *u =
-                     nir_ssa_undef(&b, intrin->dest.ssa.num_components,
-                                   intrin->dest.ssa.bit_size);
-                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                           u);
+                  nir_def *u =
+                     nir_undef(&b, intrin->dest.ssa.num_components,
+                               intrin->dest.ssa.bit_size);
+                  nir_def_rewrite_uses(&intrin->dest.ssa,
+                                       u);
                }
                nir_instr_remove(&intrin->instr);
                nir_deref_instr_remove_if_unused(deref);
@@ -1571,9 +1571,9 @@ shrink_vec_var_access_impl(nir_function_impl *impl,
             if (intrin->intrinsic == nir_intrinsic_load_deref) {
                b.cursor = nir_after_instr(&intrin->instr);
 
-               nir_ssa_def *undef =
-                  nir_ssa_undef(&b, 1, intrin->dest.ssa.bit_size);
-               nir_ssa_def *vec_srcs[NIR_MAX_VEC_COMPONENTS];
+               nir_def *undef =
+                  nir_undef(&b, 1, intrin->dest.ssa.bit_size);
+               nir_def *vec_srcs[NIR_MAX_VEC_COMPONENTS];
                unsigned c = 0;
                for (unsigned i = 0; i < intrin->num_components; i++) {
                   if (usage->comps_kept & (1u << i))
@@ -1581,11 +1581,11 @@ shrink_vec_var_access_impl(nir_function_impl *impl,
                   else
                      vec_srcs[i] = undef;
                }
-               nir_ssa_def *vec = nir_vec(&b, vec_srcs, intrin->num_components);
+               nir_def *vec = nir_vec(&b, vec_srcs, intrin->num_components);
 
-               nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
-                                              vec,
-                                              vec->parent_instr);
+               nir_def_rewrite_uses_after(&intrin->dest.ssa,
+                                          vec,
+                                          vec->parent_instr);
 
                /* The SSA def is now only used by the swizzle.  It's safe to
                 * shrink the number of components.
@@ -1611,7 +1611,7 @@ shrink_vec_var_access_impl(nir_function_impl *impl,
 
                b.cursor = nir_before_instr(&intrin->instr);
 
-               nir_ssa_def *swizzled =
+               nir_def *swizzled =
                   nir_swizzle(&b, intrin->src[1].ssa, swizzle, c);
 
                /* Rewrite to use the compacted source */
index d6805f0..4ea16da 100644
@@ -90,7 +90,7 @@ is_use_inside_loop(nir_src *use, nir_loop *loop)
 }
 
 static bool
-is_defined_before_loop(nir_ssa_def *def, nir_loop *loop)
+is_defined_before_loop(nir_def *def, nir_loop *loop)
 {
    nir_instr *instr = def->parent_instr;
    nir_block *block_before_loop =
@@ -109,7 +109,7 @@ static instr_invariance
 instr_is_invariant(nir_instr *instr, nir_loop *loop);
 
 static bool
-def_is_invariant(nir_ssa_def *def, nir_loop *loop)
+def_is_invariant(nir_def *def, nir_loop *loop)
 {
    if (is_defined_before_loop(def, loop))
       return invariant;
@@ -188,7 +188,7 @@ instr_is_invariant(nir_instr *instr, nir_loop *loop)
 }
 
 static bool
-convert_loop_exit_for_ssa(nir_ssa_def *def, void *void_state)
+convert_loop_exit_for_ssa(nir_def *def, void *void_state)
 {
    lcssa_state *state = void_state;
    bool all_uses_inside_loop = true;
@@ -237,7 +237,7 @@ convert_loop_exit_for_ssa(nir_ssa_def *def, void *void_state)
    }
 
    nir_instr_insert_before_block(state->block_after_loop, &phi->instr);
-   nir_ssa_def *dest = &phi->dest.ssa;
+   nir_def *dest = &phi->dest.ssa;
 
    /* deref instructions need a cast after the phi */
    if (def->parent_instr->type == nir_instr_type_deref) {
index 278aa1d..c18d323 100644
@@ -99,9 +99,9 @@ trivialize_load(nir_intrinsic_instr *load)
    assert(nir_is_load_reg(load));
 
    nir_builder b = nir_builder_at(nir_after_instr(&load->instr));
-   nir_ssa_def *copy = nir_mov(&b, &load->dest.ssa);
+   nir_def *copy = nir_mov(&b, &load->dest.ssa);
    copy->divergent = load->dest.ssa.divergent;
-   nir_ssa_def_rewrite_uses_after(&load->dest.ssa, copy, copy->parent_instr);
+   nir_def_rewrite_uses_after(&load->dest.ssa, copy, copy->parent_instr);
 
    assert(list_is_singular(&load->dest.ssa.uses));
 }
@@ -193,7 +193,7 @@ isolate_store(nir_intrinsic_instr *store)
    assert(nir_is_store_reg(store));
 
    nir_builder b = nir_builder_at(nir_before_instr(&store->instr));
-   nir_ssa_def *copy = nir_mov(&b, store->src[0].ssa);
+   nir_def *copy = nir_mov(&b, store->src[0].ssa);
    copy->divergent = store->src[0].ssa->divergent;
    nir_instr_rewrite_src_ssa(&store->instr, &store->src[0], copy);
 }
@@ -212,7 +212,7 @@ clear_store(nir_intrinsic_instr *store,
 }
 
 static void
-clear_reg_stores(nir_ssa_def *reg,
+clear_reg_stores(nir_def *reg,
                  struct hash_table *possibly_trivial_stores)
 {
    /* At any given point in store trivialize pass, every store in the current
@@ -239,7 +239,7 @@ static void
 trivialize_store(nir_intrinsic_instr *store,
                  struct hash_table *possibly_trivial_stores)
 {
-   nir_ssa_def *reg = store->src[1].ssa;
+   nir_def *reg = store->src[1].ssa;
 
    /* At any given point in store trivialize pass, every store in the current
     * block is either trivial or in the possibly_trivial_stores map.
@@ -269,7 +269,7 @@ trivialize_store(nir_intrinsic_instr *store,
 }
 
 static void
-trivialize_reg_stores(nir_ssa_def *reg, nir_component_mask_t mask,
+trivialize_reg_stores(nir_def *reg, nir_component_mask_t mask,
                       struct hash_table *possibly_trivial_stores)
 {
    /* At any given point in store trivialize pass, every store in the current
@@ -310,7 +310,7 @@ trivialize_read_after_write(nir_intrinsic_instr *load,
 }
 
 static bool
-clear_def(nir_ssa_def *def, void *state)
+clear_def(nir_def *def, void *state)
 {
    struct hash_table *possibly_trivial_stores = state;
 
@@ -395,8 +395,8 @@ trivialize_stores(nir_function_impl *impl, nir_block *block)
             /* Read-after-write: there is a load between the def and store. */
             trivialize_read_after_write(intr, possibly_trivial_stores);
          } else if (nir_is_store_reg(intr)) {
-            nir_ssa_def *value = intr->src[0].ssa;
-            nir_ssa_def *reg = intr->src[1].ssa;
+            nir_def *value = intr->src[0].ssa;
+            nir_def *reg = intr->src[1].ssa;
             nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
             unsigned num_components = nir_intrinsic_num_components(decl);
             nir_component_mask_t write_mask = nir_intrinsic_write_mask(intr);
index 786a155..65106d6 100644
@@ -180,7 +180,7 @@ validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
 }
 
 static void
-validate_ssa_def(nir_ssa_def *def, validate_state *state)
+validate_ssa_def(nir_def *def, validate_state *state)
 {
    validate_assert(state, def->index < state->impl->ssa_alloc);
    validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
@@ -460,7 +460,7 @@ validate_register_handle(nir_src handle_src,
                          unsigned bit_size,
                          validate_state *state)
 {
-   nir_ssa_def *handle = handle_src.ssa;
+   nir_def *handle = handle_src.ssa;
    nir_instr *parent = handle->parent_instr;
 
    if (!validate_assert(state, parent->type == nir_instr_type_intrinsic))
@@ -923,7 +923,7 @@ validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
 }
 
 static void
-validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
+validate_ssa_undef_instr(nir_undef_instr *instr, validate_state *state)
 {
    validate_ssa_def(&instr->def, state);
 }
@@ -1439,7 +1439,7 @@ validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
 }
 
 static bool
-validate_ssa_def_dominance(nir_ssa_def *def, void *_state)
+validate_ssa_def_dominance(nir_def *def, void *_state)
 {
    validate_state *state = _state;
 
index 649a689..a616b17 100644
@@ -32,8 +32,8 @@ protected:
 
    virtual void run_pass()=0;
 
-   void test_op(nir_op op, nir_ssa_def *src0, nir_ssa_def *src1, nir_ssa_def *src2,
-                nir_ssa_def *src3, const char *desc);
+   void test_op(nir_op op, nir_def *src0, nir_def *src1, nir_def *src2,
+                nir_def *src3, const char *desc);
 
    void test_2src_op(nir_op op, int64_t src0, int64_t src1);
 
@@ -46,10 +46,10 @@ algebraic_test_base::algebraic_test_base()
    res_var = nir_local_variable_create(b->impl, glsl_int_type(), "res");
 }
 
-void algebraic_test_base::test_op(nir_op op, nir_ssa_def *src0, nir_ssa_def *src1,
-                                     nir_ssa_def *src2, nir_ssa_def *src3, const char *desc)
+void algebraic_test_base::test_op(nir_op op, nir_def *src0, nir_def *src1,
+                                     nir_def *src2, nir_def *src3, const char *desc)
 {
-   nir_ssa_def *res_deref = &nir_build_deref_var(b, res_var)->dest.ssa;
+   nir_def *res_deref = &nir_build_deref_var(b, res_var)->dest.ssa;
 
    /* create optimized expression */
    nir_intrinsic_instr *optimized_instr = nir_build_store_deref(
index 637b014..c62f9fe 100644
@@ -27,7 +27,7 @@ namespace {
 
 class nir_builder_test : public nir_test {
 private:
-   const glsl_type *type_for_def(nir_ssa_def *def)
+   const glsl_type *type_for_def(nir_def *def)
    {
       switch (def->bit_size) {
       case 8:  return glsl_type::u8vec(def->num_components);
@@ -44,7 +44,7 @@ protected:
    {
    }
 
-   void store_test_val(nir_ssa_def *val)
+   void store_test_val(nir_def *val)
    {
       nir_variable *var = nir_variable_create(b->shader, nir_var_mem_ssbo,
                                               type_for_def(val), NULL);
@@ -59,7 +59,7 @@ protected:
       stores.push_back(store);
    }
 
-   nir_ssa_def *test_val(unsigned idx)
+   nir_def *test_val(unsigned idx)
    {
       return stores[idx]->src[1].ssa;
    }
@@ -75,7 +75,7 @@ class nir_extract_bits_test : public nir_builder_test {};
 // TODO: Re-enable this once we get vec8 support in NIR
 TEST_F(nir_extract_bits_test, DISABLED_unaligned8)
 {
-   nir_ssa_def *srcs[] = {
+   nir_def *srcs[] = {
       nir_imm_int(b, 0x03020100),
       nir_imm_ivec2(b, 0x07060504, 0x0b0a0908),
    };
@@ -91,7 +91,7 @@ TEST_F(nir_extract_bits_test, DISABLED_unaligned8)
 
 TEST_F(nir_extract_bits_test, unaligned16_disabled)
 {
-   nir_ssa_def *srcs[] = {
+   nir_def *srcs[] = {
       nir_imm_int(b, 0x03020100),
       nir_imm_ivec2(b, 0x07060504, 0x0b0a0908),
    };
@@ -107,7 +107,7 @@ TEST_F(nir_extract_bits_test, unaligned16_disabled)
 
 TEST_F(nir_extract_bits_test, mixed_bit_sizes)
 {
-   nir_ssa_def *srcs[] = {
+   nir_def *srcs[] = {
       nir_imm_int(b, 0x03020100),
       nir_imm_intN_t(b, 0x04, 8),
       nir_imm_intN_t(b, 0x08070605, 32),
index 0661a64..4f19db0 100644
@@ -47,9 +47,9 @@ protected:
 
    struct nir_builder bld;
 
-   nir_ssa_def *v1;
-   nir_ssa_def *v2;
-   nir_ssa_def *v3;
+   nir_def *v1;
+   nir_def *v2;
+   nir_def *v3;
 
    const uint8_t xxxx[4] = { 0, 0, 0, 0 };
    const uint8_t wwww[4] = { 3, 3, 3, 3 };
@@ -93,10 +93,10 @@ TEST_F(comparison_pre_test, a_lt_b_vs_neg_a_plus_b)
     * } else {
     * }
     */
-   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
-   nir_ssa_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+   nir_def *one = nir_imm_float(&bld, 1.0f);
+   nir_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
 
-   nir_ssa_def *flt = nir_flt(&bld, a, one);
+   nir_def *flt = nir_flt(&bld, a, one);
 
    nir_if *nif = nir_push_if(&bld, flt);
 
@@ -145,10 +145,10 @@ TEST_F(comparison_pre_test, a_lt_b_vs_a_minus_b)
     * } else {
     * }
     */
-   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
-   nir_ssa_def *b = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+   nir_def *one = nir_imm_float(&bld, 1.0f);
+   nir_def *b = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
 
-   nir_ssa_def *flt = nir_flt(&bld, one, b);
+   nir_def *flt = nir_flt(&bld, one, b);
 
    nir_if *nif = nir_push_if(&bld, flt);
 
@@ -198,10 +198,10 @@ TEST_F(comparison_pre_test, neg_a_lt_b_vs_a_plus_b)
     * }
     */
 
-   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
-   nir_ssa_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+   nir_def *one = nir_imm_float(&bld, 1.0f);
+   nir_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
 
-   nir_ssa_def *flt = nir_flt(&bld, nir_fneg(&bld, a), one);
+   nir_def *flt = nir_flt(&bld, nir_fneg(&bld, a), one);
 
    nir_if *nif = nir_push_if(&bld, flt);
 
@@ -250,10 +250,10 @@ TEST_F(comparison_pre_test, a_lt_neg_b_vs_a_plus_b)
     * } else {
     * }
     */
-   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
-   nir_ssa_def *b = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+   nir_def *one = nir_imm_float(&bld, 1.0f);
+   nir_def *b = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
 
-   nir_ssa_def *flt = nir_flt(&bld, one, nir_fneg(&bld, b));
+   nir_def *flt = nir_flt(&bld, one, nir_fneg(&bld, b));
 
    nir_if *nif = nir_push_if(&bld, flt);
 
@@ -302,11 +302,11 @@ TEST_F(comparison_pre_test, imm_lt_b_vs_neg_imm_plus_b)
     * } else {
     * }
     */
-   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
-   nir_ssa_def *neg_one = nir_imm_float(&bld, -1.0f);
-   nir_ssa_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+   nir_def *one = nir_imm_float(&bld, 1.0f);
+   nir_def *neg_one = nir_imm_float(&bld, -1.0f);
+   nir_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
 
-   nir_ssa_def *flt = nir_flt(&bld, one, a);
+   nir_def *flt = nir_flt(&bld, one, a);
 
    nir_if *nif = nir_push_if(&bld, flt);
 
@@ -355,11 +355,11 @@ TEST_F(comparison_pre_test, a_lt_imm_vs_a_minus_imm)
     * } else {
     * }
     */
-   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
-   nir_ssa_def *neg_one = nir_imm_float(&bld, -1.0f);
-   nir_ssa_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+   nir_def *one = nir_imm_float(&bld, 1.0f);
+   nir_def *neg_one = nir_imm_float(&bld, -1.0f);
+   nir_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
 
-   nir_ssa_def *flt = nir_flt(&bld, a, one);
+   nir_def *flt = nir_flt(&bld, a, one);
 
    nir_if *nif = nir_push_if(&bld, flt);
 
@@ -409,11 +409,11 @@ TEST_F(comparison_pre_test, neg_imm_lt_a_vs_a_plus_imm)
     * }
     */
 
-   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
-   nir_ssa_def *neg_one = nir_imm_float(&bld, -1.0f);
-   nir_ssa_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+   nir_def *one = nir_imm_float(&bld, 1.0f);
+   nir_def *neg_one = nir_imm_float(&bld, -1.0f);
+   nir_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
 
-   nir_ssa_def *flt = nir_flt(&bld, neg_one, a);
+   nir_def *flt = nir_flt(&bld, neg_one, a);
 
    nir_if *nif = nir_push_if(&bld, flt);
 
@@ -462,11 +462,11 @@ TEST_F(comparison_pre_test, a_lt_neg_imm_vs_a_plus_imm)
     * } else {
     * }
     */
-   nir_ssa_def *one = nir_imm_float(&bld, 1.0f);
-   nir_ssa_def *neg_one = nir_imm_float(&bld, -1.0f);
-   nir_ssa_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
+   nir_def *one = nir_imm_float(&bld, 1.0f);
+   nir_def *neg_one = nir_imm_float(&bld, -1.0f);
+   nir_def *a = nir_channel(&bld, nir_fadd(&bld, v1, v3), 0);
 
-   nir_ssa_def *flt = nir_flt(&bld, a, neg_one);
+   nir_def *flt = nir_flt(&bld, a, neg_one);
 
    nir_if *nif = nir_push_if(&bld, flt);
 
@@ -492,7 +492,7 @@ TEST_F(comparison_pre_test, swizzle_of_same_immediate_vector)
     * } else {
     * }
     */
-   nir_ssa_def *a = nir_fadd(&bld, v1, v3);
+   nir_def *a = nir_fadd(&bld, v1, v3);
 
    nir_alu_instr *flt = nir_alu_instr_create(bld.shader, nir_op_flt);
 
@@ -547,7 +547,7 @@ TEST_F(comparison_pre_test, non_scalar_add_result)
     *
     * No change.
     */
-   nir_ssa_def *a = nir_fadd(&bld, v1, v3);
+   nir_def *a = nir_fadd(&bld, v1, v3);
 
    nir_alu_instr *flt = nir_alu_instr_create(bld.shader, nir_op_flt);
 
index 4bed211..6344531 100644
@@ -32,16 +32,16 @@ protected:
    {
    }
 
-   bool shader_contains_def(nir_ssa_def *def);
+   bool shader_contains_def(nir_def *def);
 };
 
 struct contains_def_state {
-   nir_ssa_def *def;
+   nir_def *def;
    bool found;
 };
 
 static bool
-contains_def_cb(nir_ssa_def *def, void *_state)
+contains_def_cb(nir_def *def, void *_state)
 {
    struct contains_def_state *state = (struct contains_def_state *)_state;
    if (def == state->def)
@@ -51,7 +51,7 @@ contains_def_cb(nir_ssa_def *def, void *_state)
 }
 
 bool
-nir_core_test::shader_contains_def(nir_ssa_def *def)
+nir_core_test::shader_contains_def(nir_def *def)
 {
    nir_foreach_block(block, b->impl) {
       nir_foreach_instr(instr, block) {
@@ -68,10 +68,10 @@ nir_core_test::shader_contains_def(nir_ssa_def *def)
 
 TEST_F(nir_core_test, nir_instr_free_and_dce_test)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *one = nir_imm_int(b, 1);
-   nir_ssa_def *add01 = nir_iadd(b, zero, one);
-   nir_ssa_def *add11 = nir_iadd(b, one, one);
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *one = nir_imm_int(b, 1);
+   nir_def *add01 = nir_iadd(b, zero, one);
+   nir_def *add11 = nir_iadd(b, one, one);
 
    nir_cursor c = nir_instr_free_and_dce(add01->parent_instr);
    ASSERT_FALSE(shader_contains_def(add01));
@@ -86,8 +86,8 @@ TEST_F(nir_core_test, nir_instr_free_and_dce_test)
 
 TEST_F(nir_core_test, nir_instr_free_and_dce_all_test)
 {
-   nir_ssa_def *one = nir_imm_int(b, 1);
-   nir_ssa_def *add = nir_iadd(b, one, one);
+   nir_def *one = nir_imm_int(b, 1);
+   nir_def *add = nir_iadd(b, one, one);
 
    nir_cursor c = nir_instr_free_and_dce(add->parent_instr);
    ASSERT_FALSE(shader_contains_def(add));
@@ -100,12 +100,12 @@ TEST_F(nir_core_test, nir_instr_free_and_dce_all_test)
 
 TEST_F(nir_core_test, nir_instr_free_and_dce_multiple_src_test)
 {
-   nir_ssa_def *one = nir_imm_int(b, 1);
-   nir_ssa_def *add = nir_iadd(b, one, one);
+   nir_def *one = nir_imm_int(b, 1);
+   nir_def *add = nir_iadd(b, one, one);
 
    /* This risks triggering removing add multiple times, which can segfault in
     * nir_instr_remove for instructions with srcs. */
-   nir_ssa_def *add2 = nir_iadd(b, add, add);
+   nir_def *add2 = nir_iadd(b, add, add);
 
    nir_cursor c = nir_instr_free_and_dce(add2->parent_instr);
    ASSERT_FALSE(shader_contains_def(add2));
index 373bfc1..5977ac3 100644
@@ -40,7 +40,7 @@ protected:
 };
 
 nir_phi_instr *create_one_source_phi(nir_shader *shader, nir_block *pred,
-                                     nir_ssa_def *def)
+                                     nir_def *def)
 {
    nir_phi_instr *phi = nir_phi_instr_create(shader);
    nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(def));
@@ -83,7 +83,7 @@ TEST_F(nir_opt_dce_test, return_before_loop)
 
    nir_loop *loop = nir_push_loop(b);
 
-   nir_ssa_def *one = nir_imm_int(b, 1);
+   nir_def *one = nir_imm_int(b, 1);
 
    nir_phi_instr *phi = create_one_source_phi(b->shader, one->parent_instr->block, one);
    nir_instr_insert_before_block(one->parent_instr->block, &phi->instr);
@@ -125,7 +125,7 @@ TEST_F(nir_opt_dead_cf_test, jump_before_constant_if)
     */
    nir_variable *var = nir_variable_create(b->shader, nir_var_shader_out, glsl_int_type(), "out");
 
-   nir_ssa_def *cond = nir_imm_false(b);
+   nir_def *cond = nir_imm_false(b);
    nir_jump(b, nir_jump_return);
    nir_push_if(b, cond);
    nir_store_var(b, var, nir_imm_int(b, 1), 0x1);
index f3d7887..94d4c7f 100644
@@ -45,12 +45,12 @@ protected:
    bool run_vectorizer(nir_variable_mode modes, bool cse=false,
                        nir_variable_mode robust_modes = (nir_variable_mode)0);
 
-   nir_ssa_def *get_resource(uint32_t binding, bool ssbo);
+   nir_def *get_resource(uint32_t binding, bool ssbo);
 
-   nir_intrinsic_instr *create_indirect_load(nir_variable_mode mode, uint32_t binding, nir_ssa_def *offset,
+   nir_intrinsic_instr *create_indirect_load(nir_variable_mode mode, uint32_t binding, nir_def *offset,
                                              uint32_t id, unsigned bit_size=32, unsigned components=1,
                                              unsigned access=0);
-   void create_indirect_store(nir_variable_mode mode, uint32_t binding, nir_ssa_def *offset,
+   void create_indirect_store(nir_variable_mode mode, uint32_t binding, nir_def *offset,
                               uint32_t id, unsigned bit_size=32, unsigned components=1,
                               unsigned wrmask=0xf, unsigned access=0);
 
@@ -67,7 +67,7 @@ protected:
                             unsigned bit_size=32, unsigned components=1, unsigned wrmask=0xf);
 
    bool test_alu(nir_instr *instr, nir_op op);
-   bool test_alu_def(nir_instr *instr, unsigned index, nir_ssa_def *def, unsigned swizzle=0);
+   bool test_alu_def(nir_instr *instr, unsigned index, nir_def *def, unsigned swizzle=0);
 
    static bool mem_vectorize_callback(unsigned align_mul, unsigned align_offset,
                                       unsigned bit_size,
@@ -80,7 +80,7 @@ protected:
 
    std::map<unsigned, nir_alu_instr*> movs;
    std::map<unsigned, nir_alu_src*> loads;
-   std::map<unsigned, nir_ssa_def*> res_map;
+   std::map<unsigned, nir_def*> res_map;
 };
 
 std::string
@@ -154,7 +154,7 @@ nir_load_store_vectorize_test::run_vectorizer(nir_variable_mode modes,
    return progress;
 }
 
-nir_ssa_def *
+nir_def *
 nir_load_store_vectorize_test::get_resource(uint32_t binding, bool ssbo)
 {
    if (res_map.count(binding))
@@ -176,11 +176,11 @@ nir_load_store_vectorize_test::get_resource(uint32_t binding, bool ssbo)
 
 nir_intrinsic_instr *
 nir_load_store_vectorize_test::create_indirect_load(
-   nir_variable_mode mode, uint32_t binding, nir_ssa_def *offset, uint32_t id,
+   nir_variable_mode mode, uint32_t binding, nir_def *offset, uint32_t id,
    unsigned bit_size, unsigned components, unsigned access)
 {
    nir_intrinsic_op intrinsic;
-   nir_ssa_def *res = NULL;
+   nir_def *res = NULL;
    switch (mode) {
    case nir_var_mem_ubo:
       intrinsic = nir_intrinsic_load_ubo;
@@ -236,16 +236,16 @@ nir_load_store_vectorize_test::create_indirect_load(
 
 void
 nir_load_store_vectorize_test::create_indirect_store(
-   nir_variable_mode mode, uint32_t binding, nir_ssa_def *offset, uint32_t id,
+   nir_variable_mode mode, uint32_t binding, nir_def *offset, uint32_t id,
    unsigned bit_size, unsigned components, unsigned wrmask, unsigned access)
 {
    nir_const_value values[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < components; i++)
       values[i] = nir_const_value_for_raw_uint((id << 4) | i, bit_size);
-   nir_ssa_def *value = nir_build_imm(b, components, bit_size, values);
+   nir_def *value = nir_build_imm(b, components, bit_size, values);
 
    nir_intrinsic_op intrinsic;
-   nir_ssa_def *res = NULL;
+   nir_def *res = NULL;
    switch (mode) {
    case nir_var_mem_ssbo:
       intrinsic = nir_intrinsic_store_ssbo;
@@ -293,7 +293,7 @@ nir_load_store_vectorize_test::create_store(
 void nir_load_store_vectorize_test::create_shared_load(
    nir_deref_instr *deref, uint32_t id, unsigned bit_size, unsigned components)
 {
-   nir_ssa_def *load = nir_load_deref(b, deref);
+   nir_def *load = nir_load_deref(b, deref);
    nir_alu_instr *mov = nir_instr_as_alu(nir_mov(b, load)->parent_instr);
    movs[id] = mov;
    loads[id] = &mov->src[0];
@@ -306,7 +306,7 @@ void nir_load_store_vectorize_test::create_shared_store(
    nir_const_value values[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < components; i++)
       values[i] = nir_const_value_for_raw_uint((id << 4) | i, bit_size);
-   nir_ssa_def *value = nir_build_imm(b, components, bit_size, values);
+   nir_def *value = nir_build_imm(b, components, bit_size, values);
 
    nir_store_deref(b, deref, value, wrmask & ((1 << components) - 1));
 }
@@ -317,7 +317,7 @@ bool nir_load_store_vectorize_test::test_alu(nir_instr *instr, nir_op op)
 }
 
 bool nir_load_store_vectorize_test::test_alu_def(
-   nir_instr *instr, unsigned index, nir_ssa_def *def, unsigned swizzle)
+   nir_instr *instr, unsigned index, nir_def *def, unsigned swizzle)
 {
    if (instr->type != nir_instr_type_alu)
       return false;
@@ -535,7 +535,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent)
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect)
 {
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *index_base = nir_load_local_invocation_index(b);
    create_indirect_load(nir_var_mem_ssbo, 0, index_base, 0x1);
    create_indirect_load(nir_var_mem_ssbo, 0, nir_iadd_imm(b, index_base, 4), 0x2);
 
@@ -556,8 +556,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect)
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_sub)
 {
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
-   nir_ssa_def *index_base_prev = nir_iadd_imm(b, index_base, 0xfffffffc);
+   nir_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *index_base_prev = nir_iadd_imm(b, index_base, 0xfffffffc);
    create_indirect_load(nir_var_mem_ssbo, 0, index_base_prev, 0x1);
    create_indirect_load(nir_var_mem_ssbo, 0, index_base, 0x2);
 
@@ -578,10 +578,10 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_sub)
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_neg_stride)
 {
-   nir_ssa_def *inv = nir_load_local_invocation_index(b);
-   nir_ssa_def *inv_plus_one = nir_iadd_imm(b, inv, 1);
-   nir_ssa_def *index_base = nir_imul_imm(b, inv, 0xfffffffc);
-   nir_ssa_def *index_base_prev = nir_imul_imm(b, inv_plus_one, 0xfffffffc);
+   nir_def *inv = nir_load_local_invocation_index(b);
+   nir_def *inv_plus_one = nir_iadd_imm(b, inv, 1);
+   nir_def *index_base = nir_imul_imm(b, inv, 0xfffffffc);
+   nir_def *index_base_prev = nir_imul_imm(b, inv_plus_one, 0xfffffffc);
    create_indirect_load(nir_var_mem_ssbo, 0, index_base_prev, 0x1);
    create_indirect_load(nir_var_mem_ssbo, 0, index_base, 0x2);
 
@@ -600,7 +600,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_neg_stride)
 
    /* nir_opt_algebraic optimizes the imul */
    ASSERT_TRUE(test_alu(load->src[1].ssa->parent_instr, nir_op_ineg));
-   nir_ssa_def *offset = nir_instr_as_alu(load->src[1].ssa->parent_instr)->src[0].src.ssa;
+   nir_def *offset = nir_instr_as_alu(load->src[1].ssa->parent_instr)->src[0].src.ssa;
    ASSERT_TRUE(test_alu(offset->parent_instr, nir_op_ishl));
    nir_alu_instr *shl = nir_instr_as_alu(offset->parent_instr);
    ASSERT_EQ(shl->src[0].src.ssa, inv_plus_one);
@@ -713,7 +713,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent)
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_ssbo, 0);
    ASSERT_EQ(nir_src_as_uint(store->src[2]), 0);
    ASSERT_EQ(nir_intrinsic_write_mask(store), 0x3);
-   nir_ssa_def *val = store->src[0].ssa;
+   nir_def *val = store->src[0].ssa;
    ASSERT_EQ(val->bit_size, 32);
    ASSERT_EQ(val->num_components, 2);
    nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
@@ -736,7 +736,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_intersecting)
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_ssbo, 0);
    ASSERT_EQ(nir_src_as_uint(store->src[2]), 0);
    ASSERT_EQ(nir_intrinsic_write_mask(store), 0x7);
-   nir_ssa_def *val = store->src[0].ssa;
+   nir_def *val = store->src[0].ssa;
    ASSERT_EQ(val->bit_size, 32);
    ASSERT_EQ(val->num_components, 3);
    nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
@@ -760,7 +760,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_identical)
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_ssbo, 0);
    ASSERT_EQ(nir_src_as_uint(store->src[2]), 0);
    ASSERT_EQ(nir_intrinsic_write_mask(store), 0x1);
-   nir_ssa_def *val = store->src[0].ssa;
+   nir_def *val = store->src[0].ssa;
    ASSERT_EQ(val->bit_size, 32);
    ASSERT_EQ(val->num_components, 1);
    ASSERT_EQ(nir_src_as_uint(store->src[0]), 0x20);
@@ -924,12 +924,12 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_8_8_16)
    EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
    EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
 
-   nir_ssa_def *val = loads[0x3]->src.ssa;
+   nir_def *val = loads[0x3]->src.ssa;
    ASSERT_EQ(val->bit_size, 16);
    ASSERT_EQ(val->num_components, 1);
    ASSERT_TRUE(test_alu(val->parent_instr, nir_op_ior));
-   nir_ssa_def *low = nir_instr_as_alu(val->parent_instr)->src[0].src.ssa;
-   nir_ssa_def *high = nir_instr_as_alu(val->parent_instr)->src[1].src.ssa;
+   nir_def *low = nir_instr_as_alu(val->parent_instr)->src[0].src.ssa;
+   nir_def *high = nir_instr_as_alu(val->parent_instr)->src[1].src.ssa;
    ASSERT_TRUE(test_alu(high->parent_instr, nir_op_ishl));
    high = nir_instr_as_alu(high->parent_instr)->src[0].src.ssa;
    ASSERT_TRUE(test_alu(low->parent_instr, nir_op_u2u16));
@@ -956,7 +956,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64)
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
    EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy");
 
-   nir_ssa_def *val = loads[0x2]->src.ssa;
+   nir_def *val = loads[0x2]->src.ssa;
    ASSERT_EQ(val->bit_size, 64);
    ASSERT_EQ(val->num_components, 1);
    ASSERT_TRUE(test_alu(val->parent_instr, nir_op_pack_64_2x32));
@@ -983,7 +983,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64_64)
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
    EXPECT_INSTR_SWIZZLES(movs[0x3], load, "z");
 
-   nir_ssa_def *val = loads[0x2]->src.ssa;
+   nir_def *val = loads[0x2]->src.ssa;
    ASSERT_EQ(val->bit_size, 64);
    ASSERT_EQ(val->num_components, 1);
    ASSERT_TRUE(test_alu(val->parent_instr, nir_op_mov));
@@ -1016,7 +1016,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_intersecting_32_32_64)
    ASSERT_EQ(nir_src_as_uint(load->src[1]), 4);
    EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy");
 
-   nir_ssa_def *val = loads[0x2]->src.ssa;
+   nir_def *val = loads[0x2]->src.ssa;
    ASSERT_EQ(val->bit_size, 64);
    ASSERT_EQ(val->num_components, 1);
    ASSERT_TRUE(test_alu(val->parent_instr, nir_op_pack_64_2x32));
@@ -1040,7 +1040,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent_8_8_16)
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_ssbo, 0);
    ASSERT_EQ(nir_src_as_uint(store->src[2]), 0);
    ASSERT_EQ(nir_intrinsic_write_mask(store), 0xf);
-   nir_ssa_def *val = store->src[0].ssa;
+   nir_def *val = store->src[0].ssa;
    ASSERT_EQ(val->bit_size, 8);
    ASSERT_EQ(val->num_components, 4);
    nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
@@ -1065,7 +1065,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent_32_32_64)
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_ssbo, 0);
    ASSERT_EQ(nir_src_as_uint(store->src[2]), 0);
    ASSERT_EQ(nir_intrinsic_write_mask(store), 0xf);
-   nir_ssa_def *val = store->src[0].ssa;
+   nir_def *val = store->src[0].ssa;
    ASSERT_EQ(val->bit_size, 32);
    ASSERT_EQ(val->num_components, 4);
    nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
@@ -1091,7 +1091,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_adjacent_32_32_64_64)
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_ssbo, 0);
    ASSERT_EQ(nir_src_as_uint(store->src[2]), 0);
    ASSERT_EQ(nir_intrinsic_write_mask(store), 0x7);
-   nir_ssa_def *val = store->src[0].ssa;
+   nir_def *val = store->src[0].ssa;
    ASSERT_EQ(val->bit_size, 64);
    ASSERT_EQ(val->num_components, 3);
    nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
@@ -1115,7 +1115,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_intersecting_32_32_64)
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_ssbo, 0);
    ASSERT_EQ(nir_src_as_uint(store->src[2]), 0);
    ASSERT_EQ(nir_intrinsic_write_mask(store), 0x7);
-   nir_ssa_def *val = store->src[0].ssa;
+   nir_def *val = store->src[0].ssa;
    ASSERT_EQ(val->bit_size, 32);
    ASSERT_EQ(val->num_components, 3);
    nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
@@ -1152,7 +1152,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_store_identical_wrmask)
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_ssbo, 0);
    ASSERT_EQ(nir_src_as_uint(store->src[2]), 0);
    ASSERT_EQ(nir_intrinsic_write_mask(store), 0xf);
-   nir_ssa_def *val = store->src[0].ssa;
+   nir_def *val = store->src[0].ssa;
    ASSERT_EQ(val->bit_size, 32);
    ASSERT_EQ(val->num_components, 4);
    nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
@@ -1217,7 +1217,7 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent_indirect)
 {
    nir_variable *var = nir_variable_create(b->shader, nir_var_mem_shared, glsl_array_type(glsl_uint_type(), 4, 0), "var");
    nir_deref_instr *deref = nir_build_deref_var(b, var);
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *index_base = nir_load_local_invocation_index(b);
 
    create_shared_load(nir_build_deref_array(b, deref, index_base), 0x1);
    create_shared_load(nir_build_deref_array(b, deref, nir_iadd_imm(b, index_base, 1)), 0x2);
@@ -1252,8 +1252,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_adjacent_indirect_sub)
 {
    nir_variable *var = nir_variable_create(b->shader, nir_var_mem_shared, glsl_array_type(glsl_uint_type(), 4, 0), "var");
    nir_deref_instr *deref = nir_build_deref_var(b, var);
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
-   nir_ssa_def *index_base_prev = nir_iadd_imm(b, index_base, 0xffffffff);
+   nir_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *index_base_prev = nir_iadd_imm(b, index_base, 0xffffffff);
 
    create_shared_load(nir_build_deref_array(b, deref, index_base_prev), 0x1);
    create_shared_load(nir_build_deref_array(b, deref, index_base), 0x2);
@@ -1503,7 +1503,7 @@ TEST_F(nir_load_store_vectorize_test, shared_store_adjacent)
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
    ASSERT_EQ(nir_intrinsic_write_mask(store), 0x3);
-   nir_ssa_def *val = store->src[1].ssa;
+   nir_def *val = store->src[1].ssa;
    ASSERT_EQ(val->bit_size, 32);
    ASSERT_EQ(val->num_components, 2);
    nir_const_value *cv = nir_instr_as_load_const(val->parent_instr)->value;
@@ -1550,7 +1550,7 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_separate_direct_direct)
 
 TEST_F(nir_load_store_vectorize_test, push_const_load_separate_direct_indirect)
 {
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *index_base = nir_load_local_invocation_index(b);
    create_load(nir_var_mem_push_const, 0, 0, 0x1);
    create_indirect_load(nir_var_mem_push_const, 0, index_base, 0x2);
 
@@ -1564,7 +1564,7 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_separate_direct_indirect)
 
 TEST_F(nir_load_store_vectorize_test, push_const_load_separate_indirect_indirect)
 {
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *index_base = nir_load_local_invocation_index(b);
    create_indirect_load(nir_var_mem_push_const, 0,
       nir_iadd_imm(b, nir_imul_imm(b, nir_iadd_imm(b, index_base, 2), 16), 32), 0x1);
    create_indirect_load(nir_var_mem_push_const, 0,
@@ -1580,10 +1580,10 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_separate_indirect_indirect
 
 TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent_complex_indirect)
 {
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *index_base = nir_load_local_invocation_index(b);
    //vec4 pc[]; pc[gl_LocalInvocationIndex].w; pc[gl_LocalInvocationIndex+1].x;
-   nir_ssa_def *low = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 12);
-   nir_ssa_def *high = nir_imul_imm(b, nir_iadd_imm(b, index_base, 1), 16);
+   nir_def *low = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 12);
+   nir_def *high = nir_imul_imm(b, nir_iadd_imm(b, index_base, 1), 16);
    create_indirect_load(nir_var_mem_push_const, 0, low, 0x1);
    create_indirect_load(nir_var_mem_push_const, 0, high, 0x2);
 
@@ -1604,7 +1604,7 @@ TEST_F(nir_load_store_vectorize_test, push_const_load_adjacent_complex_indirect)
 
 TEST_F(nir_load_store_vectorize_test, ssbo_alias0)
 {
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *index_base = nir_load_local_invocation_index(b);
    create_load(nir_var_mem_ssbo, 0, 0, 0x1);
    create_indirect_store(nir_var_mem_ssbo, 0, index_base, 0x2);
    create_load(nir_var_mem_ssbo, 0, 0, 0x3);
@@ -1619,8 +1619,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_alias0)
 
 TEST_F(nir_load_store_vectorize_test, ssbo_alias1)
 {
-   nir_ssa_def *load_base = nir_load_global_invocation_index(b, 32);
-   nir_ssa_def *store_base = nir_load_local_invocation_index(b);
+   nir_def *load_base = nir_load_global_invocation_index(b, 32);
+   nir_def *store_base = nir_load_local_invocation_index(b);
    create_indirect_load(nir_var_mem_ssbo, 0, load_base, 0x1);
    create_indirect_store(nir_var_mem_ssbo, 0, store_base, 0x2);
    create_indirect_load(nir_var_mem_ssbo, 0, load_base, 0x3);
@@ -1636,8 +1636,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_alias1)
 TEST_F(nir_load_store_vectorize_test, DISABLED_ssbo_alias2)
 {
    /* TODO: try to combine these loads */
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
-   nir_ssa_def *offset = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 4);
+   nir_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 4);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
    create_store(nir_var_mem_ssbo, 0, 0, 0x2);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x3);
@@ -1662,8 +1662,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_alias3)
    /* These loads could be combined if nir_alu_instr::no_unsigned_wrap were
     * set. As written they can't be: if index_base == 268435455, the offset
     * computation wraps around to 0, so the loads may alias the store. */
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
-   nir_ssa_def *offset = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 16);
+   nir_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 16);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
    create_store(nir_var_mem_ssbo, 0, 0, 0x2);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x3);
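
The hazard ssbo_alias3 guards against is plain 32-bit unsigned arithmetic. A
standalone C spot-check (hypothetical, not part of the test suite) of the
wrap-around named in the comment above:

   #include <assert.h>
   #include <stdint.h>

   int main(void)
   {
      /* offset = index_base * 16 + 16, evaluated in uint32_t math */
      uint32_t index_base = 268435455u;          /* 0x0fffffff */
      uint32_t offset = index_base * 16u + 16u;  /* 0xfffffff0 + 0x10 wraps */
      assert(offset == 0);                       /* aliases the store at 0 */
      return 0;
   }
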
@@ -1679,8 +1679,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_alias3)
 TEST_F(nir_load_store_vectorize_test, DISABLED_ssbo_alias4)
 {
    /* TODO: try to combine these loads */
-   nir_ssa_def *index_base = nir_load_local_invocation_index(b);
-   nir_ssa_def *offset = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 16);
+   nir_def *index_base = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_iadd_imm(b, nir_imul_imm(b, index_base, 16), 16);
    nir_instr_as_alu(offset->parent_instr)->no_unsigned_wrap = true;
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
    create_store(nir_var_mem_ssbo, 0, 0, 0x2);
@@ -1749,8 +1749,8 @@ TEST_F(nir_load_store_vectorize_test, DISABLED_shared_alias0)
    nir_variable *var = nir_variable_create(b->shader, nir_var_mem_shared, glsl_struct_type(fields, 2, "Struct", false), "var");
    nir_deref_instr *deref = nir_build_deref_var(b, var);
 
-   nir_ssa_def *index0 = nir_load_local_invocation_index(b);
-   nir_ssa_def *index1 = nir_load_global_invocation_index(b, 32);
+   nir_def *index0 = nir_load_local_invocation_index(b);
+   nir_def *index1 = nir_load_global_invocation_index(b, 32);
    nir_deref_instr *load_deref = nir_build_deref_array(b, nir_build_deref_struct(b, deref, 0), index0);
 
    create_shared_load(load_deref, 0x1);
@@ -1812,9 +1812,9 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_distant_64bit)
 
 TEST_F(nir_load_store_vectorize_test, ssbo_load_distant_indirect_64bit)
 {
-   nir_ssa_def *index_base = nir_u2u64(b, nir_load_local_invocation_index(b));
-   nir_ssa_def *first = nir_imul_imm(b, index_base, 0x100000000);
-   nir_ssa_def *second = nir_imul_imm(b, index_base, 0x200000000);
+   nir_def *index_base = nir_u2u64(b, nir_load_local_invocation_index(b));
+   nir_def *first = nir_imul_imm(b, index_base, 0x100000000);
+   nir_def *second = nir_imul_imm(b, index_base, 0x200000000);
    create_indirect_load(nir_var_mem_ssbo, 0, first, 0x1);
    create_indirect_load(nir_var_mem_ssbo, 0, second, 0x2);
 
@@ -1841,7 +1841,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust)
 
 TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust_indirect_stride1)
 {
-   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_load_local_invocation_index(b);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
    create_indirect_load(nir_var_mem_ssbo, 0, nir_iadd_imm(b, offset, 4), 0x2);
 
@@ -1855,7 +1855,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust_indirect_strid
 
 TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust_indirect_stride8)
 {
-   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_load_local_invocation_index(b);
    offset = nir_imul_imm(b, offset, 8);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
    create_indirect_load(nir_var_mem_ssbo, 0, nir_iadd_imm(b, offset, 4), 0x2);
@@ -1870,10 +1870,10 @@ TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust_indirect_strid
 
 TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust_indirect_stride12)
 {
-   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_load_local_invocation_index(b);
    offset = nir_imul_imm(b, offset, 12);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
-   nir_ssa_def *offset_4 = nir_iadd_imm(b, offset, 4);
+   nir_def *offset_4 = nir_iadd_imm(b, offset, 4);
    create_indirect_load(nir_var_mem_ssbo, 0, offset_4, 0x2);
    create_indirect_load(nir_var_mem_ssbo, 0, nir_iadd_imm(b, offset, 8), 0x3);
 
@@ -1900,7 +1900,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust_indirect_strid
 
 TEST_F(nir_load_store_vectorize_test, ssbo_offset_overflow_robust_indirect_stride16)
 {
-   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_load_local_invocation_index(b);
    offset = nir_imul_imm(b, offset, 16);
    create_indirect_load(nir_var_mem_ssbo, 0, offset, 0x1);
    create_indirect_load(nir_var_mem_ssbo, 0, nir_iadd_imm(b, offset, 4), 0x2);
@@ -1921,7 +1921,7 @@ TEST_F(nir_load_store_vectorize_test, shared_offset_overflow_robust_indirect_str
                                            glsl_array_type(glsl_uint_type(), 4, 0), "var");
    nir_deref_instr *deref = nir_build_deref_var(b, var);
 
-   nir_ssa_def *index = nir_load_local_invocation_index(b);
+   nir_def *index = nir_load_local_invocation_index(b);
    index = nir_imul_imm(b, index, 3);
    create_shared_load(nir_build_deref_array(b, deref, index), 0x1);
    create_shared_load(nir_build_deref_array(b, deref, nir_iadd_imm(b, index, 1)), 0x2);
@@ -1937,7 +1937,7 @@ TEST_F(nir_load_store_vectorize_test, shared_offset_overflow_robust_indirect_str
 
 TEST_F(nir_load_store_vectorize_test, ubo_alignment_16_4)
 {
-   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_load_local_invocation_index(b);
    offset = nir_imul_imm(b, offset, 16);
    offset = nir_iadd_imm(b, offset, 4);
    nir_intrinsic_instr *load = create_indirect_load(nir_var_mem_ubo, 0, offset,
@@ -1950,7 +1950,7 @@ TEST_F(nir_load_store_vectorize_test, ubo_alignment_16_4)
 
 TEST_F(nir_load_store_vectorize_test, ubo_alignment_16_4_swapped)
 {
-   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_load_local_invocation_index(b);
    offset = nir_iadd_imm(b, offset, 1);
    offset = nir_imul_imm(b, offset, 16);
    offset = nir_iadd_imm(b, offset, 4);
@@ -1965,7 +1965,7 @@ TEST_F(nir_load_store_vectorize_test, ubo_alignment_16_4_swapped)
 /* Check offset % mul != 0 */
 TEST_F(nir_load_store_vectorize_test, ubo_alignment_16_20)
 {
-   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_load_local_invocation_index(b);
    offset = nir_imul_imm(b, offset, 16);
    offset = nir_iadd_imm(b, offset, 20);
    nir_intrinsic_instr *load = create_indirect_load(nir_var_mem_ubo, 0, offset,
@@ -1979,7 +1979,7 @@ TEST_F(nir_load_store_vectorize_test, ubo_alignment_16_20)
 /* Check that we don't upgrade to non-power-of-two alignments. */
 TEST_F(nir_load_store_vectorize_test, ubo_alignment_24_4)
 {
-   nir_ssa_def *offset = nir_load_local_invocation_index(b);
+   nir_def *offset = nir_load_local_invocation_index(b);
    offset = nir_imul_imm(b, offset, 24);
    offset = nir_iadd_imm(b, offset, 4);
    nir_intrinsic_instr *load =
@@ -1993,9 +1993,9 @@ TEST_F(nir_load_store_vectorize_test, ubo_alignment_24_4)
 /* Check that we don't upgrade to non-power-of-two alignments. */
 TEST_F(nir_load_store_vectorize_test, ubo_alignment_64_16_8)
 {
-   nir_ssa_def *x = nir_imul_imm(b, nir_load_local_invocation_index(b), 64);
-   nir_ssa_def *y = nir_imul_imm(b, nir_load_instance_id(b), 16);
-   nir_ssa_def *offset = nir_iadd(b, x, y);
+   nir_def *x = nir_imul_imm(b, nir_load_local_invocation_index(b), 64);
+   nir_def *y = nir_imul_imm(b, nir_load_instance_id(b), 16);
+   nir_def *offset = nir_iadd(b, x, y);
    offset = nir_iadd_imm(b, offset, 8);
    nir_intrinsic_instr *load =
       create_indirect_load(nir_var_mem_ubo, 0, offset, 0x1);
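
The ubo_alignment_* names encode the (align_mul, align_offset) pair expected
for offset = i*mul + add. A minimal sketch of that arithmetic, assuming the
usual derivation (largest power-of-two factor of the stride, offset reduced
modulo it); pot_divisor is a hypothetical helper, not a NIR API:

   #include <assert.h>
   #include <stdint.h>

   /* largest power of two dividing mul (its lowest set bit) */
   static uint32_t pot_divisor(uint32_t mul)
   {
      return mul & -mul;
   }

   int main(void)
   {
      assert(pot_divisor(16) == 16 && 20 % 16 == 4); /* 16_20: align 16, 4 */
      assert(pot_divisor(24) == 8);                  /* 24_4: align 8, not 24 */
      return 0;
   }
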
index dba30fd..4700994 100644
@@ -54,12 +54,12 @@ struct loop_builder_param {
    uint32_t init_value;
    uint32_t cond_value;
    uint32_t incr_value;
-   nir_ssa_def *(*cond_instr)(nir_builder *,
-                              nir_ssa_def *,
-                              nir_ssa_def *);
-   nir_ssa_def *(*incr_instr)(nir_builder *,
-                              nir_ssa_def *,
-                              nir_ssa_def *);
+   nir_def *(*cond_instr)(nir_builder *,
+                          nir_def *,
+                          nir_def *);
+   nir_def *(*incr_instr)(nir_builder *,
+                          nir_def *,
+                          nir_def *);
 };
 
 static nir_loop *
@@ -75,9 +75,9 @@ loop_builder(nir_builder *b, loop_builder_param p)
     *       i = incr_instr(i, incr_value);
     *    }
     */
-   nir_ssa_def *ssa_0 = nir_imm_int(b, p.init_value);
-   nir_ssa_def *ssa_1 = nir_imm_int(b, p.cond_value);
-   nir_ssa_def *ssa_2 = nir_imm_int(b, p.incr_value);
+   nir_def *ssa_0 = nir_imm_int(b, p.init_value);
+   nir_def *ssa_1 = nir_imm_int(b, p.cond_value);
+   nir_def *ssa_2 = nir_imm_int(b, p.incr_value);
 
    nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
 
@@ -89,8 +89,8 @@ loop_builder(nir_builder *b, loop_builder_param p)
       nir_phi_instr_add_src(phi, ssa_0->parent_instr->block,
                             nir_src_for_ssa(ssa_0));
 
-      nir_ssa_def *ssa_5 = &phi->dest.ssa;
-      nir_ssa_def *ssa_3 = p.cond_instr(b, ssa_5, ssa_1);
+      nir_def *ssa_5 = &phi->dest.ssa;
+      nir_def *ssa_3 = p.cond_instr(b, ssa_5, ssa_1);
 
       nir_if *nif = nir_push_if(b, ssa_3);
       {
@@ -99,7 +99,7 @@ loop_builder(nir_builder *b, loop_builder_param p)
       }
       nir_pop_if(b, nif);
 
-      nir_ssa_def *ssa_4 = p.incr_instr(b, ssa_5, ssa_2);
+      nir_def *ssa_4 = p.incr_instr(b, ssa_5, ssa_2);
 
       nir_phi_instr_add_src(phi, ssa_4->parent_instr->block,
                             nir_src_for_ssa(ssa_4));
@@ -116,12 +116,12 @@ struct loop_builder_invert_param {
    uint32_t init_value;
    uint32_t incr_value;
    uint32_t cond_value;
-   nir_ssa_def *(*cond_instr)(nir_builder *,
-                              nir_ssa_def *,
-                              nir_ssa_def *);
-   nir_ssa_def *(*incr_instr)(nir_builder *,
-                              nir_ssa_def *,
-                              nir_ssa_def *);
+   nir_def *(*cond_instr)(nir_builder *,
+                          nir_def *,
+                          nir_def *);
+   nir_def *(*incr_instr)(nir_builder *,
+                          nir_def *,
+                          nir_def *);
 };
 
 /**
@@ -145,9 +145,9 @@ loop_builder_invert(nir_builder *b, loop_builder_invert_param p)
     *          break;
     *    }
     */
-   nir_ssa_def *ssa_0 = nir_imm_int(b, p.init_value);
-   nir_ssa_def *ssa_1 = nir_imm_int(b, p.incr_value);
-   nir_ssa_def *ssa_2 = nir_imm_int(b, p.cond_value);
+   nir_def *ssa_0 = nir_imm_int(b, p.init_value);
+   nir_def *ssa_1 = nir_imm_int(b, p.incr_value);
+   nir_def *ssa_2 = nir_imm_int(b, p.cond_value);
 
    nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
 
@@ -159,11 +159,11 @@ loop_builder_invert(nir_builder *b, loop_builder_invert_param p)
       nir_phi_instr_add_src(phi, ssa_0->parent_instr->block,
                             nir_src_for_ssa(ssa_0));
 
-      nir_ssa_def *ssa_5 = &phi->dest.ssa;
+      nir_def *ssa_5 = &phi->dest.ssa;
 
-      nir_ssa_def *ssa_3 = p.incr_instr(b, ssa_5, ssa_1);
+      nir_def *ssa_3 = p.incr_instr(b, ssa_5, ssa_1);
 
-      nir_ssa_def *ssa_4 = p.cond_instr(b, ssa_3, ssa_2);
+      nir_def *ssa_4 = p.cond_instr(b, ssa_3, ssa_2);
 
       nir_if *nif = nir_push_if(b, ssa_4);
       {
@@ -268,8 +268,8 @@ TEST_F(nir_loop_analyze_test, one_iteration_fneu)
 }
 
 #define COMPARE_REVERSE(comp)                                           \
-   static nir_ssa_def *                                                 \
-   nir_ ## comp ## _rev(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y) \
+   static nir_def *                                                 \
+   nir_ ## comp ## _rev(nir_builder *b, nir_def *x, nir_def *y) \
    {                                                                    \
       return nir_ ## comp (b, y, x);                                    \
    }
@@ -281,8 +281,8 @@ COMPARE_REVERSE(uge)
 COMPARE_REVERSE(ishl)
 
 #define INOT_COMPARE(comp)                                              \
-   static nir_ssa_def *                                                 \
-   nir_inot_ ## comp (nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)   \
+   static nir_def *                                                 \
+   nir_inot_ ## comp (nir_builder *b, nir_def *x, nir_def *y)   \
    {                                                                    \
       return nir_inot(b, nir_ ## comp (b, x, y));                       \
    }
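
Expanded by hand, these helper macros only stamp out tiny wrappers. Using uge,
the one op shown instantiated above (the INOT_COMPARE instantiation list is
off-screen, so that expansion is illustrative), and assuming the Mesa NIR
builder headers, COMPARE_REVERSE(uge) and INOT_COMPARE(uge) now read:

   /* COMPARE_REVERSE(uge): the same comparison with operands swapped */
   static nir_def *
   nir_uge_rev(nir_builder *b, nir_def *x, nir_def *y)
   {
      return nir_uge(b, y, x);
   }

   /* INOT_COMPARE(uge), illustrative: the comparison, then inverted */
   static nir_def *
   nir_inot_uge(nir_builder *b, nir_def *x, nir_def *y)
   {
      return nir_inot(b, nir_uge(b, x, y));
   }
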
index a5f3141..ecc4a33 100644
@@ -31,9 +31,9 @@
                            _exp_instr_count, _exp_loop_count)           \
    TEST_F(nir_loop_unroll_test, _label)                                 \
    {                                                                    \
-      nir_ssa_def *init = nir_imm_##_type(&bld, _init);                 \
-      nir_ssa_def *limit = nir_imm_##_type(&bld, _limit);               \
-      nir_ssa_def *step = nir_imm_##_type(&bld, _step);                 \
+      nir_def *init = nir_imm_##_type(&bld, _init);                 \
+      nir_def *limit = nir_imm_##_type(&bld, _limit);               \
+      nir_def *step = nir_imm_##_type(&bld, _step);                 \
       loop_unroll_test_helper(&bld, init, limit, step,                  \
                               &nir_##_cond, &nir_##_incr, _rev);        \
       EXPECT_##_exp_res(nir_opt_loop_unroll(bld.shader));               \
@@ -99,14 +99,14 @@ nir_loop_unroll_test::count_loops(void)
 }
 
 void
-loop_unroll_test_helper(nir_builder *bld, nir_ssa_def *init,
-                        nir_ssa_def *limit, nir_ssa_def *step,
-                        nir_ssa_def* (*cond_instr)(nir_builder*,
-                                                   nir_ssa_def*,
-                                                   nir_ssa_def*),
-                        nir_ssa_def* (*incr_instr)(nir_builder*,
-                                                   nir_ssa_def*,
-                                                   nir_ssa_def*),
+loop_unroll_test_helper(nir_builder *bld, nir_def *init,
+                        nir_def *limit, nir_def *step,
+                        nir_def* (*cond_instr)(nir_builder*,
+                                               nir_def*,
+                                               nir_def*),
+                        nir_def* (*incr_instr)(nir_builder*,
+                                               nir_def*,
+                                               nir_def*),
                         bool reverse)
 {
    nir_loop *loop = nir_push_loop(bld);
@@ -120,7 +120,7 @@ loop_unroll_test_helper(nir_builder *bld, nir_ssa_def *init,
 
    nir_phi_instr_add_src(phi, top_block, nir_src_for_ssa(init));
 
-   nir_ssa_def *cond = cond_instr(bld,
+   nir_def *cond = cond_instr(bld,
                                   (reverse ? limit : &phi->dest.ssa),
                                   (reverse ? &phi->dest.ssa : limit));
 
@@ -128,7 +128,7 @@ loop_unroll_test_helper(nir_builder *bld, nir_ssa_def *init,
    nir_jump(bld, nir_jump_break);
    nir_pop_if(bld, nif);
 
-   nir_ssa_def *var = incr_instr(bld, &phi->dest.ssa, step);
+   nir_def *var = incr_instr(bld, &phi->dest.ssa, step);
 
    nir_phi_instr_add_src(phi, nir_cursor_current_block(bld->cursor),
                          nir_src_for_ssa(var));
index 2b1a279..b5bb5d0 100644
@@ -40,7 +40,7 @@ TEST_F(nir_lower_alu_width_test, fdot_order)
    b->exact = true;
 
    /* If this isn't done in xyz order, it evaluates to infinity. */
-   nir_ssa_def *val = nir_fdot(
+   nir_def *val = nir_fdot(
       b, nir_imm_vec3(b, 1.7014118346046923e+38, 1.7014118346046923e+38, 8.507059173023462e+37),
       nir_imm_vec3(b, -0.5, 1.5, 1.0));
    nir_intrinsic_instr *store =
index 7b0ae39..74b5065 100644
@@ -25,7 +25,7 @@
 #include "util/u_math.h"
 
 static inline bool
-nir_mod_analysis_comp0(nir_ssa_def *val, nir_alu_type val_type, unsigned div, unsigned *mod)
+nir_mod_analysis_comp0(nir_def *val, nir_alu_type val_type, unsigned div, unsigned *mod)
 {
    return nir_mod_analysis(nir_get_ssa_scalar(val, 0), val_type, div, mod);
 }
@@ -34,10 +34,10 @@ class nir_mod_analysis_test : public nir_test {
 protected:
    nir_mod_analysis_test();
 
-   nir_ssa_def *nir_imul_vec2y(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1);
+   nir_def *nir_imul_vec2y(nir_builder *b, nir_def *src0, nir_def *src1);
 
-   nir_ssa_def *v[50];
-   nir_ssa_def *invocation;
+   nir_def *v[50];
+   nir_def *invocation;
 };
 
 nir_mod_analysis_test::nir_mod_analysis_test()
@@ -49,8 +49,8 @@ nir_mod_analysis_test::nir_mod_analysis_test()
 }
 
 /* returns src0 * src1.y */
-nir_ssa_def *
-nir_mod_analysis_test::nir_imul_vec2y(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
+nir_def *
+nir_mod_analysis_test::nir_imul_vec2y(nir_builder *b, nir_def *src0, nir_def *src1)
 {
    nir_alu_instr *instr = nir_alu_instr_create(b->shader, nir_op_imul);
 
@@ -96,7 +96,7 @@ TEST_F(nir_mod_analysis_test, const_plus_const)
    for (unsigned const_mod = 1; const_mod <= 1024; const_mod *= 2) {
       for (unsigned c1 = 0; c1 < 10; ++c1) {
          for (unsigned c2 = 0; c2 < 10; ++c2) {
-            nir_ssa_def *sum = nir_iadd(b, v[c1], v[c2]);
+            nir_def *sum = nir_iadd(b, v[c1], v[c2]);
 
             unsigned mod = INT32_MAX;
 
@@ -112,7 +112,7 @@ TEST_F(nir_mod_analysis_test, dynamic_plus_const)
    /* (invocation + const) % const_mod should never be known unless const_mod is 1 */
    for (unsigned const_mod = 1; const_mod <= 1024; const_mod *= 2) {
       for (unsigned c = 0; c < 10; ++c) {
-         nir_ssa_def *sum = nir_iadd(b, invocation, v[c]);
+         nir_def *sum = nir_iadd(b, invocation, v[c]);
 
          unsigned mod = INT32_MAX;
 
@@ -132,7 +132,7 @@ TEST_F(nir_mod_analysis_test, const_mul_const)
    for (unsigned const_mod = 1; const_mod <= 1024; const_mod *= 2) {
       for (unsigned c1 = 0; c1 < 10; ++c1) {
          for (unsigned c2 = 0; c2 < 10; ++c2) {
-            nir_ssa_def *mul = nir_imul(b, v[c1], v[c2]);
+            nir_def *mul = nir_imul(b, v[c1], v[c2]);
 
             unsigned mod = INT32_MAX;
 
@@ -148,7 +148,7 @@ TEST_F(nir_mod_analysis_test, dynamic_mul_const)
    /* (invocation * const) % const_mod == 0 only if const % const_mod == 0, unknown otherwise */
    for (unsigned const_mod = 2; const_mod <= 1024; const_mod *= 2) {
       for (unsigned c = 0; c < 10; ++c) {
-         nir_ssa_def *mul = nir_imul(b, invocation, v[c]);
+         nir_def *mul = nir_imul(b, invocation, v[c]);
 
          unsigned mod = INT32_MAX;
 
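
The rule dynamic_mul_const exercises is ordinary modular arithmetic: when the
constant factor is itself a multiple of the modulus, the product is too for
any invocation value, even under 32-bit wrap-around. A standalone check with
hypothetical values:

   #include <assert.h>
   #include <stdint.h>

   int main(void)
   {
      /* (invocation * 8) % 4 == 0 for every invocation, since 8 % 4 == 0 */
      for (uint32_t invocation = 0; invocation < 100000u; invocation++)
         assert((invocation * 8u) % 4u == 0);
      return 0;
   }
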
@@ -167,8 +167,8 @@ TEST_F(nir_mod_analysis_test, dynamic_mul_const_swizzled)
    /* (invocation * const.y) % const_mod == 0 only if const.y % const_mod == 0, unknown otherwise */
    for (unsigned const_mod = 2; const_mod <= 1024; const_mod *= 2) {
       for (unsigned c = 0; c < 10; ++c) {
-         nir_ssa_def *vec2 = nir_imm_ivec2(b, 10 - c, c);
-         nir_ssa_def *mul = nir_imul_vec2y(b, invocation, vec2);
+         nir_def *vec2 = nir_imm_ivec2(b, 10 - c, c);
+         nir_def *mul = nir_imul_vec2y(b, invocation, vec2);
 
          unsigned mod = INT32_MAX;
 
@@ -189,7 +189,7 @@ TEST_F(nir_mod_analysis_test, dynamic_mul32x16_const)
     */
    for (unsigned const_mod = 1; const_mod <= (1u << 24); const_mod *= 2) {
       for (unsigned c = 0; c < 10; ++c) {
-         nir_ssa_def *mul = nir_imul_32x16(b, invocation, v[c]);
+         nir_def *mul = nir_imul_32x16(b, invocation, v[c]);
 
          unsigned mod = INT32_MAX;
 
@@ -208,7 +208,7 @@ TEST_F(nir_mod_analysis_test, dynamic_shl_const)
    /* (invocation << const) % const_mod == 0 only if const >= log2(const_mod), unknown otherwise */
    for (unsigned const_mod = 1; const_mod <= 1024; const_mod *= 2) {
       for (unsigned c = 0; c < 10; ++c) {
-         nir_ssa_def *shl = nir_ishl(b, invocation, v[c]);
+         nir_def *shl = nir_ishl(b, invocation, v[c]);
 
          unsigned mod = INT32_MAX;
 
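
dynamic_shl_const is the same fact in shift form: x << c is a multiple of 2^c,
so the remainder modulo a power-of-two const_mod is 0 exactly when 2^c covers
it. A standalone check with hypothetical values:

   #include <assert.h>
   #include <stdint.h>

   int main(void)
   {
      /* (x << 5) % 32 == 0 for all x: 1 << 5 == 32 >= const_mod == 32 */
      for (uint32_t x = 0; x < 100000u; x++)
         assert(((x << 5) % 32u) == 0);
      return 0;
   }
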
@@ -227,7 +227,7 @@ TEST_F(nir_mod_analysis_test, dynamic_shr_const)
    /* (invocation >> const) % const_mod should never be known, unless const_mod is 1 */
    for (unsigned const_mod = 1; const_mod <= 1024; const_mod *= 2) {
       for (unsigned i = 0; i < 10; ++i) {
-         nir_ssa_def *shr = nir_ishr(b, invocation, v[i]);
+         nir_def *shr = nir_ishr(b, invocation, v[i]);
 
          unsigned mod = INT32_MAX;
 
@@ -248,10 +248,10 @@ TEST_F(nir_mod_analysis_test, dynamic_mul_const_shr_const)
     *   (32 >> const) is not 0 and (32 >> const) % const_mod == 0
     *
     */
-   nir_ssa_def *inv_mul_32 = nir_imul(b, invocation, v[32]);
+   nir_def *inv_mul_32 = nir_imul(b, invocation, v[32]);
    for (unsigned const_mod = 1; const_mod <= 1024; const_mod *= 2) {
       for (unsigned c = 0; c < 8; ++c) {
-         nir_ssa_def *shr = nir_ishr(b, inv_mul_32, v[c]);
+         nir_def *shr = nir_ishr(b, inv_mul_32, v[c]);
 
          unsigned mod = INT32_MAX;
 
@@ -272,12 +272,12 @@ TEST_F(nir_mod_analysis_test, dynamic_mul_const_swizzled_shr_const)
     *   (32 >> const) is not 0 and (32 >> const) % const_mod == 0
     *
     */
-   nir_ssa_def *vec2 = nir_imm_ivec2(b, 31, 32);
-   nir_ssa_def *inv_mul_32 = nir_imul_vec2y(b, invocation, vec2);
+   nir_def *vec2 = nir_imm_ivec2(b, 31, 32);
+   nir_def *inv_mul_32 = nir_imul_vec2y(b, invocation, vec2);
 
    for (unsigned const_mod = 1; const_mod <= 1024; const_mod *= 2) {
       for (unsigned c = 0; c < 8; ++c) {
-         nir_ssa_def *shr = nir_ishr(b, inv_mul_32, v[c]);
+         nir_def *shr = nir_ishr(b, inv_mul_32, v[c]);
 
          unsigned mod = INT32_MAX;
 
@@ -297,7 +297,7 @@ TEST_F(nir_mod_analysis_test, const_shr_const)
    for (unsigned const_mod = 1; const_mod <= 1024; const_mod *= 2) {
       for (unsigned i = 0; i < 50; ++i) {
          for (unsigned j = 0; j < 6; ++j) {
-            nir_ssa_def *shr = nir_ishr(b, v[i], v[j]);
+            nir_def *shr = nir_ishr(b, v[i], v[j]);
 
             unsigned mod = INT32_MAX;
 
@@ -314,10 +314,10 @@ TEST_F(nir_mod_analysis_test, const_shr_const_overflow)
     * const_mod << const_shr is still below UINT32_MAX.
     */
    unsigned large_const_int = 0x12345678;
-   nir_ssa_def *large_const = nir_imm_int(b, large_const_int);
+   nir_def *large_const = nir_imm_int(b, large_const_int);
 
    for (unsigned shift = 0; shift < 30; ++shift) {
-      nir_ssa_def *shr = nir_ishr(b, large_const, v[shift]);
+      nir_def *shr = nir_ishr(b, large_const, v[shift]);
 
       for (unsigned const_mod = 1; const_mod <= 1024; const_mod *= 2) {
          unsigned mod = INT32_MAX;
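
The overflow guard in const_shr_const_overflow follows from the identity an
analysis of unsigned right shifts can rely on: (x >> s) % m equals
(x % (m << s)) >> s, which is only usable while m << s still fits in 32 bits
(my reading of the comment; the exact internal formula is an assumption). The
identity itself is easy to spot-check:

   #include <assert.h>
   #include <stdint.h>

   int main(void)
   {
      uint32_t x = 0x12345678u;   /* large_const_int from the test */
      for (uint32_t s = 0; s < 20; s++)
         for (uint32_t m = 1; m <= 1024u; m *= 2)   /* m << s < 2^32 here */
            assert(((x >> s) % m) == ((x % (m << s)) >> s));
      return 0;
   }
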
index 5daaaf1..f135156 100644
@@ -134,10 +134,10 @@ compare_with_negation(nir_type_uint64)
 
 TEST_F(alu_srcs_negative_equal_test, trivial_float)
 {
-   nir_ssa_def *two = nir_imm_float(&bld, 2.0f);
-   nir_ssa_def *negative_two = nir_imm_float(&bld, -2.0f);
+   nir_def *two = nir_imm_float(&bld, 2.0f);
+   nir_def *negative_two = nir_imm_float(&bld, -2.0f);
 
-   nir_ssa_def *result = nir_fadd(&bld, two, negative_two);
+   nir_def *result = nir_fadd(&bld, two, negative_two);
    nir_alu_instr *instr = nir_instr_as_alu(result->parent_instr);
 
    ASSERT_NE((void *) 0, instr);
@@ -148,10 +148,10 @@ TEST_F(alu_srcs_negative_equal_test, trivial_float)
 
 TEST_F(alu_srcs_negative_equal_test, trivial_int)
 {
-   nir_ssa_def *two = nir_imm_int(&bld, 2);
-   nir_ssa_def *negative_two = nir_imm_int(&bld, -2);
+   nir_def *two = nir_imm_int(&bld, 2);
+   nir_def *negative_two = nir_imm_int(&bld, -2);
 
-   nir_ssa_def *result = nir_iadd(&bld, two, negative_two);
+   nir_def *result = nir_iadd(&bld, two, negative_two);
    nir_alu_instr *instr = nir_instr_as_alu(result->parent_instr);
 
    ASSERT_NE((void *) 0, instr);
@@ -166,11 +166,11 @@ TEST_F(alu_srcs_negative_equal_test, trivial_negation_float)
     * nir_alu_srcs_negative_equal expects that constant folding will convert
     * fneg(2.0) to just -2.0.
     */
-   nir_ssa_def *two = nir_imm_float(&bld, 2.0f);
-   nir_ssa_def *two_plus_two = nir_fadd(&bld, two, two);
-   nir_ssa_def *negation = nir_fneg(&bld, two_plus_two);
+   nir_def *two = nir_imm_float(&bld, 2.0f);
+   nir_def *two_plus_two = nir_fadd(&bld, two, two);
+   nir_def *negation = nir_fneg(&bld, two_plus_two);
 
-   nir_ssa_def *result = nir_fadd(&bld, two_plus_two, negation);
+   nir_def *result = nir_fadd(&bld, two_plus_two, negation);
 
    nir_alu_instr *instr = nir_instr_as_alu(result->parent_instr);
 
@@ -186,11 +186,11 @@ TEST_F(alu_srcs_negative_equal_test, trivial_negation_int)
     * nir_alu_srcs_negative_equal expects that constant folding will convert
     * ineg(2) to just -2.
     */
-   nir_ssa_def *two = nir_imm_int(&bld, 2);
-   nir_ssa_def *two_plus_two = nir_iadd(&bld, two, two);
-   nir_ssa_def *negation = nir_ineg(&bld, two_plus_two);
+   nir_def *two = nir_imm_int(&bld, 2);
+   nir_def *two_plus_two = nir_iadd(&bld, two, two);
+   nir_def *negation = nir_ineg(&bld, two_plus_two);
 
-   nir_ssa_def *result = nir_iadd(&bld, two_plus_two, negation);
+   nir_def *result = nir_iadd(&bld, two_plus_two, negation);
 
    nir_alu_instr *instr = nir_instr_as_alu(result->parent_instr);
 
@@ -207,11 +207,11 @@ TEST_F(alu_srcs_negative_equal_test, trivial_negation_int)
 TEST_F(alu_srcs_negative_equal_test, full_type ## _self)                \
 {                                                                       \
    count_sequence(c1, full_type, 1);                                    \
-   nir_ssa_def *a = nir_build_imm(&bld,                                 \
+   nir_def *a = nir_build_imm(&bld,                                 \
                                   NIR_MAX_VEC_COMPONENTS,               \
                                   nir_alu_type_get_type_size(full_type), \
                                   c1);                                  \
-   nir_ssa_def *result;                                                 \
+   nir_def *result;                                                 \
    if (nir_alu_type_get_base_type(full_type) == nir_type_float)         \
       result = nir_fadd(&bld, a, a);                                    \
    else                                                                 \
@@ -243,15 +243,15 @@ TEST_F(alu_srcs_negative_equal_test, full_type ## _trivially_true)      \
 {                                                                       \
    count_sequence(c1, full_type, 1);                                    \
    negate(c2, c1, full_type, NIR_MAX_VEC_COMPONENTS);                   \
-   nir_ssa_def *a = nir_build_imm(&bld,                                 \
+   nir_def *a = nir_build_imm(&bld,                                 \
                                   NIR_MAX_VEC_COMPONENTS,               \
                                   nir_alu_type_get_type_size(full_type), \
                                   c1);                                  \
-   nir_ssa_def *b = nir_build_imm(&bld,                                 \
+   nir_def *b = nir_build_imm(&bld,                                 \
                                   NIR_MAX_VEC_COMPONENTS,               \
                                   nir_alu_type_get_type_size(full_type), \
                                   c2);                                  \
-   nir_ssa_def *result;                                                 \
+   nir_def *result;                                                 \
    if (nir_alu_type_get_base_type(full_type) == nir_type_float)         \
       result = nir_fadd(&bld, a, b);                                    \
    else                                                                 \
@@ -278,7 +278,7 @@ compare_with_negation(nir_type_uint64)
 
 TEST_F(alu_srcs_negative_equal_test, swizzle_scalar_to_vector)
 {
-   nir_ssa_def *v = nir_imm_vec2(&bld, 1.0, -1.0);
+   nir_def *v = nir_imm_vec2(&bld, 1.0, -1.0);
    const uint8_t s0[4] = { 0, 0, 0, 0 };
    const uint8_t s1[4] = { 1, 1, 1, 1 };
 
index 30b5285..886b24d 100644
@@ -29,7 +29,7 @@ protected:
 
    nir_builder bld;
 
-   nir_ssa_def *in_def;
+   nir_def *in_def;
    nir_variable *out_var;
 };
 
@@ -55,9 +55,9 @@ TEST_F(nir_opt_if_test, opt_if_simplification)
     * }
     */
 
-   nir_ssa_def *one = nir_imm_int(b, 1);
+   nir_def *one = nir_imm_int(b, 1);
 
-   nir_ssa_def *cmp_result = nir_ieq(b, in_def, one);
+   nir_def *cmp_result = nir_ieq(b, in_def, one);
    nir_if *nif = nir_push_if(b, cmp_result);
 
    nir_push_else(b, NULL);
@@ -92,9 +92,9 @@ TEST_F(nir_opt_if_test, opt_if_simplification_single_source_phi_after_if)
     * vec1 32 ssa_3 = phi block_2: ssa_0
     */
 
-   nir_ssa_def *one = nir_imm_int(b, 1);
+   nir_def *one = nir_imm_int(b, 1);
 
-   nir_ssa_def *cmp_result = nir_ieq(b, in_def, one);
+   nir_def *cmp_result = nir_ieq(b, in_def, one);
    nir_if *nif = nir_push_if(b, cmp_result);
 
    nir_push_else(b, NULL);
@@ -128,8 +128,8 @@ TEST_F(nir_opt_if_test, opt_if_simplification_single_source_phi_after_if)
 
 TEST_F(nir_opt_if_test, opt_if_alu_of_phi_progress)
 {
-   nir_ssa_def *two = nir_imm_int(b, 2);
-   nir_ssa_def *x = nir_imm_int(b, 0);
+   nir_def *two = nir_imm_int(b, 2);
+   nir_def *x = nir_imm_int(b, 0);
 
    nir_phi_instr *phi = nir_phi_instr_create(b->shader);
 
@@ -140,7 +140,7 @@ TEST_F(nir_opt_if_test, opt_if_alu_of_phi_progress)
 
       nir_phi_instr_add_src(phi, x->parent_instr->block, nir_src_for_ssa(x));
 
-      nir_ssa_def *y = nir_iadd(b, &phi->dest.ssa, two);
+      nir_def *y = nir_iadd(b, &phi->dest.ssa, two);
       nir_store_var(b, out_var,
                     nir_imul(b, &phi->dest.ssa, two), 1);
 
index ac6a067..caa7729 100644
@@ -27,7 +27,7 @@ class nir_opt_shrink_vectors_test : public nir_test {
 protected:
    nir_opt_shrink_vectors_test();
 
-   nir_ssa_def *in_def;
+   nir_def *in_def;
    nir_variable *out_var;
 };
 
@@ -84,9 +84,9 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_vectors_load_const_trailing_compo
     * vec1 32 ssa_2 = fmov ssa_1.x
     */
 
-   nir_ssa_def *imm_vec = nir_imm_vec4(b, 1.0, 2.0, 3.0, 4.0);
+   nir_def *imm_vec = nir_imm_vec4(b, 1.0, 2.0, 3.0, 4.0);
 
-   nir_ssa_def *alu_result = nir_build_alu1(b, nir_op_mov, imm_vec);
+   nir_def *alu_result = nir_build_alu1(b, nir_op_mov, imm_vec);
    nir_alu_instr *alu_instr = nir_instr_as_alu(alu_result->parent_instr);
    set_swizzle(&alu_instr->src[0], "x");
    alu_result->num_components = 1;
@@ -118,12 +118,12 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_vectors_alu_trailing_component_on
     * vec1 32 ssa_2 = fmov ssa_1.x
     */
 
-   nir_ssa_def *alu_result = nir_build_alu1(b, nir_op_mov, in_def);
+   nir_def *alu_result = nir_build_alu1(b, nir_op_mov, in_def);
    nir_alu_instr *alu_instr = nir_instr_as_alu(alu_result->parent_instr);
    alu_result->num_components = 4;
    set_swizzle(&alu_instr->src[0], "xyxx");
 
-   nir_ssa_def *alu2_result = nir_build_alu1(b, nir_op_mov, alu_result);
+   nir_def *alu2_result = nir_build_alu1(b, nir_op_mov, alu_result);
    nir_alu_instr *alu2_instr = nir_instr_as_alu(alu2_result->parent_instr);
    set_swizzle(&alu2_instr->src[0], "x");
    alu2_result->num_components = 1;
@@ -155,15 +155,15 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_vectors_simple)
     * vec1 32 ssa_4 = fdot3 ssa_3.xxy ssa_3.xxy
     */
 
-   nir_ssa_def *imm_vec = nir_imm_vec4(b, 3.0, 1.0, 2.0, 1.0);
+   nir_def *imm_vec = nir_imm_vec4(b, 3.0, 1.0, 2.0, 1.0);
 
-   nir_ssa_def *alu_result = nir_build_alu2(b, nir_op_fadd, in_def, imm_vec);
+   nir_def *alu_result = nir_build_alu2(b, nir_op_fadd, in_def, imm_vec);
    nir_alu_instr *alu_instr = nir_instr_as_alu(alu_result->parent_instr);
    alu_result->num_components = 4;
    set_swizzle(&alu_instr->src[0], "xxxy");
    set_swizzle(&alu_instr->src[1], "ywyz");
 
-   nir_ssa_def *alu2_result = nir_build_alu2(b, nir_op_fdot3, alu_result, alu_result);
+   nir_def *alu2_result = nir_build_alu2(b, nir_op_fdot3, alu_result, alu_result);
    nir_alu_instr *alu2_instr = nir_instr_as_alu(alu2_result->parent_instr);
    set_swizzle(&alu2_instr->src[0], "xzw");
    set_swizzle(&alu2_instr->src[1], "xzw");
@@ -220,15 +220,15 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_vectors_vec8)
       nir_const_value_for_float(2.0, 32),
       nir_const_value_for_float(6.0, 32),
    };
-   nir_ssa_def *imm_vec = nir_build_imm(b, 8, 32, v);
+   nir_def *imm_vec = nir_build_imm(b, 8, 32, v);
 
-   nir_ssa_def *alu_result = nir_build_alu2(b, nir_op_fadd, in_def, imm_vec);
+   nir_def *alu_result = nir_build_alu2(b, nir_op_fadd, in_def, imm_vec);
    nir_alu_instr *alu_instr = nir_instr_as_alu(alu_result->parent_instr);
    alu_result->num_components = 8;
    set_swizzle(&alu_instr->src[0], "xxxxxxxy");
    set_swizzle(&alu_instr->src[1], "afhdefgh");
 
-   nir_ssa_def *alu2_result = nir_build_alu2(b, nir_op_fdot8, alu_result, alu_result);
+   nir_def *alu2_result = nir_build_alu2(b, nir_op_fdot8, alu_result, alu_result);
    nir_alu_instr *alu2_instr = nir_instr_as_alu(alu2_result->parent_instr);
    set_swizzle(&alu2_instr->src[0], "accdefgh");
    set_swizzle(&alu2_instr->src[1], "accdefgh");
@@ -271,12 +271,12 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_simple)
     *
     * This mimics nir for loops that come out of nine+ttn.
     */
-   nir_ssa_def *v = nir_imm_vec4(b, 0.0, 0.0, 0.0, 0.0);
-   nir_ssa_def *increment = nir_imm_float(b, 1.0);
-   nir_ssa_def *loop_max = nir_imm_float(b, 3.0);
+   nir_def *v = nir_imm_vec4(b, 0.0, 0.0, 0.0, 0.0);
+   nir_def *increment = nir_imm_float(b, 1.0);
+   nir_def *loop_max = nir_imm_float(b, 3.0);
 
    nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
-   nir_ssa_def *phi_def = &phi->dest.ssa;
+   nir_def *phi_def = &phi->dest.ssa;
 
    nir_loop *loop = nir_push_loop(b);
 
@@ -286,7 +286,7 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_simple)
    nir_phi_instr_add_src(phi, v->parent_instr->block,
                          nir_src_for_ssa(v));
 
-   nir_ssa_def *fge = nir_fge(b, phi_def, loop_max);
+   nir_def *fge = nir_fge(b, phi_def, loop_max);
    nir_alu_instr *fge_alu_instr = nir_instr_as_alu(fge->parent_instr);
    fge->num_components = 1;
    fge_alu_instr->src[0].swizzle[0] = 1;
@@ -298,17 +298,17 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_simple)
    }
    nir_pop_if(b, nif);
 
-   nir_ssa_def *fadd = nir_fadd(b, phi_def, increment);
+   nir_def *fadd = nir_fadd(b, phi_def, increment);
    nir_alu_instr *fadd_alu_instr = nir_instr_as_alu(fadd->parent_instr);
    fadd->num_components = 1;
    fadd_alu_instr->src[0].swizzle[0] = 1;
 
-   nir_ssa_scalar srcs[4] = {{0}};
+   nir_scalar srcs[4] = {{0}};
    for (unsigned i = 0; i < 4; i++) {
       srcs[i] = nir_get_ssa_scalar(phi_def, i);
    }
    srcs[1] = nir_get_ssa_scalar(fadd, 0);
-   nir_ssa_def *vec = nir_vec_scalars(b, srcs, 4);
+   nir_def *vec = nir_vec_scalars(b, srcs, 4);
 
    nir_phi_instr_add_src(phi, vec->parent_instr->block,
                          nir_src_for_ssa(vec));
@@ -378,12 +378,12 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_swizzle)
     *       v = vec4(v.x, v.z + 1, v.y, v.w);
     *    }
     */
-   nir_ssa_def *v = nir_imm_vec4(b, 0.0, 0.0, 0.0, 0.0);
-   nir_ssa_def *increment = nir_imm_float(b, 1.0);
-   nir_ssa_def *loop_max = nir_imm_float(b, 3.0);
+   nir_def *v = nir_imm_vec4(b, 0.0, 0.0, 0.0, 0.0);
+   nir_def *increment = nir_imm_float(b, 1.0);
+   nir_def *loop_max = nir_imm_float(b, 3.0);
 
    nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
-   nir_ssa_def *phi_def = &phi->dest.ssa;
+   nir_def *phi_def = &phi->dest.ssa;
 
    nir_loop *loop = nir_push_loop(b);
 
@@ -393,7 +393,7 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_swizzle)
    nir_phi_instr_add_src(phi, v->parent_instr->block,
                          nir_src_for_ssa(v));
 
-   nir_ssa_def *fge = nir_fge(b, phi_def, loop_max);
+   nir_def *fge = nir_fge(b, phi_def, loop_max);
    nir_alu_instr *fge_alu_instr = nir_instr_as_alu(fge->parent_instr);
    fge->num_components = 1;
    fge_alu_instr->src[0].swizzle[0] = 2;
@@ -405,17 +405,17 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_swizzle)
 
    nir_pop_if(b, nif);
 
-   nir_ssa_def *fadd = nir_fadd(b, phi_def, increment);
+   nir_def *fadd = nir_fadd(b, phi_def, increment);
    nir_alu_instr *fadd_alu_instr = nir_instr_as_alu(fadd->parent_instr);
    fadd->num_components = 1;
    fadd_alu_instr->src[0].swizzle[0] = 2;
 
-   nir_ssa_scalar srcs[4] = {{0}};
+   nir_scalar srcs[4] = {{0}};
    srcs[0] = nir_get_ssa_scalar(phi_def, 0);
    srcs[1] = nir_get_ssa_scalar(fadd, 0);
    srcs[2] = nir_get_ssa_scalar(phi_def, 1);
    srcs[3] = nir_get_ssa_scalar(phi_def, 3);
-   nir_ssa_def *vec = nir_vec_scalars(b, srcs, 4);
+   nir_def *vec = nir_vec_scalars(b, srcs, 4);
 
    nir_phi_instr_add_src(phi, vec->parent_instr->block,
                          nir_src_for_ssa(vec));
@@ -486,12 +486,12 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_phi_out)
     *    }
     *    out = v;
     */
-   nir_ssa_def *v = nir_imm_vec4(b, 0.0, 0.0, 0.0, 0.0);
-   nir_ssa_def *increment = nir_imm_float(b, 1.0);
-   nir_ssa_def *loop_max = nir_imm_float(b, 3.0);
+   nir_def *v = nir_imm_vec4(b, 0.0, 0.0, 0.0, 0.0);
+   nir_def *increment = nir_imm_float(b, 1.0);
+   nir_def *loop_max = nir_imm_float(b, 3.0);
 
    nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
-   nir_ssa_def *phi_def = &phi->dest.ssa;
+   nir_def *phi_def = &phi->dest.ssa;
 
    nir_loop *loop = nir_push_loop(b);
 
@@ -501,7 +501,7 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_phi_out)
    nir_phi_instr_add_src(phi, v->parent_instr->block,
                          nir_src_for_ssa(v));
 
-   nir_ssa_def *fge = nir_fge(b, phi_def, loop_max);
+   nir_def *fge = nir_fge(b, phi_def, loop_max);
    nir_alu_instr *fge_alu_instr = nir_instr_as_alu(fge->parent_instr);
    fge->num_components = 1;
    fge_alu_instr->src[0].swizzle[0] = 1;
@@ -513,17 +513,17 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_phi_out)
    }
    nir_pop_if(b, nif);
 
-   nir_ssa_def *fadd = nir_fadd(b, phi_def, increment);
+   nir_def *fadd = nir_fadd(b, phi_def, increment);
    nir_alu_instr *fadd_alu_instr = nir_instr_as_alu(fadd->parent_instr);
    fadd->num_components = 1;
    fadd_alu_instr->src[0].swizzle[0] = 1;
 
-   nir_ssa_scalar srcs[4] = {{0}};
+   nir_scalar srcs[4] = {{0}};
    for (unsigned i = 0; i < 4; i++) {
       srcs[i] = nir_get_ssa_scalar(phi_def, i);
    }
    srcs[1] = nir_get_ssa_scalar(fadd, 0);
-   nir_ssa_def *vec = nir_vec_scalars(b, srcs, 4);
+   nir_def *vec = nir_vec_scalars(b, srcs, 4);
 
    nir_phi_instr_add_src(phi, vec->parent_instr->block,
                          nir_src_for_ssa(vec));
index 23aa7f2..e67eef1 100644
 class ssa_def_bits_used_test : public nir_test {
 protected:
    ssa_def_bits_used_test()
-      : nir_test::nir_test("nir_ssa_def_bits_used_test")
+      : nir_test::nir_test("nir_def_bits_used_test")
    {
    }
 
-   nir_alu_instr *build_alu_instr(nir_op op, nir_ssa_def *, nir_ssa_def *);
+   nir_alu_instr *build_alu_instr(nir_op op, nir_def *, nir_def *);
 };
 
 class unsigned_upper_bound_test : public nir_test {
@@ -42,16 +42,16 @@ protected:
 };
 
 static bool
-is_used_once(const nir_ssa_def *def)
+is_used_once(const nir_def *def)
 {
    return list_is_singular(&def->uses);
 }
 
 nir_alu_instr *
 ssa_def_bits_used_test::build_alu_instr(nir_op op,
-                                        nir_ssa_def *src0, nir_ssa_def *src1)
+                                        nir_def *src0, nir_def *src1)
 {
-   nir_ssa_def *def = nir_build_alu(b, op, src0, src1, NULL, NULL);
+   nir_def *def = nir_build_alu(b, op, src0, src1, NULL, NULL);
 
    if (def == NULL)
       return NULL;
@@ -70,10 +70,10 @@ TEST_F(ssa_def_bits_used_test, iand_with_const_vector)
 {
    static const unsigned src0_imm[4] = { 255u << 24, 255u << 16, 255u << 8, 255u };
 
-   nir_ssa_def *src0 = nir_imm_ivec4(b,
+   nir_def *src0 = nir_imm_ivec4(b,
                                      src0_imm[0], src0_imm[1],
                                      src0_imm[2], src0_imm[3]);
-   nir_ssa_def *src1 = nir_imm_int(b, 0xffffffff);
+   nir_def *src1 = nir_imm_int(b, 0xffffffff);
 
    nir_alu_instr *alu = build_alu_instr(nir_op_iand, src0, src1);
 
@@ -81,14 +81,14 @@ TEST_F(ssa_def_bits_used_test, iand_with_const_vector)
 
    for (unsigned i = 0; i < 4; i++) {
       /* If the test is changed, and somehow src1 is used multiple times,
-       * nir_ssa_def_bits_used will accumulate *all* the uses (as it should).
+       * nir_def_bits_used will accumulate *all* the uses (as it should).
        * This isn't what we're trying to test here.
        */
       ASSERT_TRUE(is_used_once(src1));
 
       alu->src[0].swizzle[0] = i;
 
-      const uint64_t bits_used = nir_ssa_def_bits_used(alu->src[1].src.ssa);
+      const uint64_t bits_used = nir_def_bits_used(alu->src[1].src.ssa);
 
       /* The answer should be the value swizzled from src0. */
       EXPECT_EQ(src0_imm[i], bits_used);
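
The expectation in iand_with_const_vector is bit masking: an AND with a
constant can only observe the constant's set bits, so nir_def_bits_used on the
other source reports exactly that mask (the ior variant below reports the
complement). A standalone illustration with hypothetical values:

   #include <assert.h>
   #include <stdint.h>

   int main(void)
   {
      uint32_t mask = 255u << 16;
      uint32_t a = 0xdeadbeefu;
      uint32_t b = a ^ ~mask;            /* flip every bit outside the mask */
      assert((a & mask) == (b & mask));  /* the AND result never notices    */
      return 0;
   }
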
@@ -99,10 +99,10 @@ TEST_F(ssa_def_bits_used_test, ior_with_const_vector)
 {
    static const unsigned src0_imm[4] = { 255u << 24, 255u << 16, 255u << 8, 255u };
 
-   nir_ssa_def *src0 = nir_imm_ivec4(b,
+   nir_def *src0 = nir_imm_ivec4(b,
                                      src0_imm[0], src0_imm[1],
                                      src0_imm[2], src0_imm[3]);
-   nir_ssa_def *src1 = nir_imm_int(b, 0xffffffff);
+   nir_def *src1 = nir_imm_int(b, 0xffffffff);
 
    nir_alu_instr *alu = build_alu_instr(nir_op_ior, src0, src1);
 
@@ -110,14 +110,14 @@ TEST_F(ssa_def_bits_used_test, ior_with_const_vector)
 
    for (unsigned i = 0; i < 4; i++) {
       /* If the test is changed, and somehow src1 is used multiple times,
-       * nir_ssa_def_bits_used will accumulate *all* the uses (as it should).
+       * nir_def_bits_used will accumulate *all* the uses (as it should).
        * This isn't what we're trying to test here.
        */
       ASSERT_TRUE(is_used_once(src1));
 
       alu->src[0].swizzle[0] = i;
 
-      const uint64_t bits_used = nir_ssa_def_bits_used(alu->src[1].src.ssa);
+      const uint64_t bits_used = nir_def_bits_used(alu->src[1].src.ssa);
 
       /* The answer should be the value swizzled from ~src0. */
       EXPECT_EQ(~src0_imm[i], bits_used);
@@ -126,11 +126,11 @@ TEST_F(ssa_def_bits_used_test, ior_with_const_vector)
 
 TEST_F(ssa_def_bits_used_test, extract_i16_with_const_index)
 {
-   nir_ssa_def *src0 = nir_imm_int(b, 0xffffffff);
+   nir_def *src0 = nir_imm_int(b, 0xffffffff);
 
    static const unsigned src1_imm[4] = { 9, 1, 0, 9 };
 
-   nir_ssa_def *src1 = nir_imm_ivec4(b,
+   nir_def *src1 = nir_imm_ivec4(b,
                                      src1_imm[0],
                                      src1_imm[1],
                                      src1_imm[2],
@@ -142,14 +142,14 @@ TEST_F(ssa_def_bits_used_test, extract_i16_with_const_index)
 
    for (unsigned i = 1; i < 3; i++) {
       /* If the test is changed, and somehow src1 is used multiple times,
-       * nir_ssa_def_bits_used will accumulate *all* the uses (as it should).
+       * nir_def_bits_used will accumulate *all* the uses (as it should).
        * This isn't what we're trying to test here.
        */
       ASSERT_TRUE(is_used_once(src1));
 
       alu->src[1].swizzle[0] = i;
 
-      const uint64_t bits_used = nir_ssa_def_bits_used(alu->src[0].src.ssa);
+      const uint64_t bits_used = nir_def_bits_used(alu->src[0].src.ssa);
 
       EXPECT_EQ(0xffffu << (16 * src1_imm[i]), bits_used);
    }
@@ -157,11 +157,11 @@ TEST_F(ssa_def_bits_used_test, extract_i16_with_const_index)
 
 TEST_F(ssa_def_bits_used_test, extract_u16_with_const_index)
 {
-   nir_ssa_def *src0 = nir_imm_int(b, 0xffffffff);
+   nir_def *src0 = nir_imm_int(b, 0xffffffff);
 
    static const unsigned src1_imm[4] = { 9, 1, 0, 9 };
 
-   nir_ssa_def *src1 = nir_imm_ivec4(b,
+   nir_def *src1 = nir_imm_ivec4(b,
                                      src1_imm[0],
                                      src1_imm[1],
                                      src1_imm[2],
@@ -173,14 +173,14 @@ TEST_F(ssa_def_bits_used_test, extract_u16_with_const_index)
 
    for (unsigned i = 1; i < 3; i++) {
       /* If the test is changed, and somehow src1 is used multiple times,
-       * nir_ssa_def_bits_used will accumulate *all* the uses (as it should).
+       * nir_def_bits_used will accumulate *all* the uses (as it should).
        * This isn't what we're trying to test here.
        */
       ASSERT_TRUE(is_used_once(src1));
 
       alu->src[1].swizzle[0] = i;
 
-      const uint64_t bits_used = nir_ssa_def_bits_used(alu->src[0].src.ssa);
+      const uint64_t bits_used = nir_def_bits_used(alu->src[0].src.ssa);
 
       EXPECT_EQ(0xffffu << (16 * src1_imm[i]), bits_used);
    }
@@ -188,11 +188,11 @@ TEST_F(ssa_def_bits_used_test, extract_u16_with_const_index)
 
 TEST_F(ssa_def_bits_used_test, extract_i8_with_const_index)
 {
-   nir_ssa_def *src0 = nir_imm_int(b, 0xffffffff);
+   nir_def *src0 = nir_imm_int(b, 0xffffffff);
 
    static const unsigned src1_imm[4] = { 3, 2, 1, 0 };
 
-   nir_ssa_def *src1 = nir_imm_ivec4(b,
+   nir_def *src1 = nir_imm_ivec4(b,
                                      src1_imm[0],
                                      src1_imm[1],
                                      src1_imm[2],
@@ -204,14 +204,14 @@ TEST_F(ssa_def_bits_used_test, extract_i8_with_const_index)
 
    for (unsigned i = 0; i < 4; i++) {
       /* If the test is changed, and somehow src1 is used multiple times,
-       * nir_ssa_def_bits_used will accumulate *all* the uses (as it should).
+       * nir_def_bits_used will accumulate *all* the uses (as it should).
        * This isn't what we're trying to test here.
        */
       ASSERT_TRUE(is_used_once(src1));
 
       alu->src[1].swizzle[0] = i;
 
-      const uint64_t bits_used = nir_ssa_def_bits_used(alu->src[0].src.ssa);
+      const uint64_t bits_used = nir_def_bits_used(alu->src[0].src.ssa);
 
       EXPECT_EQ(0xffu << (8 * src1_imm[i]), bits_used);
    }
@@ -219,11 +219,11 @@ TEST_F(ssa_def_bits_used_test, extract_i8_with_const_index)
 
 TEST_F(ssa_def_bits_used_test, extract_u8_with_const_index)
 {
-   nir_ssa_def *src0 = nir_imm_int(b, 0xffffffff);
+   nir_def *src0 = nir_imm_int(b, 0xffffffff);
 
    static const unsigned src1_imm[4] = { 3, 2, 1, 0 };
 
-   nir_ssa_def *src1 = nir_imm_ivec4(b,
+   nir_def *src1 = nir_imm_ivec4(b,
                                      src1_imm[0],
                                      src1_imm[1],
                                      src1_imm[2],
@@ -235,14 +235,14 @@ TEST_F(ssa_def_bits_used_test, extract_u8_with_const_index)
 
    for (unsigned i = 0; i < 4; i++) {
       /* If the test is changed, and somehow src1 is used multiple times,
-       * nir_ssa_def_bits_used will accumulate *all* the uses (as it should).
+       * nir_def_bits_used will accumulate *all* the uses (as it should).
        * This isn't what we're trying to test here.
        */
       ASSERT_TRUE(is_used_once(src1));
 
       alu->src[1].swizzle[0] = i;
 
-      const uint64_t bits_used = nir_ssa_def_bits_used(alu->src[0].src.ssa);
+      const uint64_t bits_used = nir_def_bits_used(alu->src[0].src.ssa);
 
       EXPECT_EQ(0xffu << (8 * src1_imm[i]), bits_used);
    }
@@ -268,15 +268,15 @@ TEST_F(unsigned_upper_bound_test, loop_phi_bcsel)
     *     block b3:
     * }
     */
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *two = nir_imm_int(b, 2);
-   nir_ssa_def *cond = nir_imm_false(b);
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *two = nir_imm_int(b, 2);
+   nir_def *cond = nir_imm_false(b);
 
    nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
    nir_ssa_dest_init(&phi->instr, &phi->dest, 1, 32);
 
    nir_push_loop(b);
-   nir_ssa_def *sel = nir_bcsel(b, cond, &phi->dest.ssa, two);
+   nir_def *sel = nir_bcsel(b, cond, &phi->dest.ssa, two);
    nir_pop_loop(b, NULL);
 
    nir_phi_instr_add_src(phi, zero->parent_instr->block,
@@ -289,7 +289,7 @@ TEST_F(unsigned_upper_bound_test, loop_phi_bcsel)
    nir_validate_shader(b->shader, NULL);
 
    struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);
-   nir_ssa_scalar scalar = nir_get_ssa_scalar(&phi->dest.ssa, 0);
+   nir_scalar scalar = nir_get_ssa_scalar(&phi->dest.ssa, 0);
    EXPECT_EQ(nir_unsigned_upper_bound(b->shader, range_ht, scalar, NULL), 2);
    _mesa_hash_table_destroy(range_ht, NULL);
 }
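
loop_phi_bcsel is a fixed-point question: the phi starts at 0 and each
iteration feeds it bcsel(false, phi, 2), so no value above 2 can ever flow in.
A direct C analogue of the loop (the real pass reasons symbolically over the
phi web rather than executing it):

   #include <assert.h>
   #include <stdbool.h>
   #include <stdint.h>

   int main(void)
   {
      bool cond = false;                  /* nir_imm_false */
      uint32_t phi = 0;                   /* phi(entry: 0, loop: sel) */
      for (int trip = 0; trip < 100; trip++) {
         uint32_t sel = cond ? phi : 2;   /* bcsel(cond, phi, 2) */
         phi = sel;
         assert(phi <= 2);                /* nir_unsigned_upper_bound == 2 */
      }
      return 0;
   }
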
index 983a672..dca0963 100644
@@ -124,8 +124,8 @@ INSTANTIATE_TEST_SUITE_P(
 
 TEST_P(nir_serialize_all_test, alu_single_value_src_swizzle)
 {
-   nir_ssa_def *zero = nir_imm_zero(b, GetParam(), 32);
-   nir_ssa_def *fmax = nir_fmax(b, zero, zero);
+   nir_def *zero = nir_imm_zero(b, GetParam(), 32);
+   nir_def *fmax = nir_fmax(b, zero, zero);
 
    nir_alu_instr *fmax_alu = nir_instr_as_alu(fmax->parent_instr);
 
@@ -142,15 +142,15 @@ TEST_P(nir_serialize_all_test, alu_single_value_src_swizzle)
 
 TEST_P(nir_serialize_all_test, alu_vec)
 {
-   nir_ssa_def *undef = nir_ssa_undef(b, GetParam(), 32);
-   nir_ssa_def *undefs[] = {
+   nir_def *undef = nir_undef(b, GetParam(), 32);
+   nir_def *undefs[] = {
       undef, undef, undef, undef,
       undef, undef, undef, undef,
       undef, undef, undef, undef,
       undef, undef, undef, undef,
    };
 
-   nir_ssa_def *vec = nir_vec(b, undefs, GetParam());
+   nir_def *vec = nir_vec(b, undefs, GetParam());
    nir_alu_instr *vec_alu = nir_instr_as_alu(vec->parent_instr);
    for (int i = 0; i < GetParam(); i++)
       vec_alu->src[i].swizzle[0] = (GetParam() - 1) - i;
@@ -164,8 +164,8 @@ TEST_P(nir_serialize_all_test, alu_vec)
 
 TEST_P(nir_serialize_all_test, alu_two_components_full_swizzle)
 {
-   nir_ssa_def *undef = nir_ssa_undef(b, 2, 32);
-   nir_ssa_def *fma = nir_ffma(b, undef, undef, undef);
+   nir_def *undef = nir_undef(b, 2, 32);
+   nir_def *fma = nir_ffma(b, undef, undef, undef);
    nir_alu_instr *fma_alu = nir_instr_as_alu(fma->parent_instr);
 
    fma->num_components = GetParam();
@@ -185,8 +185,8 @@ TEST_P(nir_serialize_all_test, alu_two_components_full_swizzle)
 
 TEST_P(nir_serialize_all_but_one_test, single_channel)
 {
-   nir_ssa_def *zero = nir_ssa_undef(b, GetParam(), 32);
-   nir_ssa_def *vec = nir_channel(b, zero, GetParam() - 1);
+   nir_def *zero = nir_undef(b, GetParam(), 32);
+   nir_def *vec = nir_channel(b, zero, GetParam() - 1);
    nir_alu_instr *vec_alu = nir_instr_as_alu(vec->parent_instr);
 
    serialize();
index 3dcd5d7..02b1d37 100644
@@ -187,7 +187,7 @@ class nir_remove_dead_variables_test : public nir_vars_test {};
 
 } // namespace
 
-static nir_ssa_def *
+static nir_def *
 nir_load_var_volatile(nir_builder *b, nir_variable *var)
 {
    return nir_load_deref_with_access(b, nir_build_deref_var(b, var),
@@ -196,7 +196,7 @@ nir_load_var_volatile(nir_builder *b, nir_variable *var)
 
 static void
 nir_store_var_volatile(nir_builder *b, nir_variable *var,
-                       nir_ssa_def *value, nir_component_mask_t writemask)
+                       nir_def *value, nir_component_mask_t writemask)
 {
    nir_store_deref_with_access(b, nir_build_deref_var(b, var),
                                value, writemask, ACCESS_VOLATILE);
@@ -399,10 +399,10 @@ TEST_F(nir_copy_prop_vars_test, simple_store_load)
    nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
    unsigned mask = 1 | 2;
 
-   nir_ssa_def *stored_value = nir_imm_ivec2(b, 10, 20);
+   nir_def *stored_value = nir_imm_ivec2(b, 10, 20);
    nir_store_var(b, v[0], stored_value, mask);
 
-   nir_ssa_def *read_value = nir_load_var(b, v[0]);
+   nir_def *read_value = nir_load_var(b, v[0]);
    nir_store_var(b, v[1], read_value, mask);
 
    nir_validate_shader(b->shader, NULL);
@@ -425,13 +425,13 @@ TEST_F(nir_copy_prop_vars_test, store_store_load)
    nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
    unsigned mask = 1 | 2;
 
-   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
+   nir_def *first_value = nir_imm_ivec2(b, 10, 20);
    nir_store_var(b, v[0], first_value, mask);
 
-   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
+   nir_def *second_value = nir_imm_ivec2(b, 30, 40);
    nir_store_var(b, v[0], second_value, mask);
 
-   nir_ssa_def *read_value = nir_load_var(b, v[0]);
+   nir_def *read_value = nir_load_var(b, v[0]);
    nir_store_var(b, v[1], read_value, mask);
 
    nir_validate_shader(b->shader, NULL);
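
store_store_load is the textbook forwarding case: the second full store makes
the subsequent load's value known, so copy propagation forwards it and the
load goes away. The scalar C analogue, with the test's hypothetical values:

   #include <assert.h>

   int main(void)
   {
      int v0, v1;
      v0 = 10;   /* first store, fully overwritten before any read */
      v0 = 30;   /* second store: the loaded value is now known    */
      v1 = v0;   /* copy-prop rewrites this into v1 = 30           */
      assert(v1 == 30);
      return 0;
   }
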
@@ -453,13 +453,13 @@ TEST_F(nir_copy_prop_vars_test, store_store_load_different_components)
 {
    nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
 
-   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
+   nir_def *first_value = nir_imm_ivec2(b, 10, 20);
    nir_store_var(b, v[0], first_value, 1 << 1);
 
-   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
+   nir_def *second_value = nir_imm_ivec2(b, 30, 40);
    nir_store_var(b, v[0], second_value, 1 << 0);
 
-   nir_ssa_def *read_value = nir_load_var(b, v[0]);
+   nir_def *read_value = nir_load_var(b, v[0]);
    nir_store_var(b, v[1], read_value, 1 << 1);
 
    nir_validate_shader(b->shader, NULL);
@@ -486,19 +486,19 @@ TEST_F(nir_copy_prop_vars_test, store_store_load_different_components_in_many_bl
 {
    nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
 
-   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
+   nir_def *first_value = nir_imm_ivec2(b, 10, 20);
    nir_store_var(b, v[0], first_value, 1 << 1);
 
    /* Adding an if statement will cause blocks to be created. */
    nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
 
-   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
+   nir_def *second_value = nir_imm_ivec2(b, 30, 40);
    nir_store_var(b, v[0], second_value, 1 << 0);
 
    /* Adding an if statement will cause blocks to be created. */
    nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
 
-   nir_ssa_def *read_value = nir_load_var(b, v[0]);
+   nir_def *read_value = nir_load_var(b, v[0]);
    nir_store_var(b, v[1], read_value, 1 << 1);
 
    nir_validate_shader(b->shader, NULL);
@@ -526,16 +526,16 @@ TEST_F(nir_copy_prop_vars_test, store_volatile)
    nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
    unsigned mask = 1 | 2;
 
-   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
+   nir_def *first_value = nir_imm_ivec2(b, 10, 20);
    nir_store_var(b, v[0], first_value, mask);
 
-   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
+   nir_def *second_value = nir_imm_ivec2(b, 30, 40);
    nir_store_var_volatile(b, v[0], second_value, mask);
 
-   nir_ssa_def *third_value = nir_imm_ivec2(b, 50, 60);
+   nir_def *third_value = nir_imm_ivec2(b, 50, 60);
    nir_store_var(b, v[0], third_value, mask);
 
-   nir_ssa_def *read_value = nir_load_var(b, v[0]);
+   nir_def *read_value = nir_load_var(b, v[0]);
    nir_store_var(b, v[1], read_value, mask);
 
    nir_validate_shader(b->shader, NULL);
@@ -1029,13 +1029,13 @@ TEST_F(nir_copy_prop_vars_test, simple_store_load_in_two_blocks)
    nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
    unsigned mask = 1 | 2;
 
-   nir_ssa_def *stored_value = nir_imm_ivec2(b, 10, 20);
+   nir_def *stored_value = nir_imm_ivec2(b, 10, 20);
    nir_store_var(b, v[0], stored_value, mask);
 
    /* Adding an if statement will cause blocks to be created. */
    nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
 
-   nir_ssa_def *read_value = nir_load_var(b, v[0]);
+   nir_def *read_value = nir_load_var(b, v[0]);
    nir_store_var(b, v[1], read_value, mask);
 
    nir_validate_shader(b->shader, NULL);
@@ -1066,7 +1066,7 @@ TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previou
    /* This load will be dropped, as vec.y (or vec[1]) is already known. */
    nir_deref_instr *deref =
       nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
-   nir_ssa_def *loaded_from_deref = nir_load_deref(b, deref);
+   nir_def *loaded_from_deref = nir_load_deref(b, deref);
 
    /* This store should use the value loaded from in1. */
    nir_store_var(b, out, loaded_from_deref, 1 << 0);
@@ -1127,7 +1127,7 @@ TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_gets_reused)
    nir_store_var(b, vec, nir_load_var(b, in0), 1 << 0);
 
    /* This load will be dropped, since both vec.x and vec.y are known. */
-   nir_ssa_def *loaded_from_vec = nir_load_var(b, vec);
+   nir_def *loaded_from_vec = nir_load_var(b, vec);
    nir_store_var(b, out, loaded_from_vec, 0x3);
 
    nir_validate_shader(b->shader, NULL);
@@ -1194,7 +1194,7 @@ TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref_on_vector)
    nir_variable *idx = create_int(nir_var_mem_global, "idx");
    nir_variable *out = create_int(nir_var_mem_global, "out");
 
-   nir_ssa_def *idx_ssa = nir_load_var(b, idx);
+   nir_def *idx_ssa = nir_load_var(b, idx);
 
    /* Store to vec[idx]. */
    nir_deref_instr *store_deref =
@@ -1229,7 +1229,7 @@ TEST_F(nir_copy_prop_vars_test, store_load_direct_and_indirect_array_deref_on_ve
    nir_variable *idx = create_int(nir_var_mem_global, "idx");
    nir_variable **out = create_many_int(nir_var_mem_global, "out", 2);
 
-   nir_ssa_def *idx_ssa = nir_load_var(b, idx);
+   nir_def *idx_ssa = nir_load_var(b, idx);
 
    /* Store to vec. */
    nir_store_var(b, vec, nir_imm_ivec2(b, 10, 10), 1 | 2);
@@ -1267,7 +1267,7 @@ TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref)
    nir_variable *idx = create_int(nir_var_mem_global, "idx");
    nir_variable *out = create_int(nir_var_mem_global, "out");
 
-   nir_ssa_def *idx_ssa = nir_load_var(b, idx);
+   nir_def *idx_ssa = nir_load_var(b, idx);
 
    /* Store to arr[idx]. */
    nir_deref_instr *store_deref =
@@ -1624,7 +1624,7 @@ TEST_F(nir_dead_write_vars_test, dead_write_in_block)
    nir_variable **v = create_many_int(nir_var_mem_global, "v", 3);
 
    nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
-   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
+   nir_def *load_v2 = nir_load_var(b, v[2]);
    nir_store_var(b, v[0], load_v2, 1);
 
    bool progress = nir_opt_dead_write_vars(b->shader);
@@ -1641,7 +1641,7 @@ TEST_F(nir_dead_write_vars_test, dead_write_components_in_block)
    nir_variable **v = create_many_ivec2(nir_var_mem_global, "v", 3);
 
    nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);
-   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
+   nir_def *load_v2 = nir_load_var(b, v[2]);
    nir_store_var(b, v[0], load_v2, 1 << 0);
 
    bool progress = nir_opt_dead_write_vars(b->shader);
@@ -1664,7 +1664,7 @@ TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_in_two_blocks)
    nir_variable **v = create_many_int(nir_var_mem_global, "v", 3);
 
    nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
-   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
+   nir_def *load_v2 = nir_load_var(b, v[2]);
 
    /* Causes the stores to be in different blocks. */
    nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
@@ -1689,7 +1689,7 @@ TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_components_in_two_blocks)
    /* Causes the stores to be in different blocks. */
    nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
 
-   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
+   nir_def *load_v2 = nir_load_var(b, v[2]);
    nir_store_var(b, v[0], load_v2, 1 << 0);
 
    bool progress = nir_opt_dead_write_vars(b->shader);
@@ -1709,11 +1709,11 @@ TEST_F(nir_dead_write_vars_test, DISABLED_dead_writes_in_if_statement)
    nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
 
    nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
-   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
+   nir_def *load_v2 = nir_load_var(b, v[2]);
    nir_store_var(b, v[0], load_v2, 1);
 
    nir_push_else(b, if_stmt);
-   nir_ssa_def *load_v3 = nir_load_var(b, v[3]);
+   nir_def *load_v3 = nir_load_var(b, v[3]);
    nir_store_var(b, v[0], load_v3, 1);
 
    nir_pop_if(b, if_stmt);
@@ -1947,9 +1947,9 @@ vec_src_comp_as_int(nir_src src, unsigned comp)
    if (nir_src_is_const(src))
       return nir_src_comp_as_int(src, comp);
 
-   nir_ssa_scalar s = { src.ssa, comp };
-   assert(nir_op_is_vec(nir_ssa_scalar_alu_op(s)));
-   return nir_ssa_scalar_as_int(nir_ssa_scalar_chase_alu_src(s, comp));
+   nir_scalar s = { src.ssa, comp };
+   assert(nir_op_is_vec(nir_scalar_alu_op(s)));
+   return nir_scalar_as_int(nir_scalar_chase_alu_src(s, comp));
 }
 
 TEST_F(nir_combine_stores_test, store_volatile)
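
In the builder-based tests above, the last argument to nir_store_var is a per-component write mask, which is why the cases mix mask = 1 | 2, 1 << 0, and 1 << 1. A minimal sketch of the convention, assuming the fixture-provided builder b and ivec2 variables v used throughout these tests; the rename only touches the declared type of the temporaries, never the builder calls:

   nir_def *value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], value, 1 << 0); /* write only v[0].x */
   nir_store_var(b, v[0], value, 1 << 1); /* write only v[0].y */
   nir_store_var(b, v[0], value, 1 | 2);  /* write both components */
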
index 97ecd97..32f43e2 100644
@@ -269,7 +269,7 @@ vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
    if (glsl_type_is_vector_or_scalar(type)) {
       unsigned num_components = glsl_get_vector_elements(val->type);
       unsigned bit_size = glsl_get_bit_size(val->type);
-      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
+      val->def = nir_undef(&b->nb, num_components, bit_size);
    } else {
       unsigned elems = glsl_get_length(val->type);
       val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
@@ -383,7 +383,7 @@ vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
    return val;
 }
 
-nir_ssa_def *
+nir_def *
 vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
 {
    struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
@@ -393,7 +393,7 @@ vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
 }
 
 struct vtn_value *
-vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
+vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_def *def)
 {
    /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
     * type will be valid by the time we get here.
@@ -456,7 +456,7 @@ vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
                                nir_var_uniform, glsl_bare_sampler_type(), 0);
 }
 
-nir_ssa_def *
+nir_def *
 vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
                              struct vtn_sampled_image si)
 {
@@ -479,7 +479,7 @@ vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
 {
    struct vtn_type *type = vtn_get_value_type(b, value_id);
    vtn_assert(type->base_type == vtn_base_type_sampled_image);
-   nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);
+   nir_def *si_vec2 = vtn_get_nir_ssa(b, value_id);
 
    /* Even though this is a sampled image, we can end up here with a storage
     * image because OpenCL doesn't distinguish between the two.
@@ -2800,7 +2800,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
       vtn_push_image(b, w[2], si.image, access & ACCESS_NON_UNIFORM);
       return;
    } else if (opcode == SpvOpImageSparseTexelsResident) {
-      nir_ssa_def *code = vtn_get_nir_ssa(b, w[3]);
+      nir_def *code = vtn_get_nir_ssa(b, w[3]);
       vtn_push_nir_ssa(b, w[2], nir_is_sparse_texels_resident(&b->nb, 1, code));
       return;
    }
@@ -3004,7 +3004,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
 
    unsigned idx = 4;
 
-   struct nir_ssa_def *coord;
+   struct nir_def *coord;
    unsigned coord_components;
    switch (opcode) {
    case SpvOpImageSampleImplicitLod:
@@ -3054,8 +3054,8 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                      "Unless the Kernel capability is being used, the coordinate parameter "
                      "OpImageSampleExplicitLod must be floating point.");
 
-         nir_ssa_def *coords[4];
-         nir_ssa_def *f0_5 = nir_imm_float(&b->nb, 0.5);
+         nir_def *coords[4];
+         nir_def *f0_5 = nir_imm_float(&b->nb, 0.5);
          for (unsigned i = 0; i < coord_components; i++) {
             coords[i] = nir_i2f32(&b->nb, nir_channel(&b->nb, p->src.ssa, i));
 
@@ -3419,10 +3419,10 @@ fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 get_image_coord(struct vtn_builder *b, uint32_t value)
 {
-   nir_ssa_def *coord = vtn_get_nir_ssa(b, value);
+   nir_def *coord = vtn_get_nir_ssa(b, value);
    /* The image_load_store intrinsics assume a 4-dim coordinate */
    return nir_pad_vec4(&b->nb, coord);
 }
@@ -3525,7 +3525,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                                           SpvImageOperandsSampleMask);
          image.sample = vtn_get_nir_ssa(b, w[arg]);
       } else {
-         image.sample = nir_ssa_undef(&b->nb, 1, 32);
+         image.sample = nir_undef(&b->nb, 1, 32);
       }
 
       if (operands & SpvImageOperandsMakeTexelVisibleMask) {
@@ -3567,7 +3567,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                                           SpvImageOperandsSampleMask);
          image.sample = vtn_get_nir_ssa(b, w[arg]);
       } else {
-         image.sample = nir_ssa_undef(&b->nb, 1, 32);
+         image.sample = nir_undef(&b->nb, 1, 32);
       }
 
       if (operands & SpvImageOperandsMakeTexelAvailableMask) {
@@ -3781,7 +3781,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
 
       nir_builder_instr_insert(&b->nb, &intrin->instr);
 
-      nir_ssa_def *result = nir_trim_vector(&b->nb, &intrin->dest.ssa,
+      nir_def *result = nir_trim_vector(&b->nb, &intrin->dest.ssa,
                                               dest_components);
 
       if (opcode == SpvOpImageQuerySize ||
@@ -4085,7 +4085,7 @@ vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
          dest->elems[i]->def = nir_channel(&b->nb, src->def, i);
       } else {
          unsigned cols = glsl_get_matrix_columns(src->type);
-         nir_ssa_scalar srcs[NIR_MAX_MATRIX_COLUMNS];
+         nir_scalar srcs[NIR_MAX_MATRIX_COLUMNS];
          for (unsigned j = 0; j < cols; j++) {
             srcs[j] = nir_get_ssa_scalar(src->elems[j]->def, i);
          }
@@ -4098,9 +4098,9 @@ vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
    return dest;
 }
 
-static nir_ssa_def *
+static nir_def *
 vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
-                   nir_ssa_def *src0, nir_ssa_def *src1,
+                   nir_def *src0, nir_def *src1,
                    const uint32_t *indices)
 {
    nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);
@@ -4114,7 +4114,7 @@ vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
 
       if (index == 0xffffffff) {
          vec->src[i].src =
-            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
+            nir_src_for_ssa(nir_undef(&b->nb, 1, src0->bit_size));
       } else if (index < src0->num_components) {
          vec->src[i].src = nir_src_for_ssa(src0);
          vec->src[i].swizzle[0] = index;
@@ -4132,9 +4132,9 @@ vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
 /*
  * Concatenates a number of vectors/scalars together to produce a vector
  */
-static nir_ssa_def *
+static nir_def *
 vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
-                     unsigned num_srcs, nir_ssa_def **srcs)
+                     unsigned num_srcs, nir_def **srcs)
 {
    nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);
 
@@ -4147,7 +4147,7 @@ vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
 
    unsigned dest_idx = 0;
    for (unsigned i = 0; i < num_srcs; i++) {
-      nir_ssa_def *src = srcs[i];
+      nir_def *src = srcs[i];
       vtn_assert(dest_idx + src->num_components <= num_components);
       for (unsigned j = 0; j < src->num_components; j++) {
          vec->src[dest_idx].src = nir_src_for_ssa(src);
@@ -4287,7 +4287,7 @@ vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
       unsigned elems = count - 3;
       assume(elems >= 1);
       if (glsl_type_is_vector_or_scalar(type->type)) {
-         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
+         nir_def *srcs[NIR_MAX_VEC_COMPONENTS];
          for (unsigned i = 0; i < elems; i++) {
             srcs[i] = vtn_get_nir_ssa(b, w[3 + i]);
             vtn_assert(glsl_get_bit_size(type->type) == srcs[i]->bit_size);
@@ -5723,7 +5723,7 @@ vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
    nir_address_format addr_format = vtn_mode_to_address_format(
       b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));
 
-   nir_ssa_def *def;
+   nir_def *def;
 
    switch (opcode) {
    case SpvOpPtrDiff: {
@@ -5881,14 +5881,14 @@ vtn_handle_write_packed_primitive_indices(struct vtn_builder *b, SpvOp opcode,
       indices = nir_build_deref_var(&b->nb, var);
    }
 
-   nir_ssa_def *offset = vtn_get_nir_ssa(b, w[1]);
-   nir_ssa_def *packed = vtn_get_nir_ssa(b, w[2]);
-   nir_ssa_def *unpacked = nir_unpack_bits(&b->nb, packed, 8);
+   nir_def *offset = vtn_get_nir_ssa(b, w[1]);
+   nir_def *packed = vtn_get_nir_ssa(b, w[2]);
+   nir_def *unpacked = nir_unpack_bits(&b->nb, packed, 8);
    for (int i = 0; i < 4; i++) {
       nir_deref_instr *offset_deref =
          nir_build_deref_array(&b->nb, indices,
                                nir_iadd_imm(&b->nb, offset, i));
-      nir_ssa_def *val = nir_u2u32(&b->nb, nir_channel(&b->nb, unpacked, i));
+      nir_def *val = nir_u2u32(&b->nb, nir_channel(&b->nb, unpacked, i));
 
       nir_store_deref(&b->nb, offset_deref, val, 0x1);
    }
@@ -5934,7 +5934,7 @@ spirv_to_nir_type_ray_query_intrinsic(struct vtn_builder *b,
 
 static void
 ray_query_load_intrinsic_create(struct vtn_builder *b, SpvOp opcode,
-                                const uint32_t *w, nir_ssa_def *src0,
+                                const uint32_t *w, nir_def *src0,
                                 bool committed)
 {
    struct ray_query_value value =
@@ -6434,7 +6434,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
       /* Operation supports two result types: uvec2 and uint64_t.  The NIR
        * intrinsic gives uvec2, so pack the result for the other case.
        */
-      nir_ssa_def *result = nir_shader_clock(&b->nb, vtn_translate_scope(b, scope));
+      nir_def *result = nir_shader_clock(&b->nb, vtn_translate_scope(b, scope));
 
       struct vtn_type *type = vtn_get_type(b, w[1]);
       const struct glsl_type *dest_type = type->type;
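
Both image paths above substitute an undefined value when the instruction carries no sample operand; the patch only renames the builder entry point from nir_ssa_undef to nir_undef. A sketch of the pattern, with b, operands, w and arg as in the surrounding vtn_handle_image:

   if (operands & SpvImageOperandsSampleMask)
      image.sample = vtn_get_nir_ssa(b, w[arg]); /* explicit sample index */
   else
      image.sample = nir_undef(&b->nb, 1, 32);   /* 1 component, 32 bits wide */
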
index 7048149..4cba604 100644
@@ -102,7 +102,7 @@ matrix_multiply(struct vtn_builder *b,
        */
 
       for (unsigned i = 0; i < src1_columns; i++) {
-         nir_ssa_def *vec_src[4];
+         nir_def *vec_src[4];
          for (unsigned j = 0; j < src0_rows; j++) {
             vec_src[j] = nir_fdot(&b->nb, src0_transpose->elems[j]->def,
                                           src1->elems[i]->def);
@@ -140,7 +140,7 @@ matrix_multiply(struct vtn_builder *b,
 static struct vtn_ssa_value *
 mat_times_scalar(struct vtn_builder *b,
                  struct vtn_ssa_value *mat,
-                 nir_ssa_def *scalar)
+                 nir_def *scalar)
 {
    struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
    for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
@@ -153,8 +153,8 @@ mat_times_scalar(struct vtn_builder *b,
    return dest;
 }
 
-nir_ssa_def *
-vtn_mediump_downconvert(struct vtn_builder *b, enum glsl_base_type base_type, nir_ssa_def *def)
+nir_def *
+vtn_mediump_downconvert(struct vtn_builder *b, enum glsl_base_type base_type, nir_def *def)
 {
    if (def->bit_size == 16)
       return def;
@@ -559,8 +559,8 @@ vtn_alu_op_mediump_16bit(struct vtn_builder *b, SpvOp opcode, struct vtn_value *
    }
 }
 
-static nir_ssa_def *
-vtn_mediump_upconvert(struct vtn_builder *b, enum glsl_base_type base_type, nir_ssa_def *def)
+static nir_def *
+vtn_mediump_upconvert(struct vtn_builder *b, enum glsl_base_type base_type, nir_def *def)
 {
    if (def->bit_size != 16)
       return def;
@@ -622,7 +622,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
    }
 
    struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);
-   nir_ssa_def *src[4] = { NULL, };
+   nir_def *src[4] = { NULL, };
    for (unsigned i = 0; i < num_inputs; i++) {
       vtn_assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type));
       src[i] = vtn_src[i]->def;
@@ -664,7 +664,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
    case SpvOpUMulExtended: {
       vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
       if (src[0]->bit_size == 32) {
-         nir_ssa_def *umul = nir_umul_2x32_64(&b->nb, src[0], src[1]);
+         nir_def *umul = nir_umul_2x32_64(&b->nb, src[0], src[1]);
          dest->elems[0]->def = nir_unpack_64_2x32_split_x(&b->nb, umul);
          dest->elems[1]->def = nir_unpack_64_2x32_split_y(&b->nb, umul);
       } else {
@@ -677,7 +677,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
    case SpvOpSMulExtended: {
       vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
       if (src[0]->bit_size == 32) {
-         nir_ssa_def *umul = nir_imul_2x32_64(&b->nb, src[0], src[1]);
+         nir_def *umul = nir_imul_2x32_64(&b->nb, src[0], src[1]);
          dest->elems[0]->def = nir_unpack_64_2x32_split_x(&b->nb, umul);
          dest->elems[1]->def = nir_unpack_64_2x32_split_y(&b->nb, umul);
       } else {
@@ -738,7 +738,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
    }
 
    case SpvOpIsInf: {
-      nir_ssa_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
+      nir_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
       dest->def = nir_ieq(&b->nb, nir_fabs(&b->nb, src[0]), inf);
       break;
    }
@@ -779,7 +779,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
                                                   src_bit_size, dst_bit_size);
 
       if (swap) {
-         nir_ssa_def *tmp = src[0];
+         nir_def *tmp = src[0];
          src[0] = src[1];
          src[1] = tmp;
       }
@@ -949,7 +949,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
                                                   src_bit_size, dst_bit_size);
 
       if (swap) {
-         nir_ssa_def *tmp = src[0];
+         nir_def *tmp = src[0];
          src[0] = src[1];
          src[1] = tmp;
       }
@@ -1021,7 +1021,7 @@ vtn_handle_integer_dot(struct vtn_builder *b, SpvOp opcode,
    vtn_assert(count >= num_inputs + 3);
 
    struct vtn_ssa_value *vtn_src[3] = { NULL, };
-   nir_ssa_def *src[3] = { NULL, };
+   nir_def *src[3] = { NULL, };
 
    for (unsigned i = 0; i < num_inputs; i++) {
       vtn_src[i] = vtn_ssa_value(b, w[i + 3]);
@@ -1099,11 +1099,11 @@ vtn_handle_integer_dot(struct vtn_builder *b, SpvOp opcode,
       vtn_fail_with_opcode("Invalid source types.", opcode);
    }
 
-   nir_ssa_def *dest = NULL;
+   nir_def *dest = NULL;
 
    if (src[0]->num_components > 1) {
-      nir_ssa_def *(*src0_conversion)(nir_builder *, nir_ssa_def *, unsigned);
-      nir_ssa_def *(*src1_conversion)(nir_builder *, nir_ssa_def *, unsigned);
+      nir_def *(*src0_conversion)(nir_builder *, nir_def *, unsigned);
+      nir_def *(*src1_conversion)(nir_builder *, nir_def *, unsigned);
 
       switch (opcode) {
       case SpvOpSDotKHR:
@@ -1142,13 +1142,13 @@ vtn_handle_integer_dot(struct vtn_builder *b, SpvOp opcode,
          glsl_get_vector_elements(vtn_src[0]->type);
 
       for (unsigned i = 0; i < vector_components; i++) {
-         nir_ssa_def *const src0 =
+         nir_def *const src0 =
             src0_conversion(&b->nb, nir_channel(&b->nb, src[0], i), dest_size);
 
-         nir_ssa_def *const src1 =
+         nir_def *const src1 =
             src1_conversion(&b->nb, nir_channel(&b->nb, src[1], i), dest_size);
 
-         nir_ssa_def *const mul_result = nir_imul(&b->nb, src0, src1);
+         nir_def *const mul_result = nir_imul(&b->nb, src0, src1);
 
          dest = (i == 0) ? mul_result : nir_iadd(&b->nb, dest, mul_result);
       }
@@ -1178,7 +1178,7 @@ vtn_handle_integer_dot(struct vtn_builder *b, SpvOp opcode,
       assert(src[0]->num_components == 1 && src[1]->num_components == 1);
       assert(src[0]->bit_size == 32 && src[1]->bit_size == 32);
 
-      nir_ssa_def *const zero = nir_imm_zero(&b->nb, 1, 32);
+      nir_def *const zero = nir_imm_zero(&b->nb, 1, 32);
       bool is_signed = opcode == SpvOpSDotKHR || opcode == SpvOpSUDotKHR ||
                        opcode == SpvOpSDotAccSatKHR || opcode == SpvOpSUDotAccSatKHR;
 
@@ -1297,13 +1297,13 @@ vtn_handle_bitcast(struct vtn_builder *b, const uint32_t *w, unsigned count)
     */
 
    struct vtn_type *type = vtn_get_type(b, w[1]);
-   struct nir_ssa_def *src = vtn_get_nir_ssa(b, w[3]);
+   struct nir_def *src = vtn_get_nir_ssa(b, w[3]);
 
    vtn_fail_if(src->num_components * src->bit_size !=
                glsl_get_vector_elements(type->type) * glsl_get_bit_size(type->type),
                "Source (%%%u) and destination (%%%u) of OpBitcast must have the same "
                "total number of bits", w[3], w[2]);
-   nir_ssa_def *val =
+   nir_def *val =
       nir_bitcast_vector(&b->nb, src, glsl_get_bit_size(type->type));
    vtn_push_nir_ssa(b, w[2], val);
 }
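
The SpvOpUMulExtended and SpvOpSMulExtended cases above build the two-member result struct from a single widened multiply. A sketch of the 32-bit path, reusing the hunk's names (src and dest come from the enclosing vtn_handle_alu):

   nir_def *umul = nir_umul_2x32_64(&b->nb, src[0], src[1]);
   dest->elems[0]->def = nir_unpack_64_2x32_split_x(&b->nb, umul); /* low 32 bits */
   dest->elems[1]->def = nir_unpack_64_2x32_split_y(&b->nb, umul); /* high 32 bits */
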
index f733366..f3cf89a 100644
@@ -30,15 +30,15 @@ bool
 vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                       const uint32_t *w, unsigned count)
 {
-   nir_ssa_def *def;
+   nir_def *def;
    switch ((enum GcnShaderAMD)ext_opcode) {
    case CubeFaceIndexAMD:
       def = nir_channel(&b->nb, nir_cube_amd(&b->nb, vtn_get_nir_ssa(b, w[5])), 3);
       break;
    case CubeFaceCoordAMD: {
       def = nir_cube_amd(&b->nb, vtn_get_nir_ssa(b, w[5]));
-      nir_ssa_def *st = nir_swizzle(&b->nb, def, (unsigned[]){1, 0}, 2);
-      nir_ssa_def *invma = nir_frcp(&b->nb, nir_channel(&b->nb, def, 2));
+      nir_def *st = nir_swizzle(&b->nb, def, (unsigned[]){1, 0}, 2);
+      nir_def *invma = nir_frcp(&b->nb, nir_channel(&b->nb, def, 2));
       def = nir_ffma_imm2(&b->nb, st, invma, 0.5);
       break;
    }
@@ -126,19 +126,19 @@ vtn_handle_amd_shader_trinary_minmax_instruction(struct vtn_builder *b, SpvOp ex
 
    unsigned num_inputs = count - 5;
    assert(num_inputs == 3);
-   nir_ssa_def *src[3] = { NULL, };
+   nir_def *src[3] = { NULL, };
    for (unsigned i = 0; i < num_inputs; i++)
       src[i] = vtn_get_nir_ssa(b, w[i + 5]);
 
    /* place constants at src[1-2] for easier constant-folding */
    for (unsigned i = 1; i <= 2; i++) {
       if (nir_src_as_const_value(nir_src_for_ssa(src[0]))) {
-         nir_ssa_def* tmp = src[i];
+         nir_def* tmp = src[i];
          src[i] = src[0];
          src[0] = tmp;
       }
    }
-   nir_ssa_def *def;
+   nir_def *def;
    switch ((enum ShaderTrinaryMinMaxAMD)ext_opcode) {
    case FMin3AMD:
       def = nir_fmin(nb, src[0], nir_fmin(nb, src[1], src[2]));
@@ -222,7 +222,7 @@ vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder *
 
    nir_builder_instr_insert(&b->nb, &intrin->instr);
 
-   nir_ssa_def *def;
+   nir_def *def;
    if (vec_array_deref) {
       assert(vec_deref);
       def = nir_vector_extract(&b->nb, &intrin->dest.ssa,
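
CubeFaceCoordAMD above derives the face-local coordinate from nir_cube_amd, which packs the face index in channel 3 (the value CubeFaceIndexAMD extracts) and the term the code treats as the major-axis magnitude in channel 2. A sketch using the hunk's own calls, where w[5] holds the direction-vector operand:

   nir_def *def = nir_cube_amd(&b->nb, vtn_get_nir_ssa(b, w[5]));
   nir_def *st = nir_swizzle(&b->nb, def, (unsigned[]){1, 0}, 2);
   nir_def *invma = nir_frcp(&b->nb, nir_channel(&b->nb, def, 2));
   def = nir_ffma_imm2(&b->nb, st, invma, 0.5); /* st * (1/ma) + 0.5 */
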
index 0f3e4e2..9ce95f7 100644
@@ -543,7 +543,7 @@ vtn_emit_cf_func_unstructured(struct vtn_builder *b, struct vtn_function *func,
       }
 
       case SpvOpBranchConditional: {
-         nir_ssa_def *cond = vtn_ssa_value(b, block->branch[1])->def;
+         nir_def *cond = vtn_ssa_value(b, block->branch[1])->def;
          struct vtn_block *then_block = vtn_block(b, block->branch[2]);
          struct vtn_block *else_block = vtn_block(b, block->branch[3]);
 
@@ -564,7 +564,7 @@ vtn_emit_cf_func_unstructured(struct vtn_builder *b, struct vtn_function *func,
          list_inithead(&cases);
          vtn_parse_switch(b, block->branch, &cases);
 
-         nir_ssa_def *sel = vtn_get_nir_ssa(b, block->branch[1]);
+         nir_def *sel = vtn_get_nir_ssa(b, block->branch[1]);
 
          struct vtn_case *def = NULL;
          vtn_foreach_case(cse, &cases) {
@@ -574,7 +574,7 @@ vtn_emit_cf_func_unstructured(struct vtn_builder *b, struct vtn_function *func,
                continue;
             }
 
-            nir_ssa_def *cond = nir_imm_false(&b->nb);
+            nir_def *cond = nir_imm_false(&b->nb);
             util_dynarray_foreach(&cse->values, uint64_t, val)
                cond = nir_ior(&b->nb, cond, nir_ieq_imm(&b->nb, sel, *val));
 
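The unstructured switch lowering above turns each case into an explicit boolean: the selector is compared against every literal the case carries and the results are OR-ed together. Sketch, with sel and cse as in the hunk:

   nir_def *cond = nir_imm_false(&b->nb);
   util_dynarray_foreach(&cse->values, uint64_t, val)
      cond = nir_ior(&b->nb, cond, nir_ieq_imm(&b->nb, sel, *val));
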
index 342e8bf..8f5598f 100644
 #define M_PI_4f ((float) M_PI_4)
 #endif
 
-static nir_ssa_def *build_det(nir_builder *b, nir_ssa_def **col, unsigned cols);
+static nir_def *build_det(nir_builder *b, nir_def **col, unsigned cols);
 
 /* Computes the determinant of the submatrix given by taking src and
  * removing the specified row and column.
  */
-static nir_ssa_def *
-build_mat_subdet(struct nir_builder *b, struct nir_ssa_def **src,
+static nir_def *
+build_mat_subdet(struct nir_builder *b, struct nir_def **src,
                  unsigned size, unsigned row, unsigned col)
 {
    assert(row < size && col < size);
@@ -57,7 +57,7 @@ build_mat_subdet(struct nir_builder *b, struct nir_ssa_def **src,
          swiz[j] = j + (j >= row);
 
       /* Grab all but the specified column */
-      nir_ssa_def *subcol[3];
+      nir_def *subcol[3];
       for (unsigned j = 0; j < size; j++) {
          if (j != col) {
             subcol[j - (j > col)] = nir_swizzle(b, src[j], swiz, size - 1);
@@ -68,19 +68,19 @@ build_mat_subdet(struct nir_builder *b, struct nir_ssa_def **src,
    }
 }
 
-static nir_ssa_def *
-build_det(nir_builder *b, nir_ssa_def **col, unsigned size)
+static nir_def *
+build_det(nir_builder *b, nir_def **col, unsigned size)
 {
    assert(size <= 4);
-   nir_ssa_def *subdet[4];
+   nir_def *subdet[4];
    for (unsigned i = 0; i < size; i++)
       subdet[i] = build_mat_subdet(b, col, size, i, 0);
 
-   nir_ssa_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, size));
+   nir_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, size));
 
-   nir_ssa_def *result = NULL;
+   nir_def *result = NULL;
    for (unsigned i = 0; i < size; i += 2) {
-      nir_ssa_def *term;
+      nir_def *term;
       if (i + 1 < size) {
          term = nir_fsub(b, nir_channel(b, prod, i),
                             nir_channel(b, prod, i + 1));
@@ -94,12 +94,12 @@ build_det(nir_builder *b, nir_ssa_def **col, unsigned size)
    return result;
 }
 
-static nir_ssa_def *
+static nir_def *
 build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
 {
    unsigned size = glsl_get_vector_elements(src->type);
 
-   nir_ssa_def *cols[4];
+   nir_def *cols[4];
    for (unsigned i = 0; i < size; i++)
       cols[i] = src->elems[i]->def;
 
@@ -109,16 +109,16 @@ build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
 static struct vtn_ssa_value *
 matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
 {
-   nir_ssa_def *adj_col[4];
+   nir_def *adj_col[4];
    unsigned size = glsl_get_vector_elements(src->type);
 
-   nir_ssa_def *cols[4];
+   nir_def *cols[4];
    for (unsigned i = 0; i < size; i++)
       cols[i] = src->elems[i]->def;
 
    /* Build up an adjugate matrix */
    for (unsigned c = 0; c < size; c++) {
-      nir_ssa_def *elem[4];
+      nir_def *elem[4];
       for (unsigned r = 0; r < size; r++) {
          elem[r] = build_mat_subdet(&b->nb, cols, size, c, r);
 
@@ -129,7 +129,7 @@ matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
       adj_col[c] = nir_vec(&b->nb, elem, size);
    }
 
-   nir_ssa_def *det_inv = nir_frcp(&b->nb, build_det(&b->nb, cols, size));
+   nir_def *det_inv = nir_frcp(&b->nb, build_det(&b->nb, cols, size));
 
    struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type);
    for (unsigned i = 0; i < size; i++)
@@ -149,8 +149,8 @@ matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
  * in the asin and acos implementation to minimize some relative error metric
  * in each case.
  */
-static nir_ssa_def *
-build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1, bool piecewise)
+static nir_def *
+build_asin(nir_builder *b, nir_def *x, float p0, float p1, bool piecewise)
 {
    if (x->bit_size == 16) {
       /* The polynomial approximation isn't precise enough to meet half-float
@@ -165,18 +165,18 @@ build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1, bool piecewise)
        */
       return nir_f2f16(b, build_asin(b, nir_f2f32(b, x), p0, p1, piecewise));
    }
-   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0f, x->bit_size);
-   nir_ssa_def *half = nir_imm_floatN_t(b, 0.5f, x->bit_size);
-   nir_ssa_def *abs_x = nir_fabs(b, x);
+   nir_def *one = nir_imm_floatN_t(b, 1.0f, x->bit_size);
+   nir_def *half = nir_imm_floatN_t(b, 0.5f, x->bit_size);
+   nir_def *abs_x = nir_fabs(b, x);
 
-   nir_ssa_def *p0_plus_xp1 = nir_ffma_imm12(b, abs_x, p1, p0);
+   nir_def *p0_plus_xp1 = nir_ffma_imm12(b, abs_x, p1, p0);
 
-   nir_ssa_def *expr_tail =
+   nir_def *expr_tail =
       nir_ffma_imm2(b, abs_x,
                        nir_ffma_imm2(b, abs_x, p0_plus_xp1, M_PI_4f - 1.0f),
                        M_PI_2f);
 
-   nir_ssa_def *result0 = nir_fmul(b, nir_fsign(b, x),
+   nir_def *result0 = nir_fmul(b, nir_fsign(b, x),
                       nir_a_minus_bc(b, nir_imm_floatN_t(b, M_PI_2f, x->bit_size),
                                         nir_fsqrt(b, nir_fsub(b, one, abs_x)),
                                         expr_tail));
@@ -187,15 +187,15 @@ build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1, bool piecewise)
       const float pS2 = -8.6563630030e-03f;
       const float qS1 = -7.0662963390e-01f;
 
-      nir_ssa_def *x2 = nir_fmul(b, x, x);
-      nir_ssa_def *p = nir_fmul(b,
+      nir_def *x2 = nir_fmul(b, x, x);
+      nir_def *p = nir_fmul(b,
                                 x2,
                                 nir_ffma_imm2(b, x2,
                                                  nir_ffma_imm12(b, x2, pS2, pS1),
                                                  pS0));
 
-      nir_ssa_def *q = nir_ffma_imm1(b, x2, qS1, one);
-      nir_ssa_def *result1 = nir_ffma(b, x, nir_fdiv(b, p, q), x);
+      nir_def *q = nir_ffma_imm1(b, x2, qS1, one);
+      nir_def *result1 = nir_ffma(b, x, nir_fdiv(b, p, q), x);
       return nir_bcsel(b, nir_flt(b, abs_x, half), result1, result0);
    } else {
       return result0;
@@ -311,7 +311,7 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
 
    /* Collect the various SSA sources */
    unsigned num_inputs = count - 5;
-   nir_ssa_def *src[3] = { NULL, };
+   nir_def *src[3] = { NULL, };
    for (unsigned i = 0; i < num_inputs; i++) {
       /* These are handled specially below */
       if (vtn_untyped_value(b, w[i + 5])->value_type == vtn_value_type_pointer)
@@ -339,12 +339,12 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       break;
 
    case GLSLstd450Modf: {
-      nir_ssa_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
-      nir_ssa_def *sign_bit =
+      nir_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
+      nir_def *sign_bit =
          nir_imm_intN_t(&b->nb, (uint64_t)1 << (src[0]->bit_size - 1),
                         src[0]->bit_size);
-      nir_ssa_def *sign = nir_fsign(nb, src[0]);
-      nir_ssa_def *abs = nir_fabs(nb, src[0]);
+      nir_def *sign = nir_fsign(nb, src[0]);
+      nir_def *abs = nir_fabs(nb, src[0]);
 
       /* NaN input should produce a NaN result, and ±Inf input should provide
        * ±0 result.  The fmul(sign(x), ffract(x)) calculation will already
@@ -364,12 +364,12 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
    }
 
    case GLSLstd450ModfStruct: {
-      nir_ssa_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
-      nir_ssa_def *sign_bit =
+      nir_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
+      nir_def *sign_bit =
          nir_imm_intN_t(&b->nb, (uint64_t)1 << (src[0]->bit_size - 1),
                         src[0]->bit_size);
-      nir_ssa_def *sign = nir_fsign(nb, src[0]);
-      nir_ssa_def *abs = nir_fabs(nb, src[0]);
+      nir_def *sign = nir_fsign(nb, src[0]);
+      nir_def *abs = nir_fabs(nb, src[0]);
       vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
 
       /* See GLSLstd450Modf for explanation of the Inf and NaN handling. */
@@ -397,7 +397,7 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       const bool exact = nb->exact;
       nb->exact = true;
 
-      nir_ssa_def *cmp = nir_slt(nb, src[1], src[0]);
+      nir_def *cmp = nir_slt(nb, src[1], src[0]);
 
       nb->exact = exact;
       dest->def = nir_fsub_imm(nb, 1.0f, cmp);
@@ -464,12 +464,12 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       break;
 
    case GLSLstd450Refract: {
-      nir_ssa_def *I = src[0];
-      nir_ssa_def *N = src[1];
-      nir_ssa_def *eta = src[2];
-      nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
-      nir_ssa_def *one = NIR_IMM_FP(nb, 1.0);
-      nir_ssa_def *zero = NIR_IMM_FP(nb, 0.0);
+      nir_def *I = src[0];
+      nir_def *N = src[1];
+      nir_def *eta = src[2];
+      nir_def *n_dot_i = nir_fdot(nb, N, I);
+      nir_def *one = NIR_IMM_FP(nb, 1.0);
+      nir_def *zero = NIR_IMM_FP(nb, 0.0);
       /* According to the SPIR-V and GLSL specs, eta is always a float
        * regardless of the type of the other operands. However, in practice it
        * seems that if you try to pass it a float then glslang will just
@@ -483,10 +483,10 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
                                 nir_rounding_mode_undef);
       }
       /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
-      nir_ssa_def *k =
+      nir_def *k =
          nir_a_minus_bc(nb, one, eta,
                             nir_fmul(nb, eta, nir_a_minus_bc(nb, one, n_dot_i, n_dot_i)));
-      nir_ssa_def *result =
+      nir_def *result =
          nir_a_minus_bc(nb, nir_fmul(nb, eta, I),
                             nir_ffma(nb, eta, n_dot_i, nir_fsqrt(nb, k)),
                             N);
@@ -522,7 +522,7 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
        */
       const uint32_t bit_size = src[0]->bit_size;
       const double clamped_x = bit_size > 16 ? 10.0 : 4.2;
-      nir_ssa_def *x = nir_fclamp(nb, src[0],
+      nir_def *x = nir_fclamp(nb, src[0],
                                   nir_imm_floatN_t(nb, -clamped_x, bit_size),
                                   nir_imm_floatN_t(nb, clamped_x, bit_size));
 
@@ -535,14 +535,14 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       const bool exact = nb->exact;
 
       nb->exact = true;
-      nir_ssa_def *is_regular = nir_flt(nb,
+      nir_def *is_regular = nir_flt(nb,
                                         nir_imm_floatN_t(nb, 0, bit_size),
                                         nir_fabs(nb, src[0]));
 
       /* The extra 1.0*s ensures that subnormal inputs are flushed to zero
        * when that is selected by the shader.
        */
-      nir_ssa_def *flushed = nir_fmul(nb,
+      nir_def *flushed = nir_fmul(nb,
                                       src[0],
                                       nir_imm_floatN_t(nb, 1.0, bit_size));
       nb->exact = exact;
@@ -686,7 +686,7 @@ handle_glsl450_interpolation(struct vtn_builder *b, enum GLSLstd450 opcode,
 
    nir_builder_instr_insert(&b->nb, &intrin->instr);
 
-   nir_ssa_def *def = &intrin->dest.ssa;
+   nir_def *def = &intrin->dest.ssa;
    if (vec_array_deref)
       def = nir_vector_extract(&b->nb, def, vec_deref->arr.index.ssa);
 
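The GLSLstd450Refract case above spells the spec formula with NIR's fused helpers: nir_a_minus_bc(nb, a, b, c) computes a - b*c and nir_ffma(nb, a, b, c) computes a*b + c. A sketch with the hunk's names (the k < 0 total-internal-reflection select falls outside the quoted context):

   nir_def *n_dot_i = nir_fdot(nb, N, I);
   /* k = 1.0 - eta*eta*(1.0 - dot(N, I)*dot(N, I)) */
   nir_def *k =
      nir_a_minus_bc(nb, one, eta,
                         nir_fmul(nb, eta, nir_a_minus_bc(nb, one, n_dot_i, n_dot_i)));
   /* R = eta*I - (eta*dot(N, I) + sqrt(k)) * N */
   nir_def *result =
      nir_a_minus_bc(nb, nir_fmul(nb, eta, I),
                         nir_ffma(nb, eta, n_dot_i, nir_fsqrt(nb, k)),
                         N);
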
index 3e9b673..494f687 100644
@@ -31,9 +31,9 @@
 #include "vtn_private.h"
 #include "OpenCL.std.h"
 
-typedef nir_ssa_def *(*nir_handler)(struct vtn_builder *b,
+typedef nir_def *(*nir_handler)(struct vtn_builder *b,
                                     uint32_t opcode,
-                                    unsigned num_srcs, nir_ssa_def **srcs,
+                                    unsigned num_srcs, nir_def **srcs,
                                     struct vtn_type **src_types,
                                     const struct vtn_type *dest_type);
 
@@ -170,7 +170,7 @@ static bool call_mangled_function(struct vtn_builder *b,
                                   uint32_t num_srcs,
                                   struct vtn_type **src_types,
                                   const struct vtn_type *dest_type,
-                                  nir_ssa_def **srcs,
+                                  nir_def **srcs,
                                   nir_deref_instr **ret_deref_ptr)
 {
    nir_function *found = mangle_and_find(b, name, const_mask, num_srcs, src_types);
@@ -203,7 +203,7 @@ handle_instr(struct vtn_builder *b, uint32_t opcode,
 {
    struct vtn_type *dest_type = w_dest ? vtn_get_type(b, w_dest[0]) : NULL;
 
-   nir_ssa_def *srcs[5] = { NULL };
+   nir_def *srcs[5] = { NULL };
    struct vtn_type *src_types[5] = { NULL };
    vtn_assert(num_srcs <= ARRAY_SIZE(srcs));
    for (unsigned i = 0; i < num_srcs; i++) {
@@ -213,7 +213,7 @@ handle_instr(struct vtn_builder *b, uint32_t opcode,
       src_types[i] = val->type;
    }
 
-   nir_ssa_def *result = handler(b, opcode, num_srcs, srcs, src_types, dest_type);
+   nir_def *result = handler(b, opcode, num_srcs, srcs, src_types, dest_type);
    if (result) {
       vtn_push_nir_ssa(b, w_dest[1], result);
    } else {
@@ -271,12 +271,12 @@ nir_alu_op_for_opencl_opcode(struct vtn_builder *b,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 handle_alu(struct vtn_builder *b, uint32_t opcode,
-           unsigned num_srcs, nir_ssa_def **srcs, struct vtn_type **src_types,
+           unsigned num_srcs, nir_def **srcs, struct vtn_type **src_types,
            const struct vtn_type *dest_type)
 {
-   nir_ssa_def *ret = nir_build_alu(&b->nb, nir_alu_op_for_opencl_opcode(b, (enum OpenCLstd_Entrypoints)opcode),
+   nir_def *ret = nir_build_alu(&b->nb, nir_alu_op_for_opencl_opcode(b, (enum OpenCLstd_Entrypoints)opcode),
                                     srcs[0], srcs[1], srcs[2], NULL);
    if (opcode == OpenCLstd_Popcount)
       ret = nir_u2uN(&b->nb, ret, glsl_get_bit_size(dest_type->type));
@@ -418,10 +418,10 @@ get_signed_type(struct vtn_builder *b, struct vtn_type *t)
                           glsl_get_vector_elements(t->type)));
 }
 
-static nir_ssa_def *
+static nir_def *
 handle_clc_fn(struct vtn_builder *b, enum OpenCLstd_Entrypoints opcode,
               int num_srcs,
-              nir_ssa_def **srcs,
+              nir_def **srcs,
               struct vtn_type **src_types,
               const struct vtn_type *dest_type)
 {
@@ -466,9 +466,9 @@ handle_clc_fn(struct vtn_builder *b, enum OpenCLstd_Entrypoints opcode,
    return ret_deref ? nir_load_deref(&b->nb, ret_deref) : NULL;
 }
 
-static nir_ssa_def *
+static nir_def *
 handle_special(struct vtn_builder *b, uint32_t opcode,
-               unsigned num_srcs, nir_ssa_def **srcs, struct vtn_type **src_types,
+               unsigned num_srcs, nir_def **srcs, struct vtn_type **src_types,
                const struct vtn_type *dest_type)
 {
    nir_builder *nb = &b->nb;
@@ -557,16 +557,16 @@ handle_special(struct vtn_builder *b, uint32_t opcode,
       break;
    }
 
-   nir_ssa_def *ret = handle_clc_fn(b, opcode, num_srcs, srcs, src_types, dest_type);
+   nir_def *ret = handle_clc_fn(b, opcode, num_srcs, srcs, src_types, dest_type);
    if (!ret)
       vtn_fail("No NIR equivalent");
 
    return ret;
 }
 
-static nir_ssa_def *
+static nir_def *
 handle_core(struct vtn_builder *b, uint32_t opcode,
-            unsigned num_srcs, nir_ssa_def **srcs, struct vtn_type **src_types,
+            unsigned num_srcs, nir_def **srcs, struct vtn_type **src_types,
             const struct vtn_type *dest_type)
 {
    nir_deref_instr *ret_deref = NULL;
@@ -629,13 +629,13 @@ _handle_v_load_store(struct vtn_builder *b, enum OpenCLstd_Entrypoints opcode,
    enum glsl_base_type base_type = glsl_get_base_type(type->type);
    unsigned components = glsl_get_vector_elements(type->type);
 
-   nir_ssa_def *offset = vtn_get_nir_ssa(b, w[5 + a]);
+   nir_def *offset = vtn_get_nir_ssa(b, w[5 + a]);
    struct vtn_value *p = vtn_value(b, w[6 + a], vtn_value_type_pointer);
 
    struct vtn_ssa_value *comps[NIR_MAX_VEC_COMPONENTS];
-   nir_ssa_def *ncomps[NIR_MAX_VEC_COMPONENTS];
+   nir_def *ncomps[NIR_MAX_VEC_COMPONENTS];
 
-   nir_ssa_def *moffset = nir_imul_imm(&b->nb, offset,
+   nir_def *moffset = nir_imul_imm(&b->nb, offset,
       (vec_aligned && components == 3) ? 4 : components);
    nir_deref_instr *deref = vtn_pointer_to_deref(b, p->pointer);
 
@@ -658,7 +658,7 @@ _handle_v_load_store(struct vtn_builder *b, enum OpenCLstd_Entrypoints opcode,
    deref = nir_alignment_deref_cast(&b->nb, deref, alignment, 0);
 
    for (int i = 0; i < components; i++) {
-      nir_ssa_def *coffset = nir_iadd_imm(&b->nb, moffset, i);
+      nir_def *coffset = nir_iadd_imm(&b->nb, moffset, i);
       nir_deref_instr *arr_deref = nir_build_deref_ptr_as_array(&b->nb, deref, coffset);
 
       if (load) {
@@ -816,7 +816,7 @@ handle_printf(struct vtn_builder *b, uint32_t opcode,
    for (unsigned i = 1; i < num_srcs; ++i) {
       nir_deref_instr *field_deref =
          nir_build_deref_struct(&b->nb, deref_var, i - 1);
-      nir_ssa_def *field_src = vtn_ssa_value(b, w_src[i])->def;
+      nir_def *field_src = vtn_ssa_value(b, w_src[i])->def;
       /* extract strings */
       fmt_pos = util_printf_next_spec_pos(info->strings, fmt_pos);
       if (fmt_pos != -1 && info->strings[fmt_pos] == 's') {
@@ -829,36 +829,36 @@ handle_printf(struct vtn_builder *b, uint32_t opcode,
    }
 
    /* Lastly, the actual intrinsic */
-   nir_ssa_def *fmt_idx = nir_imm_int(&b->nb, info_idx);
-   nir_ssa_def *ret = nir_printf(&b->nb, fmt_idx, &deref_var->dest.ssa);
+   nir_def *fmt_idx = nir_imm_int(&b->nb, info_idx);
+   nir_def *ret = nir_printf(&b->nb, fmt_idx, &deref_var->dest.ssa);
    vtn_push_nir_ssa(b, w_dest[1], ret);
 }
 
-static nir_ssa_def *
+static nir_def *
 handle_round(struct vtn_builder *b, uint32_t opcode,
-             unsigned num_srcs, nir_ssa_def **srcs, struct vtn_type **src_types,
+             unsigned num_srcs, nir_def **srcs, struct vtn_type **src_types,
              const struct vtn_type *dest_type)
 {
-   nir_ssa_def *src = srcs[0];
+   nir_def *src = srcs[0];
    nir_builder *nb = &b->nb;
-   nir_ssa_def *half = nir_imm_floatN_t(nb, 0.5, src->bit_size);
-   nir_ssa_def *truncated = nir_ftrunc(nb, src);
-   nir_ssa_def *remainder = nir_fsub(nb, src, truncated);
+   nir_def *half = nir_imm_floatN_t(nb, 0.5, src->bit_size);
+   nir_def *truncated = nir_ftrunc(nb, src);
+   nir_def *remainder = nir_fsub(nb, src, truncated);
 
    return nir_bcsel(nb, nir_fge(nb, nir_fabs(nb, remainder), half),
                     nir_fadd(nb, truncated, nir_fsign(nb, src)), truncated);
 }
 
-static nir_ssa_def *
+static nir_def *
 handle_shuffle(struct vtn_builder *b, uint32_t opcode,
-               unsigned num_srcs, nir_ssa_def **srcs, struct vtn_type **src_types,
+               unsigned num_srcs, nir_def **srcs, struct vtn_type **src_types,
                const struct vtn_type *dest_type)
 {
-   struct nir_ssa_def *input = srcs[0];
-   struct nir_ssa_def *mask = srcs[1];
+   struct nir_def *input = srcs[0];
+   struct nir_def *mask = srcs[1];
 
    unsigned out_elems = dest_type->length;
-   nir_ssa_def *outres[NIR_MAX_VEC_COMPONENTS];
+   nir_def *outres[NIR_MAX_VEC_COMPONENTS];
    unsigned in_elems = input->num_components;
    if (mask->bit_size != 32)
       mask = nir_u2u32(&b->nb, mask);
@@ -869,17 +869,17 @@ handle_shuffle(struct vtn_builder *b, uint32_t opcode,
    return nir_vec(&b->nb, outres, out_elems);
 }
 
-static nir_ssa_def *
+static nir_def *
 handle_shuffle2(struct vtn_builder *b, uint32_t opcode,
-                unsigned num_srcs, nir_ssa_def **srcs, struct vtn_type **src_types,
+                unsigned num_srcs, nir_def **srcs, struct vtn_type **src_types,
                 const struct vtn_type *dest_type)
 {
-   struct nir_ssa_def *input0 = srcs[0];
-   struct nir_ssa_def *input1 = srcs[1];
-   struct nir_ssa_def *mask = srcs[2];
+   struct nir_def *input0 = srcs[0];
+   struct nir_def *input1 = srcs[1];
+   struct nir_def *mask = srcs[2];
 
    unsigned out_elems = dest_type->length;
-   nir_ssa_def *outres[NIR_MAX_VEC_COMPONENTS];
+   nir_def *outres[NIR_MAX_VEC_COMPONENTS];
    unsigned in_elems = input0->num_components;
    unsigned total_mask = 2 * in_elems - 1;
    unsigned half_mask = in_elems - 1;
@@ -887,11 +887,11 @@ handle_shuffle2(struct vtn_builder *b, uint32_t opcode,
       mask = nir_u2u32(&b->nb, mask);
    mask = nir_iand(&b->nb, mask, nir_imm_intN_t(&b->nb, total_mask, mask->bit_size));
    for (unsigned i = 0; i < out_elems; i++) {
-      nir_ssa_def *this_mask = nir_channel(&b->nb, mask, i);
-      nir_ssa_def *vmask = nir_iand(&b->nb, this_mask, nir_imm_intN_t(&b->nb, half_mask, mask->bit_size));
-      nir_ssa_def *val0 = nir_vector_extract(&b->nb, input0, vmask);
-      nir_ssa_def *val1 = nir_vector_extract(&b->nb, input1, vmask);
-      nir_ssa_def *sel = nir_ilt_imm(&b->nb, this_mask, in_elems);
+      nir_def *this_mask = nir_channel(&b->nb, mask, i);
+      nir_def *vmask = nir_iand(&b->nb, this_mask, nir_imm_intN_t(&b->nb, half_mask, mask->bit_size));
+      nir_def *val0 = nir_vector_extract(&b->nb, input0, vmask);
+      nir_def *val1 = nir_vector_extract(&b->nb, input1, vmask);
+      nir_def *sel = nir_ilt_imm(&b->nb, this_mask, in_elems);
       outres[i] = nir_bcsel(&b->nb, sel, val0, val1);
    }
    return nir_vec(&b->nb, outres, out_elems);
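
handle_shuffle2 above indexes into the concatenation of two input vectors: the mask is masked down to 2*in_elems - 1, the low bits pick a lane out of each input, and a compare against in_elems decides which input supplies the lane. Per-component sketch from the hunk:

   nir_def *this_mask = nir_channel(&b->nb, mask, i);
   nir_def *vmask = nir_iand(&b->nb, this_mask,
                             nir_imm_intN_t(&b->nb, half_mask, mask->bit_size));
   nir_def *val0 = nir_vector_extract(&b->nb, input0, vmask);
   nir_def *val1 = nir_vector_extract(&b->nb, input1, vmask);
   nir_def *sel = nir_ilt_imm(&b->nb, this_mask, in_elems);
   outres[i] = nir_bcsel(&b->nb, sel, val0, val1);
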
index 165b5f2..63f2ee3 100644
@@ -252,7 +252,7 @@ vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
 
 struct vtn_ssa_value {
    union {
-      nir_ssa_def *def;
+      nir_def *def;
       struct vtn_ssa_value **elems;
    };
 
@@ -483,8 +483,8 @@ struct vtn_pointer {
    nir_deref_instr *deref;
 
    /** A (block_index, offset) pair representing a UBO or SSBO position. */
-   struct nir_ssa_def *block_index;
-   struct nir_ssa_def *offset;
+   struct nir_def *block_index;
+   struct nir_def *offset;
 
    /* Access qualifiers */
    enum gl_access_qualifier access;
@@ -536,9 +536,9 @@ vtn_translate_scope(struct vtn_builder *b, SpvScope scope);
 
 struct vtn_image_pointer {
    nir_deref_instr *image;
-   nir_ssa_def *coord;
-   nir_ssa_def *sample;
-   nir_ssa_def *lod;
+   nir_def *coord;
+   nir_def *sample;
+   nir_def *lod;
 };
 
 struct vtn_value {
@@ -698,10 +698,10 @@ const char *
 vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                    unsigned word_count, unsigned *words_used);
 
-nir_ssa_def *
+nir_def *
 vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr);
 struct vtn_pointer *
-vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
+vtn_pointer_from_ssa(struct vtn_builder *b, nir_def *ssa,
                      struct vtn_type *ptr_type);
 
 struct vtn_ssa_value *
@@ -782,7 +782,7 @@ vtn_value_to_pointer(struct vtn_builder *b, struct vtn_value *value)
 {
    if (value->is_null_constant) {
       vtn_assert(glsl_type_is_vector_or_scalar(value->type->type));
-      nir_ssa_def *const_ssa =
+      nir_def *const_ssa =
          vtn_const_ssa_value(b, value->constant, value->type->type)->def;
       return vtn_pointer_from_ssa(b, const_ssa, value->type);
    }
@@ -860,9 +860,9 @@ struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
 struct vtn_value *vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                                      struct vtn_ssa_value *ssa);
 
-nir_ssa_def *vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id);
+nir_def *vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id);
 struct vtn_value *vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id,
-                                   nir_ssa_def *def);
+                                   nir_def *def);
 
 struct vtn_value *vtn_push_pointer(struct vtn_builder *b,
                                    uint32_t value_id,
@@ -873,7 +873,7 @@ struct vtn_sampled_image {
    nir_deref_instr *sampler;
 };
 
-nir_ssa_def *vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
+nir_def *vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
                                           struct vtn_sampled_image si);
 
 void
@@ -890,9 +890,9 @@ nir_deref_instr *vtn_nir_deref(struct vtn_builder *b, uint32_t id);
 
 nir_deref_instr *vtn_pointer_to_deref(struct vtn_builder *b,
                                       struct vtn_pointer *ptr);
-nir_ssa_def *
+nir_def *
 vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
-                      nir_ssa_def **index_out);
+                      nir_def **index_out);
 
 nir_deref_instr *
 vtn_get_call_payload_for_location(struct vtn_builder *b, uint32_t location_id);
@@ -1016,8 +1016,8 @@ void vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                              SpvMemorySemanticsMask semantics);
 
 bool vtn_value_is_relaxed_precision(struct vtn_builder *b, struct vtn_value *val);
-nir_ssa_def *
-vtn_mediump_downconvert(struct vtn_builder *b, enum glsl_base_type base_type, nir_ssa_def *def);
+nir_def *
+vtn_mediump_downconvert(struct vtn_builder *b, enum glsl_base_type base_type, nir_def *def);
 struct vtn_ssa_value *
 vtn_mediump_downconvert_value(struct vtn_builder *b, struct vtn_ssa_value *src);
 void vtn_mediump_upconvert_value(struct vtn_builder *b, struct vtn_ssa_value *value);
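
vtn_private.h only changes spellings in declarations; the value-map accessors keep their shape. A hypothetical handler body showing the usual pairing, where w[2] is the SPIR-V result id and w[3] an operand id as elsewhere in this file, and the nir_fabs is a stand-in operation rather than anything from the patch:

   nir_def *src = vtn_get_nir_ssa(b, w[3]); /* look up the operand's SSA value */
   nir_def *result = nir_fabs(&b->nb, src); /* build whatever NIR the opcode needs */
   vtn_push_nir_ssa(b, w[2], result);       /* bind it to the result id */
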
index 07ad125..9d1a417 100644
@@ -1286,7 +1286,7 @@ vtn_emit_branch(struct vtn_builder *b, const struct vtn_block *block,
       /* Launches mesh shader workgroups from the task shader.
        * Arguments are: vec(x, y, z), payload pointer
        */
-      nir_ssa_def *dimensions =
+      nir_def *dimensions =
          nir_vec3(&b->nb, vtn_get_nir_ssa(b, w[1]),
                           vtn_get_nir_ssa(b, w[2]),
                           vtn_get_nir_ssa(b, w[3]));
@@ -1353,7 +1353,7 @@ vtn_emit_block(struct vtn_builder *b, struct vtn_block *block,
       struct vtn_successor *else_succ = &block->successors[1];
       struct vtn_construct *c = block->parent;
 
-      nir_ssa_def *cond = vtn_get_nir_ssa(b, block->branch[1]);
+      nir_def *cond = vtn_get_nir_ssa(b, block->branch[1]);
       if (then_succ->block == else_succ->block)
          cond = nir_imm_true(&b->nb);
 
@@ -1411,14 +1411,14 @@ vtn_emit_block(struct vtn_builder *b, struct vtn_block *block,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 vtn_switch_case_condition(struct vtn_builder *b, struct vtn_construct *swtch,
-                          nir_ssa_def *sel, struct vtn_case *cse)
+                          nir_def *sel, struct vtn_case *cse)
 {
    vtn_assert(swtch->type == vtn_construct_type_switch);
 
    if (cse->is_default) {
-      nir_ssa_def *any = nir_imm_false(&b->nb);
+      nir_def *any = nir_imm_false(&b->nb);
 
       struct vtn_block *header = b->func->ordered_blocks[swtch->start_pos];
 
@@ -1434,7 +1434,7 @@ vtn_switch_case_condition(struct vtn_builder *b, struct vtn_construct *swtch,
 
       return nir_inot(&b->nb, any);
    } else {
-      nir_ssa_def *cond = nir_imm_false(&b->nb);
+      nir_def *cond = nir_imm_false(&b->nb);
       util_dynarray_foreach(&cse->values, uint64_t, val)
          cond = nir_ior(&b->nb, cond, nir_ieq_imm(&b->nb, sel, *val));
       return cond;
@@ -1703,8 +1703,8 @@ vtn_emit_cf_func_structured(struct vtn_builder *b, struct vtn_function *func,
             struct vtn_construct *swtch = next->parent;
             struct vtn_block *header = func->ordered_blocks[swtch->start_pos];
 
-            nir_ssa_def *sel = vtn_get_nir_ssa(b, header->branch[1]);
-            nir_ssa_def *case_condition =
+            nir_def *sel = vtn_get_nir_ssa(b, header->branch[1]);
+            nir_def *case_condition =
                vtn_switch_case_condition(b, swtch, sel, block->switch_case);
             if (next->fallthrough_var) {
                case_condition =
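
The task-shader launch branch above packs the three launch-size operands into a single vec3 before emitting the mesh-workgroup launch, as its comment describes. Sketch from the hunk, with w[1] through w[3] holding the x/y/z ids:

   nir_def *dimensions =
      nir_vec3(&b->nb, vtn_get_nir_ssa(b, w[1]),
                       vtn_get_nir_ssa(b, w[2]),
                       vtn_get_nir_ssa(b, w[3]));
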
index 3ed3c22..c09376c 100644
@@ -27,7 +27,7 @@ static struct vtn_ssa_value *
 vtn_build_subgroup_instr(struct vtn_builder *b,
                          nir_intrinsic_op nir_op,
                          struct vtn_ssa_value *src0,
-                         nir_ssa_def *index,
+                         nir_def *index,
                          unsigned const_idx0,
                          unsigned const_idx1)
 {
@@ -126,7 +126,7 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
    case SpvOpGroupNonUniformBallotBitCount:
    case SpvOpGroupNonUniformBallotFindLSB:
    case SpvOpGroupNonUniformBallotFindMSB: {
-      nir_ssa_def *src0, *src1 = NULL;
+      nir_def *src0, *src1 = NULL;
       nir_intrinsic_op op;
       switch (opcode) {
       case SpvOpGroupNonUniformBallotBitExtract:
@@ -249,7 +249,7 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
          unreachable("Unhandled opcode");
       }
 
-      nir_ssa_def *src0;
+      nir_def *src0;
       if (opcode == SpvOpGroupNonUniformAll || opcode == SpvOpGroupAll ||
           opcode == SpvOpGroupNonUniformAny || opcode == SpvOpGroupAny ||
           opcode == SpvOpGroupNonUniformAllEqual) {
@@ -315,8 +315,8 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
        */
 
       nir_builder *nb = &b->nb;
-      nir_ssa_def *size = nir_load_subgroup_size(nb);
-      nir_ssa_def *delta = vtn_get_nir_ssa(b, w[5]);
+      nir_def *size = nir_load_subgroup_size(nb);
+      nir_def *delta = vtn_get_nir_ssa(b, w[5]);
 
       /* Rewrite UP in terms of DOWN.
        *
@@ -325,7 +325,7 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
       if (opcode == SpvOpSubgroupShuffleUpINTEL)
          delta = nir_isub(nb, size, delta);
 
-      nir_ssa_def *index = nir_iadd(nb, nir_load_subgroup_invocation(nb), delta);
+      nir_def *index = nir_iadd(nb, nir_load_subgroup_invocation(nb), delta);
       struct vtn_ssa_value *current =
          vtn_build_subgroup_instr(b, nir_intrinsic_shuffle, vtn_ssa_value(b, w[3]),
                                   index, 0, 0);
@@ -334,7 +334,7 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
          vtn_build_subgroup_instr(b, nir_intrinsic_shuffle, vtn_ssa_value(b, w[4]),
                                   nir_isub(nb, index, size), 0, 0);
 
-      nir_ssa_def *cond = nir_ilt(nb, index, size);
+      nir_def *cond = nir_ilt(nb, index, size);
       vtn_push_nir_ssa(b, w[2], nir_bcsel(nb, cond, current->def, next->def));
 
       break;
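
The Intel shuffle handling above implements the UP direction in terms of DOWN: delta is rewritten to size - delta, every lane then reads index = invocation + delta, and lanes whose index reaches the subgroup size take their value from the second source shifted back by size. Sketch from the hunk:

   nir_def *index = nir_iadd(nb, nir_load_subgroup_invocation(nb), delta);
   struct vtn_ssa_value *current =
      vtn_build_subgroup_instr(b, nir_intrinsic_shuffle, vtn_ssa_value(b, w[3]),
                               index, 0, 0);
   struct vtn_ssa_value *next =
      vtn_build_subgroup_instr(b, nir_intrinsic_shuffle, vtn_ssa_value(b, w[4]),
                               nir_isub(nb, index, size), 0, 0);
   nir_def *cond = nir_ilt(nb, index, size);
   vtn_push_nir_ssa(b, w[2], nir_bcsel(nb, cond, current->def, next->def));
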
index b212e93..d4fea61 100644
@@ -196,7 +196,7 @@ vtn_pointer_is_external_block(struct vtn_builder *b,
           ptr->mode == vtn_variable_mode_phys_ssbo;
 }
 
-static nir_ssa_def *
+static nir_def *
 vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                        unsigned stride, unsigned bit_size)
 {
@@ -204,7 +204,7 @@ vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
    if (link.mode == vtn_access_mode_literal) {
       return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
    } else {
-      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
+      nir_def *ssa = vtn_ssa_value(b, link.id)->def;
       if (ssa->bit_size != bit_size)
          ssa = nir_i2iN(&b->nb, ssa, bit_size);
       return nir_imul_imm(&b->nb, ssa, stride);
@@ -226,9 +226,9 @@ vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
-                            nir_ssa_def *desc_array_index)
+                            nir_def *desc_array_index)
 {
    vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);
 
@@ -258,9 +258,9 @@ vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
    return &instr->dest.ssa;
 }
 
-static nir_ssa_def *
+static nir_def *
 vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
-                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
+                     nir_def *base_index, nir_def *offset_index)
 {
    vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);
 
@@ -281,9 +281,9 @@ vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
    return &instr->dest.ssa;
 }
 
-static nir_ssa_def *
+static nir_def *
 vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
-                    nir_ssa_def *desc_index)
+                    nir_def *desc_index)
 {
    vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);
 
@@ -318,7 +318,7 @@ vtn_pointer_dereference(struct vtn_builder *b,
    } else if (b->options->environment == NIR_SPIRV_VULKAN &&
               (vtn_pointer_is_external_block(b, base) ||
                base->mode == vtn_variable_mode_accel_struct)) {
-      nir_ssa_def *block_index = base->block_index;
+      nir_def *block_index = base->block_index;
 
       /* We're dereferencing an external block pointer.  Correctness of this
        * operation relies on one particular line in the SPIR-V spec, section
@@ -344,7 +344,7 @@ vtn_pointer_dereference(struct vtn_builder *b,
        * UBOs/SSBOs will work correctly even if variable pointers are
        * completely toast.
        */
-      nir_ssa_def *desc_arr_idx = NULL;
+      nir_def *desc_arr_idx = NULL;
       if (!block_index || vtn_type_contains_block(b, type) ||
           base->mode == vtn_variable_mode_accel_struct) {
          /* If our type contains a block, then we're still outside the block
@@ -365,7 +365,7 @@ vtn_pointer_dereference(struct vtn_builder *b,
             }
 
             unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
-            nir_ssa_def *arr_offset =
+            nir_def *arr_offset =
                vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                       MAX2(aoa_size, 1), 32);
             if (desc_arr_idx)
@@ -403,7 +403,7 @@ vtn_pointer_dereference(struct vtn_builder *b,
        * final block index.  Insert a descriptor load and cast to a deref to
        * start the deref chain.
        */
-      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);
+      nir_def *desc = vtn_descriptor_load(b, base->mode, block_index);
 
       assert(base->mode == vtn_variable_mode_ssbo ||
              base->mode == vtn_variable_mode_ubo);
@@ -445,7 +445,7 @@ vtn_pointer_dereference(struct vtn_builder *b,
       tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->modes,
                                   tail->type, base->ptr_type->stride);
 
-      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
+      nir_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                   tail->dest.ssa.bit_size);
       tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
       idx++;
@@ -458,7 +458,7 @@ vtn_pointer_dereference(struct vtn_builder *b,
          tail = nir_build_deref_struct(&b->nb, tail, field);
          type = type->members[field];
       } else {
-         nir_ssa_def *arr_index =
+         nir_def *arr_index =
             vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                    tail->dest.ssa.bit_size);
          tail = nir_build_deref_array(&b->nb, tail, arr_index);
@@ -582,7 +582,7 @@ vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 vtn_pointer_to_descriptor(struct vtn_builder *b, struct vtn_pointer *ptr)
 {
    assert(ptr->mode == vtn_variable_mode_accel_struct);
@@ -1751,7 +1751,7 @@ vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
    unreachable("Invalid variable mode");
 }
 
-nir_ssa_def *
+nir_def *
 vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
 {
    if ((vtn_pointer_is_external_block(b, ptr) &&
@@ -1789,7 +1789,7 @@ vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
 }
 
 struct vtn_pointer *
-vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
+vtn_pointer_from_ssa(struct vtn_builder *b, nir_def *ssa,
                      struct vtn_type *ptr_type)
 {
    vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
@@ -2290,14 +2290,14 @@ vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
             glsl_get_type_name(src_type->type), src_type->id);
 }
 
-static nir_ssa_def *
-nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
+static nir_def *
+nir_shrink_zero_pad_vec(nir_builder *b, nir_def *val,
                         unsigned num_components)
 {
    if (val->num_components == num_components)
       return val;
 
-   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+   nir_def *comps[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < num_components; i++) {
       if (i < val->num_components)
          comps[i] = nir_channel(b, val, i);
@@ -2307,8 +2307,8 @@ nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
    return nir_vec(b, comps, num_components);
 }
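
A plain-C model (illustrative) of the per-channel logic in nir_shrink_zero_pad_vec above: channels that exist are kept, the rest are zero-filled.

#include <stdint.h>

static void
shrink_zero_pad_vec_model(const uint32_t *val, unsigned val_num_components,
                          uint32_t *out, unsigned num_components)
{
   for (unsigned i = 0; i < num_components; i++)
      out[i] = i < val_num_components ? val[i] : 0; /* zero padding */
}
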
 
-static nir_ssa_def *
-nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
+static nir_def *
+nir_sloppy_bitcast(nir_builder *b, nir_def *val,
                    const struct glsl_type *type)
 {
    const unsigned num_components = glsl_get_vector_elements(type);
@@ -2575,7 +2575,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
    case SpvOpCopyMemorySized: {
       struct vtn_value *dest_val = vtn_pointer_value(b, w[1]);
       struct vtn_value *src_val = vtn_pointer_value(b, w[2]);
-      nir_ssa_def *size = vtn_get_nir_ssa(b, w[3]);
+      nir_def *size = vtn_get_nir_ssa(b, w[3]);
       struct vtn_pointer *dest = vtn_value_to_pointer(b, dest_val);
       struct vtn_pointer *src = vtn_value_to_pointer(b, src_val);
 
@@ -2686,7 +2686,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
       };
       struct vtn_pointer *array = vtn_pointer_dereference(b, ptr, &chain);
 
-      nir_ssa_def *array_length =
+      nir_def *array_length =
          nir_deref_buffer_array_length(&b->nb, 32,
                                        vtn_pointer_to_ssa(b, array),
                                        .access=ptr->access | ptr->type->access);
@@ -2709,8 +2709,8 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                   "scalar type");
 
       /* The pointer will be converted to an SSA value automatically */
-      nir_ssa_def *ptr = vtn_get_nir_ssa(b, w[3]);
-      nir_ssa_def *u = nir_sloppy_bitcast(&b->nb, ptr, u_type->type);
+      nir_def *ptr = vtn_get_nir_ssa(b, w[3]);
+      nir_def *u = nir_sloppy_bitcast(&b->nb, ptr, u_type->type);
       vtn_push_nir_ssa(b, w[2], u);
       break;
    }
@@ -2728,8 +2728,8 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                   "OpConvertUToPtr can only be used to cast from a vector or "
                   "scalar type");
 
-      nir_ssa_def *u = vtn_get_nir_ssa(b, w[3]);
-      nir_ssa_def *ptr = nir_sloppy_bitcast(&b->nb, u, ptr_type->type);
+      nir_def *u = vtn_get_nir_ssa(b, w[3]);
+      nir_def *ptr = nir_sloppy_bitcast(&b->nb, u, ptr_type->type);
       vtn_push_pointer(b, w[2], vtn_pointer_from_ssa(b, ptr, ptr_type));
       break;
    }
@@ -2768,12 +2768,12 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
          vtn_storage_class_to_mode(b, storage_class, dst_type->deref, &nir_mode);
       nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
 
-      nir_ssa_def *null_value =
+      nir_def *null_value =
          nir_build_imm(&b->nb, nir_address_format_num_components(addr_format),
                                nir_address_format_bit_size(addr_format),
                                nir_address_format_null_value(addr_format));
 
-      nir_ssa_def *valid = nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa, nir_mode);
+      nir_def *valid = nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa, nir_mode);
       vtn_push_nir_ssa(b, w[2], nir_bcsel(&b->nb, valid,
                                                   &src_deref->dest.ssa,
                                                   null_value));
@@ -2796,13 +2796,13 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
 
       nir_deref_instr *src_deref = vtn_nir_deref(b, w[3]);
 
-      nir_ssa_def *global_bit =
+      nir_def *global_bit =
          nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa,
                                                    nir_var_mem_global),
                    nir_imm_int(&b->nb, SpvMemorySemanticsCrossWorkgroupMemoryMask),
                    nir_imm_int(&b->nb, 0));
 
-      nir_ssa_def *shared_bit =
+      nir_def *shared_bit =
          nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa,
                                                    nir_var_mem_shared),
                    nir_imm_int(&b->nb, SpvMemorySemanticsWorkgroupMemoryMask),
@@ -2830,7 +2830,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
 
    case SpvOpSubgroupBlockWriteINTEL: {
       nir_deref_instr *dest = vtn_nir_deref(b, w[1]);
-      nir_ssa_def *data = vtn_ssa_value(b, w[2])->def;
+      nir_def *data = vtn_ssa_value(b, w[2])->def;
 
       nir_intrinsic_instr *store =
          nir_intrinsic_instr_create(b->nb.shader,
@@ -2856,7 +2856,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                   "The result type of an OpConvertUToAccelerationStructure "
                   "must be OpTypeAccelerationStructure");
 
-      nir_ssa_def *u = vtn_get_nir_ssa(b, w[3]);
+      nir_def *u = vtn_get_nir_ssa(b, w[3]);
       vtn_push_nir_ssa(b, w[2], nir_sloppy_bitcast(&b->nb, u, as_type->type));
       break;
    }
index 167db6b..13b137e 100644
@@ -584,7 +584,7 @@ struct ir3_array {
    unsigned length;
    unsigned id;
 
-   struct nir_ssa_def *r;
+   struct nir_def *r;
 
    /* To avoid array writes from getting DCE'd, keep track of the
     * most recent write.  Any array access depends on the most
index 6a8afde..b577319 100644
@@ -1811,7 +1811,7 @@ get_frag_coord(struct ir3_context *ctx, nir_intrinsic_instr *intr)
       ctx->frag_coord = ir3_create_collect(b, xyzw, 4);
    }
 
-   ctx->so->fragcoord_compmask |= nir_ssa_def_components_read(&intr->dest.ssa);
+   ctx->so->fragcoord_compmask |= nir_def_components_read(&intr->dest.ssa);
 
    return ctx->frag_coord;
 }
@@ -2684,7 +2684,7 @@ emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
 }
 
 static void
-emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
+emit_undef(struct ir3_context *ctx, nir_undef_instr *undef)
 {
    struct ir3_instruction **dst =
       ir3_get_dst_ssa(ctx, &undef->def, undef->def.num_components);
index 3b2e826..8995259 100644
@@ -192,7 +192,7 @@ ir3_context_free(struct ir3_context *ctx)
  * insert in def_ht
  */
 struct ir3_instruction **
-ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
+ir3_get_dst_ssa(struct ir3_context *ctx, nir_def *dst, unsigned n)
 {
    struct ir3_instruction **value =
       ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
@@ -515,7 +515,7 @@ ir3_declare_array(struct ir3_context *ctx, nir_intrinsic_instr *decl)
 }
 
 struct ir3_array *
-ir3_get_array(struct ir3_context *ctx, nir_ssa_def *reg)
+ir3_get_array(struct ir3_context *ctx, nir_def *reg)
 {
    foreach_array (arr, &ctx->ir->array_list) {
       if (arr->r == reg)
index ecd8e9b..9b02a0a 100644
@@ -203,7 +203,7 @@ struct ir3_context *ir3_context_init(struct ir3_compiler *compiler,
 void ir3_context_free(struct ir3_context *ctx);
 
 struct ir3_instruction **ir3_get_dst_ssa(struct ir3_context *ctx,
-                                         nir_ssa_def *dst, unsigned n);
+                                         nir_def *dst, unsigned n);
 struct ir3_instruction **ir3_get_dst(struct ir3_context *ctx, nir_dest *dst,
                                      unsigned n);
 struct ir3_instruction *const *ir3_get_src(struct ir3_context *ctx,
@@ -244,7 +244,7 @@ struct ir3_instruction *ir3_get_predicate(struct ir3_context *ctx,
                                           struct ir3_instruction *src);
 
 void ir3_declare_array(struct ir3_context *ctx, nir_intrinsic_instr *decl);
-struct ir3_array *ir3_get_array(struct ir3_context *ctx, nir_ssa_def *reg);
+struct ir3_array *ir3_get_array(struct ir3_context *ctx, nir_def *reg);
 struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
                                               struct ir3_array *arr, int n,
                                               struct ir3_instruction *address);
index d2193e1..90d2708 100644
@@ -217,7 +217,7 @@ ir3_nir_lower_ssbo_size_filter(const nir_instr *instr, const void *data)
              nir_intrinsic_get_ssbo_size;
 }
 
-static nir_ssa_def *
+static nir_def *
 ir3_nir_lower_ssbo_size_instr(nir_builder *b, nir_instr *instr, void *data)
 {
    uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data;
@@ -306,10 +306,10 @@ ir3_nir_lower_array_sampler_cb(struct nir_builder *b, nir_instr *instr, void *_d
    b->cursor = nir_before_instr(&tex->instr);
 
    unsigned ncomp = tex->coord_components;
-   nir_ssa_def *src = nir_ssa_for_src(b, tex->src[coord_idx].src, ncomp);
+   nir_def *src = nir_ssa_for_src(b, tex->src[coord_idx].src, ncomp);
 
    assume(ncomp >= 1);
-   nir_ssa_def *ai = nir_channel(b, src, ncomp - 1);
+   nir_def *ai = nir_channel(b, src, ncomp - 1);
    ai = nir_fadd_imm(b, ai, 0.5);
    nir_instr_rewrite_src(&tex->instr, &tex->src[coord_idx].src,
                          nir_src_for_ssa(nir_vector_insert_imm(b, src, ai, ncomp - 1)));
@@ -418,7 +418,7 @@ lower_subgroup_id_filter(const nir_instr *instr, const void *unused)
           intr->intrinsic == nir_intrinsic_load_num_subgroups;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_subgroup_id(nir_builder *b, nir_instr *instr, void *unused)
 {
    (void)unused;
@@ -437,12 +437,12 @@ lower_subgroup_id(nir_builder *b, nir_instr *instr, void *unused)
        * nir_lower_compute_system_values() will replace local_size with a
        * constant so this can mostly be constant folded away.
        */
-      nir_ssa_def *local_size = nir_load_workgroup_size(b);
-      nir_ssa_def *size =
+      nir_def *local_size = nir_load_workgroup_size(b);
+      nir_def *size =
          nir_imul24(b, nir_channel(b, local_size, 0),
                     nir_imul24(b, nir_channel(b, local_size, 1),
                                nir_channel(b, local_size, 2)));
-      nir_ssa_def *one = nir_imm_int(b, 1);
+      nir_def *one = nir_imm_int(b, 1);
       return nir_iadd(b, one,
                       nir_ishr(b, nir_isub(b, size, one),
                                nir_load_subgroup_id_shift_ir3(b)));
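
A worked example (illustrative; assumes load_subgroup_id_shift_ir3 yields log2 of the subgroup size) of the load_num_subgroups arithmetic above, which is DIV_ROUND_UP(workgroup_size, subgroup_size) written as 1 + ((size - 1) >> shift):

#include <stdint.h>

static uint32_t
num_subgroups_model(uint32_t workgroup_size, uint32_t subgroup_id_shift)
{
   /* e.g. workgroup_size = 96, shift = 5 (subgroups of 32):
    * 1 + (95 >> 5) = 3 subgroups */
   return 1 + ((workgroup_size - 1) >> subgroup_id_shift);
}
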
index 2a83155..b8cf29b 100644
@@ -41,7 +41,7 @@ bool ir3_nir_lower_io_offsets(nir_shader *shader);
 bool ir3_nir_lower_load_barycentric_at_sample(nir_shader *shader);
 bool ir3_nir_lower_load_barycentric_at_offset(nir_shader *shader);
 bool ir3_nir_move_varying_inputs(nir_shader *shader);
-int ir3_nir_coord_offset(nir_ssa_def *ssa);
+int ir3_nir_coord_offset(nir_def *ssa);
 bool ir3_nir_lower_tex_prefetch(nir_shader *shader);
 bool ir3_nir_lower_wide_load_store(nir_shader *shader);
 bool ir3_nir_lower_layer_id(nir_shader *shader);
@@ -79,8 +79,8 @@ bool ir3_nir_fixup_load_uniform(nir_shader *nir);
 bool ir3_nir_opt_preamble(nir_shader *nir, struct ir3_shader_variant *v);
 bool ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v);
 
-nir_ssa_def *ir3_nir_try_propagate_bit_shift(nir_builder *b,
-                                             nir_ssa_def *offset,
+nir_def *ir3_nir_try_propagate_bit_shift(nir_builder *b,
+                                         nir_def *offset,
                                          int32_t shift);
 
 static inline nir_intrinsic_instr *
index 1388da4..f2e896e 100644
@@ -212,7 +212,7 @@ gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
  * with (ie. not requiring value range tracking)
  */
 static void
-handle_partial_const(nir_builder *b, nir_ssa_def **srcp, int *offp)
+handle_partial_const(nir_builder *b, nir_def **srcp, int *offp)
 {
    if ((*srcp)->parent_instr->type != nir_instr_type_alu)
       return;
@@ -287,7 +287,7 @@ lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
       return false;
    }
 
-   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
+   nir_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
    int const_offset = 0;
 
    handle_partial_const(b, &ubo_offset, &const_offset);
@@ -298,8 +298,8 @@ lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
     * also the same for the constant part of the offset:
     */
    const int shift = -2;
-   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
-   nir_ssa_def *uniform_offset = NULL;
+   nir_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
+   nir_def *uniform_offset = NULL;
    if (new_offset) {
       uniform_offset = new_offset;
    } else {
@@ -325,11 +325,11 @@ lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
       const_offset = 0;
    }
 
-   nir_ssa_def *uniform =
+   nir_def *uniform =
       nir_load_uniform(b, instr->num_components, instr->dest.ssa.bit_size,
                        uniform_offset, .base = const_offset);
 
-   nir_ssa_def_rewrite_uses(&instr->dest.ssa, uniform);
+   nir_def_rewrite_uses(&instr->dest.ssa, uniform);
 
    nir_instr_remove(&instr->instr);
 
@@ -361,7 +361,7 @@ copy_ubo_to_uniform(nir_shader *nir, const struct ir3_const_state *const_state)
           range->ubo.block == const_state->constant_data_ubo)
          continue;
 
-      nir_ssa_def *ubo = nir_imm_int(b, range->ubo.block);
+      nir_def *ubo = nir_imm_int(b, range->ubo.block);
       if (range->ubo.bindless) {
          ubo = nir_bindless_resource_ir3(b, 32, ubo,
                                          .desc_set = range->ubo.bindless_base);
@@ -516,7 +516,7 @@ fixup_load_uniform_filter(const nir_instr *instr, const void *arg)
           nir_intrinsic_load_uniform;
 }
 
-static nir_ssa_def *
+static nir_def *
 fixup_load_uniform_instr(struct nir_builder *b, nir_instr *instr, void *arg)
 {
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
@@ -534,7 +534,7 @@ fixup_load_uniform_instr(struct nir_builder *b, nir_instr *instr, void *arg)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *offset = nir_ssa_for_src(b, intr->src[0], 1);
+   nir_def *offset = nir_ssa_for_src(b, intr->src[0], 1);
 
    /* We'd like to avoid a sequence like:
     *
@@ -578,7 +578,7 @@ ir3_nir_fixup_load_uniform(nir_shader *nir)
    return nir_shader_lower_instructions(nir, fixup_load_uniform_filter,
                                         fixup_load_uniform_instr, NULL);
 }
-static nir_ssa_def *
+static nir_def *
 ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
 {
    struct ir3_const_state *const_state = data;
@@ -604,11 +604,11 @@ ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
       num_components = DIV_ROUND_UP(num_components, 2);
    }
    unsigned base = nir_intrinsic_base(instr);
-   nir_ssa_def *index = nir_imm_int(b, const_state->constant_data_ubo);
-   nir_ssa_def *offset =
+   nir_def *index = nir_imm_int(b, const_state->constant_data_ubo);
+   nir_def *offset =
       nir_iadd_imm(b, nir_ssa_for_src(b, instr->src[0], 1), base);
 
-   nir_ssa_def *result =
+   nir_def *result =
       nir_load_ubo(b, num_components, 32, index, offset,
                    .align_mul = nir_intrinsic_align_mul(instr),
                    .align_offset = nir_intrinsic_align_offset(instr),
index 4770b10..c07c1bc 100644
@@ -53,7 +53,7 @@ lower_64b_intrinsics_filter(const nir_instr *instr, const void *unused)
    return nir_dest_bit_size(intr->dest) == 64;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
 {
    (void)unused;
@@ -78,15 +78,15 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
       unsigned num_comp = nir_intrinsic_src_components(intr, 0);
       unsigned wrmask = nir_intrinsic_has_write_mask(intr) ?
          nir_intrinsic_write_mask(intr) : BITSET_MASK(num_comp);
-      nir_ssa_def *val = nir_ssa_for_src(b, intr->src[0], num_comp);
-      nir_ssa_def *off = nir_ssa_for_src(b, intr->src[offset_src_idx], 1);
+      nir_def *val = nir_ssa_for_src(b, intr->src[0], num_comp);
+      nir_def *off = nir_ssa_for_src(b, intr->src[offset_src_idx], 1);
 
       for (unsigned i = 0; i < num_comp; i++) {
          if (!(wrmask & BITFIELD_BIT(i)))
             continue;
 
-         nir_ssa_def *c64 = nir_channel(b, val, i);
-         nir_ssa_def *c32 = nir_unpack_64_2x32(b, c64);
+         nir_def *c64 = nir_channel(b, val, i);
+         nir_def *c32 = nir_unpack_64_2x32(b, c64);
 
          nir_intrinsic_instr *store =
             nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
@@ -106,7 +106,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
 
    unsigned num_comp = nir_intrinsic_dest_components(intr);
 
-   nir_ssa_def *def = &intr->dest.ssa;
+   nir_def *def = &intr->dest.ssa;
    def->bit_size = 32;
 
    /* load_kernel_input is handled specially, lowering to two 32b inputs:
@@ -114,15 +114,15 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
    if (intr->intrinsic == nir_intrinsic_load_kernel_input) {
       assert(num_comp == 1);
 
-      nir_ssa_def *offset = nir_iadd_imm(b,
+      nir_def *offset = nir_iadd_imm(b,
             nir_ssa_for_src(b, intr->src[0], 1), 4);
 
-      nir_ssa_def *upper = nir_load_kernel_input(b, 1, 32, offset);
+      nir_def *upper = nir_load_kernel_input(b, 1, 32, offset);
 
       return nir_pack_64_2x32_split(b, def, upper);
    }
 
-   nir_ssa_def *components[num_comp];
+   nir_def *components[num_comp];
 
    if (is_intrinsic_load(intr->intrinsic)) {
       unsigned offset_src_idx;
@@ -136,7 +136,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
          offset_src_idx = 0;
       }
 
-      nir_ssa_def *off = nir_ssa_for_src(b, intr->src[offset_src_idx], 1);
+      nir_def *off = nir_ssa_for_src(b, intr->src[offset_src_idx], 1);
 
       for (unsigned i = 0; i < num_comp; i++) {
          nir_intrinsic_instr *load =
@@ -156,7 +156,7 @@ lower_64b_intrinsics(nir_builder *b, nir_instr *instr, void *unused)
        * extended from 32b to 64b:
        */
       for (unsigned i = 0; i < num_comp; i++) {
-         nir_ssa_def *c = nir_channel(b, def, i);
+         nir_def *c = nir_channel(b, def, i);
          components[i] = nir_pack_64_2x32_split(b, c, nir_imm_zero(b, 1, 32));
       }
    }
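
A minimal C model (illustrative) of the 32-bit splitting convention this pass relies on: a 64-bit value travels as two 32-bit halves, matching nir_unpack_64_2x32 and nir_pack_64_2x32_split above. The zero-extension loop is then just pack(lo, 0).

#include <stdint.h>

static uint64_t
pack_64_2x32_split_model(uint32_t lo, uint32_t hi)
{
   return (uint64_t)lo | ((uint64_t)hi << 32);
}

static void
unpack_64_2x32_model(uint64_t v, uint32_t *lo, uint32_t *hi)
{
   *lo = (uint32_t)v;         /* x component */
   *hi = (uint32_t)(v >> 32); /* y component */
}
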
@@ -176,17 +176,17 @@ ir3_nir_lower_64b_intrinsics(nir_shader *shader)
  * Lowering for 64b undef instructions, splitting them into two 32b undefs
  */
 
-static nir_ssa_def *
+static nir_def *
 lower_64b_undef(nir_builder *b, nir_instr *instr, void *unused)
 {
    (void)unused;
 
-   nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);
+   nir_undef_instr *undef = nir_instr_as_ssa_undef(instr);
    unsigned num_comp = undef->def.num_components;
-   nir_ssa_def *components[num_comp];
+   nir_def *components[num_comp];
 
    for (unsigned i = 0; i < num_comp; i++) {
-      nir_ssa_def *lowered = nir_ssa_undef(b, 2, 32);
+      nir_def *lowered = nir_undef(b, 2, 32);
 
       components[i] = nir_pack_64_2x32_split(b,
                                              nir_channel(b, lowered, 0),
@@ -239,7 +239,7 @@ lower_64b_global_filter(const nir_instr *instr, const void *unused)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
 {
    (void)unused;
@@ -247,8 +247,8 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    bool load = intr->intrinsic != nir_intrinsic_store_global;
 
-   nir_ssa_def *addr64 = nir_ssa_for_src(b, intr->src[load ? 0 : 1], 1);
-   nir_ssa_def *addr = nir_unpack_64_2x32(b, addr64);
+   nir_def *addr64 = nir_ssa_for_src(b, intr->src[load ? 0 : 1], 1);
+   nir_def *addr = nir_unpack_64_2x32(b, addr64);
 
    /*
     * Note that we can get vec8/vec16 with OpenCL.. we need to split
@@ -270,10 +270,10 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
 
    if (load) {
       unsigned num_comp = nir_intrinsic_dest_components(intr);
-      nir_ssa_def *components[num_comp];
+      nir_def *components[num_comp];
       for (unsigned off = 0; off < num_comp;) {
          unsigned c = MIN2(num_comp - off, 4);
-         nir_ssa_def *val = nir_load_global_ir3(
+         nir_def *val = nir_load_global_ir3(
                b, c, nir_dest_bit_size(intr->dest),
                addr, nir_imm_int(b, off));
          for (unsigned i = 0; i < c; i++) {
@@ -283,10 +283,10 @@ lower_64b_global(nir_builder *b, nir_instr *instr, void *unused)
       return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
    } else {
       unsigned num_comp = nir_intrinsic_src_components(intr, 0);
-      nir_ssa_def *value = nir_ssa_for_src(b, intr->src[0], num_comp);
+      nir_def *value = nir_ssa_for_src(b, intr->src[0], num_comp);
       for (unsigned off = 0; off < num_comp; off += 4) {
          unsigned c = MIN2(num_comp - off, 4);
-         nir_ssa_def *v = nir_channels(b, value, BITFIELD_MASK(c) << off);
+         nir_def *v = nir_channels(b, value, BITFIELD_MASK(c) << off);
          nir_store_global_ir3(b, v, addr, nir_imm_int(b, off));
       }
       return NIR_LOWER_INSTR_PROGRESS_REPLACE;
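
A sketch (illustrative) of the chunking loop above: OpenCL-style vec8/vec16 global accesses are emitted as a sequence of at-most-vec4 pieces.

static unsigned
split_into_vec4_chunks(unsigned num_comp, unsigned chunk_size[8])
{
   unsigned n = 0;
   for (unsigned off = 0; off < num_comp;) {
      unsigned c = num_comp - off < 4 ? num_comp - off : 4; /* MIN2 */
      chunk_size[n++] = c; /* one load/store of c components at offset off */
      off += c;
   }
   return n; /* e.g. num_comp = 8 -> two vec4 chunks */
}
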
index b82268a..076b6d1 100644
@@ -66,11 +66,11 @@ get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
    return -1;
 }
 
-static nir_ssa_def *
+static nir_def *
 check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
                                 int32_t direction, int32_t shift)
 {
-   nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;
+   nir_def *shift_ssa = alu_instr->src[1].src.ssa;
 
    /* Only propagate if the shift is a const value so we can check value range
     * statically.
@@ -103,8 +103,8 @@ check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
    return shift_ssa;
 }
 
-nir_ssa_def *
-ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset,
+nir_def *
+ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_def *offset,
                                 int32_t shift)
 {
    nir_instr *offset_instr = offset->parent_instr;
@@ -112,14 +112,14 @@ ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset,
       return NULL;
 
    nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
-   nir_ssa_def *shift_ssa;
-   nir_ssa_def *new_offset = NULL;
+   nir_def *shift_ssa;
+   nir_def *new_offset = NULL;
 
    /* the first src could be something like ssa_18.x, but we only want
     * the single component.  Otherwise the ishl/ishr/ushr could turn
     * into a vec4 operation:
     */
-   nir_ssa_def *src0 = nir_mov_alu(b, alu->src[0], 1);
+   nir_def *src0 = nir_mov_alu(b, alu->src[0], 1);
 
    switch (alu->op) {
    case nir_op_ishl:
@@ -151,11 +151,11 @@ ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset,
 static void
 scalarize_load(nir_intrinsic_instr *intrinsic, nir_builder *b)
 {
-   struct nir_ssa_def *results[NIR_MAX_VEC_COMPONENTS];
+   struct nir_def *results[NIR_MAX_VEC_COMPONENTS];
 
-   nir_ssa_def *descriptor = intrinsic->src[0].ssa;
-   nir_ssa_def *offset = intrinsic->src[1].ssa;
-   nir_ssa_def *new_offset = intrinsic->src[2].ssa;
+   nir_def *descriptor = intrinsic->src[0].ssa;
+   nir_def *offset = intrinsic->src[1].ssa;
+   nir_def *new_offset = intrinsic->src[2].ssa;
    unsigned comp_size = intrinsic->dest.ssa.bit_size / 8;
    for (unsigned i = 0; i < intrinsic->dest.ssa.num_components; i++) {
       results[i] =
@@ -167,9 +167,9 @@ scalarize_load(nir_intrinsic_instr *intrinsic, nir_builder *b)
                            .align_offset = nir_intrinsic_align_offset(intrinsic));
    }
 
-   nir_ssa_def *result = nir_vec(b, results, intrinsic->dest.ssa.num_components);
+   nir_def *result = nir_vec(b, results, intrinsic->dest.ssa.num_components);
 
-   nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, result);
+   nir_def_rewrite_uses(&intrinsic->dest.ssa, result);
 
    nir_instr_remove(&intrinsic->instr);
 }
@@ -182,7 +182,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
    int shift = 2;
 
    bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
-   nir_ssa_def *new_dest = NULL;
+   nir_def *new_dest = NULL;
 
    /* for 16-bit ssbo access, offset is in 16-bit words instead of dwords */
    if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
@@ -200,7 +200,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
    /* 'offset_src_idx' holds the index of the source that represent the offset. */
    new_intrinsic = nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);
 
-   nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;
+   nir_def *offset = intrinsic->src[offset_src_idx].ssa;
 
    /* Since we don't have value range checking, we first try to propagate
     * the division by 4 ('offset >> 2') into another bit-shift instruction that
@@ -210,7 +210,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
     * Here we use the convention that shifting right is negative while shifting
     * left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
     */
-   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);
+   nir_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);
 
    /* The new source that will hold the dword-offset is always the last
     * one for every intrinsic.
@@ -219,7 +219,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
    *target_src = nir_src_for_ssa(offset);
 
    if (has_dest) {
-      nir_ssa_def *dest = &intrinsic->dest.ssa;
+      nir_def *dest = &intrinsic->dest.ssa;
       nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
                         dest->num_components, dest->bit_size);
       new_dest = &new_intrinsic->dest.ssa;
@@ -253,7 +253,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
       /* Replace the uses of the original destination by that
        * of the new intrinsic.
        */
-      nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
+      nir_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
    }
 
    /* Finally remove the original intrinsic. */
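
A constant-folded example (illustrative) of the propagation above: the byte-to-dword conversion is x >> 2 (x << -2 in this pass's convention), and when the offset already comes from a constant left shift, the two shifts fold into one instruction.

#include <assert.h>
#include <stdint.h>

static uint32_t
fold_shift_model(uint32_t y)
{
   uint32_t byte_offset = y << 4;     /* offset produced by ishl(y, 4) */
   uint32_t naive = byte_offset >> 2; /* ushr stacked on the ishl */
   uint32_t folded = y << 2;          /* what the pass emits instead */
   assert(naive == folded);           /* holds while y << 4 doesn't overflow */
   return folded;
}
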
index d5e1df2..849a625 100644
@@ -38,7 +38,7 @@ nir_lower_layer_id(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
   nir_intrinsic_set_io_semantics(load_input, semantics);
   nir_ssa_dest_init(&load_input->instr, &load_input->dest, 1, 32);
   nir_builder_instr_insert(b, &load_input->instr);
-  nir_ssa_def_rewrite_uses(&intr->dest.ssa, &load_input->dest.ssa);
+  nir_def_rewrite_uses(&intr->dest.ssa, &load_input->dest.ssa);
   return true;
 }
 
index d82d967..782f38b 100644
@@ -29,7 +29,7 @@
  * instructions.
  */
 
-static nir_ssa_def *
+static nir_def *
 ir3_nir_lower_load_barycentric_at_offset_instr(nir_builder *b, nir_instr *instr,
                                                void *data)
 {
@@ -38,9 +38,9 @@ ir3_nir_lower_load_barycentric_at_offset_instr(nir_builder *b, nir_instr *instr,
 
 #define chan(var, c) nir_channel(b, var, c)
 
-   nir_ssa_def *off = intr->src[0].ssa;
+   nir_def *off = intr->src[0].ssa;
    /* note: at_offset is defined to be relative to the center of the pixel */
-   nir_ssa_def *ij = nir_load_barycentric_pixel(b, 32, .interp_mode = interp_mode);
+   nir_def *ij = nir_load_barycentric_pixel(b, 32, .interp_mode = interp_mode);
 
    /* Need helper invocations for our ddx/ddys to work. */
    if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
@@ -50,13 +50,13 @@ ir3_nir_lower_load_barycentric_at_offset_instr(nir_builder *b, nir_instr *instr,
       /* Offset our pixel center ij by the offset argument (units of pixels)
        * times the derivatives of ij in screen space.
        */
-      nir_ssa_def *new_ij = ij;
+      nir_def *new_ij = ij;
       new_ij = nir_ffma(b, chan(off, 0), nir_fddx(b, ij), new_ij);
       new_ij = nir_ffma(b, chan(off, 1), nir_fddy(b, ij), new_ij);
 
       return new_ij;
    } else {
-      nir_ssa_def *center_w = nir_frcp(b, nir_load_persp_center_rhw_ir3(b, 32));
+      nir_def *center_w = nir_frcp(b, nir_load_persp_center_rhw_ir3(b, 32));
 
       /* scaled ij -- ij comes in multiplied by by 1/center_w so multiply that
        * back out, plus add center_w as the 3rd component for taking the
@@ -65,11 +65,11 @@ ir3_nir_lower_load_barycentric_at_offset_instr(nir_builder *b, nir_instr *instr,
        * We actually suspect that we should be using rhw here instead of center_w,
        * but no tests seem to distinguish between the two.
        */
-      nir_ssa_def *sij =
+      nir_def *sij =
          nir_vec3(b, nir_fmul(b, chan(ij, 0), center_w), nir_fmul(b, chan(ij, 1), center_w), center_w);
 
       /* Get the offset value from pixel center for ij, and also for w. */
-      nir_ssa_def *pos = sij;
+      nir_def *pos = sij;
       pos = nir_ffma(b, chan(off, 0), nir_fddx(b, sij), pos);
       pos = nir_ffma(b, chan(off, 1), nir_fddy(b, sij), pos);
 
index 2b62459..cd91d62 100644
  * that needs to happen at the same early stage (before wpos_ytransform)
  */
 
-static nir_ssa_def *
-load_sample_pos(nir_builder *b, nir_ssa_def *samp_id)
+static nir_def *
+load_sample_pos(nir_builder *b, nir_def *samp_id)
 {
    return nir_load_sample_pos_from_id(b, 32, samp_id);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_load_barycentric_at_sample(nir_builder *b, nir_intrinsic_instr *intr)
 {
-   nir_ssa_def *pos = load_sample_pos(b, intr->src[0].ssa);
+   nir_def *pos = load_sample_pos(b, intr->src[0].ssa);
 
    return nir_load_barycentric_at_offset(b, 32, pos, .interp_mode = nir_intrinsic_interp_mode(intr));
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_load_sample_pos(nir_builder *b, nir_intrinsic_instr *intr)
 {
-   nir_ssa_def *pos = load_sample_pos(b, nir_load_sample_id(b));
+   nir_def *pos = load_sample_pos(b, nir_load_sample_id(b));
 
    /* Note that gl_SamplePosition is offset by +vec2(0.5, 0.5) vs the
     * offset passed to interpolateAtOffset().   See
     * dEQP-GLES31.functional.shaders.multisample_interpolation.interpolate_at_offset.at_sample_position.default_framebuffer
     * for example.
     */
-   nir_ssa_def *half = nir_imm_float(b, 0.5);
+   nir_def *half = nir_imm_float(b, 0.5);
    return nir_fadd(b, pos, nir_vec2(b, half, half));
 }
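
A small model (illustrative; relies on gl_SamplePosition being defined on [0, 1] while load_sample_pos_from_id works relative to the pixel center) of the +0.5 bias above:

static void
sample_pos_model(float center_rel_x, float center_rel_y,
                 float *sample_pos_x, float *sample_pos_y)
{
   *sample_pos_x = center_rel_x + 0.5f;
   *sample_pos_y = center_rel_y + 0.5f;
}
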
 
-static nir_ssa_def *
+static nir_def *
 ir3_nir_lower_load_barycentric_at_sample_instr(nir_builder *b, nir_instr *instr,
                                                void *data)
 {
index ebf7693..abee878 100644
@@ -34,7 +34,7 @@ struct state {
       unsigned stride;
    } map;
 
-   nir_ssa_def *header;
+   nir_def *header;
 
    nir_variable *vertex_count_var;
    nir_variable *emitted_vertex_var;
@@ -48,25 +48,25 @@ struct state {
    unsigned local_primitive_id_start;
 };
 
-static nir_ssa_def *
-bitfield_extract(nir_builder *b, nir_ssa_def *v, uint32_t start, uint32_t mask)
+static nir_def *
+bitfield_extract(nir_builder *b, nir_def *v, uint32_t start, uint32_t mask)
 {
    return nir_iand_imm(b, nir_ushr_imm(b, v, start), mask);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_invocation_id(nir_builder *b, struct state *state)
 {
    return bitfield_extract(b, state->header, 11, 31);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_vertex_id(nir_builder *b, struct state *state)
 {
    return bitfield_extract(b, state->header, 6, 31);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_local_primitive_id(nir_builder *b, struct state *state)
 {
    return bitfield_extract(b, state->header, state->local_primitive_id_start,
@@ -125,15 +125,15 @@ shader_io_get_unique_index(gl_varying_slot slot)
    }
 }
 
-static nir_ssa_def *
-build_local_offset(nir_builder *b, struct state *state, nir_ssa_def *vertex,
-                   uint32_t location, uint32_t comp, nir_ssa_def *offset)
+static nir_def *
+build_local_offset(nir_builder *b, struct state *state, nir_def *vertex,
+                   uint32_t location, uint32_t comp, nir_def *offset)
 {
-   nir_ssa_def *primitive_stride = nir_load_vs_primitive_stride_ir3(b);
-   nir_ssa_def *primitive_offset =
+   nir_def *primitive_stride = nir_load_vs_primitive_stride_ir3(b);
+   nir_def *primitive_offset =
       nir_imul24(b, build_local_primitive_id(b, state), primitive_stride);
-   nir_ssa_def *attr_offset;
-   nir_ssa_def *vertex_stride;
+   nir_def *attr_offset;
+   nir_def *vertex_stride;
    unsigned index = shader_io_get_unique_index(location);
 
    switch (b->shader->info.stage) {
@@ -152,7 +152,7 @@ build_local_offset(nir_builder *b, struct state *state, nir_ssa_def *vertex,
       unreachable("bad shader stage");
    }
 
-   nir_ssa_def *vertex_offset = nir_imul24(b, vertex, vertex_stride);
+   nir_def *vertex_offset = nir_imul24(b, vertex, vertex_stride);
 
    return nir_iadd(
       b, nir_iadd(b, primitive_offset, vertex_offset),
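
A minimal model (illustrative; the per-stage attr_offset selection is elided above, so it is folded into one parameter here) of the shared-memory addressing built by build_local_offset:

#include <stdint.h>

static uint32_t
local_offset_model(uint32_t local_prim_id, uint32_t primitive_stride,
                   uint32_t vertex, uint32_t vertex_stride,
                   uint32_t attr_offset)
{
   return local_prim_id * primitive_stride +
          vertex * vertex_stride +
          attr_offset;
}
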
@@ -161,8 +161,8 @@ build_local_offset(nir_builder *b, struct state *state, nir_ssa_def *vertex,
 
 static nir_intrinsic_instr *
 replace_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
-                  nir_intrinsic_op op, nir_ssa_def *src0, nir_ssa_def *src1,
-                  nir_ssa_def *src2)
+                  nir_intrinsic_op op, nir_def *src0, nir_def *src1,
+                  nir_def *src2)
 {
    nir_intrinsic_instr *new_intr = nir_intrinsic_instr_create(b->shader, op);
 
@@ -181,7 +181,7 @@ replace_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
    nir_builder_instr_insert(b, &new_intr->instr);
 
    if (nir_intrinsic_infos[op].has_dest)
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, &new_intr->dest.ssa);
+      nir_def_rewrite_uses(&intr->dest.ssa, &new_intr->dest.ssa);
 
    nir_instr_remove(&intr->instr);
 
@@ -267,8 +267,8 @@ lower_block_to_explicit_output(nir_block *block, nir_builder *b,
 
          b->cursor = nir_instr_remove(&intr->instr);
 
-         nir_ssa_def *vertex_id = build_vertex_id(b, state);
-         nir_ssa_def *offset = build_local_offset(
+         nir_def *vertex_id = build_vertex_id(b, state);
+         nir_def *offset = build_local_offset(
             b, state, vertex_id, nir_intrinsic_io_semantics(intr).location,
             nir_intrinsic_component(intr), intr->src[1].ssa);
 
@@ -282,7 +282,7 @@ lower_block_to_explicit_output(nir_block *block, nir_builder *b,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 local_thread_id(nir_builder *b)
 {
    return bitfield_extract(b, nir_load_gs_header_ir3(b), 16, 1023);
@@ -333,7 +333,7 @@ lower_block_to_explicit_input(nir_block *block, nir_builder *b,
 
          b->cursor = nir_before_instr(&intr->instr);
 
-         nir_ssa_def *offset = build_local_offset(
+         nir_def *offset = build_local_offset(
             b, state,
             intr->src[0].ssa, // this is typically gl_InvocationID
             nir_intrinsic_io_semantics(intr).location,
@@ -347,8 +347,8 @@ lower_block_to_explicit_input(nir_block *block, nir_builder *b,
       case nir_intrinsic_load_invocation_id: {
          b->cursor = nir_before_instr(&intr->instr);
 
-         nir_ssa_def *iid = build_invocation_id(b, state);
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, iid);
+         nir_def *iid = build_invocation_id(b, state);
+         nir_def_rewrite_uses(&intr->dest.ssa, iid);
          nir_instr_remove(&intr->instr);
          break;
       }
@@ -388,7 +388,7 @@ ir3_nir_lower_to_explicit_input(nir_shader *shader,
    v->input_size = calc_primitive_map_size(shader);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_tcs_out_vertices(nir_builder *b)
 {
    if (b->shader->info.stage == MESA_SHADER_TESS_CTRL)
@@ -397,15 +397,15 @@ build_tcs_out_vertices(nir_builder *b)
       return nir_load_patch_vertices_in(b);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_per_vertex_offset(nir_builder *b, struct state *state,
-                        nir_ssa_def *vertex, uint32_t location, uint32_t comp,
-                        nir_ssa_def *offset)
+                        nir_def *vertex, uint32_t location, uint32_t comp,
+                        nir_def *offset)
 {
-   nir_ssa_def *patch_id = nir_load_rel_patch_id_ir3(b);
-   nir_ssa_def *patch_stride = nir_load_hs_patch_stride_ir3(b);
-   nir_ssa_def *patch_offset = nir_imul24(b, patch_id, patch_stride);
-   nir_ssa_def *attr_offset;
+   nir_def *patch_id = nir_load_rel_patch_id_ir3(b);
+   nir_def *patch_stride = nir_load_hs_patch_stride_ir3(b);
+   nir_def *patch_offset = nir_imul24(b, patch_id, patch_stride);
+   nir_def *attr_offset;
 
    if (nir_src_is_const(nir_src_for_ssa(offset))) {
       location += nir_src_as_uint(nir_src_for_ssa(offset));
@@ -417,7 +417,7 @@ build_per_vertex_offset(nir_builder *b, struct state *state,
       offset = nir_ishl_imm(b, offset, 2);
    }
 
-   nir_ssa_def *vertex_offset;
+   nir_def *vertex_offset;
    if (vertex) {
       unsigned index = shader_io_get_unique_index(location);
       switch (b->shader->info.stage) {
@@ -446,9 +446,9 @@ build_per_vertex_offset(nir_builder *b, struct state *state,
    return nir_iadd(b, nir_iadd(b, patch_offset, attr_offset), vertex_offset);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_patch_offset(nir_builder *b, struct state *state, uint32_t base,
-                   uint32_t comp, nir_ssa_def *offset)
+                   uint32_t comp, nir_def *offset)
 {
    return build_per_vertex_offset(b, state, NULL, base, comp, offset);
 }
@@ -474,7 +474,7 @@ tess_level_components(struct state *state, uint32_t *inner, uint32_t *outer)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 build_tessfactor_base(nir_builder *b, gl_varying_slot slot, uint32_t comp,
                       struct state *state)
 {
@@ -483,9 +483,9 @@ build_tessfactor_base(nir_builder *b, gl_varying_slot slot, uint32_t comp,
 
    const uint32_t patch_stride = 1 + inner_levels + outer_levels;
 
-   nir_ssa_def *patch_id = nir_load_rel_patch_id_ir3(b);
+   nir_def *patch_id = nir_load_rel_patch_id_ir3(b);
 
-   nir_ssa_def *patch_offset =
+   nir_def *patch_offset =
       nir_imul24(b, patch_id, nir_imm_int(b, patch_stride));
 
    uint32_t offset;
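
A minimal model (illustrative; units are whatever build_tessfactor_base addresses in) of the tessfactor layout above: each patch occupies 1 + inner_levels + outer_levels slots, so a patch's factors start at patch_id * patch_stride.

#include <stdint.h>

static uint32_t
tessfactor_patch_base_model(uint32_t patch_id,
                            uint32_t inner_levels, uint32_t outer_levels)
{
   const uint32_t patch_stride = 1 + inner_levels + outer_levels;
   return patch_id * patch_stride;
}
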
@@ -521,8 +521,8 @@ lower_tess_ctrl_block(nir_block *block, nir_builder *b, struct state *state)
 
          b->cursor = nir_before_instr(&intr->instr);
 
-         nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
-         nir_ssa_def *offset = build_per_vertex_offset(
+         nir_def *address = nir_load_tess_param_base_ir3(b);
+         nir_def *offset = build_per_vertex_offset(
             b, state, intr->src[0].ssa,
             nir_intrinsic_io_semantics(intr).location,
             nir_intrinsic_component(intr), intr->src[1].ssa);
@@ -541,9 +541,9 @@ lower_tess_ctrl_block(nir_block *block, nir_builder *b, struct state *state)
          assert(
             util_is_power_of_two_nonzero(nir_intrinsic_write_mask(intr) + 1));
 
-         nir_ssa_def *value = intr->src[0].ssa;
-         nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
-         nir_ssa_def *offset = build_per_vertex_offset(
+         nir_def *value = intr->src[0].ssa;
+         nir_def *address = nir_load_tess_param_base_ir3(b);
+         nir_def *offset = build_per_vertex_offset(
             b, state, intr->src[1].ssa,
             nir_intrinsic_io_semantics(intr).location,
             nir_intrinsic_component(intr), intr->src[2].ssa);
@@ -559,7 +559,7 @@ lower_tess_ctrl_block(nir_block *block, nir_builder *b, struct state *state)
 
          b->cursor = nir_before_instr(&intr->instr);
 
-         nir_ssa_def *address, *offset;
+         nir_def *address, *offset;
 
          /* note if vectorization of the tess level loads ever happens:
           * "ldg" across 16-byte boundaries can behave incorrectly if results
@@ -614,12 +614,12 @@ lower_tess_ctrl_block(nir_block *block, nir_builder *b, struct state *state)
                else
                   levels = inner_levels;
 
-               nir_ssa_def *offset = nir_iadd_imm(
+               nir_def *offset = nir_iadd_imm(
                   b, intr->src[1].ssa, nir_intrinsic_component(intr));
                nif = nir_push_if(b, nir_ult_imm(b, offset, levels));
             }
 
-            nir_ssa_def *offset = build_tessfactor_base(
+            nir_def *offset = build_tessfactor_base(
                b, location, nir_intrinsic_component(intr), state);
 
             replace_intrinsic(b, intr, nir_intrinsic_store_global_ir3,
@@ -631,8 +631,8 @@ lower_tess_ctrl_block(nir_block *block, nir_builder *b, struct state *state)
                nir_pop_if(b, nif);
             }
          } else {
-            nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
-            nir_ssa_def *offset = build_patch_offset(
+            nir_def *address = nir_load_tess_param_base_ir3(b);
+            nir_def *offset = build_patch_offset(
                b, state, location, nir_intrinsic_component(intr),
                intr->src[1].ssa);
 
@@ -713,10 +713,10 @@ ir3_nir_lower_tess_ctrl(nir_shader *shader, struct ir3_shader_variant *v,
 
    /* Re-emit the header, since the old one got moved into the if branch */
    state.header = nir_load_tcs_header_ir3(&b);
-   nir_ssa_def *iid = build_invocation_id(&b, &state);
+   nir_def *iid = build_invocation_id(&b, &state);
 
    const uint32_t nvertices = shader->info.tess.tcs_vertices_out;
-   nir_ssa_def *cond = nir_ult_imm(&b, iid, nvertices);
+   nir_def *cond = nir_ult_imm(&b, iid, nvertices);
 
    nir_if *nif = nir_push_if(&b, cond);
 
@@ -725,7 +725,7 @@ ir3_nir_lower_tess_ctrl(nir_shader *shader, struct ir3_shader_variant *v,
    b.cursor = nir_after_cf_list(&nif->then_list);
 
   /* Insert conditional exit for threads with invocation id != 0 */
-   nir_ssa_def *iid0_cond = nir_ieq_imm(&b, iid, 0);
+   nir_def *iid0_cond = nir_ieq_imm(&b, iid, 0);
    nir_cond_end_ir3(&b, iid0_cond);
 
    emit_tess_epilouge(&b, &state);
@@ -750,8 +750,8 @@ lower_tess_eval_block(nir_block *block, nir_builder *b, struct state *state)
 
          b->cursor = nir_before_instr(&intr->instr);
 
-         nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
-         nir_ssa_def *offset = build_per_vertex_offset(
+         nir_def *address = nir_load_tess_param_base_ir3(b);
+         nir_def *offset = build_per_vertex_offset(
             b, state, intr->src[0].ssa,
             nir_intrinsic_io_semantics(intr).location,
             nir_intrinsic_component(intr), intr->src[1].ssa);
@@ -766,7 +766,7 @@ lower_tess_eval_block(nir_block *block, nir_builder *b, struct state *state)
 
          b->cursor = nir_before_instr(&intr->instr);
 
-         nir_ssa_def *address, *offset;
+         nir_def *address, *offset;
 
          /* note if vectorization of the tess level loads ever happens:
           * "ldg" across 16-byte boundaries can behave incorrectly if results
@@ -947,7 +947,7 @@ lower_gs_block(nir_block *block, nir_builder *b, struct state *state)
       case nir_intrinsic_emit_vertex: {
          /* Load the vertex count */
          b->cursor = nir_before_instr(&intr->instr);
-         nir_ssa_def *count = nir_load_var(b, state->vertex_count_var);
+         nir_def *count = nir_load_var(b, state->vertex_count_var);
 
          nir_push_if(b, nir_ieq(b, count, local_thread_id(b)));
 
@@ -1088,7 +1088,7 @@ ir3_nir_lower_gs(nir_shader *shader)
     *
     * [1] ex, tests/spec/glsl-1.50/execution/compatibility/clipping/gs-clip-vertex-const-accept.shader_test
     */
-   nir_ssa_def *cond =
+   nir_def *cond =
       nir_ieq_imm(&b, nir_load_var(&b, state.emitted_vertex_var), 0);
    nir_push_if(&b, cond);
    nir_store_var(&b, state.vertex_flags_out, nir_imm_int(&b, 4), 0x1);
index 1763e81..05855d8 100644
@@ -29,7 +29,7 @@
  */
 
 static int
-coord_offset(nir_ssa_def *ssa)
+coord_offset(nir_def *ssa)
 {
    nir_instr *parent_instr = ssa->parent_instr;
 
@@ -97,7 +97,7 @@ coord_offset(nir_ssa_def *ssa)
 }
 
 int
-ir3_nir_coord_offset(nir_ssa_def *ssa)
+ir3_nir_coord_offset(nir_def *ssa)
 {
 
    assert(ssa->num_components == 2);
index 1202b73..196416e 100644
@@ -47,7 +47,7 @@ lower_wide_load_store_filter(const nir_instr *instr, const void *unused)
    return false;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
 {
    (void)unused;
@@ -57,12 +57,12 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
    if (is_intrinsic_store(intr->intrinsic)) {
       unsigned num_comp = nir_intrinsic_src_components(intr, 0);
       unsigned wrmask = nir_intrinsic_write_mask(intr);
-      nir_ssa_def *val = nir_ssa_for_src(b, intr->src[0], num_comp);
-      nir_ssa_def *addr = nir_ssa_for_src(b, intr->src[1], 1);
+      nir_def *val = nir_ssa_for_src(b, intr->src[0], num_comp);
+      nir_def *addr = nir_ssa_for_src(b, intr->src[1], 1);
 
       for (unsigned off = 0; off < num_comp; off += 4) {
          unsigned c = MIN2(num_comp - off, 4);
-         nir_ssa_def *v = nir_channels(b, val, BITFIELD_MASK(c) << off);
+         nir_def *v = nir_channels(b, val, BITFIELD_MASK(c) << off);
 
          nir_intrinsic_instr *store =
                nir_intrinsic_instr_create(b->shader, intr->intrinsic);
@@ -82,8 +82,8 @@ lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
    } else {
       unsigned num_comp = nir_intrinsic_dest_components(intr);
       unsigned bit_size = nir_dest_bit_size(intr->dest);
-      nir_ssa_def *addr = nir_ssa_for_src(b, intr->src[0], 1);
-      nir_ssa_def *components[num_comp];
+      nir_def *addr = nir_ssa_for_src(b, intr->src[0], 1);
+      nir_def *components[num_comp];
 
       for (unsigned off = 0; off < num_comp;) {
          unsigned c = MIN2(num_comp - off, 4);
index 6db45b9..a26568f 100644
@@ -33,7 +33,7 @@
  */
 
 static void
-def_size(nir_ssa_def *def, unsigned *size, unsigned *align)
+def_size(nir_def *def, unsigned *size, unsigned *align)
 {
    unsigned bit_size = def->bit_size == 1 ? 32 : def->bit_size;
    /* Due to the implicit const file promotion we want to expand 16-bit values
@@ -45,7 +45,7 @@ def_size(nir_ssa_def *def, unsigned *size, unsigned *align)
 }
 
 static bool
-all_uses_float(nir_ssa_def *def, bool allow_src2)
+all_uses_float(nir_def *def, bool allow_src2)
 {
    nir_foreach_use_including_if (use, def) {
       if (use->is_if)
@@ -75,7 +75,7 @@ all_uses_float(nir_ssa_def *def, bool allow_src2)
 }
 
 static bool
-all_uses_bit(nir_ssa_def *def)
+all_uses_bit(nir_def *def)
 {
    nir_foreach_use_including_if (use, def) {
       if (use->is_if)
@@ -215,7 +215,7 @@ instr_cost(nir_instr *instr, const void *data)
 }
 
 static float
-rewrite_cost(nir_ssa_def *def, const void *data)
+rewrite_cost(nir_def *def, const void *data)
 {
    /* We always have to expand booleans */
    if (def->bit_size == 1)
@@ -322,12 +322,12 @@ ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v)
          if (intrin->intrinsic != nir_intrinsic_load_preamble)
             continue;
 
-         nir_ssa_def *dest = &intrin->dest.ssa;
+         nir_def *dest = &intrin->dest.ssa;
 
          unsigned offset = preamble_base + nir_intrinsic_base(intrin);
          b->cursor = nir_before_instr(instr);
 
-         nir_ssa_def *new_dest =
+         nir_def *new_dest =
             nir_load_uniform(b, dest->num_components, 32, nir_imm_int(b, 0),
                              .base = offset);
 
@@ -343,7 +343,7 @@ ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v)
             }
          }
 
-         nir_ssa_def_rewrite_uses(dest, new_dest);
+         nir_def_rewrite_uses(dest, new_dest);
          nir_instr_remove(instr);
          nir_instr_free(instr);
       }
@@ -361,7 +361,7 @@ ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v)
          if (intrin->intrinsic != nir_intrinsic_store_preamble)
             continue;
 
-         nir_ssa_def *src = intrin->src[0].ssa;
+         nir_def *src = intrin->src[0].ssa;
          unsigned offset = preamble_base + nir_intrinsic_base(intrin);
 
          b->cursor = nir_before_instr(instr);
index 6712dd4..9198b98 100644
@@ -495,7 +495,7 @@ r2d_run(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 
 /* r3d_ = shader path operations */
 
-static nir_ssa_def *
+static nir_def *
 load_const(nir_builder *b, unsigned base, unsigned components)
 {
    return nir_load_uniform(b, components, 32, nir_imm_int(b, 0),
@@ -515,11 +515,11 @@ build_blit_vs_shader(void)
                           "gl_Position");
    out_pos->data.location = VARYING_SLOT_POS;
 
-   nir_ssa_def *vert0_pos = load_const(b, 0, 2);
-   nir_ssa_def *vert1_pos = load_const(b, 4, 2);
-   nir_ssa_def *vertex = nir_load_vertex_id(b);
+   nir_def *vert0_pos = load_const(b, 0, 2);
+   nir_def *vert1_pos = load_const(b, 4, 2);
+   nir_def *vertex = nir_load_vertex_id(b);
 
-   nir_ssa_def *pos = nir_bcsel(b, nir_i2b(b, vertex), vert1_pos, vert0_pos);
+   nir_def *pos = nir_bcsel(b, nir_i2b(b, vertex), vert1_pos, vert0_pos);
    pos = nir_vec4(b, nir_channel(b, pos, 0),
                      nir_channel(b, pos, 1),
                      nir_imm_float(b, 0.0),
@@ -532,13 +532,13 @@ build_blit_vs_shader(void)
                           "coords");
    out_coords->data.location = VARYING_SLOT_VAR0;
 
-   nir_ssa_def *vert0_coords = load_const(b, 2, 2);
-   nir_ssa_def *vert1_coords = load_const(b, 6, 2);
+   nir_def *vert0_coords = load_const(b, 2, 2);
+   nir_def *vert1_coords = load_const(b, 6, 2);
 
    /* Only used with "z scale" blit path which uses a 3d texture */
-   nir_ssa_def *z_coord = load_const(b, 8, 1);
+   nir_def *z_coord = load_const(b, 8, 1);
 
-   nir_ssa_def *coords = nir_bcsel(b, nir_i2b(b, vertex), vert1_coords, vert0_coords);
+   nir_def *coords = nir_bcsel(b, nir_i2b(b, vertex), vert1_coords, vert0_coords);
    coords = nir_vec3(b, nir_channel(b, coords, 0), nir_channel(b, coords, 1),
                      z_coord);
 
@@ -560,13 +560,13 @@ build_clear_vs_shader(void)
                           "gl_Position");
    out_pos->data.location = VARYING_SLOT_POS;
 
-   nir_ssa_def *vert0_pos = load_const(b, 0, 2);
-   nir_ssa_def *vert1_pos = load_const(b, 4, 2);
+   nir_def *vert0_pos = load_const(b, 0, 2);
+   nir_def *vert1_pos = load_const(b, 4, 2);
    /* c0.z is used to clear depth */
-   nir_ssa_def *depth = load_const(b, 2, 1);
-   nir_ssa_def *vertex = nir_load_vertex_id(b);
+   nir_def *depth = load_const(b, 2, 1);
+   nir_def *vertex = nir_load_vertex_id(b);
 
-   nir_ssa_def *pos = nir_bcsel(b, nir_i2b(b, vertex), vert1_pos, vert0_pos);
+   nir_def *pos = nir_bcsel(b, nir_i2b(b, vertex), vert1_pos, vert0_pos);
    pos = nir_vec4(b, nir_channel(b, pos, 0),
                      nir_channel(b, pos, 1),
                      depth, nir_imm_float(b, 1.0));
@@ -577,7 +577,7 @@ build_clear_vs_shader(void)
       nir_variable_create(b->shader, nir_var_shader_out, glsl_uint_type(),
                           "gl_Layer");
    out_layer->data.location = VARYING_SLOT_LAYER;
-   nir_ssa_def *layer = load_const(b, 3, 1);
+   nir_def *layer = load_const(b, 3, 1);
    nir_store_var(b, out_layer, layer, 1);
 
    return b->shader;
@@ -673,7 +673,7 @@ build_ms_copy_fs_shader(void)
    BITSET_SET(b->shader->info.textures_used, 0);
    BITSET_SET(b->shader->info.textures_used_by_txf, 0);
 
-   nir_ssa_def *coord = nir_f2i32(b, nir_load_var(b, in_coords));
+   nir_def *coord = nir_f2i32(b, nir_load_var(b, in_coords));
 
    tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
    tex->coord_components = 2;
@@ -704,7 +704,7 @@ build_clear_fs_shader(unsigned mrts)
                              "color");
       out_color->data.location = FRAG_RESULT_DATA0 + i;
 
-      nir_ssa_def *color = load_const(b, 4 * i, 4);
+      nir_def *color = load_const(b, 4 * i, 4);
       nir_store_var(b, out_color, color, 0xf);
    }
 
index 500f80c..81e352d 100644
@@ -49,16 +49,16 @@ lower_multiview_mask(nir_shader *nir, uint32_t *mask)
          if (var->data.location != VARYING_SLOT_POS)
             continue;
 
-         nir_ssa_def *orig_src = intrin->src[1].ssa;
+         nir_def *orig_src = intrin->src[1].ssa;
          b.cursor = nir_before_instr(instr);
 
          /* ((1ull << gl_ViewIndex) & mask) != 0 */
-         nir_ssa_def *cmp =
+         nir_def *cmp =
             nir_i2b(&b, nir_iand(&b, nir_imm_int(&b, old_mask),
                                   nir_ishl(&b, nir_imm_int(&b, 1),
                                            nir_load_view_index(&b))));
 
-         nir_ssa_def *src = nir_bcsel(&b, cmp, orig_src, nir_imm_float(&b, 0.));
+         nir_def *src = nir_bcsel(&b, cmp, orig_src, nir_imm_float(&b, 0.));
          nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(src));
 
          nir_metadata_preserve(impl, nir_metadata_block_index |
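
A scalar model (illustrative) of the masking above: a gl_Position component is kept only for views enabled in the mask, otherwise forced to zero.

#include <stdint.h>

static float
masked_pos_model(uint32_t view_mask, uint32_t view_index, float orig)
{
   /* ((1 << gl_ViewIndex) & mask) != 0 ? orig : 0.0 */
   return ((1u << view_index) & view_mask) ? orig : 0.0f;
}
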
index 1226992..779e9fd 100644
@@ -153,13 +153,13 @@ lower_load_push_constant(struct tu_device *dev,
       base -= shader->const_state.push_consts.lo * 4;
    }
 
-   nir_ssa_def *load =
+   nir_def *load =
       nir_load_uniform(b, instr->num_components,
             instr->dest.ssa.bit_size,
             nir_ushr_imm(b, instr->src[0].ssa, 2),
             .base = base);
 
-   nir_ssa_def_rewrite_uses(&instr->dest.ssa, load);
+   nir_def_rewrite_uses(&instr->dest.ssa, load);
 
    nir_instr_remove(&instr->instr);
 }
@@ -169,14 +169,14 @@ lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
                             struct tu_shader *shader,
                             const struct tu_pipeline_layout *layout)
 {
-   nir_ssa_def *vulkan_idx = instr->src[0].ssa;
+   nir_def *vulkan_idx = instr->src[0].ssa;
 
    unsigned set = nir_intrinsic_desc_set(instr);
    unsigned binding = nir_intrinsic_binding(instr);
    struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
    struct tu_descriptor_set_binding_layout *binding_layout =
       &set_layout->binding[binding];
-   nir_ssa_def *base;
+   nir_def *base;
 
    if (binding_layout->type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
       return;
@@ -193,7 +193,7 @@ lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
           * get it from the const file instead.
           */
          base = nir_imm_int(b, binding_layout->dynamic_offset_offset / (4 * A6XX_TEX_CONST_DWORDS));
-         nir_ssa_def *dynamic_offset_start =
+         nir_def *dynamic_offset_start =
             nir_load_uniform(b, 1, 32, nir_imm_int(b, 0),
                              .base = shader->const_state.dynamic_offset_loc + set);
          base = nir_iadd(b, base, dynamic_offset_start);
@@ -210,46 +210,46 @@ lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
 
    unsigned stride = binding_layout->size / (4 * A6XX_TEX_CONST_DWORDS);
    assert(util_is_power_of_two_nonzero(stride));
-   nir_ssa_def *shift = nir_imm_int(b, util_logbase2(stride));
+   nir_def *shift = nir_imm_int(b, util_logbase2(stride));
 
-   nir_ssa_def *def = nir_vec3(b, nir_imm_int(b, set),
+   nir_def *def = nir_vec3(b, nir_imm_int(b, set),
                                nir_iadd(b, base,
                                         nir_ishl(b, vulkan_idx, shift)),
                                shift);
 
-   nir_ssa_def_rewrite_uses(&instr->dest.ssa, def);
+   nir_def_rewrite_uses(&instr->dest.ssa, def);
    nir_instr_remove(&instr->instr);
 }
 
 static void
 lower_vulkan_resource_reindex(nir_builder *b, nir_intrinsic_instr *instr)
 {
-   nir_ssa_def *old_index = instr->src[0].ssa;
-   nir_ssa_def *delta = instr->src[1].ssa;
-   nir_ssa_def *shift = nir_channel(b, old_index, 2);
+   nir_def *old_index = instr->src[0].ssa;
+   nir_def *delta = instr->src[1].ssa;
+   nir_def *shift = nir_channel(b, old_index, 2);
 
-   nir_ssa_def *new_index =
+   nir_def *new_index =
       nir_vec3(b, nir_channel(b, old_index, 0),
                nir_iadd(b, nir_channel(b, old_index, 1),
                         nir_ishl(b, delta, shift)),
                shift);
 
-   nir_ssa_def_rewrite_uses(&instr->dest.ssa, new_index);
+   nir_def_rewrite_uses(&instr->dest.ssa, new_index);
    nir_instr_remove(&instr->instr);
 }
 
 static void
 lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin)
 {
-   nir_ssa_def *old_index = intrin->src[0].ssa;
+   nir_def *old_index = intrin->src[0].ssa;
    /* Loading the descriptor happens as part of the load/store instruction so
     * this is a no-op. We just need to turn the shift into an offset of 0.
     */
-   nir_ssa_def *new_index =
+   nir_def *new_index =
       nir_vec3(b, nir_channel(b, old_index, 0),
                nir_channel(b, old_index, 1),
                nir_imm_int(b, 0));
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, new_index);
+   nir_def_rewrite_uses(&intrin->dest.ssa, new_index);
    nir_instr_remove(&intrin->instr);
 }
 
@@ -273,8 +273,8 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev,
       buffer_src = 0;
    }
 
-   nir_ssa_scalar scalar_idx = nir_ssa_scalar_resolved(intrin->src[buffer_src].ssa, 0);
-   nir_ssa_def *descriptor_idx = nir_channel(b, intrin->src[buffer_src].ssa, 1);
+   nir_scalar scalar_idx = nir_scalar_resolved(intrin->src[buffer_src].ssa, 0);
+   nir_def *descriptor_idx = nir_channel(b, intrin->src[buffer_src].ssa, 1);
 
    /* For isam, we need to use the appropriate descriptor if 16-bit storage is
     * enabled. Descriptor 0 is the 16-bit one, descriptor 1 is the 32-bit one.
@@ -286,21 +286,21 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev,
       descriptor_idx = nir_iadd_imm(b, descriptor_idx, 1);
    }
 
-   nir_ssa_def *results[MAX_SETS + 1] = { NULL };
+   nir_def *results[MAX_SETS + 1] = { NULL };
 
-   if (nir_ssa_scalar_is_const(scalar_idx)) {
-      nir_ssa_def *bindless =
-         nir_bindless_resource_ir3(b, 32, descriptor_idx, .desc_set = nir_ssa_scalar_as_uint(scalar_idx));
+   if (nir_scalar_is_const(scalar_idx)) {
+      nir_def *bindless =
+         nir_bindless_resource_ir3(b, 32, descriptor_idx, .desc_set = nir_scalar_as_uint(scalar_idx));
       nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[buffer_src], bindless);
       return;
    }
 
-   nir_ssa_def *base_idx = nir_channel(b, scalar_idx.def, scalar_idx.comp);
+   nir_def *base_idx = nir_channel(b, scalar_idx.def, scalar_idx.comp);
    for (unsigned i = 0; i < MAX_SETS + 1; i++) {
       /* if (base_idx == i) { ... */
       nir_if *nif = nir_push_if(b, nir_ieq_imm(b, base_idx, i));
 
-      nir_ssa_def *bindless =
+      nir_def *bindless =
          nir_bindless_resource_ir3(b, 32, descriptor_idx, .desc_set = i);
 
       nir_intrinsic_instr *copy =
@@ -332,8 +332,8 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev,
       nir_push_else(b, nif);
    }
 
-   nir_ssa_def *result =
-      nir_ssa_undef(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+   nir_def *result =
+      nir_undef(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
    for (int i = MAX_SETS; i >= 0; i--) {
       nir_pop_if(b, NULL);
       if (info->has_dest)
@@ -341,11 +341,11 @@ lower_ssbo_ubo_intrinsic(struct tu_device *dev,
    }
 
    if (info->has_dest)
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, result);
+      nir_def_rewrite_uses(&intrin->dest.ssa, result);
    nir_instr_remove(&intrin->instr);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_bindless(struct tu_device *dev, nir_builder *b,
                nir_deref_instr *deref, bool is_sampler,
                struct tu_shader *shader,
@@ -373,13 +373,13 @@ build_bindless(struct tu_device *dev, nir_builder *b,
       if (deref->deref_type == nir_deref_type_var)
          return nir_imm_int(b, idx);
 
-      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
       return nir_iadd_imm(b, nir_imul_imm(b, arr_index, 2), idx);
    }
 
    shader->active_desc_sets |= 1u << set;
 
-   nir_ssa_def *desc_offset;
+   nir_def *desc_offset;
    unsigned descriptor_stride;
    unsigned offset = 0;
    /* Samplers come second in combined image/sampler descriptors, see
@@ -397,7 +397,7 @@ build_bindless(struct tu_device *dev, nir_builder *b,
    if (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
 
-      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
       desc_offset = nir_iadd(b, desc_offset,
                              nir_imul_imm(b, arr_index, descriptor_stride));
    }
@@ -411,7 +411,7 @@ lower_image_deref(struct tu_device *dev, nir_builder *b,
                   const struct tu_pipeline_layout *layout)
 {
    nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
-   nir_ssa_def *bindless = build_bindless(dev, b, deref, false, shader, layout);
+   nir_def *bindless = build_bindless(dev, b, deref, false, shader, layout);
    nir_rewrite_image_intrinsic(instr, bindless, true);
 }
 
@@ -519,12 +519,12 @@ lower_tex_ycbcr(const struct tu_pipeline_layout *layout,
    }
 
    uint32_t bpcs[3] = {bits, bits, bits}; /* TODO: use right bpc for each channel ? */
-   nir_ssa_def *result = nir_convert_ycbcr_to_rgb(builder,
+   nir_def *result = nir_convert_ycbcr_to_rgb(builder,
                                                   ycbcr_sampler->ycbcr_model,
                                                   ycbcr_sampler->ycbcr_range,
                                                   &tex->dest.ssa,
                                                   bpcs);
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, result,
+   nir_def_rewrite_uses_after(&tex->dest.ssa, result,
                                   result->parent_instr);
 
    builder->cursor = nir_before_instr(&tex->instr);
@@ -539,7 +539,7 @@ lower_tex(nir_builder *b, nir_tex_instr *tex, struct tu_device *dev,
    int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
    if (sampler_src_idx >= 0) {
       nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
-      nir_ssa_def *bindless = build_bindless(dev, b, deref, true, shader, layout);
+      nir_def *bindless = build_bindless(dev, b, deref, true, shader, layout);
       nir_instr_rewrite_src(&tex->instr, &tex->src[sampler_src_idx].src,
                             nir_src_for_ssa(bindless));
       tex->src[sampler_src_idx].src_type = nir_tex_src_sampler_handle;
@@ -548,7 +548,7 @@ lower_tex(nir_builder *b, nir_tex_instr *tex, struct tu_device *dev,
    int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
    if (tex_src_idx >= 0) {
       nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
-      nir_ssa_def *bindless = build_bindless(dev, b, deref, false, shader, layout);
+      nir_def *bindless = build_bindless(dev, b, deref, false, shader, layout);
       nir_instr_rewrite_src(&tex->instr, &tex->src[tex_src_idx].src,
                             nir_src_for_ssa(bindless));
       tex->src[tex_src_idx].src_type = nir_tex_src_texture_handle;
@@ -629,19 +629,19 @@ lower_inline_ubo(nir_builder *b, nir_instr *instr, void *cb_data)
       /* Assume we're loading out-of-bounds from a 0-sized inline uniform
        * block that is filtered out below.
        */
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                               nir_ssa_undef(b, intrin->num_components,
+      nir_def_rewrite_uses(&intrin->dest.ssa,
+                               nir_undef(b, intrin->num_components,
                                              intrin->dest.ssa.bit_size));
       return true;
    }
 
-   nir_ssa_def *offset = intrin->src[1].ssa;
+   nir_def *offset = intrin->src[1].ssa;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *val;
+   nir_def *val;
 
    if (use_load) {
-      nir_ssa_def *base_addr =
+      nir_def *base_addr =
          nir_load_uniform(b, 2, 32, nir_imm_int(b, 0), .base = base);
       val = nir_load_global_ir3(b, intrin->num_components,
                                 intrin->dest.ssa.bit_size,
@@ -652,7 +652,7 @@ lower_inline_ubo(nir_builder *b, nir_instr *instr, void *cb_data)
                              nir_ishr_imm(b, offset, 2), .base = base);
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, val);
+   nir_def_rewrite_uses(&intrin->dest.ssa, val);
    nir_instr_remove(instr);
    return true;
 }
@@ -843,7 +843,7 @@ lower_fdm_filter(const nir_instr *instr, const void *data)
        options->adjust_fragcoord);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_fdm_instr(struct nir_builder *b, nir_instr *instr, void *data)
 {
    const struct lower_fdm_options *options =
@@ -851,7 +851,7 @@ lower_fdm_instr(struct nir_builder *b, nir_instr *instr, void *data)
 
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-   nir_ssa_def *view;
+   nir_def *view;
    if (options->multiview) {
       nir_variable *view_var =
          nir_find_variable_with_location(b->shader, nir_var_shader_in,
@@ -870,14 +870,14 @@ lower_fdm_instr(struct nir_builder *b, nir_instr *instr, void *data)
       view = nir_imm_int(b, 0);
    }
 
-   nir_ssa_def *frag_size =
+   nir_def *frag_size =
       nir_load_frag_size_ir3(b, view, .range = options->num_views);
 
    if (intrin->intrinsic == nir_intrinsic_load_frag_coord) {
-      nir_ssa_def *frag_offset =
+      nir_def *frag_offset =
          nir_load_frag_offset_ir3(b, view, .range = options->num_views);
-      nir_ssa_def *unscaled_coord = nir_load_frag_coord_unscaled_ir3(b);
-      nir_ssa_def *xy = nir_trim_vector(b, unscaled_coord, 2);
+      nir_def *unscaled_coord = nir_load_frag_coord_unscaled_ir3(b);
+      nir_def *xy = nir_trim_vector(b, unscaled_coord, 2);
       xy = nir_fmul(b, nir_fsub(b, xy, frag_offset), nir_i2f32(b, frag_size));
       return nir_vec4(b,
                       nir_channel(b, xy, 0),
index 5a0a60a..dd3cb82 100644
@@ -166,7 +166,7 @@ assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
 
 
 static void
-assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_ssa_def *ssa,
+assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_def *ssa,
                 LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
 {
    if ((ssa->num_components == 1 || is_aos(bld_base))) {
@@ -2701,7 +2701,7 @@ visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
 
 static void
 visit_ssa_undef(struct lp_build_nir_context *bld_base,
-                const nir_ssa_undef_instr *instr)
+                const nir_undef_instr *instr)
 {
    unsigned num_components = instr->def.num_components;
    LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
index 16a4694..4398ee1 100644
@@ -43,11 +43,11 @@ typedef struct {
    nir_shader *shader;
    bool fs_pos_is_sysval;
    nir_variable *stip_tex;
-   nir_ssa_def *fragcoord;
+   nir_def *fragcoord;
    nir_alu_type bool_type;
 } lower_pstipple;
 
-static nir_ssa_def *
+static nir_def *
 load_frag_coord(nir_builder *b)
 {
    nir_variable *pos = nir_get_variable_with_location(b->shader, nir_var_shader_in,
@@ -61,11 +61,11 @@ nir_lower_pstipple_block(nir_block *block,
                          lower_pstipple *state)
 {
    nir_builder *b = &state->b;
-   nir_ssa_def *texcoord;
+   nir_def *texcoord;
 
    b->cursor = nir_before_block(block);
 
-   nir_ssa_def *frag_coord = state->fs_pos_is_sysval ? nir_load_frag_coord(b) : load_frag_coord(b);
+   nir_def *frag_coord = state->fs_pos_is_sysval ? nir_load_frag_coord(b) : load_frag_coord(b);
 
    texcoord = nir_fmul(b, nir_trim_vector(b, frag_coord, 2),
                        nir_imm_vec2(b, 1.0/32.0, 1.0/32.0));
@@ -82,7 +82,7 @@ nir_lower_pstipple_block(nir_block *block,
 
    nir_builder_instr_insert(b, &tex->instr);
 
-   nir_ssa_def *condition;
+   nir_def *condition;
 
    switch (state->bool_type) {
    case nir_type_bool1:
@@ -178,45 +178,45 @@ lower_aaline_instr(nir_builder *b, nir_instr *instr, void *data)
    if (var->data.location < FRAG_RESULT_DATA0 && var->data.location != FRAG_RESULT_COLOR)
       return false;
 
-   nir_ssa_def *out_input = intrin->src[1].ssa;
+   nir_def *out_input = intrin->src[1].ssa;
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *lw = nir_load_var(b, state->line_width_input);
-   nir_ssa_def *len = nir_channel(b, lw, 3);
+   nir_def *lw = nir_load_var(b, state->line_width_input);
+   nir_def *len = nir_channel(b, lw, 3);
    len = nir_fadd_imm(b, nir_fmul_imm(b, len, 2.0), -1.0);
-   nir_ssa_def *tmp = nir_fsat(b, nir_fadd(b, nir_channels(b, lw, 0xa),
+   nir_def *tmp = nir_fsat(b, nir_fadd(b, nir_channels(b, lw, 0xa),
                                              nir_fneg(b, nir_fabs(b, nir_channels(b, lw, 0x5)))));
 
-   nir_ssa_def *max = len;
+   nir_def *max = len;
    if (state->stipple_counter) {
       assert(state->stipple_pattern);
 
-      nir_ssa_def *counter = nir_load_var(b, state->stipple_counter);
-      nir_ssa_def *pattern = nir_load_var(b, state->stipple_pattern);
-      nir_ssa_def *factor = nir_i2f32(b, nir_ishr_imm(b, pattern, 16));
+      nir_def *counter = nir_load_var(b, state->stipple_counter);
+      nir_def *pattern = nir_load_var(b, state->stipple_pattern);
+      nir_def *factor = nir_i2f32(b, nir_ishr_imm(b, pattern, 16));
       pattern = nir_iand_imm(b, pattern, 0xffff);
 
-      nir_ssa_def *stipple_pos = nir_vec2(b, nir_fadd_imm(b, counter, -0.5),
+      nir_def *stipple_pos = nir_vec2(b, nir_fadd_imm(b, counter, -0.5),
                                              nir_fadd_imm(b, counter, 0.5));
 
       stipple_pos = nir_frem(b, nir_fdiv(b, stipple_pos, factor),
                                  nir_imm_float(b, 16.0));
 
-      nir_ssa_def *p = nir_f2i32(b, stipple_pos);
-      nir_ssa_def *one = nir_imm_float(b, 1.0);
+      nir_def *p = nir_f2i32(b, stipple_pos);
+      nir_def *one = nir_imm_float(b, 1.0);
 
       // float t = 1.0 - min((1.0 - fract(stipple_pos.x)) * factor, 1.0);
-      nir_ssa_def *t = nir_ffract(b, nir_channel(b, stipple_pos, 0));
+      nir_def *t = nir_ffract(b, nir_channel(b, stipple_pos, 0));
       t = nir_fsub(b, one,
                      nir_fmin(b, nir_fmul(b, factor,
                                           nir_fsub(b, one, t)), one));
 
       // vec2 a = vec2((uvec2(pattern) >> p) & uvec2(1u));
-      nir_ssa_def *a = nir_i2f32(b,
+      nir_def *a = nir_i2f32(b,
          nir_iand(b, nir_ishr(b, nir_replicate(b, pattern, 2), p),
                   nir_imm_ivec2(b, 1, 1)));
 
       // float cov = mix(a.x, a.y, t);
-      nir_ssa_def *cov = nir_flrp(b, nir_channel(b, a, 0), nir_channel(b, a, 1), t);
+      nir_def *cov = nir_flrp(b, nir_channel(b, a, 0), nir_channel(b, a, 1), t);
 
       max = nir_fmin(b, len, cov);
    }
@@ -225,7 +225,7 @@ lower_aaline_instr(nir_builder *b, nir_instr *instr, void *data)
                   nir_fmin(b, nir_channel(b, tmp, 1), max));
    tmp = nir_fmul(b, nir_channel(b, out_input, 3), tmp);
 
-   nir_ssa_def *out = nir_vec4(b, nir_channel(b, out_input, 0),
+   nir_def *out = nir_vec4(b, nir_channel(b, out_input, 0),
                                  nir_channel(b, out_input, 1),
                                  nir_channel(b, out_input, 2),
                                  tmp);
@@ -277,7 +277,7 @@ typedef struct {
 
 static void
 nir_lower_aapoint_block(nir_block *block,
-                        lower_aapoint *state, nir_ssa_def *sel)
+                        lower_aapoint *state, nir_def *sel)
 {
   nir_builder *b = &state->b;
   nir_foreach_instr(instr, block) {
@@ -294,11 +294,11 @@ nir_lower_aapoint_block(nir_block *block,
       if (var->data.location < FRAG_RESULT_DATA0 && var->data.location != FRAG_RESULT_COLOR)
          continue;
 
-      nir_ssa_def *out_input = intrin->src[1].ssa;
+      nir_def *out_input = intrin->src[1].ssa;
       b->cursor = nir_before_instr(instr);
 
-      nir_ssa_def *tmp = nir_fmul(b, nir_channel(b, out_input, 3), sel);
-      nir_ssa_def *out = nir_vec4(b, nir_channel(b, out_input, 0),
+      nir_def *tmp = nir_fmul(b, nir_channel(b, out_input, 3), sel);
+      nir_def *out = nir_vec4(b, nir_channel(b, out_input, 0),
                                   nir_channel(b, out_input, 1),
                                   nir_channel(b, out_input, 2),
                                   tmp);
@@ -315,14 +315,14 @@ nir_lower_aapoint_impl(nir_function_impl *impl, lower_aapoint *state,
    state->b = nir_builder_at(nir_before_block(block));
 
    nir_builder *b = &state->b;
-   nir_ssa_def *aainput = nir_load_var(b, state->input);
+   nir_def *aainput = nir_load_var(b, state->input);
 
-   nir_ssa_def *dist = nir_fadd(b, nir_fmul(b, nir_channel(b, aainput, 0), nir_channel(b, aainput, 0)),
+   nir_def *dist = nir_fadd(b, nir_fmul(b, nir_channel(b, aainput, 0), nir_channel(b, aainput, 0)),
                                 nir_fmul(b, nir_channel(b, aainput, 1), nir_channel(b, aainput, 1)));
 
-   nir_ssa_def *k = nir_channel(b, aainput, 2);
-   nir_ssa_def *chan_val_one = nir_channel(b, aainput, 3);
-   nir_ssa_def *comp;
+   nir_def *k = nir_channel(b, aainput, 2);
+   nir_def *chan_val_one = nir_channel(b, aainput, 3);
+   nir_def *comp;
 
    switch (bool_type) {
    case nir_type_bool1:
@@ -343,22 +343,22 @@ nir_lower_aapoint_impl(nir_function_impl *impl, lower_aapoint *state,
 
    /* compute coverage factor = (1-d)/(1-k) */
    /* 1 - k */
-   nir_ssa_def *tmp = nir_fadd(b, chan_val_one, nir_fneg(b, k));
+   nir_def *tmp = nir_fadd(b, chan_val_one, nir_fneg(b, k));
    /* 1.0 / (1 - k) */
    tmp = nir_frcp(b, tmp);
 
    /* 1 - d */
-   nir_ssa_def *tmp2 = nir_fadd(b, chan_val_one, nir_fneg(b, dist));
+   nir_def *tmp2 = nir_fadd(b, chan_val_one, nir_fneg(b, dist));
 
    /* (1 - d) / (1 - k) */
-   nir_ssa_def *coverage = nir_fmul(b, tmp, tmp2);
+   nir_def *coverage = nir_fmul(b, tmp, tmp2);
 
    /* if (k >= distance)
     *    sel = coverage;
     * else
     *    sel = 1.0;
     */
-   nir_ssa_def *sel;
+   nir_def *sel;
 
    switch (bool_type) {
    case nir_type_bool1:
@@ -384,7 +384,7 @@ nir_lower_aapoint_impl(nir_function_impl *impl, lower_aapoint *state,
        * optimized to fsat(coverage + (1 - (k >= distance))), but I don't feel
        * like verifying that right now.
        */
-      nir_ssa_def *cmp_result = nir_sge(b, k, dist);
+      nir_def *cmp_result = nir_sge(b, k, dist);
       sel = nir_fadd(b,
                      nir_fmul(b, coverage, cmp_result),
                      nir_fadd(b, chan_val_one, nir_fneg(b, cmp_result)));
index b2a9303..b9a9aed 100644
@@ -550,9 +550,9 @@ ntt_allocate_regs_unoptimized(struct ntt_compile *c, nir_function_impl *impl)
 static const uint32_t
 ntt_extract_const_src_offset(nir_src *src)
 {
-   nir_ssa_scalar s = nir_get_ssa_scalar(src->ssa, 0);
+   nir_scalar s = nir_get_ssa_scalar(src->ssa, 0);
 
-   while (nir_ssa_scalar_is_alu(s)) {
+   while (nir_scalar_is_alu(s)) {
       nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
 
       if (alu->op == nir_op_iadd) {
@@ -567,7 +567,7 @@ ntt_extract_const_src_offset(nir_src *src)
          return 0;
       }
 
-      /* We'd like to reuse nir_ssa_scalar_chase_movs(), but it assumes SSA and that
+      /* We'd like to reuse nir_scalar_chase_movs(), but it assumes SSA and that
        * seems reasonable for something used in inner loops of the compiler.
        */
       if (!nir_alu_instr_is_copy(alu))
@@ -825,7 +825,7 @@ ntt_try_store_reg_in_tgsi_output(struct ntt_compile *c, struct ureg_dst *dst,
  */
 static bool
 ntt_try_store_ssa_in_tgsi_output(struct ntt_compile *c, struct ureg_dst *dst,
-                                 nir_ssa_def *def)
+                                 nir_def *def)
 {
    *dst = ureg_dst_undef();
 
@@ -1310,7 +1310,7 @@ ntt_swizzle_for_write_mask(struct ureg_src src, uint32_t write_mask)
 }
 
 static struct ureg_dst
-ntt_get_ssa_def_decl(struct ntt_compile *c, nir_ssa_def *ssa)
+ntt_get_ssa_def_decl(struct ntt_compile *c, nir_def *ssa)
 {
    uint32_t writemask = BITSET_MASK(ssa->num_components);
    if (ssa->bit_size == 64)
@@ -1384,7 +1384,7 @@ ntt_get_alu_dest(struct ntt_compile *c, nir_dest *dest)
  * a copy of the ureg_src.
  */
 static void
-ntt_store_def(struct ntt_compile *c, nir_ssa_def *def, struct ureg_src src)
+ntt_store_def(struct ntt_compile *c, nir_def *def, struct ureg_src src)
 {
    if (!src.Indirect && !src.DimIndirect) {
       switch (src.File) {
@@ -2904,7 +2904,7 @@ ntt_emit_jump(struct ntt_compile *c, nir_jump_instr *jump)
 }
 
 static void
-ntt_emit_ssa_undef(struct ntt_compile *c, nir_ssa_undef_instr *instr)
+ntt_emit_ssa_undef(struct ntt_compile *c, nir_undef_instr *instr)
 {
    /* Nothing to do but make sure that we have some storage to deref. */
    (void)ntt_get_ssa_def_decl(c, &instr->def);
@@ -3437,20 +3437,20 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
 
    if (has_dest) {
       /* Merge the two loads' results back into a vector. */
-      nir_ssa_scalar channels[4] = {
+      nir_scalar channels[4] = {
          nir_get_ssa_scalar(&first->dest.ssa, 0),
          nir_get_ssa_scalar(&first->dest.ssa, 1),
          nir_get_ssa_scalar(&second->dest.ssa, 0),
          nir_get_ssa_scalar(&second->dest.ssa, second->num_components > 1 ? 1 : 0),
       };
-      nir_ssa_def *new = nir_vec_scalars(b, channels, instr->num_components);
-      nir_ssa_def_rewrite_uses(&instr->dest.ssa, new);
+      nir_def *new = nir_vec_scalars(b, channels, instr->num_components);
+      nir_def_rewrite_uses(&instr->dest.ssa, new);
    } else {
       /* Split the src value across the two stores. */
       b->cursor = nir_before_instr(&instr->instr);
 
-      nir_ssa_def *src0 = instr->src[0].ssa;
-      nir_ssa_scalar channels[4] = { 0 };
+      nir_def *src0 = instr->src[0].ssa;
+      nir_scalar channels[4] = { 0 };
       for (int i = 0; i < instr->num_components; i++)
          channels[i] = nir_get_ssa_scalar(src0, i);
 
@@ -3484,7 +3484,7 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
    }
    if (offset_src != -1) {
       b->cursor = nir_before_instr(&second->instr);
-      nir_ssa_def *second_offset =
+      nir_def *second_offset =
          nir_iadd_imm(b, second->src[offset_src].ssa, offset_amount);
       nir_instr_rewrite_src(&second->instr, &second->src[offset_src],
                             nir_src_for_ssa(second_offset));
@@ -3529,14 +3529,14 @@ nir_to_tgsi_lower_64bit_load_const(nir_builder *b, nir_load_const_instr *instr)
    nir_builder_instr_insert(b, &first->instr);
    nir_builder_instr_insert(b, &second->instr);
 
-   nir_ssa_def *channels[4] = {
+   nir_def *channels[4] = {
       nir_channel(b, &first->def, 0),
       nir_channel(b, &first->def, 1),
       nir_channel(b, &second->def, 0),
       num_components == 4 ? nir_channel(b, &second->def, 1) : NULL,
    };
-   nir_ssa_def *new = nir_vec(b, channels, num_components);
-   nir_ssa_def_rewrite_uses(&instr->def, new);
+   nir_def *new = nir_vec(b, channels, num_components);
+   nir_def_rewrite_uses(&instr->def, new);
    nir_instr_remove(&instr->instr);
 
    return true;
@@ -3568,7 +3568,7 @@ nir_to_tgsi_lower_64bit_to_vec2(nir_shader *s)
 }
 
 struct ntt_lower_tex_state {
-   nir_ssa_scalar channels[8];
+   nir_scalar channels[8];
    unsigned i;
 };
 
@@ -3582,7 +3582,7 @@ nir_to_tgsi_lower_tex_instr_arg(nir_builder *b,
    if (tex_src < 0)
       return;
 
-   nir_ssa_def *def = instr->src[tex_src].src.ssa;
+   nir_def *def = instr->src[tex_src].src.ssa;
    for (int i = 0; i < def->num_components; i++) {
       s->channels[s->i++] = nir_get_ssa_scalar(def, i);
    }
@@ -3718,12 +3718,12 @@ ntt_lower_atomic_pre_dec_filter(const nir_instr *instr, const void *_data)
            nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_atomic_counter_pre_dec);
 }
 
-static nir_ssa_def *
+static nir_def *
 ntt_lower_atomic_pre_dec_lower(nir_builder *b, nir_instr *instr, void *_data)
 {
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
 
-   nir_ssa_def *old_result = &intr->dest.ssa;
+   nir_def *old_result = &intr->dest.ssa;
    intr->intrinsic = nir_intrinsic_atomic_counter_post_dec;
 
    return nir_iadd_imm(b, old_result, -1);
@@ -3783,7 +3783,7 @@ nir_lower_primid_sysval_to_input_filter(const nir_instr *instr, const void *_dat
            nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_primitive_id);
 }
 
-static nir_ssa_def *
+static nir_def *
 nir_lower_primid_sysval_to_input_lower(nir_builder *b, nir_instr *instr, void *data)
 {
    nir_variable *var = nir_get_variable_with_location(b->shader, nir_var_shader_in,
index 4c9c2dd..d52853c 100644
@@ -136,7 +136,7 @@ static void gather_intrinsic_load_deref_info(const nir_shader *nir,
    assert(var && var->data.mode == nir_var_shader_in);
 
    if (nir->info.stage == MESA_SHADER_FRAGMENT)
-      gather_usage(deref, nir_ssa_def_components_read(&instr->dest.ssa),
+      gather_usage(deref, nir_def_components_read(&instr->dest.ssa),
                    info->input_usage_mask);
 
    switch (nir->info.stage) {
index 384073f..08fb500 100644
@@ -52,7 +52,7 @@
 
 struct ttn_reg_info {
    /** nir register handle containing this TGSI index. */
-   nir_ssa_def *reg;
+   nir_def *reg;
    nir_variable *var;
    /** Offset (in vec4s) from the start of var for this TGSI index. */
    int offset;
@@ -65,12 +65,12 @@ struct ttn_compile {
 
    struct ttn_reg_info *output_regs;
    struct ttn_reg_info *temp_regs;
-   nir_ssa_def **imm_defs;
+   nir_def **imm_defs;
 
    unsigned num_samp_types;
    nir_alu_type *samp_types;
 
-   nir_ssa_def *addr_reg;
+   nir_def *addr_reg;
 
    nir_variable **inputs;
    nir_variable **outputs;
@@ -225,7 +225,7 @@ ttn_emit_declaration(struct ttn_compile *c)
          }
       } else {
          for (i = 0; i < array_size; i++) {
-            nir_ssa_def *reg = nir_decl_reg(b, 4, 32, 0);
+            nir_def *reg = nir_decl_reg(b, 4, 32, 0);
             c->temp_regs[decl->Range.First + i].reg = reg;
             c->temp_regs[decl->Range.First + i].var = NULL;
             c->temp_regs[decl->Range.First + i].offset = 0;
@@ -355,7 +355,7 @@ ttn_emit_declaration(struct ttn_compile *c)
              * for the outputs and emit stores to the real outputs at the end of
              * the shader.
              */
-            nir_ssa_def *reg = nir_decl_reg(b, 4, 32,
+            nir_def *reg = nir_decl_reg(b, 4, 32,
                                             is_array ? array_size : 0);
 
             var->data.mode = nir_var_shader_out;
@@ -481,7 +481,7 @@ ttn_emit_immediate(struct ttn_compile *c)
    nir_builder_instr_insert(b, &load_const->instr);
 }
 
-static nir_ssa_def *
+static nir_def *
 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect);
 
 /* generate either a constant or indirect deref chain for accessing an
@@ -492,7 +492,7 @@ ttn_array_deref(struct ttn_compile *c, nir_variable *var, unsigned offset,
                 struct tgsi_ind_register *indirect)
 {
    nir_deref_instr *deref = nir_build_deref_var(&c->build, var);
-   nir_ssa_def *index = nir_imm_int(&c->build, offset);
+   nir_def *index = nir_imm_int(&c->build, offset);
    if (indirect)
       index = nir_iadd(&c->build, index, ttn_src_for_indirect(c, indirect));
    return nir_build_deref_array(&c->build, deref, index);
@@ -501,17 +501,17 @@ ttn_array_deref(struct ttn_compile *c, nir_variable *var, unsigned offset,
 /* Special case: Turn the frontface varying into a load of the
  * frontface variable, and create the vector as required by TGSI.
  */
-static nir_ssa_def *
+static nir_def *
 ttn_emulate_tgsi_front_face(struct ttn_compile *c)
 {
-   nir_ssa_def *tgsi_frontface[4];
+   nir_def *tgsi_frontface[4];
 
    if (c->cap_face_is_sysval) {
       /* When it's a system value, it should be an integer vector: (F, 0, 0, 1)
        * F is 0xffffffff if front-facing, 0 if not.
        */
 
-      nir_ssa_def *frontface = nir_load_front_face(&c->build, 1);
+      nir_def *frontface = nir_load_front_face(&c->build, 1);
 
       tgsi_frontface[0] = nir_bcsel(&c->build,
                              frontface,
@@ -526,7 +526,7 @@ ttn_emulate_tgsi_front_face(struct ttn_compile *c)
        */
 
       assert(c->input_var_face);
-      nir_ssa_def *frontface = nir_load_var(&c->build, c->input_var_face);
+      nir_def *frontface = nir_load_var(&c->build, c->input_var_face);
 
       tgsi_frontface[0] = nir_bcsel(&c->build,
                              frontface,
@@ -557,7 +557,7 @@ ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
       if (c->temp_regs[index].var) {
          unsigned offset = c->temp_regs[index].offset;
          nir_variable *var = c->temp_regs[index].var;
-         nir_ssa_def *load = nir_load_deref(&c->build,
+         nir_def *load = nir_load_deref(&c->build,
                ttn_array_deref(c, var, offset, indirect));
 
          src = nir_src_for_ssa(load);
@@ -580,7 +580,7 @@ ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
       break;
 
    case TGSI_FILE_SYSTEM_VALUE: {
-      nir_ssa_def *load;
+      nir_def *load;
 
       assert(!indirect);
       assert(!dim);
@@ -707,7 +707,7 @@ ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
          srcn++;
       }
 
-      nir_ssa_def *offset;
+      nir_def *offset;
       if (op == nir_intrinsic_load_ubo) {
          /* UBO loads don't have a base offset. */
          offset = nir_imm_int(b, index);
@@ -757,7 +757,7 @@ ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
    return src;
 }
 
-static nir_ssa_def *
+static nir_def *
 ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect)
 {
    nir_builder *b = &c->build;
@@ -789,7 +789,7 @@ ttn_get_var(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
    return NULL;
 }
 
-static nir_ssa_def *
+static nir_def *
 ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc,
             int src_idx)
 {
@@ -837,7 +837,7 @@ ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc,
    src.swizzle[2] = tgsi_src->SwizzleZ;
    src.swizzle[3] = tgsi_src->SwizzleW;
 
-   nir_ssa_def *def = nir_mov_alu(b, src, 4);
+   nir_def *def = nir_mov_alu(b, src, 4);
 
    if (tgsi_type_is_64bit(tgsi_src_type))
       def = nir_bitcast_vector(b, def, 64);
@@ -857,10 +857,10 @@ ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc,
    return def;
 }
 
-static nir_ssa_def *
-ttn_alu(nir_builder *b, nir_op op, unsigned dest_bitsize, nir_ssa_def **src)
+static nir_def *
+ttn_alu(nir_builder *b, nir_op op, unsigned dest_bitsize, nir_def **src)
 {
-   nir_ssa_def *def = nir_build_alu_src_arr(b, op, src);
+   nir_def *def = nir_build_alu_src_arr(b, op, src);
    if (def->bit_size == 1)
       def = nir_ineg(b, nir_b2iN(b, def, dest_bitsize));
    assert(def->bit_size == dest_bitsize);
@@ -887,10 +887,10 @@ ttn_alu(nir_builder *b, nir_op op, unsigned dest_bitsize, nir_ssa_def **src)
  *  dst.z = 2^{src.x}
  *  dst.w = 1.0
  */
-static nir_ssa_def *
-ttn_exp(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ttn_exp(nir_builder *b, nir_def **src)
 {
-   nir_ssa_def *srcx = ttn_channel(b, src[0], X);
+   nir_def *srcx = ttn_channel(b, src[0], X);
 
    return nir_vec4(b, nir_fexp2(b, nir_ffloor(b, srcx)),
                       nir_fsub(b, srcx, nir_ffloor(b, srcx)),
@@ -904,11 +904,11 @@ ttn_exp(nir_builder *b, nir_ssa_def **src)
  *  dst.z = \log_2{|src.x|}
  *  dst.w = 1.0
  */
-static nir_ssa_def *
-ttn_log(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ttn_log(nir_builder *b, nir_def **src)
 {
-   nir_ssa_def *abs_srcx = nir_fabs(b, ttn_channel(b, src[0], X));
-   nir_ssa_def *log2 = nir_flog2(b, abs_srcx);
+   nir_def *abs_srcx = nir_fabs(b, ttn_channel(b, src[0], X));
+   nir_def *log2 = nir_flog2(b, abs_srcx);
 
    return nir_vec4(b, nir_ffloor(b, log2),
                       nir_fdiv(b, abs_srcx, nir_fexp2(b, nir_ffloor(b, log2))),
@@ -922,8 +922,8 @@ ttn_log(nir_builder *b, nir_ssa_def **src)
  *   dst.z = src0.z
  *   dst.w = src1.w
  */
-static nir_ssa_def *
-ttn_dst(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ttn_dst(nir_builder *b, nir_def **src)
 {
    return nir_vec4(b, nir_imm_float(b, 1.0),
                       nir_fmul(b, ttn_channel(b, src[0], Y),
@@ -938,16 +938,16 @@ ttn_dst(nir_builder *b, nir_ssa_def **src)
  *  dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0)} : 0
  *  dst.w = 1.0
  */
-static nir_ssa_def *
-ttn_lit(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ttn_lit(nir_builder *b, nir_def **src)
 {
-   nir_ssa_def *src0_y = ttn_channel(b, src[0], Y);
-   nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, ttn_channel(b, src[0], W),
+   nir_def *src0_y = ttn_channel(b, src[0], Y);
+   nir_def *wclamp = nir_fmax(b, nir_fmin(b, ttn_channel(b, src[0], W),
                                               nir_imm_float(b, 128.0)),
                                   nir_imm_float(b, -128.0));
-   nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
+   nir_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
                                wclamp);
-   nir_ssa_def *z = nir_bcsel(b, nir_flt_imm(b, ttn_channel(b, src[0], X), 0.0),
+   nir_def *z = nir_bcsel(b, nir_flt_imm(b, ttn_channel(b, src[0], X), 0.0),
                                  nir_imm_float(b, 0.0), pow);
 
    return nir_vec4(b, nir_imm_float(b, 1.0),
@@ -970,11 +970,11 @@ ttn_kill(nir_builder *b)
 }
 
 static void
-ttn_kill_if(nir_builder *b, nir_ssa_def **src)
+ttn_kill_if(nir_builder *b, nir_def **src)
 {
    /* flt must be exact, because NaN shouldn't discard. (apps rely on this) */
    b->exact = true;
-   nir_ssa_def *cmp = nir_bany(b, nir_flt_imm(b, src[0], 0.0));
+   nir_def *cmp = nir_bany(b, nir_flt_imm(b, src[0], 0.0));
    b->exact = false;
 
    nir_discard_if(b, cmp);
@@ -1167,8 +1167,8 @@ add_ssbo_var(struct ttn_compile *c, int binding)
    }
 }
 
-static nir_ssa_def *
-ttn_tex(struct ttn_compile *c, nir_ssa_def **src)
+static nir_def *
+ttn_tex(struct ttn_compile *c, nir_def **src)
 {
    nir_builder *b = &c->build;
    struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
@@ -1416,8 +1416,8 @@ ttn_tex(struct ttn_compile *c, nir_ssa_def **src)
  *
  * dst.xyz map to NIR txs opcode, and dst.w maps to query_levels
  */
-static nir_ssa_def *
-ttn_txq(struct ttn_compile *c, nir_ssa_def **src)
+static nir_def *
+ttn_txq(struct ttn_compile *c, nir_def **src)
 {
    nir_builder *b = &c->build;
    struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
@@ -1504,8 +1504,8 @@ get_mem_qualifier(struct tgsi_full_instruction *tgsi_inst)
    return access;
 }
 
-static nir_ssa_def *
-ttn_mem(struct ttn_compile *c, nir_ssa_def **src)
+static nir_def *
+ttn_mem(struct ttn_compile *c, nir_def **src)
 {
    nir_builder *b = &c->build;
    struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
@@ -1600,7 +1600,7 @@ ttn_mem(struct ttn_compile *c, nir_ssa_def **src)
       if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS) {
          instr->src[2] = nir_src_for_ssa(ttn_channel(b, src[addr_src_index], W));
       } else {
-         instr->src[2] = nir_src_for_ssa(nir_ssa_undef(b, 1, 32));
+         instr->src[2] = nir_src_for_ssa(nir_undef(b, 1, 32));
       }
 
       if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_LOAD) {
@@ -1824,7 +1824,7 @@ ttn_emit_instruction(struct ttn_compile *c)
    if (tgsi_op == TGSI_OPCODE_END)
       return;
 
-   nir_ssa_def *src[TGSI_FULL_MAX_SRC_REGISTERS];
+   nir_def *src[TGSI_FULL_MAX_SRC_REGISTERS];
    for (i = 0; i < tgsi_inst->Instruction.NumSrcRegs; i++) {
       src[i] = ttn_get_src(c, &tgsi_inst->Src[i], i);
    }
@@ -1840,7 +1840,7 @@ ttn_emit_instruction(struct ttn_compile *c)
    /* If this is non-NULL after the switch, it will be written to the
     * corresponding register/variable/etc. afterward.
     */
-   nir_ssa_def *dst = NULL;
+   nir_def *dst = NULL;
 
    switch (tgsi_op) {
    case TGSI_OPCODE_RSQ:
@@ -2044,7 +2044,7 @@ ttn_emit_instruction(struct ttn_compile *c)
                       tgsi_dst->Register.WriteMask);
    } else {
       unsigned index = tgsi_dst->Register.Index;
-      nir_ssa_def *reg = NULL;
+      nir_def *reg = NULL;
       unsigned base_offset = 0;
 
       if (tgsi_dst->Register.File == TGSI_FILE_TEMPORARY) {
@@ -2062,7 +2062,7 @@ ttn_emit_instruction(struct ttn_compile *c)
       }
 
       if (tgsi_dst->Register.Indirect) {
-         nir_ssa_def *indirect = ttn_src_for_indirect(c, &tgsi_dst->Indirect);
+         nir_def *indirect = ttn_src_for_indirect(c, &tgsi_dst->Indirect);
          nir_store_reg_indirect(b, dst, reg, indirect, .base = base_offset,
                                 .write_mask = tgsi_dst->Register.WriteMask);
       } else {
@@ -2090,7 +2090,7 @@ ttn_add_output_stores(struct ttn_compile *c)
       if (!var)
          continue;
 
-      nir_ssa_def *store_value =
+      nir_def *store_value =
          nir_build_load_reg(b, 4, 32, c->output_regs[i].reg,
                             .base = c->output_regs[i].offset);
 
@@ -2130,7 +2130,7 @@ ttn_add_output_stores(struct ttn_compile *c)
             continue;
 
          nir_deref_instr *deref = nir_build_deref_var(b, c->clipdist);
-         nir_ssa_def *zero = nir_imm_zero(b, 1, 32);
+         nir_def *zero = nir_imm_zero(b, 1, 32);
          unsigned offset = var->data.location == VARYING_SLOT_CLIP_DIST1 ? 4 : 0;
          unsigned size = var->data.location == VARYING_SLOT_CLIP_DIST1 ?
                           b->shader->info.clip_distance_array_size :
@@ -2138,7 +2138,7 @@ ttn_add_output_stores(struct ttn_compile *c)
          for (unsigned i = offset; i < size; i++) {
             /* deref the array member and store each component */
             nir_deref_instr *component_deref = nir_build_deref_array_imm(b, deref, i);
-            nir_ssa_def *val = zero;
+            nir_def *val = zero;
             if (store_mask & BITFIELD_BIT(i - offset))
                val = nir_channel(b, store_value, i - offset);
             nir_store_deref(b, component_deref, val, 0x1);
@@ -2333,7 +2333,7 @@ ttn_compile_init(const void *tgsi_tokens,
                                   scan.file_max[TGSI_FILE_OUTPUT] + 1);
    c->temp_regs = rzalloc_array(c, struct ttn_reg_info,
                                 scan.file_max[TGSI_FILE_TEMPORARY] + 1);
-   c->imm_defs = rzalloc_array(c, nir_ssa_def *,
+   c->imm_defs = rzalloc_array(c, nir_def *,
                                scan.file_max[TGSI_FILE_IMMEDIATE] + 1);
 
    c->num_samp_types = scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
@@ -2442,7 +2442,7 @@ lower_clipdistance_to_array(nir_shader *nir)
       nir_builder b = nir_builder_at(nir_before_block(nir_start_block(impl)));
       /* create a new deref for the arrayed clipdistance variable at the start of the function */
       nir_deref_instr *clipdist_deref = nir_build_deref_var(&b, dist0);
-      nir_ssa_def *zero = nir_imm_zero(&b, 1, 32);
+      nir_def *zero = nir_imm_zero(&b, 1, 32);
       nir_foreach_block(block, impl) {
          nir_foreach_instr_safe(instr, block) {
             /* filter through until a clipdistance store is reached */
@@ -2462,7 +2462,7 @@ lower_clipdistance_to_array(nir_shader *nir)
             for (unsigned i = 0; i < nir->info.clip_distance_array_size; i++) {
                /* deref the array member and store each component */
                nir_deref_instr *component_deref = nir_build_deref_array_imm(&b, clipdist_deref, i);
-               nir_ssa_def *val = zero;
+               nir_def *val = zero;
                if (wrmask & BITFIELD_BIT(i - offset))
                   val = nir_channel(&b, intr->src[1].ssa, i - offset);
                nir_store_deref(&b, component_deref, val, 0x1);
index 258f67c..d2c5f62 100644
  * indices mapped 1:1 with the binding table. So we want the bindless handle
  * (u0_u1, index) which is encoded in NIR as (0, index).
  */
-static nir_ssa_def *
-index_to_handle(nir_builder *b, nir_ssa_def *index)
+static nir_def *
+index_to_handle(nir_builder *b, nir_def *index)
 {
-   nir_ssa_def *table = nir_imm_int(b, 0);
-   nir_ssa_def *offset = nir_imul_imm(b, index, AGX_TEXTURE_DESC_STRIDE);
+   nir_def *table = nir_imm_int(b, 0);
+   nir_def *offset = nir_imul_imm(b, index, AGX_TEXTURE_DESC_STRIDE);
 
    return nir_vec2(b, table, offset);
 }
@@ -57,8 +57,8 @@ lower(nir_builder *b, nir_instr *instr, void *data)
       }
 #undef CASE
 
-      nir_ssa_def *index = intr->src[0].ssa;
-      nir_ssa_scalar index_scalar = nir_ssa_scalar_resolved(index, 0);
+      nir_def *index = intr->src[0].ssa;
+      nir_scalar index_scalar = nir_scalar_resolved(index, 0);
 
       /* Remap according to the driver layout */
       unsigned offset = BITSET_LAST_BIT(b->shader->info.textures_used);
@@ -73,11 +73,11 @@ lower(nir_builder *b, nir_instr *instr, void *data)
       /* If we can determine statically that the image fits in texture state
        * registers, avoid lowering to bindless access.
        */
-      if (nir_ssa_scalar_is_const(index_scalar)) {
-         unsigned idx = (nir_ssa_scalar_as_uint(index_scalar) * 2) + offset;
+      if (nir_scalar_is_const(index_scalar)) {
+         unsigned idx = (nir_scalar_as_uint(index_scalar) * 2) + offset;
 
          if (idx < AGX_NUM_TEXTURE_STATE_REGS) {
-            nir_src_rewrite_ssa(&intr->src[0], nir_imm_intN_t(b, idx, 16));
+            nir_src_rewrite(&intr->src[0], nir_imm_intN_t(b, idx, 16));
             return true;
          }
       }
@@ -87,7 +87,7 @@ lower(nir_builder *b, nir_instr *instr, void *data)
       *internal_bindless = true;
 
       index = nir_iadd_imm(b, nir_imul_imm(b, index, 2), offset);
-      nir_src_rewrite_ssa(&intr->src[0], index_to_handle(b, index));
+      nir_src_rewrite(&intr->src[0], index_to_handle(b, index));
    } else if (instr->type == nir_instr_type_tex) {
       nir_tex_instr *tex = nir_instr_as_tex(instr);
 
@@ -103,7 +103,7 @@ lower(nir_builder *b, nir_instr *instr, void *data)
          return false;
 
       /* Otherwise, lower to bindless. Could be optimized. */
-      nir_ssa_def *index = nir_steal_tex_src(tex, nir_tex_src_texture_offset);
+      nir_def *index = nir_steal_tex_src(tex, nir_tex_src_texture_offset);
       if (!index)
          index = nir_imm_int(b, tex->texture_index);
 
index abde5f7..75c2cc2 100644
@@ -39,7 +39,7 @@ struct state {
    struct table_state tables[AGX_NUM_SYSVAL_TABLES];
 };
 
-static nir_ssa_def *
+static nir_def *
 load_sysval(nir_builder *b, unsigned dim, unsigned bitsize, uint8_t table,
             uint16_t offset)
 {
@@ -48,40 +48,40 @@ load_sysval(nir_builder *b, unsigned dim, unsigned bitsize, uint8_t table,
    return nir_load_preamble(b, dim, bitsize, .base = packed);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_sysval_root(nir_builder *b, unsigned dim, unsigned bitsize, void *ptr)
 {
    return load_sysval(b, dim, bitsize, AGX_SYSVAL_TABLE_ROOT, (uintptr_t)ptr);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_sysval_indirect(nir_builder *b, unsigned dim, unsigned bitsize,
-                     uint8_t table, void *base, nir_ssa_def *offset_el)
+                     uint8_t table, void *base, nir_def *offset_el)
 {
-   nir_ssa_scalar scalar = {offset_el, 0};
+   nir_scalar scalar = {offset_el, 0};
    unsigned stride = (dim * bitsize) / 8;
 
-   if (nir_ssa_scalar_is_const(scalar)) {
+   if (nir_scalar_is_const(scalar)) {
       /* Load the sysval directly */
       return load_sysval(
          b, dim, bitsize, table,
-         (uintptr_t)base + (nir_ssa_scalar_as_uint(scalar) * stride));
+         (uintptr_t)base + (nir_scalar_as_uint(scalar) * stride));
    } else {
       /* Load the base address of the table */
       struct agx_draw_uniforms *u = NULL;
-      nir_ssa_def *table_base = load_sysval_root(b, 1, 64, &u->tables[table]);
+      nir_def *table_base = load_sysval_root(b, 1, 64, &u->tables[table]);
 
       /* Load address of the array in the table */
-      nir_ssa_def *array_base = nir_iadd_imm(b, table_base, (uintptr_t)base);
+      nir_def *array_base = nir_iadd_imm(b, table_base, (uintptr_t)base);
 
       /* Index into the table and load */
-      nir_ssa_def *address = nir_iadd(
+      nir_def *address = nir_iadd(
          b, array_base, nir_u2u64(b, nir_imul_imm(b, offset_el, stride)));
       return nir_load_global_constant(b, address, bitsize / 8, dim, bitsize);
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
 {
    struct agx_draw_uniforms *u = NULL;
@@ -138,7 +138,7 @@ lower_sysvals(nir_builder *b, nir_instr *instr, void *data)
 {
    b->cursor = nir_before_instr(instr);
    nir_dest *dest;
-   nir_ssa_def *replacement = NULL;
+   nir_def *replacement = NULL;
 
    if (instr->type == nir_instr_type_intrinsic) {
       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
@@ -165,7 +165,7 @@ lower_sysvals(nir_builder *b, nir_instr *instr, void *data)
    }
 
    if (replacement != NULL) {
-      nir_ssa_def_rewrite_uses(&dest->ssa, replacement);
+      nir_def_rewrite_uses(&dest->ssa, replacement);
       return true;
    } else {
       return false;
index 4debc9e..5de3c24 100644
@@ -281,7 +281,7 @@ agx_primitives_update_direct(struct agx_context *ctx,
  *
  * XXX: How do quads get tessellated?
  */
-static nir_ssa_def *
+static nir_def *
 primitive_fits(nir_builder *b, struct agx_xfb_key *key)
 {
    /* Get the number of vertices per primitive in the current mode, usually just
@@ -293,7 +293,7 @@ primitive_fits(nir_builder *b, struct agx_xfb_key *key)
       verts_per_prim = 6;
 
    /* Get the ID for this invocation */
-   nir_ssa_def *id = nir_load_vertex_id_zero_base(b);
+   nir_def *id = nir_load_vertex_id_zero_base(b);
 
    /* Figure out the ID for the first vertex of the next primitive. Since
     * transform feedback buffers are tightly packed, that's one byte after the
@@ -302,16 +302,16 @@ primitive_fits(nir_builder *b, struct agx_xfb_key *key)
     *
     *    (id - (id % prim size)) + prim size
     */
-   nir_ssa_def *rem = nir_umod_imm(b, id, verts_per_prim);
-   nir_ssa_def *next_id = nir_iadd_imm(b, nir_isub(b, id, rem), verts_per_prim);
+   nir_def *rem = nir_umod_imm(b, id, verts_per_prim);
+   nir_def *next_id = nir_iadd_imm(b, nir_isub(b, id, rem), verts_per_prim);
 
    /* Figure out where that vertex will land */
-   nir_ssa_def *index = nir_iadd(
+   nir_def *index = nir_iadd(
       b, nir_imul(b, nir_load_instance_id(b), nir_load_num_vertices(b)),
       next_id);
 
    /* Now check for overflow in each written buffer */
-   nir_ssa_def *all_fits = nir_imm_true(b);
+   nir_def *all_fits = nir_imm_true(b);
 
    u_foreach_bit(buffer, b->shader->xfb_info->buffers_written) {
       uint16_t stride = b->shader->info.xfb_stride[buffer] * 4;
@@ -320,10 +320,10 @@ primitive_fits(nir_builder *b, struct agx_xfb_key *key)
       /* For this primitive to fit, the next primitive cannot start after the
        * end of the transform feedback buffer.
        */
-      nir_ssa_def *end_offset = nir_imul_imm(b, index, stride);
+      nir_def *end_offset = nir_imul_imm(b, index, stride);
 
       /* Check whether that will remain in bounds */
-      nir_ssa_def *fits =
+      nir_def *fits =
          nir_uge(b, nir_load_xfb_size(b, .base = buffer), end_offset);
 
       /* Accumulate */
@@ -373,17 +373,17 @@ lower_xfb_output(nir_builder *b, nir_intrinsic_instr *intr,
 
    uint16_t offset = offset_words * 4;
 
-   nir_ssa_def *index = nir_iadd(
+   nir_def *index = nir_iadd(
       b, nir_imul(b, nir_load_instance_id(b), nir_load_num_vertices(b)),
       nir_load_vertex_id_zero_base(b));
 
-   nir_ssa_def *xfb_offset =
+   nir_def *xfb_offset =
       nir_iadd_imm(b, nir_imul_imm(b, index, stride), offset);
 
-   nir_ssa_def *buf = nir_load_xfb_address(b, 64, .base = buffer);
-   nir_ssa_def *addr = nir_iadd(b, buf, nir_u2u64(b, xfb_offset));
+   nir_def *buf = nir_load_xfb_address(b, 64, .base = buffer);
+   nir_def *addr = nir_iadd(b, buf, nir_u2u64(b, xfb_offset));
 
-   nir_ssa_def *value = nir_channels(
+   nir_def *value = nir_channels(
       b, intr->src[0].ssa, BITFIELD_MASK(num_components) << start_component);
    nir_store_global(b, addr, 4, value, BITFIELD_MASK(num_components));
 }
@@ -437,14 +437,14 @@ lower_xfb_intrinsics(struct nir_builder *b, nir_instr *instr, void *data)
    switch (intr->intrinsic) {
    /* XXX: Rename to "xfb index" to avoid the clash */
    case nir_intrinsic_load_vertex_id_zero_base: {
-      nir_ssa_def *id = nir_load_vertex_id(b);
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, id);
+      nir_def *id = nir_load_vertex_id(b);
+      nir_def_rewrite_uses(&intr->dest.ssa, id);
       return true;
    }
 
    case nir_intrinsic_load_vertex_id: {
       /* Get the raw invocation ID */
-      nir_ssa_def *id = nir_load_vertex_id(b);
+      nir_def *id = nir_load_vertex_id(b);
 
       /* Tessellate by primitive mode */
       if (key->mode == MESA_PRIM_LINE_STRIP ||
@@ -452,9 +452,9 @@ lower_xfb_intrinsics(struct nir_builder *b, nir_instr *instr, void *data)
          /* The last vertex is special for a loop. Check if that's what we're
           * dealing with.
           */
-         nir_ssa_def *num_invocations =
+         nir_def *num_invocations =
             nir_imul_imm(b, nir_load_num_vertices(b), 2);
-         nir_ssa_def *last_vertex =
+         nir_def *last_vertex =
             nir_ieq(b, id, nir_iadd_imm(b, num_invocations, -1));
 
          /* (0, 1), (1, 2) */
@@ -470,24 +470,24 @@ lower_xfb_intrinsics(struct nir_builder *b, nir_instr *instr, void *data)
           * First: (0, 1, 2), (1, 3, 2), (2, 3, 4).
           * Last:  (0, 1, 2), (2, 1, 3), (2, 3, 4).
           */
-         nir_ssa_def *prim = nir_udiv_imm(b, id, 3);
-         nir_ssa_def *rem = nir_umod_imm(b, id, 3);
+         nir_def *prim = nir_udiv_imm(b, id, 3);
+         nir_def *rem = nir_umod_imm(b, id, 3);
 
          unsigned pv = key->flatshade_first ? 0 : 2;
 
          /* Swap the two non-provoking vertices in odd triangles */
-         nir_ssa_def *even = nir_ieq_imm(b, nir_iand_imm(b, prim, 1), 0);
-         nir_ssa_def *is_provoking = nir_ieq_imm(b, rem, pv);
-         nir_ssa_def *no_swap = nir_ior(b, is_provoking, even);
-         nir_ssa_def *swapped = nir_isub_imm(b, 3 - pv, rem);
-         nir_ssa_def *off = nir_bcsel(b, no_swap, rem, swapped);
+         nir_def *even = nir_ieq_imm(b, nir_iand_imm(b, prim, 1), 0);
+         nir_def *is_provoking = nir_ieq_imm(b, rem, pv);
+         nir_def *no_swap = nir_ior(b, is_provoking, even);
+         nir_def *swapped = nir_isub_imm(b, 3 - pv, rem);
+         nir_def *off = nir_bcsel(b, no_swap, rem, swapped);
 
          /* Pull the (maybe swapped) vertex from the corresponding primitive */
          id = nir_iadd(b, prim, off);
       } else if (key->mode == MESA_PRIM_TRIANGLE_FAN) {
          /* (0, 1, 2), (0, 2, 3) */
-         nir_ssa_def *prim = nir_udiv_imm(b, id, 3);
-         nir_ssa_def *rem = nir_umod_imm(b, id, 3);
+         nir_def *prim = nir_udiv_imm(b, id, 3);
+         nir_def *rem = nir_umod_imm(b, id, 3);
 
          id = nir_bcsel(b, nir_ieq_imm(b, rem, 0), nir_imm_int(b, 0),
                         nir_iadd(b, prim, rem));
@@ -498,9 +498,9 @@ lower_xfb_intrinsics(struct nir_builder *b, nir_instr *instr, void *data)
           */
          bool strips = key->mode == MESA_PRIM_QUAD_STRIP;
 
-         nir_ssa_def *prim = nir_udiv_imm(b, id, 6);
-         nir_ssa_def *rem = nir_umod_imm(b, id, 6);
-         nir_ssa_def *base = nir_imul_imm(b, prim, strips ? 2 : 4);
+         nir_def *prim = nir_udiv_imm(b, id, 6);
+         nir_def *rem = nir_umod_imm(b, id, 6);
+         nir_def *base = nir_imul_imm(b, prim, strips ? 2 : 4);
 
          /* Quads:       [0, 1, 3, 3, 1, 2]
           * Quad strips: [0, 1, 3, 0, 2, 3]
@@ -510,7 +510,7 @@ lower_xfb_intrinsics(struct nir_builder *b, nir_instr *instr, void *data)
          uint32_t order = strips ? order_strips : order_quads;
 
          /* Index out of the bitpacked array */
-         nir_ssa_def *offset = nir_iand_imm(
+         nir_def *offset = nir_iand_imm(
             b, nir_ushr(b, nir_imm_int(b, order), nir_imul_imm(b, rem, 4)),
             0xF);
 
@@ -524,16 +524,16 @@ lower_xfb_intrinsics(struct nir_builder *b, nir_instr *instr, void *data)
        * vertex ID is just the index as-is.
        */
       if (key->index_size) {
-         nir_ssa_def *index_buffer = nir_load_xfb_index_buffer(b, 64);
-         nir_ssa_def *offset = nir_imul_imm(b, id, key->index_size);
-         nir_ssa_def *address = nir_iadd(b, index_buffer, nir_u2u64(b, offset));
-         nir_ssa_def *index = nir_load_global_constant(
-            b, address, key->index_size, 1, key->index_size * 8);
+         nir_def *index_buffer = nir_load_xfb_index_buffer(b, 64);
+         nir_def *offset = nir_imul_imm(b, id, key->index_size);
+         nir_def *address = nir_iadd(b, index_buffer, nir_u2u64(b, offset));
+         nir_def *index = nir_load_global_constant(b, address, key->index_size,
+                                                   1, key->index_size * 8);
 
          id = nir_u2uN(b, index, id->bit_size);
       }
 
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, id);
+      nir_def_rewrite_uses(&intr->dest.ssa, id);
       return true;
    }
 
index fa98c39..4480d12 100644
@@ -243,19 +243,19 @@ get_new_program_id(struct crocus_screen *screen)
    return p_atomic_inc_return(&screen->program_id);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_aoa_deref_offset(nir_builder *b,
                      nir_deref_instr *deref,
                      unsigned elem_size)
 {
    unsigned array_size = elem_size;
-   nir_ssa_def *offset = nir_imm_int(b, 0);
+   nir_def *offset = nir_imm_int(b, 0);
 
    while (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
 
       /* This level's element size is the previous level's array size */
-      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
       assert(deref->arr.index.ssa);
       offset = nir_iadd(b, offset,
                         nir_imul_imm(b, index, array_size));
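
get_aoa_deref_offset() flattens an array-of-arrays deref into one linear
offset: each level contributes index * (accumulated size of the levels below),
with the accumulated size growing as the walk moves outward toward the
variable. A minimal sketch of the same walk over plain index/length arrays,
innermost level first:

   /* offset = sum(indices[i] * stride_i), strides built innermost-out */
   static unsigned
   aoa_offset(const unsigned *indices, const unsigned *lengths,
              unsigned levels, unsigned elem_size)
   {
      unsigned array_size = elem_size;
      unsigned offset = 0;
      for (unsigned i = 0; i < levels; i++) {
         /* the array size accumulated so far is this level's element size */
         offset += indices[i] * array_size;
         array_size *= lengths[i];
      }
      return offset;
   }
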
@@ -301,7 +301,7 @@ crocus_lower_storage_image_derefs(nir_shader *nir)
             nir_variable *var = nir_deref_instr_get_variable(deref);
 
             b.cursor = nir_before_instr(&intrin->instr);
-            nir_ssa_def *index =
+            nir_def *index =
                nir_iadd_imm(&b, get_aoa_deref_offset(&b, deref, 1),
                             var->data.driver_location);
             nir_rewrite_image_intrinsic(intrin, index, false);
@@ -451,8 +451,8 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
 
    nir_builder b = nir_builder_at(nir_before_block(nir_start_block(impl)));
 
-   nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
-   nir_ssa_def *temp_const_ubo_name = NULL;
+   nir_def *temp_ubo_name = nir_undef(&b, 1, 32);
+   nir_def *temp_const_ubo_name = NULL;
 
    /* Turn system value intrinsics into uniforms */
    nir_foreach_block(block, impl) {
@@ -461,13 +461,13 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
             continue;
 
          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-         nir_ssa_def *offset;
+         nir_def *offset;
 
          switch (intrin->intrinsic) {
          case nir_intrinsic_load_base_workgroup_id: {
             /* GL doesn't have a concept of base workgroup */
             b.cursor = nir_instr_remove(&intrin->instr);
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+            nir_def_rewrite_uses(&intrin->dest.ssa,
                                      nir_imm_zero(&b, 3, 32));
             continue;
          }
@@ -476,7 +476,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
              * data and not cbuf0 which gallium uploads for us.
              */
             b.cursor = nir_before_instr(instr);
-            nir_ssa_def *offset =
+            nir_def *offset =
                nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
                             nir_intrinsic_base(intrin));
 
@@ -496,7 +496,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
                               intrin->dest.ssa.bit_size);
             nir_builder_instr_insert(&b, &load_ubo->instr);
 
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+            nir_def_rewrite_uses(&intrin->dest.ssa,
                                      &load_ubo->dest.ssa);
             nir_instr_remove(&intrin->instr);
             continue;
@@ -633,7 +633,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
          nir_intrinsic_set_range(load, ~0);
          nir_ssa_dest_init(&load->instr, &load->dest, comps, 32);
          nir_builder_instr_insert(&b, &load->instr);
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+         nir_def_rewrite_uses(&intrin->dest.ssa,
                                   &load->dest.ssa);
          nir_instr_remove(instr);
       }
@@ -670,7 +670,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
             b.cursor = nir_before_instr(instr);
 
             if (load->src[0].ssa == temp_ubo_name) {
-               nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
+               nir_def *imm = nir_imm_int(&b, sysval_cbuf_index);
                nir_instr_rewrite_src(instr, &load->src[0],
                                      nir_src_for_ssa(imm));
             }
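
The temp_ubo_name placeholder used throughout this function is worth calling
out: the binding index of the system-value UBO is not known while the loads
are being emitted, so they are issued against an undef def and patched to the
real constant-buffer index afterwards. Condensed from the hunks above:

   /* before: emit sysval loads against a placeholder index */
   nir_def *temp_ubo_name = nir_undef(&b, 1, 32);

   /* after: rewrite the placeholder once sysval_cbuf_index is known */
   if (load->src[0].ssa == temp_ubo_name) {
      nir_def *imm = nir_imm_int(&b, sysval_cbuf_index);
      nir_instr_rewrite_src(instr, &load->src[0], nir_src_for_ssa(imm));
   }
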
@@ -774,7 +774,7 @@ rewrite_src_with_bti(nir_builder *b, struct crocus_binding_table *bt,
    assert(bt->sizes[group] > 0);
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *bti;
+   nir_def *bti;
    if (nir_src_is_const(*src)) {
       uint32_t index = nir_src_as_uint(*src);
       bti = nir_imm_intN_t(b, crocus_group_index_to_bti(bt, group, index),
@@ -984,13 +984,13 @@ crocus_setup_binding_table(const struct intel_device_info *devinfo,
                enum gfx6_gather_sampler_wa wa = key->gfx6_gather_wa[tex->texture_index];
                int width = (wa & WA_8BIT) ? 8 : 16;
 
-               nir_ssa_def *val = nir_fmul_imm(&b, &tex->dest.ssa, (1 << width) - 1);
+               nir_def *val = nir_fmul_imm(&b, &tex->dest.ssa, (1 << width) - 1);
                val = nir_f2u32(&b, val);
                if (wa & WA_SIGN) {
                   val = nir_ishl_imm(&b, val, 32 - width);
                   val = nir_ishr_imm(&b, val, 32 - width);
                }
-               nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, val, val->parent_instr);
+               nir_def_rewrite_uses_after(&tex->dest.ssa, val, val->parent_instr);
             }
 
             tex->texture_index =
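
The gfx6 gather workaround above rescales the texel (multiply by
2^width - 1, convert to integer) and, for signed formats, sign-extends with a
shift-left / arithmetic-shift-right pair. The shift idiom as a worked C
example, assuming width == 8:

   #include <stdint.h>

   static int32_t
   sign_extend_8(uint32_t val)
   {
      val <<= 24;                /* move bit 7 up to bit 31 */
      return (int32_t)val >> 24; /* arithmetic shift replicates the sign bit */
   }

So sign_extend_8(0xFF) == -1, matching the ishl_imm/ishr_imm pair.
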
index 6d49116..d18cff7 100644
@@ -661,14 +661,14 @@ get_stencil_resolve_fs(struct d3d12_context *ctx, bool no_flip)
    sampler->data.binding = 0;
    sampler->data.explicit_binding = true;
 
-   nir_ssa_def *tex_deref = &nir_build_deref_var(&b, sampler)->dest.ssa;
+   nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->dest.ssa;
 
    nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                               glsl_vec4_type(), "pos");
    pos_in->data.location = VARYING_SLOT_POS; // VARYING_SLOT_VAR0?
-   nir_ssa_def *pos = nir_load_var(&b, pos_in);
+   nir_def *pos = nir_load_var(&b, pos_in);
 
-   nir_ssa_def *pos_src;
+   nir_def *pos_src;
 
    if (no_flip)
       pos_src = pos;
index 61ab355..089a47a 100644
@@ -49,24 +49,24 @@ get_indirect_draw_base_vertex_transform(const nir_shader_compiler_options *optio
    input_ssbo->data.driver_location = 0;
    output_ssbo->data.driver_location = 1;
 
-   nir_ssa_def *draw_id = nir_channel(&b, nir_load_global_invocation_id(&b, 32), 0);
+   nir_def *draw_id = nir_channel(&b, nir_load_global_invocation_id(&b, 32), 0);
    if (args->base_vertex.dynamic_count) {
-      nir_ssa_def *count = nir_load_ubo(&b, 1, 32, nir_imm_int(&b, 1), nir_imm_int(&b, 0),
+      nir_def *count = nir_load_ubo(&b, 1, 32, nir_imm_int(&b, 1), nir_imm_int(&b, 0),
          (gl_access_qualifier)0, 4, 0, 0, 4);
       nir_push_if(&b, nir_ilt(&b, draw_id, count));
    }
 
    nir_variable *stride_ubo = NULL;
-   nir_ssa_def *in_stride_offset_and_base_drawid = d3d12_get_state_var(&b, D3D12_STATE_VAR_TRANSFORM_GENERIC0, "d3d12_Stride",
+   nir_def *in_stride_offset_and_base_drawid = d3d12_get_state_var(&b, D3D12_STATE_VAR_TRANSFORM_GENERIC0, "d3d12_Stride",
       glsl_uvec4_type(), &stride_ubo);
-   nir_ssa_def *in_offset = nir_iadd(&b, nir_channel(&b, in_stride_offset_and_base_drawid, 1),
+   nir_def *in_offset = nir_iadd(&b, nir_channel(&b, in_stride_offset_and_base_drawid, 1),
       nir_imul(&b, nir_channel(&b, in_stride_offset_and_base_drawid, 0), draw_id));
-   nir_ssa_def *in_data0 = nir_load_ssbo(&b, 4, 32, nir_imm_int(&b, 0), in_offset, (gl_access_qualifier)0, 4, 0);
+   nir_def *in_data0 = nir_load_ssbo(&b, 4, 32, nir_imm_int(&b, 0), in_offset, (gl_access_qualifier)0, 4, 0);
 
-   nir_ssa_def *in_data1 = NULL;
-   nir_ssa_def *base_vertex = NULL, *base_instance = NULL;
+   nir_def *in_data1 = NULL;
+   nir_def *base_vertex = NULL, *base_instance = NULL;
    if (args->base_vertex.indexed) {
-      nir_ssa_def *in_offset1 = nir_iadd(&b, in_offset, nir_imm_int(&b, 16));
+      nir_def *in_offset1 = nir_iadd(&b, in_offset, nir_imm_int(&b, 16));
       in_data1 = nir_load_ssbo(&b, 1, 32, nir_imm_int(&b, 0), in_offset1, (gl_access_qualifier)0, 4, 0);
       base_vertex = nir_channel(&b, in_data0, 3);
       base_instance = in_data1;
@@ -78,11 +78,11 @@ get_indirect_draw_base_vertex_transform(const nir_shader_compiler_options *optio
    /* 4 additional uints for base vertex, base instance, draw ID, and a bool for indexed draw */
    unsigned out_stride = sizeof(uint32_t) * ((args->base_vertex.indexed ? 5 : 4) + 4);
 
-   nir_ssa_def *out_offset = nir_imul(&b, draw_id, nir_imm_int(&b, out_stride));
-   nir_ssa_def *out_data0 = nir_vec4(&b, base_vertex, base_instance,
+   nir_def *out_offset = nir_imul(&b, draw_id, nir_imm_int(&b, out_stride));
+   nir_def *out_data0 = nir_vec4(&b, base_vertex, base_instance,
       nir_iadd(&b, draw_id, nir_channel(&b, in_stride_offset_and_base_drawid, 2)),
       nir_imm_int(&b, args->base_vertex.indexed ? -1 : 0));
-   nir_ssa_def *out_data1 = in_data0;
+   nir_def *out_data1 = in_data0;
 
    nir_store_ssbo(&b, out_data0, nir_imm_int(&b, 1), out_offset, 0xf, (gl_access_qualifier)0, 4, 0);
    nir_store_ssbo(&b, out_data1, nir_imm_int(&b, 1), nir_iadd(&b, out_offset, nir_imm_int(&b, 16)),
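
The out_stride arithmetic above, spelled out: each draw writes the 4 extra
uints named in the comment plus the original indirect args (5 uints when
indexed, 4 otherwise). A sketch of the per-draw record, assuming an indexed
draw; the field names are illustrative, not from the source:

   #include <stdint.h>

   struct out_record {
      uint32_t base_vertex;
      uint32_t base_instance;
      uint32_t draw_id;          /* draw_id plus base_drawid from the UBO */
      uint32_t indexed;          /* -1 for indexed draws, 0 otherwise */
      uint32_t indirect_args[5]; /* the original args, copied through */
   };
   /* sizeof == 36 == sizeof(uint32_t) * (5 + 4), matching out_stride */
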
@@ -116,29 +116,29 @@ get_fake_so_buffer_copy_back(const nir_shader_compiler_options *options, const d
       glsl_array_type(glsl_uint_type(), 5, 0), "input_ubo");
    input_ubo->data.driver_location = 0;
 
-   nir_ssa_def *original_so_filled_size = nir_load_ubo(&b, 1, 32, nir_imm_int(&b, 0), nir_imm_int(&b, 4 * sizeof(uint32_t)),
+   nir_def *original_so_filled_size = nir_load_ubo(&b, 1, 32, nir_imm_int(&b, 0), nir_imm_int(&b, 4 * sizeof(uint32_t)),
       (gl_access_qualifier)0, 4, 0, 4 * sizeof(uint32_t), 4);
 
    nir_variable *state_var = nullptr;
-   nir_ssa_def *fake_so_multiplier = d3d12_get_state_var(&b, D3D12_STATE_VAR_TRANSFORM_GENERIC0, "fake_so_multiplier", glsl_uint_type(), &state_var);
+   nir_def *fake_so_multiplier = d3d12_get_state_var(&b, D3D12_STATE_VAR_TRANSFORM_GENERIC0, "fake_so_multiplier", glsl_uint_type(), &state_var);
 
-   nir_ssa_def *vertex_offset = nir_imul(&b, nir_imm_int(&b, key->fake_so_buffer_copy_back.stride),
+   nir_def *vertex_offset = nir_imul(&b, nir_imm_int(&b, key->fake_so_buffer_copy_back.stride),
       nir_channel(&b, nir_load_global_invocation_id(&b, 32), 0));
 
-   nir_ssa_def *output_offset_base = nir_iadd(&b, original_so_filled_size, vertex_offset);
-   nir_ssa_def *input_offset_base = nir_imul(&b, vertex_offset, fake_so_multiplier);
+   nir_def *output_offset_base = nir_iadd(&b, original_so_filled_size, vertex_offset);
+   nir_def *input_offset_base = nir_imul(&b, vertex_offset, fake_so_multiplier);
 
    for (unsigned i = 0; i < key->fake_so_buffer_copy_back.num_ranges; ++i) {
       auto& output = key->fake_so_buffer_copy_back.ranges[i];
       assert(output.size % 4 == 0 && output.offset % 4 == 0);
-      nir_ssa_def *field_offset = nir_imm_int(&b, output.offset);
-      nir_ssa_def *output_offset = nir_iadd(&b, output_offset_base, field_offset);
-      nir_ssa_def *input_offset = nir_iadd(&b, input_offset_base, field_offset);
+      nir_def *field_offset = nir_imm_int(&b, output.offset);
+      nir_def *output_offset = nir_iadd(&b, output_offset_base, field_offset);
+      nir_def *input_offset = nir_iadd(&b, input_offset_base, field_offset);
 
       for (unsigned loaded = 0; loaded < output.size; loaded += 16) {
          unsigned to_load = MIN2(output.size, 16);
          unsigned components = to_load / 4;
-         nir_ssa_def *loaded_data = nir_load_ssbo(&b, components, 32, nir_imm_int(&b, 1),
+         nir_def *loaded_data = nir_load_ssbo(&b, components, 32, nir_imm_int(&b, 1),
             nir_iadd(&b, input_offset, nir_imm_int(&b, loaded)), (gl_access_qualifier)0, 4, 0);
          nir_store_ssbo(&b, loaded_data, nir_imm_int(&b, 0),
             nir_iadd(&b, output_offset, nir_imm_int(&b, loaded)), (1u << components) - 1, (gl_access_qualifier)0, 4, 0);
@@ -158,24 +158,24 @@ get_fake_so_buffer_vertex_count(const nir_shader_compiler_options *options)
    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options, "FakeSOBufferVertexCount");
 
    nir_variable_create(b.shader, nir_var_mem_ssbo, glsl_array_type(glsl_uint_type(), 0, 0), "fake_so");
-   nir_ssa_def *fake_buffer_filled_size = nir_load_ssbo(&b, 1, 32, nir_imm_int(&b, 0), nir_imm_int(&b, 0), (gl_access_qualifier)0, 4, 0);
+   nir_def *fake_buffer_filled_size = nir_load_ssbo(&b, 1, 32, nir_imm_int(&b, 0), nir_imm_int(&b, 0), (gl_access_qualifier)0, 4, 0);
 
    nir_variable *real_so_var = nir_variable_create(b.shader, nir_var_mem_ssbo,
       glsl_array_type(glsl_uint_type(), 0, 0), "real_so");
    real_so_var->data.driver_location = 1;
-   nir_ssa_def *real_buffer_filled_size = nir_load_ssbo(&b, 1, 32, nir_imm_int(&b, 1), nir_imm_int(&b, 0), (gl_access_qualifier)0, 4, 0);
+   nir_def *real_buffer_filled_size = nir_load_ssbo(&b, 1, 32, nir_imm_int(&b, 1), nir_imm_int(&b, 0), (gl_access_qualifier)0, 4, 0);
 
    nir_variable *state_var = nullptr;
-   nir_ssa_def *state_var_data = d3d12_get_state_var(&b, D3D12_STATE_VAR_TRANSFORM_GENERIC0, "state_var", glsl_uvec4_type(), &state_var);
-   nir_ssa_def *stride = nir_channel(&b, state_var_data, 0);
-   nir_ssa_def *fake_so_multiplier = nir_channel(&b, state_var_data, 1);
+   nir_def *state_var_data = d3d12_get_state_var(&b, D3D12_STATE_VAR_TRANSFORM_GENERIC0, "state_var", glsl_uvec4_type(), &state_var);
+   nir_def *stride = nir_channel(&b, state_var_data, 0);
+   nir_def *fake_so_multiplier = nir_channel(&b, state_var_data, 1);
 
-   nir_ssa_def *real_so_bytes_added = nir_idiv(&b, fake_buffer_filled_size, fake_so_multiplier);
-   nir_ssa_def *vertex_count = nir_idiv(&b, real_so_bytes_added, stride);
-   nir_ssa_def *to_write_to_fake_buffer = nir_vec4(&b, vertex_count, nir_imm_int(&b, 1), nir_imm_int(&b, 1), real_buffer_filled_size);
+   nir_def *real_so_bytes_added = nir_idiv(&b, fake_buffer_filled_size, fake_so_multiplier);
+   nir_def *vertex_count = nir_idiv(&b, real_so_bytes_added, stride);
+   nir_def *to_write_to_fake_buffer = nir_vec4(&b, vertex_count, nir_imm_int(&b, 1), nir_imm_int(&b, 1), real_buffer_filled_size);
    nir_store_ssbo(&b, to_write_to_fake_buffer, nir_imm_int(&b, 0), nir_imm_int(&b, 4), 0xf, (gl_access_qualifier)0, 4, 0);
 
-   nir_ssa_def *updated_filled_size = nir_iadd(&b, real_buffer_filled_size, real_so_bytes_added);
+   nir_def *updated_filled_size = nir_iadd(&b, real_buffer_filled_size, real_so_bytes_added);
    nir_store_ssbo(&b, updated_filled_size, nir_imm_int(&b, 1), nir_imm_int(&b, 0), 1, (gl_access_qualifier)0, 4, 0);
 
    nir_validate_shader(b.shader, "creation");
@@ -191,18 +191,18 @@ get_draw_auto(const nir_shader_compiler_options *options)
    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options, "DrawAuto");
 
    nir_variable_create(b.shader, nir_var_mem_ssbo, glsl_array_type(glsl_uint_type(), 0, 0), "ssbo");
-   nir_ssa_def *buffer_filled_size = nir_load_ssbo(&b, 1, 32, nir_imm_int(&b, 0), nir_imm_int(&b, 0), (gl_access_qualifier)0, 4, 0);
+   nir_def *buffer_filled_size = nir_load_ssbo(&b, 1, 32, nir_imm_int(&b, 0), nir_imm_int(&b, 0), (gl_access_qualifier)0, 4, 0);
 
    nir_variable *state_var = nullptr;
-   nir_ssa_def *state_var_data = d3d12_get_state_var(&b, D3D12_STATE_VAR_TRANSFORM_GENERIC0, "state_var", glsl_uvec4_type(), &state_var);
-   nir_ssa_def *stride = nir_channel(&b, state_var_data, 0);
-   nir_ssa_def *vb_offset = nir_channel(&b, state_var_data, 1);
+   nir_def *state_var_data = d3d12_get_state_var(&b, D3D12_STATE_VAR_TRANSFORM_GENERIC0, "state_var", glsl_uvec4_type(), &state_var);
+   nir_def *stride = nir_channel(&b, state_var_data, 0);
+   nir_def *vb_offset = nir_channel(&b, state_var_data, 1);
 
-   nir_ssa_def *vb_bytes = nir_bcsel(&b, nir_ilt(&b, vb_offset, buffer_filled_size),
+   nir_def *vb_bytes = nir_bcsel(&b, nir_ilt(&b, vb_offset, buffer_filled_size),
       nir_isub(&b, buffer_filled_size, vb_offset), nir_imm_int(&b, 0));
 
-   nir_ssa_def *vertex_count = nir_idiv(&b, vb_bytes, stride);
-   nir_ssa_def *to_write = nir_vec4(&b, vertex_count, nir_imm_int(&b, 1), nir_imm_int(&b, 0), nir_imm_int(&b, 0));
+   nir_def *vertex_count = nir_idiv(&b, vb_bytes, stride);
+   nir_def *to_write = nir_vec4(&b, vertex_count, nir_imm_int(&b, 1), nir_imm_int(&b, 0), nir_imm_int(&b, 0));
    nir_store_ssbo(&b, to_write, nir_imm_int(&b, 0), nir_imm_int(&b, 4), 0xf, (gl_access_qualifier)0, 4, 0);
 
    nir_validate_shader(b.shader, "creation");
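
DrawAuto derives its vertex count from how far the stream-output buffer was
filled past the vertex-buffer offset, clamping at zero so an empty buffer
draws nothing. The same computation as scalar C, producing the four uints the
shader stores:

   #include <stdint.h>

   static void
   draw_auto_args(uint32_t filled_size, uint32_t vb_offset, uint32_t stride,
                  uint32_t out[4])
   {
      uint32_t vb_bytes = vb_offset < filled_size ? filled_size - vb_offset : 0;
      out[0] = vb_bytes / stride; /* vertex count */
      out[1] = 1;
      out[2] = 0;
      out[3] = 0;
   }
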
index a499511..719d8e2 100644
 #include "util/u_memory.h"
 #include "util/u_simple_shaders.h"
 
-static nir_ssa_def *
+static nir_def *
 nir_cull_face(nir_builder *b, nir_variable *vertices, bool ccw)
 {
-   nir_ssa_def *v0 =
+   nir_def *v0 =
        nir_load_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, vertices), nir_imm_int(b, 0)));
-   nir_ssa_def *v1 =
+   nir_def *v1 =
        nir_load_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, vertices), nir_imm_int(b, 1)));
-   nir_ssa_def *v2 =
+   nir_def *v2 =
        nir_load_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, vertices), nir_imm_int(b, 2)));
 
-   nir_ssa_def *dir = nir_fdot(b, nir_cross4(b, nir_fsub(b, v1, v0),
+   nir_def *dir = nir_fdot(b, nir_cross4(b, nir_fsub(b, v1, v0),
                                                nir_fsub(b, v2, v0)),
                                    nir_imm_vec4(b, 0.0, 0.0, -1.0, 0.0));
    if (ccw)
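
nir_cull_face() takes the cross product of two triangle edges and dots it
with (0, 0, -1, 0), which keeps just the negated z-component, i.e. the signed
area of the screen-space triangle; ccw flips the sign. A scalar sketch (only
x and y feed the z-component of the cross product; the tail of the function
is cut from the hunk, so the final compare is an assumption):

   #include <stdbool.h>

   static bool
   cull_face(const float v0[2], const float v1[2], const float v2[2], bool ccw)
   {
      float ex0 = v1[0] - v0[0], ey0 = v1[1] - v0[1];
      float ex1 = v2[0] - v0[0], ey1 = v2[1] - v0[1];
      float dir = -(ex0 * ey1 - ey0 * ex1); /* z of the cross, dotted with -1 */
      if (ccw)
         dir = -dir;
      return dir < 0.0f; /* assumed culling condition */
   }
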
@@ -154,9 +154,9 @@ struct emit_primitives_context
 
    nir_loop *loop;
    nir_deref_instr *loop_index_deref;
-   nir_ssa_def *loop_index;
-   nir_ssa_def *edgeflag_cmp;
-   nir_ssa_def *front_facing;
+   nir_def *loop_index;
+   nir_def *edgeflag_cmp;
+   nir_def *front_facing;
 };
 
 static bool
@@ -245,10 +245,10 @@ d3d12_begin_emit_primitives_gs(struct emit_primitives_context *emit_ctx,
    emit_ctx->loop_index_deref = nir_build_deref_var(b, loop_index_var);
    nir_store_deref(b, emit_ctx->loop_index_deref, nir_imm_int(b, 0), 1);
 
-   nir_ssa_def *diagonal_vertex = NULL;
+   nir_def *diagonal_vertex = NULL;
    if (key->edge_flag_fix) {
-      nir_ssa_def *prim_id = nir_load_primitive_id(b);
-      nir_ssa_def *odd = nir_build_alu(b, nir_op_imod,
+      nir_def *prim_id = nir_load_primitive_id(b);
+      nir_def *odd = nir_build_alu(b, nir_op_imod,
                                        prim_id,
                                        nir_imm_int(b, 2),
                                        NULL, NULL);
@@ -280,15 +280,15 @@ d3d12_begin_emit_primitives_gs(struct emit_primitives_context *emit_ctx,
    emit_ctx->loop = nir_push_loop(b);
 
    emit_ctx->loop_index = nir_load_deref(b, emit_ctx->loop_index_deref);
-   nir_ssa_def *cmp = nir_ige_imm(b, emit_ctx->loop_index, 3);
+   nir_def *cmp = nir_ige_imm(b, emit_ctx->loop_index, 3);
    nir_if *loop_check = nir_push_if(b, cmp);
    nir_jump(b, nir_jump_break);
    nir_pop_if(b, loop_check);
 
    if (edgeflag_var) {
-      nir_ssa_def *edge_flag =
+      nir_def *edge_flag =
          nir_load_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, edgeflag_var), emit_ctx->loop_index));
-      nir_ssa_def *is_edge = nir_feq_imm(b, nir_channel(b, edge_flag, 0), 1.0);
+      nir_def *is_edge = nir_feq_imm(b, nir_channel(b, edge_flag, 0), 1.0);
       if (emit_ctx->edgeflag_cmp)
          emit_ctx->edgeflag_cmp = nir_iand(b, emit_ctx->edgeflag_cmp, is_edge);
       else
@@ -296,7 +296,7 @@ d3d12_begin_emit_primitives_gs(struct emit_primitives_context *emit_ctx,
    }
 
    if (key->edge_flag_fix) {
-      nir_ssa_def *is_edge = nir_ine(b, emit_ctx->loop_index, diagonal_vertex);
+      nir_def *is_edge = nir_ine(b, emit_ctx->loop_index, diagonal_vertex);
       if (emit_ctx->edgeflag_cmp)
          emit_ctx->edgeflag_cmp = nir_iand(b, emit_ctx->edgeflag_cmp, is_edge);
       else
@@ -353,7 +353,7 @@ d3d12_emit_points(struct d3d12_context *ctx, struct d3d12_gs_variant_key *key)
     *  EmitVertex();
     */
    for (unsigned i = 0; i < emit_ctx.num_vars; ++i) {
-      nir_ssa_def *index = (key->flat_varyings & (1ull << emit_ctx.in[i]->data.location))  ?
+      nir_def *index = (key->flat_varyings & (1ull << emit_ctx.in[i]->data.location))  ?
                               nir_imm_int(b, (key->flatshade_first ? 0 : 2)) : emit_ctx.loop_index;
       nir_deref_instr *in_value = nir_build_deref_array(b, nir_build_deref_var(b, emit_ctx.in[i]), index);
       if (emit_ctx.in[i]->data.location == VARYING_SLOT_POS && emit_ctx.edgeflag_cmp) {
@@ -382,11 +382,11 @@ d3d12_emit_lines(struct d3d12_context *ctx, struct d3d12_gs_variant_key *key)
 
    d3d12_begin_emit_primitives_gs(&emit_ctx, ctx, key, GL_LINE_STRIP, 6);
 
-   nir_ssa_def *next_index = nir_imod_imm(b, nir_iadd_imm(b, emit_ctx.loop_index, 1), 3);
+   nir_def *next_index = nir_imod_imm(b, nir_iadd_imm(b, emit_ctx.loop_index, 1), 3);
 
    /* First vertex */
    for (unsigned i = 0; i < emit_ctx.num_vars; ++i) {
-      nir_ssa_def *index = (key->flat_varyings & (1ull << emit_ctx.in[i]->data.location)) ?
+      nir_def *index = (key->flat_varyings & (1ull << emit_ctx.in[i]->data.location)) ?
                               nir_imm_int(b, (key->flatshade_first ? 0 : 2)) : emit_ctx.loop_index;
       nir_deref_instr *in_value = nir_build_deref_array(b, nir_build_deref_var(b, emit_ctx.in[i]), index);
       copy_vars(b, nir_build_deref_var(b, emit_ctx.out[i]), in_value);
@@ -397,7 +397,7 @@ d3d12_emit_lines(struct d3d12_context *ctx, struct d3d12_gs_variant_key *key)
 
    /* Second vertex. If not an edge, use same position as first vertex */
    for (unsigned i = 0; i < emit_ctx.num_vars; ++i) {
-      nir_ssa_def *index = next_index;
+      nir_def *index = next_index;
       if (emit_ctx.in[i]->data.location == VARYING_SLOT_POS)
          index = nir_bcsel(b, emit_ctx.edgeflag_cmp, next_index, emit_ctx.loop_index);
       else if (key->flat_varyings & (1ull << emit_ctx.in[i]->data.location))
@@ -428,7 +428,7 @@ d3d12_emit_triangles(struct d3d12_context *ctx, struct d3d12_gs_variant_key *key
     *  EmitVertex();
     */
 
-   nir_ssa_def *incr = NULL;
+   nir_def *incr = NULL;
 
    if (key->provoking_vertex > 0)
       incr = nir_imm_int(b, key->provoking_vertex);
@@ -436,12 +436,12 @@ d3d12_emit_triangles(struct d3d12_context *ctx, struct d3d12_gs_variant_key *key
       incr = nir_imm_int(b, 3);
 
    if (key->alternate_tri) {
-      nir_ssa_def *odd = nir_imod_imm(b, nir_load_primitive_id(b), 2);
+      nir_def *odd = nir_imod_imm(b, nir_load_primitive_id(b), 2);
       incr = nir_isub(b, incr, odd);
    }
 
    assert(incr != NULL);
-   nir_ssa_def *index = nir_imod_imm(b, nir_iadd(b, emit_ctx.loop_index, incr), 3);
+   nir_def *index = nir_imod_imm(b, nir_iadd(b, emit_ctx.loop_index, incr), 3);
    for (unsigned i = 0; i < emit_ctx.num_vars; ++i) {
       nir_deref_instr *in_value = nir_build_deref_array(b, nir_build_deref_var(b, emit_ctx.in[i]), index);
       copy_vars(b, nir_build_deref_var(b, emit_ctx.out[i]), in_value);
index 78f265d..a00fcee 100644
@@ -31,8 +31,8 @@
 #include "d3d12_compiler.h"
 #include "d3d12_nir_passes.h"
 
-static nir_ssa_def *
-convert_value(nir_builder *b, nir_ssa_def *value,
+static nir_def *
+convert_value(nir_builder *b, nir_def *value,
    const struct util_format_description *from_desc,
    const struct util_format_description *to_desc)
 {
@@ -53,12 +53,12 @@ convert_value(nir_builder *b, nir_ssa_def *value,
     * formats that don't fall on a nice bit size, convert/pack them into 32bit values. Otherwise,
     * just produce a vecNx4 where N is the expected bit size.
     */
-   nir_ssa_def *src_as_vec;
+   nir_def *src_as_vec;
    if (from_desc->format == PIPE_FORMAT_R10G10B10A2_UINT ||
        from_desc->format == PIPE_FORMAT_R10G10B10A2_UNORM) {
       if (from_desc->format == PIPE_FORMAT_R10G10B10A2_UNORM)
          value = nir_format_float_to_unorm(b, value, rgba1010102_bits);
-      nir_ssa_def *channels[4];
+      nir_def *channels[4];
       for (unsigned i = 0; i < 4; ++i)
          channels[i] = nir_channel(b, value, i);
 
@@ -72,7 +72,7 @@ convert_value(nir_builder *b, nir_ssa_def *value,
       if (from_desc->channel[0].size == 8)
          src_as_vec = nir_pack_unorm_4x8(b, value);
       else {
-         nir_ssa_def *packed_channels[2];
+         nir_def *packed_channels[2];
          packed_channels[0] = nir_pack_unorm_2x16(b,
                                                   nir_trim_vector(b, value, 2));
          packed_channels[1] = nir_pack_unorm_2x16(b, nir_channels(b, value, 0x3 << 2));
@@ -82,7 +82,7 @@ convert_value(nir_builder *b, nir_ssa_def *value,
       if (from_desc->channel[0].size == 8)
          src_as_vec = nir_pack_snorm_4x8(b, value);
       else {
-         nir_ssa_def *packed_channels[2];
+         nir_def *packed_channels[2];
          packed_channels[0] = nir_pack_snorm_2x16(b,
                                                   nir_trim_vector(b, value, 2));
          packed_channels[1] = nir_pack_snorm_2x16(b, nir_channels(b, value, 0x3 << 2));
@@ -106,35 +106,35 @@ convert_value(nir_builder *b, nir_ssa_def *value,
     */
    if (to_desc->format == PIPE_FORMAT_R10G10B10A2_UINT ||
        to_desc->format == PIPE_FORMAT_R10G10B10A2_UNORM) {
-      nir_ssa_def *u32 = nir_extract_bits(b, &src_as_vec, 1, 0, 1, 32);
-      nir_ssa_def *channels[4] = {
+      nir_def *u32 = nir_extract_bits(b, &src_as_vec, 1, 0, 1, 32);
+      nir_def *channels[4] = {
          nir_iand_imm(b, u32,                      (1 << 10) - 1),
          nir_iand_imm(b, nir_ushr_imm(b, u32, 10), (1 << 10) - 1),
          nir_iand_imm(b, nir_ushr_imm(b, u32, 20), (1 << 10) - 1),
                          nir_ushr_imm(b, u32, 30)
       };
-      nir_ssa_def *vec = nir_vec(b, channels, 4);
+      nir_def *vec = nir_vec(b, channels, 4);
       if (to_desc->format == PIPE_FORMAT_R10G10B10A2_UNORM)
          vec = nir_format_unorm_to_float(b, vec, rgba1010102_bits);
       return vec;
    } else if (to_desc->format == PIPE_FORMAT_R11G11B10_FLOAT) {
-      nir_ssa_def *u32 = nir_extract_bits(b, &src_as_vec, 1, 0, 1, 32);
-      nir_ssa_def *vec3 = nir_format_unpack_11f11f10f(b, u32);
+      nir_def *u32 = nir_extract_bits(b, &src_as_vec, 1, 0, 1, 32);
+      nir_def *vec3 = nir_format_unpack_11f11f10f(b, u32);
       return nir_vec4(b, nir_channel(b, vec3, 0),
                          nir_channel(b, vec3, 1),
                          nir_channel(b, vec3, 2),
                          nir_imm_float(b, 1.0f));
    } else if (to_desc->is_unorm || to_desc->is_snorm) {
-      nir_ssa_def *dest_packed = nir_extract_bits(b, &src_as_vec, 1, 0,
+      nir_def *dest_packed = nir_extract_bits(b, &src_as_vec, 1, 0,
          DIV_ROUND_UP(to_desc->nr_channels * to_desc->channel[0].size, 32), 32);
       if (to_desc->is_unorm) {
          if (to_desc->channel[0].size == 8) {
-            nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, dest_packed, 0));
+            nir_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, dest_packed, 0));
             if (to_desc->nr_channels < 4)
                unpacked = nir_vector_insert_imm(b, unpacked, nir_imm_float(b, 1.0f), 3);
             return unpacked;
          }
-         nir_ssa_def *vec2s[2] = {
+         nir_def *vec2s[2] = {
             nir_unpack_unorm_2x16(b, nir_channel(b, dest_packed, 0)),
             to_desc->nr_channels > 2 ?
                nir_unpack_unorm_2x16(b, nir_channel(b, dest_packed, 1)) :
@@ -148,12 +148,12 @@ convert_value(nir_builder *b, nir_ssa_def *value,
                             nir_channel(b, vec2s[1], 1));
       } else {
          if (to_desc->channel[0].size == 8) {
-            nir_ssa_def *unpacked = nir_unpack_snorm_4x8(b, nir_channel(b, dest_packed, 0));
+            nir_def *unpacked = nir_unpack_snorm_4x8(b, nir_channel(b, dest_packed, 0));
             if (to_desc->nr_channels < 4)
                unpacked = nir_vector_insert_imm(b, unpacked, nir_imm_float(b, 1.0f), 3);
             return unpacked;
          }
-         nir_ssa_def *vec2s[2] = {
+         nir_def *vec2s[2] = {
             nir_unpack_snorm_2x16(b, nir_channel(b, dest_packed, 0)),
             to_desc->nr_channels > 2 ?
                nir_unpack_snorm_2x16(b, nir_channel(b, dest_packed, 1)) :
@@ -167,9 +167,9 @@ convert_value(nir_builder *b, nir_ssa_def *value,
                             nir_channel(b, vec2s[1], 1));
       }
    } else {
-      nir_ssa_def *dest_packed = nir_extract_bits(b, &src_as_vec, 1, 0,
+      nir_def *dest_packed = nir_extract_bits(b, &src_as_vec, 1, 0,
          to_desc->nr_channels, to_desc->channel[0].size);
-      nir_ssa_def *final_channels[4];
+      nir_def *final_channels[4];
       for (unsigned i = 0; i < 4; ++i) {
          if (i >= dest_packed->num_components)
             final_channels[i] = util_format_is_float(to_desc->format) ?
@@ -178,7 +178,7 @@ convert_value(nir_builder *b, nir_ssa_def *value,
          else
             final_channels[i] = nir_channel(b, dest_packed, i);
       }
-      nir_ssa_def *final_vec = nir_vec(b, final_channels, 4);
+      nir_def *final_vec = nir_vec(b, final_channels, 4);
       if (util_format_is_float(to_desc->format))
          return nir_f2f32(b, final_vec);
       else if (util_format_is_pure_sint(to_desc->format))
@@ -213,7 +213,7 @@ lower_image_cast_instr(nir_builder *b, nir_instr *instr, void *_data)
    enum pipe_format real_format = info->image_format_conversion[image->data.driver_location].view_format;
    assert(real_format != emulation_format);
 
-   nir_ssa_def *value;
+   nir_def *value;
    const struct util_format_description *from_desc, *to_desc;
    if (intr->intrinsic == nir_intrinsic_image_deref_load) {
       b->cursor = nir_after_instr(instr);
@@ -227,14 +227,14 @@ lower_image_cast_instr(nir_builder *b, nir_instr *instr, void *_data)
       to_desc = util_format_description(emulation_format);
    }
 
-   nir_ssa_def *new_value = convert_value(b, value, from_desc, to_desc);
+   nir_def *new_value = convert_value(b, value, from_desc, to_desc);
 
    nir_alu_type alu_type = util_format_is_pure_uint(emulation_format) ?
       nir_type_uint : (util_format_is_pure_sint(emulation_format) ?
          nir_type_int : nir_type_float);
 
    if (intr->intrinsic == nir_intrinsic_image_deref_load) {
-      nir_ssa_def_rewrite_uses_after(value, new_value, new_value->parent_instr);
+      nir_def_rewrite_uses_after(value, new_value, new_value->parent_instr);
       nir_intrinsic_set_dest_type(intr, alu_type);
    } else {
       nir_instr_rewrite_src_ssa(instr, &intr->src[3], new_value);
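
Most of convert_value() reduces to bit packing and unpacking; the
R10G10B10A2 case above, for instance, masks three 10-bit channels and a
2-bit alpha out of a single 32-bit word. As a standalone C sketch:

   #include <stdint.h>

   static void
   unpack_rgb10a2(uint32_t u32, uint32_t channels[4])
   {
      channels[0] = u32 & ((1 << 10) - 1);
      channels[1] = (u32 >> 10) & ((1 << 10) - 1);
      channels[2] = (u32 >> 20) & ((1 << 10) - 1);
      channels[3] = u32 >> 30;
   }

For the UNORM variant the pass then converts to float by dividing each
channel by its maximum (1023, 1023, 1023, 3), which is what
nir_format_unorm_to_float does with rgba1010102_bits.
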
index d871916..36b4ab4 100644
@@ -36,13 +36,13 @@ struct lower_state {
    unsigned num_point_coords;
    nir_variable *varying_out[VARYING_SLOT_MAX];
 
-   nir_ssa_def *point_dir_imm[4];
-   nir_ssa_def *point_coord_imm[4];
+   nir_def *point_dir_imm[4];
+   nir_def *point_coord_imm[4];
 
    /* Current point primitive */
-   nir_ssa_def *point_pos;
-   nir_ssa_def *point_size;
-   nir_ssa_def *varying[VARYING_SLOT_MAX];
+   nir_def *point_pos;
+   nir_def *point_size;
+   nir_def *varying[VARYING_SLOT_MAX];
    unsigned varying_write_mask[VARYING_SLOT_MAX];
 
    bool sprite_origin_lower_left;
@@ -68,7 +68,7 @@ find_outputs(nir_shader *shader, struct lower_state *state)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 get_point_dir(nir_builder *b, struct lower_state *state, unsigned i)
 {
    if (state->point_dir_imm[0] == NULL) {
@@ -81,7 +81,7 @@ get_point_dir(nir_builder *b, struct lower_state *state, unsigned i)
    return state->point_dir_imm[i];
 }
 
-static nir_ssa_def *
+static nir_def *
 get_point_coord(nir_builder *b, struct lower_state *state, unsigned i)
 {
    if (state->point_coord_imm[0] == NULL) {
@@ -106,11 +106,11 @@ get_point_coord(nir_builder *b, struct lower_state *state, unsigned i)
  */
 static void
 get_scaled_point_size(nir_builder *b, struct lower_state *state,
-                      nir_ssa_def **x, nir_ssa_def **y)
+                      nir_def **x, nir_def **y)
 {
    /* State uniform contains: (1/ViewportWidth, 1/ViewportHeight, PointSize, MaxPointSize) */
-   nir_ssa_def *uniform = nir_load_var(b, state->uniform);
-   nir_ssa_def *point_size = state->point_size;
+   nir_def *uniform = nir_load_var(b, state->uniform);
+   nir_def *point_size = state->point_size;
 
    /* clamp point-size to valid range */
    if (point_size && state->point_size_per_vertex) {
@@ -158,7 +158,7 @@ lower_emit_vertex(nir_intrinsic_instr *instr, nir_builder *b, struct lower_state
 {
    unsigned stream_id = nir_intrinsic_stream_id(instr);
 
-   nir_ssa_def *point_width, *point_height;
+   nir_def *point_width, *point_height;
    get_scaled_point_size(b, state, &point_width, &point_height);
 
    nir_instr_remove(&instr->instr);
@@ -173,8 +173,8 @@ lower_emit_vertex(nir_intrinsic_instr *instr, nir_builder *b, struct lower_state
          }
 
          /* pos = scaled_point_size * point_dir + point_pos */
-         nir_ssa_def *point_dir = get_point_dir(b, state, i);
-         nir_ssa_def *pos = nir_vec4(b,
+         nir_def *point_dir = get_point_dir(b, state, i);
+         nir_def *pos = nir_vec4(b,
                                      nir_ffma(b,
                                               point_width,
                                               nir_channel(b, point_dir, 0),
@@ -188,7 +188,7 @@ lower_emit_vertex(nir_intrinsic_instr *instr, nir_builder *b, struct lower_state
          nir_store_var(b, state->pos_out, pos, 0xf);
 
          /* point coord */
-         nir_ssa_def *point_coord = get_point_coord(b, state, i);
+         nir_def *point_coord = get_point_coord(b, state, i);
          for (unsigned j = 0; j < state->num_point_coords; ++j) {
             unsigned num_channels = glsl_get_components(state->point_coord_out[j]->type);
             unsigned mask = (1 << num_channels) - 1;
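
Each point sprite corner above is one ffma per axis: pos =
scaled_point_size * point_dir + point_pos, with get_point_dir() supplying a
per-corner direction. A scalar sketch, assuming corner directions of +/-1
(the actual immediates are outside this hunk):

   static void
   sprite_corner(const float point_pos[4], float point_w, float point_h,
                 float dir_x, float dir_y, float out[4])
   {
      out[0] = point_w * dir_x + point_pos[0]; /* nir_ffma */
      out[1] = point_h * dir_y + point_pos[1];
      out[2] = point_pos[2];
      out[3] = point_pos[3];
   }
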
index d7f6406..f137275 100644
@@ -37,7 +37,7 @@
  * so we need to lower the flip into the NIR shader.
  */
 
-nir_ssa_def *
+nir_def *
 d3d12_get_state_var(nir_builder *b,
                     enum d3d12_state_var var_enum,
                     const char *var_name,
@@ -71,10 +71,10 @@ lower_pos_write(nir_builder *b, struct nir_instr *instr, nir_variable **flip)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
-   nir_ssa_def *flip_y = d3d12_get_state_var(b, D3D12_STATE_VAR_Y_FLIP, "d3d12_FlipY",
+   nir_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
+   nir_def *flip_y = d3d12_get_state_var(b, D3D12_STATE_VAR_Y_FLIP, "d3d12_FlipY",
                                              glsl_float_type(), flip);
-   nir_ssa_def *def = nir_vec4(b,
+   nir_def *def = nir_vec4(b,
                                nir_channel(b, pos, 0),
                                nir_fmul(b, nir_channel(b, pos, 1), flip_y),
                                nir_channel(b, pos, 2),
@@ -124,11 +124,11 @@ lower_pos_read(nir_builder *b, struct nir_instr *instr,
 
    b->cursor = nir_after_instr(instr);
 
-   nir_ssa_def *pos = nir_instr_ssa_def(instr);
-   nir_ssa_def *depth = nir_channel(b, pos, 2);
+   nir_def *pos = nir_instr_ssa_def(instr);
+   nir_def *depth = nir_channel(b, pos, 2);
 
    assert(depth_transform_var);
-   nir_ssa_def *depth_transform = d3d12_get_state_var(b, D3D12_STATE_VAR_DEPTH_TRANSFORM,
+   nir_def *depth_transform = d3d12_get_state_var(b, D3D12_STATE_VAR_DEPTH_TRANSFORM,
                                                       "d3d12_DepthTransform",
                                                       glsl_vec_type(2),
                                                       depth_transform_var);
@@ -137,7 +137,7 @@ lower_pos_read(nir_builder *b, struct nir_instr *instr,
 
    pos = nir_vector_insert_imm(b, pos, depth, 2);
 
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, pos,
+   nir_def_rewrite_uses_after(&intr->dest.ssa, pos,
                                   pos->parent_instr);
 }
 
@@ -173,7 +173,7 @@ lower_compute_state_vars(nir_builder *b, nir_instr *instr, void *_state)
    b->cursor = nir_after_instr(instr);
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    struct compute_state_vars *vars = _state;
-   nir_ssa_def *result = NULL;
+   nir_def *result = NULL;
    switch (intr->intrinsic) {
    case nir_intrinsic_load_num_workgroups:
       result = d3d12_get_state_var(b, D3D12_STATE_VAR_NUM_WORKGROUPS, "d3d12_NumWorkgroups",
@@ -183,7 +183,7 @@ lower_compute_state_vars(nir_builder *b, nir_instr *instr, void *_state)
       return false;
    }
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);
+   nir_def_rewrite_uses(&intr->dest.ssa, result);
    nir_instr_remove(instr);
    return true;
 }
@@ -224,8 +224,8 @@ lower_uint_color_write(nir_builder *b, struct nir_instr *instr, bool is_signed)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *col = nir_ssa_for_src(b, intr->src[1], intr->num_components);
-   nir_ssa_def *def = is_signed ? nir_format_float_to_snorm(b, col, bits) :
+   nir_def *col = nir_ssa_for_src(b, intr->src[1], intr->num_components);
+   nir_def *def = is_signed ? nir_format_float_to_snorm(b, col, bits) :
                                   nir_format_float_to_unorm(b, col, bits);
    if (is_signed)
       def = nir_bcsel(b, nir_ilt_imm(b, def, 0),
@@ -270,12 +270,12 @@ lower_load_draw_params(nir_builder *b, nir_instr *instr, void *draw_params)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *load = d3d12_get_state_var(b, D3D12_STATE_VAR_DRAW_PARAMS, "d3d12_DrawParams",
+   nir_def *load = d3d12_get_state_var(b, D3D12_STATE_VAR_DRAW_PARAMS, "d3d12_DrawParams",
                                            glsl_uvec4_type(), draw_params);
    unsigned channel = intr->intrinsic == nir_intrinsic_load_first_vertex ? 0 :
       intr->intrinsic == nir_intrinsic_load_base_instance ? 1 :
       intr->intrinsic == nir_intrinsic_load_draw_id ? 2 : 3;
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_channel(b, load, channel));
+   nir_def_rewrite_uses(&intr->dest.ssa, nir_channel(b, load, channel));
    nir_instr_remove(instr);
 
    return true;
@@ -302,10 +302,10 @@ lower_load_patch_vertices_in(nir_builder *b, nir_instr *instr, void *_state)
       return false;
 
    b->cursor = nir_before_instr(&intr->instr);
-   nir_ssa_def *load = b->shader->info.stage == MESA_SHADER_TESS_CTRL ?
+   nir_def *load = b->shader->info.stage == MESA_SHADER_TESS_CTRL ?
       d3d12_get_state_var(b, D3D12_STATE_VAR_PATCH_VERTICES_IN, "d3d12_FirstVertex", glsl_uint_type(), _state) :
       nir_imm_int(b, b->shader->info.tess.tcs_vertices_out);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, load);
+   nir_def_rewrite_uses(&intr->dest.ssa, load);
    nir_instr_remove(instr);
    return true;
 }
@@ -327,7 +327,7 @@ struct invert_depth_state
 {
    unsigned viewport_mask;
    bool clip_halfz;
-   nir_ssa_def *viewport_index;
+   nir_def *viewport_index;
    nir_instr *store_pos_instr;
 };
 
@@ -346,16 +346,16 @@ invert_depth_impl(nir_builder *b, struct invert_depth_state *state)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
+   nir_def *pos = nir_ssa_for_src(b, intr->src[1], 4);
 
    if (state->viewport_index) {
       nir_push_if(b, nir_test_mask(b, nir_ishl(b, nir_imm_int(b, 1), state->viewport_index), state->viewport_mask));
    }
-   nir_ssa_def *old_depth = nir_channel(b, pos, 2);
-   nir_ssa_def *new_depth = nir_fneg(b, old_depth);
+   nir_def *old_depth = nir_channel(b, pos, 2);
+   nir_def *new_depth = nir_fneg(b, old_depth);
    if (state->clip_halfz)
       new_depth = nir_fadd_imm(b, new_depth, 1.0);
-   nir_ssa_def *def = nir_vec4(b,
+   nir_def *def = nir_vec4(b,
                                nir_channel(b, pos, 0),
                                nir_channel(b, pos, 1),
                                new_depth,
@@ -479,9 +479,9 @@ lower_instr(nir_intrinsic_instr *instr, nir_builder *b,
       return false;
 
    enum d3d12_state_var var = variable->state_slots[0].tokens[1];
-   nir_ssa_def *ubo_idx = nir_imm_int(b, binding);
-   nir_ssa_def *ubo_offset =  nir_imm_int(b, get_state_var_offset(shader, var) * 4);
-   nir_ssa_def *load =
+   nir_def *ubo_idx = nir_imm_int(b, binding);
+   nir_def *ubo_offset =  nir_imm_int(b, get_state_var_offset(shader, var) * 4);
+   nir_def *load =
       nir_load_ubo(b, instr->num_components, instr->dest.ssa.bit_size,
                    ubo_idx, ubo_offset,
                    .align_mul = 16,
@@ -490,7 +490,7 @@ lower_instr(nir_intrinsic_instr *instr, nir_builder *b,
                    .range = ~0,
                    );
 
-   nir_ssa_def_rewrite_uses(&instr->dest.ssa, load);
+   nir_def_rewrite_uses(&instr->dest.ssa, load);
 
    /* Remove the old load_* instruction and any parent derefs */
    nir_instr_remove(&instr->instr);
@@ -590,7 +590,7 @@ d3d12_add_missing_dual_src_target(struct nir_shader *s,
    nir_function_impl *impl = nir_shader_get_entrypoint(s);
    b = nir_builder_at(nir_before_cf_list(&impl->body));
 
-   nir_ssa_def *zero = nir_imm_zero(&b, 4, 32);
+   nir_def *zero = nir_imm_zero(&b, 4, 32);
    for (unsigned i = 0; i < 2; ++i) {
 
       if (!(missing_mask & (1u << i)))
@@ -615,7 +615,7 @@ d3d12_lower_primitive_id(nir_shader *shader)
 {
    nir_builder b;
    nir_function_impl *impl = nir_shader_get_entrypoint(shader);
-   nir_ssa_def *primitive_id;
+   nir_def *primitive_id;
    b = nir_builder_create(impl);
 
    nir_variable *primitive_id_var = nir_variable_create(shader, nir_var_shader_out,
@@ -648,15 +648,15 @@ lower_triangle_strip_store(nir_builder *b, nir_intrinsic_instr *intr,
    /**
     * tmp_varying[slot][min(vertex_count, 2)] = src
     */
-   nir_ssa_def *vertex_count = nir_load_var(b, vertex_count_var);
-   nir_ssa_def *index = nir_imin(b, vertex_count, nir_imm_int(b, 2));
+   nir_def *vertex_count = nir_load_var(b, vertex_count_var);
+   nir_def *index = nir_imin(b, vertex_count, nir_imm_int(b, 2));
    nir_variable *var = nir_intrinsic_get_var(intr, 0);
 
    if (var->data.mode != nir_var_shader_out)
       return;
 
    nir_deref_instr *deref = nir_build_deref_array(b, nir_build_deref_var(b, varyings[var->data.location]), index);
-   nir_ssa_def *value = nir_ssa_for_src(b, intr->src[1], intr->num_components);
+   nir_def *value = nir_ssa_for_src(b, intr->src[1], intr->num_components);
    nir_store_deref(b, deref, value, 0xf);
    nir_instr_remove(&intr->instr);
 }
@@ -682,9 +682,9 @@ lower_triangle_strip_emit_vertex(nir_builder *b, nir_intrinsic_instr *intr,
     * vertex_count++;
     */
 
-   nir_ssa_def *two = nir_imm_int(b, 2);
-   nir_ssa_def *vertex_count = nir_load_var(b, vertex_count_var);
-   nir_ssa_def *count_cmp = nir_uge(b, vertex_count, two);
+   nir_def *two = nir_imm_int(b, 2);
+   nir_def *vertex_count = nir_load_var(b, vertex_count_var);
+   nir_def *count_cmp = nir_uge(b, vertex_count, two);
    nir_if *count_check = nir_push_if(b, count_cmp);
 
    for (int j = 0; j < 3; ++j) {
@@ -798,7 +798,7 @@ is_multisampling_instr(const nir_instr *instr, const void *_data)
    return false;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_multisampling_instr(nir_builder *b, nir_instr *instr, void *_data)
 {
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
@@ -894,7 +894,7 @@ split_multistream_varying_stores(nir_builder *b, nir_instr *instr, void *_state)
       b->cursor = nir_before_instr(instr);
       unsigned mask_num_channels = (1 << var_state->subvars[subvar].num_components) - 1;
       unsigned orig_write_mask = nir_intrinsic_write_mask(intr);
-      nir_ssa_def *sub_value = nir_channels(b, intr->src[1].ssa, mask_num_channels << first_channel);
+      nir_def *sub_value = nir_channels(b, intr->src[1].ssa, mask_num_channels << first_channel);
 
       first_channel += var_state->subvars[subvar].num_components;
 
@@ -977,13 +977,13 @@ write_0(nir_builder *b, nir_deref_instr *deref)
       for (unsigned i = 0; i < glsl_get_length(deref->type); ++i)
          write_0(b, nir_build_deref_struct(b, deref, i));
    } else {
-      nir_ssa_def *scalar = nir_imm_intN_t(b, 0, glsl_get_bit_size(deref->type));
-      nir_ssa_def *scalar_arr[NIR_MAX_VEC_COMPONENTS];
+      nir_def *scalar = nir_imm_intN_t(b, 0, glsl_get_bit_size(deref->type));
+      nir_def *scalar_arr[NIR_MAX_VEC_COMPONENTS];
       unsigned num_comps = glsl_get_components(deref->type);
       unsigned writemask = (1 << num_comps) - 1;
       for (unsigned i = 0; i < num_comps; ++i)
          scalar_arr[i] = scalar;
-      nir_ssa_def *zero_val = nir_vec(b, scalar_arr, num_comps);
+      nir_def *zero_val = nir_vec(b, scalar_arr, num_comps);
       nir_store_deref(b, deref, zero_val, writemask);
    }
 }
index f80d2e6..4fd2c67 100644
@@ -36,14 +36,14 @@ struct d3d12_image_format_conversion_info;
 struct d3d12_image_format_conversion_info_arr;
 enum d3d12_state_var;
 
-nir_ssa_def *
+nir_def *
 d3d12_get_state_var(nir_builder *b,
                     enum d3d12_state_var var_enum,
                     const char *var_name,
                     const struct glsl_type *var_type,
                     nir_variable **out_var);
 
-nir_ssa_def *
+nir_def *
 d3d12_get_state_var(nir_builder *b,
                     enum d3d12_state_var var_enum,
                     const char *var_name,
index 41e7bf7..c4b14ee 100644
@@ -83,7 +83,7 @@ create_tess_ctrl_shader_variant(struct d3d12_context *ctx, struct d3d12_tcs_vari
    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_TESS_CTRL, &d3d12_screen(ctx->base.screen)->nir_options, "passthrough");
    nir_shader *nir = b.shader;
 
-   nir_ssa_def *invocation_id = nir_load_invocation_id(&b);
+   nir_def *invocation_id = nir_load_invocation_id(&b);
    uint64_t varying_mask = key->varyings->mask;
 
    while(varying_mask) {
@@ -123,8 +123,8 @@ create_tess_ctrl_shader_variant(struct d3d12_context *ctx, struct d3d12_tcs_vari
    gl_TessLevelOuter->data.compact = 1;
 
    nir_variable *state_var_inner = NULL, *state_var_outer = NULL;
-   nir_ssa_def *load_inner = d3d12_get_state_var(&b, D3D12_STATE_VAR_DEFAULT_INNER_TESS_LEVEL, "d3d12_TessLevelInner", glsl_vec_type(2), &state_var_inner);
-   nir_ssa_def *load_outer = d3d12_get_state_var(&b, D3D12_STATE_VAR_DEFAULT_OUTER_TESS_LEVEL, "d3d12_TessLevelOuter", glsl_vec4_type(), &state_var_outer);
+   nir_def *load_inner = d3d12_get_state_var(&b, D3D12_STATE_VAR_DEFAULT_INNER_TESS_LEVEL, "d3d12_TessLevelInner", glsl_vec_type(2), &state_var_inner);
+   nir_def *load_outer = d3d12_get_state_var(&b, D3D12_STATE_VAR_DEFAULT_OUTER_TESS_LEVEL, "d3d12_TessLevelOuter", glsl_vec4_type(), &state_var_outer);
 
    for (unsigned i = 0; i < 2; i++) {
       nir_deref_instr *store_idx = nir_build_deref_array_imm(&b, nir_build_deref_var(&b, gl_TessLevelInner), i);
index feca639..9e77a16 100644
@@ -413,7 +413,7 @@ get_src(struct etna_compile *c, nir_src *src)
 }
 
 static bool
-vec_dest_has_swizzle(nir_alu_instr *vec, nir_ssa_def *ssa)
+vec_dest_has_swizzle(nir_alu_instr *vec, nir_def *ssa)
 {
    for (unsigned i = 0; i < nir_dest_num_components(vec->dest.dest); i++) {
       if (vec->src[i].src.ssa != ssa)
@@ -829,7 +829,7 @@ lower_alu(struct etna_compile *c, nir_alu_instr *alu)
 
       /* resolve with single combined const src */
       if (swiz_max < 4) {
-         nir_ssa_def *def = nir_build_imm(&b, swiz_max + 1, 32, value);
+         nir_def *def = nir_build_imm(&b, swiz_max + 1, 32, value);
 
          for (unsigned i = 0; i < info->num_inputs; i++) {
             nir_const_value *cv = get_alu_cv(&alu->src[i]);
@@ -855,7 +855,7 @@ lower_alu(struct etna_compile *c, nir_alu_instr *alu)
          if (num_const == 1)
             continue;
 
-         nir_ssa_def *mov = nir_mov(&b, alu->src[i].src.ssa);
+         nir_def *mov = nir_mov(&b, alu->src[i].src.ssa);
          nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(mov));
       }
       return;
@@ -875,10 +875,10 @@ lower_alu(struct etna_compile *c, nir_alu_instr *alu)
     * are constant)
     */
    if (num_components > 1) {
-      nir_ssa_def *def = nir_build_imm(&b, num_components, 32, value);
+      nir_def *def = nir_build_imm(&b, num_components, 32, value);
 
       if (num_components == info->num_inputs) {
-         nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, def);
+         nir_def_rewrite_uses(&alu->dest.dest.ssa, def);
          nir_instr_remove(&alu->instr);
          return;
       }
@@ -895,7 +895,7 @@ lower_alu(struct etna_compile *c, nir_alu_instr *alu)
 
    unsigned finished_write_mask = 0;
    for (unsigned i = 0; i < nir_dest_num_components(alu->dest.dest); i++) {
-      nir_ssa_def *ssa = alu->src[i].src.ssa;
+      nir_def *ssa = alu->src[i].src.ssa;
 
       /* check that vecN instruction is only user of this */
       bool need_mov = false;
@@ -975,9 +975,9 @@ emit_shader(struct etna_compile *c, unsigned *num_temps, unsigned *num_consts)
                value[i] = UNIFORM(base * 4 + i);
 
             b.cursor = nir_after_instr(instr);
-            nir_ssa_def *def = nir_build_imm(&b, intr->dest.ssa.num_components, 32, value);
+            nir_def *def = nir_build_imm(&b, intr->dest.ssa.num_components, 32, value);
 
-            nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
+            nir_def_rewrite_uses(&intr->dest.ssa, def);
             nir_instr_remove(instr);
          } break;
          default:
index 90925d3..4f4bb75 100644
@@ -144,7 +144,7 @@ src_index(nir_function_impl *impl, nir_src *src)
    nir_intrinsic_instr *load = nir_load_reg_for_def(src->ssa);
 
    if (load) {
-      nir_ssa_def *reg = load->src[0].ssa;
+      nir_def *reg = load->src[0].ssa;
       ASSERTED nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
       assert(nir_intrinsic_base(load) == 0);
       assert(nir_intrinsic_num_array_elems(decl) == 0);
@@ -162,7 +162,7 @@ dest_index(nir_function_impl *impl, nir_dest *dest)
    nir_intrinsic_instr *store = nir_store_reg_for_def(&dest->ssa);
 
    if (store) {
-      nir_ssa_def *reg = store->src[1].ssa;
+      nir_def *reg = store->src[1].ssa;
       ASSERTED nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
       assert(nir_intrinsic_base(store) == 0);
       assert(nir_intrinsic_num_array_elems(decl) == 0);
@@ -202,7 +202,7 @@ real_dest(nir_dest *dest, unsigned *swiz, unsigned *mask)
    if (!dest)
       return dest;
 
-   bool can_bypass_src = !nir_ssa_def_used_by_if(&dest->ssa);
+   bool can_bypass_src = !nir_def_used_by_if(&dest->ssa);
    nir_instr *p_instr = dest->ssa.parent_instr;
 
    /* if used by a vecN, the "real" destination becomes the vecN destination
@@ -236,7 +236,7 @@ real_dest(nir_dest *dest, unsigned *swiz, unsigned *mask)
       case nir_op_vec2:
       case nir_op_vec3:
       case nir_op_vec4:
-         assert(!nir_ssa_def_used_by_if(&dest->ssa));
+         assert(!nir_def_used_by_if(&dest->ssa));
          nir_foreach_use(use_src, &dest->ssa)
             assert(use_src->parent_instr == instr);
 
@@ -250,7 +250,7 @@ real_dest(nir_dest *dest, unsigned *swiz, unsigned *mask)
          default:
             continue;
          }
-         if (nir_ssa_def_used_by_if(&dest->ssa) ||
+         if (nir_def_used_by_if(&dest->ssa) ||
              list_length(&dest->ssa.uses) > 1)
             continue;
 
index 57e1c8d..b981893 100644
@@ -49,11 +49,11 @@ etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
 
                   b.cursor = nir_after_instr(instr);
 
-                  nir_ssa_def *ssa = nir_ine_imm(&b, &intr->dest.ssa, 0);
+                  nir_def *ssa = nir_ine_imm(&b, &intr->dest.ssa, 0);
                   if (v->key.front_ccw)
                      nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;
 
-                  nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
+                  nir_def_rewrite_uses_after(&intr->dest.ssa,
                                                  ssa,
                                                  ssa->parent_instr);
                } break;
@@ -70,7 +70,7 @@ etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
 
                   b.cursor = nir_before_instr(instr);
 
-                  nir_ssa_def *ssa = nir_mov(&b, intr->src[1].ssa);
+                  nir_def *ssa = nir_mov(&b, intr->src[1].ssa);
                   nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
                   alu->src[0].swizzle[0] = 2;
                   alu->src[0].swizzle[2] = 0;
@@ -164,7 +164,7 @@ etna_lower_alu_impl(nir_function_impl *impl, bool has_new_transcendentals)
          if (alu->op == nir_op_fsin || alu->op == nir_op_fcos) {
             b.cursor = nir_before_instr(instr);
 
-            nir_ssa_def *imm = has_new_transcendentals ?
+            nir_def *imm = has_new_transcendentals ?
                nir_imm_float(&b, 1.0 / M_PI) :
                nir_imm_float(&b, 2.0 / M_PI);
 
@@ -178,7 +178,7 @@ etna_lower_alu_impl(nir_function_impl *impl, bool has_new_transcendentals)
          if (has_new_transcendentals && (
              alu->op == nir_op_fdiv || alu->op == nir_op_flog2 ||
              alu->op == nir_op_fsin || alu->op == nir_op_fcos)) {
-            nir_ssa_def *ssa = &alu->dest.dest.ssa;
+            nir_def *ssa = &alu->dest.dest.ssa;
 
             assert(ssa->num_components == 1);
 
@@ -193,7 +193,7 @@ etna_lower_alu_impl(nir_function_impl *impl, bool has_new_transcendentals)
 
             nir_instr_insert_after(instr, &mul->instr);
 
-            nir_ssa_def_rewrite_uses_after(ssa, &mul->dest.dest.ssa,
+            nir_def_rewrite_uses_after(ssa, &mul->dest.dest.ssa,
                                            &mul->instr);
          }
       }
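
Taken together, the two hunks above bracket the hardware transcendentals:
the input to fsin/fcos is prescaled (2/pi on older parts, 1/pi when the new
transcendental unit is present) and, on the new unit, a multiply is inserted
after the op to scale the result back. The prescale as plain C:

   #include <math.h>

   /* the nir_fmul inserted before each fsin/fcos */
   static float
   trig_prescale(float radians, int has_new_transcendentals)
   {
      return radians * (float)(has_new_transcendentals ? 1.0 / M_PI
                                                       : 2.0 / M_PI);
   }
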
index a081567..1bdae32 100644
@@ -96,7 +96,7 @@ nir_lower_to_source_mods_instr(nir_builder *b, nir_instr *instr,
          alu->src[i].swizzle[j] = parent->src[0].swizzle[alu->src[i].swizzle[j]];
       }
 
-      if (nir_ssa_def_is_unused(&parent->dest.dest.ssa))
+      if (nir_def_is_unused(&parent->dest.dest.ssa))
          nir_instr_remove(&parent->instr);
 
       progress = true;
index 3e15fa3..f638860 100644
@@ -18,10 +18,10 @@ lower_txs(nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    b->cursor = nir_instr_remove(instr);
 
-   nir_ssa_def *idx = nir_imm_int(b, tex->texture_index);
-   nir_ssa_def *sizes = nir_load_texture_size_etna(b, 32, idx);
+   nir_def *idx = nir_imm_int(b, tex->texture_index);
+   nir_def *sizes = nir_load_texture_size_etna(b, 32, idx);
 
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, sizes);
+   nir_def_rewrite_uses(&tex->dest.ssa, sizes);
 
    return true;
 }
index 2c86f68..7ef827f 100644
@@ -50,26 +50,26 @@ is_const_ubo(const nir_instr *instr, const void *_data)
    return true;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_ubo_to_uniform(nir_builder *b, nir_instr *instr, void *_data)
 {
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    b->cursor = nir_before_instr(instr);
 
    /* Undo the operations done in nir_lower_uniforms_to_ubo. */
-   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, intr->src[1], 1);
-   nir_ssa_def *range_base = nir_imm_int(b, nir_intrinsic_range_base(intr));
+   nir_def *ubo_offset = nir_ssa_for_src(b, intr->src[1], 1);
+   nir_def *range_base = nir_imm_int(b, nir_intrinsic_range_base(intr));
 
-   nir_ssa_def *uniform_offset =
+   nir_def *uniform_offset =
       nir_ushr_imm(b, nir_isub(b, ubo_offset, range_base), 4);
 
-   nir_ssa_def *uniform =
+   nir_def *uniform =
       nir_load_uniform(b, intr->num_components, intr->dest.ssa.bit_size, uniform_offset,
                        .base = nir_intrinsic_range_base(intr) / 16,
                        .range = nir_intrinsic_range(intr) / 16,
                        .dest_type = nir_type_float32);
 
-       nir_ssa_def_rewrite_uses(&intr->dest.ssa, uniform);
+       nir_def_rewrite_uses(&intr->dest.ssa, uniform);
 
    return uniform;
 }
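
The shift above is a unit conversion: nir_lower_uniforms_to_ubo produced byte
offsets with range_base folded in, while load_uniform here addresses vec4
slots (note the base and range divided by 16), so the pass subtracts
range_base and divides by 16. As arithmetic:

   #include <stdint.h>

   /* UBO byte offset -> vec4-unit uniform offset */
   static uint32_t
   uniform_offset(uint32_t ubo_byte_offset, uint32_t range_base)
   {
      return (ubo_byte_offset - range_base) >> 4; /* 16 bytes per vec4 */
   }
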
index 24342b2..8aa2a09 100644
@@ -105,7 +105,7 @@ nir_lower_ubo_test::count_intrinsic(nir_intrinsic_op op)
 
 TEST_F(nir_lower_ubo_test, nothing_to_lower)
 {
-   nir_ssa_def *offset = nir_imm_int(&b, 4);
+   nir_def *offset = nir_imm_int(&b, 4);
 
    nir_load_uniform(&b, 1, 32, offset);
 
@@ -120,7 +120,7 @@ TEST_F(nir_lower_ubo_test, nothing_to_lower)
 
 TEST_F(nir_lower_ubo_test, basic)
 {
-   nir_ssa_def *offset = nir_imm_int(&b, 4);
+   nir_def *offset = nir_imm_int(&b, 4);
    nir_load_uniform(&b, 1, 32, offset);
 
    nir_lower_uniforms_to_ubo(b.shader, false, false);
@@ -137,8 +137,8 @@ TEST_F(nir_lower_ubo_test, basic)
 
 TEST_F(nir_lower_ubo_test, index_not_null)
 {
-   nir_ssa_def *index = nir_imm_int(&b, 1);
-   nir_ssa_def *offset = nir_imm_int(&b, 4);
+   nir_def *index = nir_imm_int(&b, 1);
+   nir_def *offset = nir_imm_int(&b, 4);
 
    nir_load_ubo(&b, 1, 32, index, offset, .align_mul = 16, .align_offset = 0, .range_base = 0, .range = 8);
 
@@ -151,9 +151,9 @@ TEST_F(nir_lower_ubo_test, index_not_null)
 
 TEST_F(nir_lower_ubo_test, indirect_index)
 {
-   nir_ssa_def *one = nir_imm_int(&b, 1);
-   nir_ssa_def *index = nir_fadd(&b, one, one);
-   nir_ssa_def *offset = nir_imm_int(&b, 4);
+   nir_def *one = nir_imm_int(&b, 1);
+   nir_def *index = nir_fadd(&b, one, one);
+   nir_def *offset = nir_imm_int(&b, 4);
 
    nir_load_ubo(&b, 1, 32, index, offset, .align_mul = 16, .align_offset = 0, .range_base = 0, .range = 8);
 
@@ -168,9 +168,9 @@ TEST_F(nir_lower_ubo_test, indirect_index)
 
 TEST_F(nir_lower_ubo_test, indirect_offset)
 {
-   nir_ssa_def *one = nir_imm_int(&b, 1);
-   nir_ssa_def *index = nir_imm_int(&b, 0);
-   nir_ssa_def *offset = nir_fadd(&b, one, one);
+   nir_def *one = nir_imm_int(&b, 1);
+   nir_def *index = nir_imm_int(&b, 0);
+   nir_def *offset = nir_fadd(&b, one, one);
 
    nir_load_ubo(&b, 1, 32, index, offset, .align_mul = 16, .align_offset = 0, .range_base = 0, .range = 8);
 
index be1f506..0d844d4 100644 (file)
@@ -818,7 +818,7 @@ setup_input(struct ir2_context *ctx, nir_variable *in)
 }
 
 static void
-emit_undef(struct ir2_context *ctx, nir_ssa_undef_instr *undef)
+emit_undef(struct ir2_context *ctx, nir_undef_instr *undef)
 {
    /* TODO we don't want to emit anything for undefs */
 
index 2c0c4fe..ae845e2 100644 (file)
@@ -61,7 +61,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
    }
 
    unsigned set = ir3_shader_descriptor_set(b->shader->info.stage);
-   nir_ssa_def *src = nir_ssa_for_src(b, intr->src[buffer_src], 1);
+   nir_def *src = nir_ssa_for_src(b, intr->src[buffer_src], 1);
    src = nir_iadd_imm(b, src, desc_offset);
    /* An out-of-bounds index into an SSBO/image array can cause a GPU fault
     * on access to the descriptor (I don't see any hw mechanism to bound the
@@ -71,7 +71,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
     * can avoid the dmesg spam and users thinking this is a driver bug:
     */
    src = nir_umod_imm(b, src, IR3_BINDLESS_DESC_COUNT);
-   nir_ssa_def *bindless = nir_bindless_resource_ir3(b, 32, src, set);
+   nir_def *bindless = nir_bindless_resource_ir3(b, 32, src, set);
    nir_instr_rewrite_src_ssa(&intr->instr, &intr->src[buffer_src], bindless);
 
    return true;
index b54233c..f323628 100644 (file)
@@ -224,19 +224,19 @@ iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
                          .mocs = iris_mocs(res->bo, &screen->isl_dev, usage));
 }
 
-static nir_ssa_def *
+static nir_def *
 get_aoa_deref_offset(nir_builder *b,
                      nir_deref_instr *deref,
                      unsigned elem_size)
 {
    unsigned array_size = elem_size;
-   nir_ssa_def *offset = nir_imm_int(b, 0);
+   nir_def *offset = nir_imm_int(b, 0);
 
    while (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
 
       /* This level's element size is the previous level's array size */
-      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      nir_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
       assert(deref->arr.index.ssa);
       offset = nir_iadd(b, offset,
                            nir_imul_imm(b, index, array_size));
@@ -282,7 +282,7 @@ iris_lower_storage_image_derefs(nir_shader *nir)
             nir_variable *var = nir_deref_instr_get_variable(deref);
 
             b.cursor = nir_before_instr(&intrin->instr);
-            nir_ssa_def *index =
+            nir_def *index =
                nir_iadd_imm(&b, get_aoa_deref_offset(&b, deref, 1),
                                 var->data.driver_location);
             nir_rewrite_image_intrinsic(intrin, index, false);
@@ -464,7 +464,7 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
 
    nir_builder b = nir_builder_at(nir_before_block(nir_start_block(impl)));
 
-   nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
+   nir_def *temp_ubo_name = nir_undef(&b, 1, 32);
 
    /* Turn system value intrinsics into uniforms */
    nir_foreach_block(block, impl) {
@@ -473,13 +473,13 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
             continue;
 
          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-         nir_ssa_def *offset;
+         nir_def *offset;
 
          switch (intrin->intrinsic) {
          case nir_intrinsic_load_base_workgroup_id: {
             /* GL doesn't have a concept of base workgroup */
             b.cursor = nir_instr_remove(&intrin->instr);
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+            nir_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_imm_zero(&b, 3, 32));
             continue;
          }
@@ -493,7 +493,7 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
              */
             b.cursor = nir_instr_remove(&intrin->instr);
 
-            nir_ssa_def *offset =
+            nir_def *offset =
                nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
                                 nir_intrinsic_base(intrin));
 
@@ -508,16 +508,16 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
              */
             assert(IRIS_MEMZONE_SHADER_START >> 32 == 0ull);
 
-            nir_ssa_def *const_data_addr =
+            nir_def *const_data_addr =
                nir_iadd(&b, nir_load_reloc_const_intel(&b, BRW_SHADER_RELOC_CONST_DATA_ADDR_LOW), offset);
 
-            nir_ssa_def *data =
+            nir_def *data =
                nir_load_global_constant(&b, nir_u2u64(&b, const_data_addr),
                                         load_align,
                                         intrin->dest.ssa.num_components,
                                         intrin->dest.ssa.bit_size);
 
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+            nir_def_rewrite_uses(&intrin->dest.ssa,
                                  data);
             continue;
          }
@@ -662,7 +662,7 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
             continue;
          }
 
-         nir_ssa_def *load =
+         nir_def *load =
             nir_load_ubo(&b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size,
                          temp_ubo_name, offset,
                          .align_mul = 4,
@@ -670,7 +670,7 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
                          .range_base = 0,
                          .range = ~0);
 
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+         nir_def_rewrite_uses(&intrin->dest.ssa,
                               load);
          nir_instr_remove(instr);
       }
@@ -707,7 +707,7 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
             b.cursor = nir_before_instr(instr);
 
             if (load->src[0].ssa == temp_ubo_name) {
-               nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
+               nir_def *imm = nir_imm_int(&b, sysval_cbuf_index);
                nir_instr_rewrite_src(instr, &load->src[0],
                                      nir_src_for_ssa(imm));
             }
@@ -842,7 +842,7 @@ rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
    assert(bt->sizes[group] > 0);
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *bti;
+   nir_def *bti;
    if (nir_src_is_const(*src)) {
       uint32_t index = nir_src_as_uint(*src);
       bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
index 5e551bf..b702d72 100644 (file)
@@ -38,7 +38,7 @@ gpir_reg *gpir_create_reg(gpir_compiler *comp)
    return reg;
 }
 
-static void register_node_ssa(gpir_block *block, gpir_node *node, nir_ssa_def *ssa)
+static void register_node_ssa(gpir_block *block, gpir_node *node, nir_def *ssa)
 {
    block->comp->node_for_ssa[ssa->index] = node;
    snprintf(node->name, sizeof(node->name), "ssa%d", ssa->index);
@@ -309,7 +309,7 @@ static bool gpir_emit_load_const(gpir_block *block, nir_instr *ni)
 
 static bool gpir_emit_ssa_undef(gpir_block *block, nir_instr *ni)
 {
-   gpir_error("nir_ssa_undef_instr is not supported\n");
+   gpir_error("nir_undef_instr is not supported\n");
    return false;
 }
 
index 87eb27d..0883e35 100644 (file)
@@ -25,7 +25,7 @@
 #include "nir_builder.h"
 #include "lima_ir.h"
 
-static nir_ssa_def *
+static nir_def *
 get_proj_index(nir_instr *coord_instr, nir_instr *proj_instr,
                int coord_components, int *proj_idx)
 {
@@ -41,8 +41,8 @@ get_proj_index(nir_instr *coord_instr, nir_instr *proj_instr,
        proj_alu->op != nir_op_mov)
       return NULL;
 
-   nir_ssa_def *coord_src_ssa = coord_alu->src[0].src.ssa;
-   nir_ssa_def *proj_src_ssa = proj_alu->src[0].src.ssa;
+   nir_def *coord_src_ssa = coord_alu->src[0].src.ssa;
+   nir_def *proj_src_ssa = proj_alu->src[0].src.ssa;
 
    if (coord_src_ssa != proj_src_ssa)
       return NULL;
@@ -101,16 +101,16 @@ lima_nir_lower_txp_instr(nir_builder *b, nir_instr *instr,
     * step back and use load_input SSA instead of mov as a source for
     * newly constructed vec4
     */
-   nir_ssa_def *proj_ssa = nir_ssa_for_src(b, tex->src[proj_idx].src, 1);
-   nir_ssa_def *coords_ssa = nir_ssa_for_src(b, tex->src[coords_idx].src,
+   nir_def *proj_ssa = nir_ssa_for_src(b, tex->src[proj_idx].src, 1);
+   nir_def *coords_ssa = nir_ssa_for_src(b, tex->src[coords_idx].src,
                                          nir_tex_instr_src_size(tex, coords_idx));
 
    int proj_idx_in_vec = -1;
-   nir_ssa_def *load_input = get_proj_index(coords_ssa->parent_instr,
+   nir_def *load_input = get_proj_index(coords_ssa->parent_instr,
                                         proj_ssa->parent_instr,
                                         tex->coord_components,
                                         &proj_idx_in_vec);
-   nir_ssa_def *combined;
+   nir_def *combined;
    if (load_input && proj_idx_in_vec == 3) {
       unsigned xyzw[] = { 0, 1, 2, 3 };
       combined = nir_swizzle(b, load_input, xyzw, 4);
index ab56628..29a554e 100644 (file)
@@ -31,7 +31,7 @@ lower_load_uniform_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *loads[4];
+   nir_def *loads[4];
    for (unsigned i = 0; i < intr->num_components; i++) {
       nir_intrinsic_instr *chan_intr =
          nir_intrinsic_instr_create(b->shader, intr->intrinsic);
@@ -51,7 +51,7 @@ lower_load_uniform_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
       loads[i] = &chan_intr->dest.ssa;
    }
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa,
+   nir_def_rewrite_uses(&intr->dest.ssa,
                         nir_vec(b, loads, intr->num_components));
    nir_instr_remove(&intr->instr);
 }
index 5e38749..dcc4587 100644 (file)
@@ -38,7 +38,7 @@ lima_nir_split_load_input_instr(nir_builder *b,
    if (alu->op != nir_op_mov)
       return false;
 
-   nir_ssa_def *ssa = alu->src[0].src.ssa;
+   nir_def *ssa = alu->src[0].src.ssa;
    if (ssa->parent_instr->type != nir_instr_type_intrinsic)
       return false;
 
@@ -80,7 +80,7 @@ lima_nir_split_load_input_instr(nir_builder *b,
    nir_src_copy(&new_intrin->src[0], &intrin->src[0], &new_intrin->instr);
 
    nir_builder_instr_insert(b, &new_intrin->instr);
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
+   nir_def_rewrite_uses(&alu->dest.dest.ssa,
                         &new_intrin->dest.ssa);
    nir_instr_remove(&alu->instr);
    return true;
index cb4a865..c347720 100644 (file)
@@ -36,7 +36,7 @@
  * down but won't split it.
  */
 
-static nir_ssa_def *
+static nir_def *
 clone_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin)
 {
    nir_intrinsic_instr *new_intrin =
@@ -63,12 +63,12 @@ replace_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin)
       struct hash_entry *entry =
          _mesa_hash_table_search(visited_instrs, src->parent_instr);
       if (entry && (src->parent_instr->type != nir_instr_type_phi)) {
-         nir_ssa_def *def = entry->data;
+         nir_def *def = entry->data;
          nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(def));
          continue;
       }
       b->cursor = nir_before_src(src);
-      nir_ssa_def *new = clone_intrinsic(b, intrin);
+      nir_def *new = clone_intrinsic(b, intrin);
       nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(new));
       _mesa_hash_table_insert(visited_instrs, src->parent_instr, new);
    }
@@ -92,12 +92,12 @@ replace_load_const(nir_builder *b, nir_load_const_instr *load_const)
       struct hash_entry *entry =
          _mesa_hash_table_search(visited_instrs, src->parent_instr);
       if (entry && (src->parent_instr->type != nir_instr_type_phi)) {
-         nir_ssa_def *def = entry->data;
+         nir_def *def = entry->data;
          nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(def));
          continue;
       }
       b->cursor = nir_before_src(src);
-      nir_ssa_def *new = nir_build_imm(b, load_const->def.num_components,
+      nir_def *new = nir_build_imm(b, load_const->def.num_components,
                                    load_const->def.bit_size,
                                    load_const->value);
       nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(new));
index 7626228..02310df 100644 (file)
@@ -34,7 +34,7 @@
 
 #include "ppir.h"
 
-static void *ppir_node_create_ssa(ppir_block *block, ppir_op op, nir_ssa_def *ssa)
+static void *ppir_node_create_ssa(ppir_block *block, ppir_op op, nir_def *ssa)
 {
    ppir_node *node = ppir_node_create(block, op, ssa->index, 0);
    if (!node)
@@ -53,7 +53,7 @@ static void *ppir_node_create_ssa(ppir_block *block, ppir_op op, nir_ssa_def *ss
 }
 
 static void *ppir_node_create_reg(ppir_block *block, ppir_op op,
-                                  nir_ssa_def *def, unsigned mask)
+                                  nir_def *def, unsigned mask)
 {
    ppir_node *node = ppir_node_create(block, op, def->index, mask);
    if (!node)
@@ -471,7 +471,7 @@ static bool ppir_emit_load_const(ppir_block *block, nir_instr *ni)
 
 static bool ppir_emit_ssa_undef(ppir_block *block, nir_instr *ni)
 {
-   nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(ni);
+   nir_undef_instr *undef = nir_instr_as_ssa_undef(ni);
    ppir_node *node = ppir_node_create_ssa(block, ppir_op_undef, &undef->def);
    if (!node)
       return false;
index b8b07cc..faadab1 100644 (file)
@@ -877,7 +877,7 @@ nv50_blitter_make_fp(struct pipe_context *pipe,
    coord_var->data.location = VARYING_SLOT_VAR0;
    coord_var->data.interpolation = INTERP_MODE_NOPERSPECTIVE;
 
-   nir_ssa_def *coord = nir_load_var(&b, coord_var);
+   nir_def *coord = nir_load_var(&b, coord_var);
    if (ptarg == PIPE_TEXTURE_1D_ARRAY) {
       /* Adjust coordinates. Depth is in z, but TEX expects it to be in y. */
       coord = nir_channels(&b, coord, TGSI_WRITEMASK_XZ);
@@ -891,7 +891,7 @@ nv50_blitter_make_fp(struct pipe_context *pipe,
    const struct glsl_type *sampler_type =
       glsl_sampler_type(sampler_dim, false, is_array, GLSL_TYPE_FLOAT);
 
-   nir_ssa_def *s = NULL;
+   nir_def *s = NULL;
    if (tex_s) {
       nir_variable *sampler =
          nir_variable_create(b.shader, nir_var_uniform,
@@ -904,7 +904,7 @@ nv50_blitter_make_fp(struct pipe_context *pipe,
       s = nir_channel(&b, s, 0);
    }
 
-   nir_ssa_def *rgba = NULL, *z = NULL;
+   nir_def *rgba = NULL, *z = NULL;
    if (tex_rgbaz) {
       nir_variable *sampler =
          nir_variable_create(b.shader, nir_var_uniform,
@@ -923,14 +923,14 @@ nv50_blitter_make_fp(struct pipe_context *pipe,
    }
 
    /* handle conversions */
-   nir_ssa_def *out_ssa;
+   nir_def *out_ssa;
    nir_component_mask_t out_mask = 0;
    if (cvt_un8) {
       if (tex_s) {
          s = nir_i2f32(&b, s);
          s = nir_fmul_imm(&b, s, 1.0f / 0xff);
       } else {
-         s = nir_ssa_undef(&b, 1, 32);
+         s = nir_undef(&b, 1, 32);
       }
 
       if (tex_rgbaz) {
@@ -944,7 +944,7 @@ nv50_blitter_make_fp(struct pipe_context *pipe,
                                               1.0f / 0x00ff00,
                                               1.0f / 0xff0000));
       } else {
-         z = nir_ssa_undef(&b, 3, 32);
+         z = nir_undef(&b, 3, 32);
       }
 
       if (mode == NV50_BLIT_MODE_Z24S8 ||
@@ -973,8 +973,8 @@ nv50_blitter_make_fp(struct pipe_context *pipe,
          out_ssa = rgba;
          out_mask |= TGSI_WRITEMASK_XYZW;
       } else {
-         out_ssa = nir_vec2(&b, z ? z : nir_ssa_undef(&b, 1, 32),
-                                s ? s : nir_ssa_undef(&b, 1, 32));
+         out_ssa = nir_vec2(&b, z ? z : nir_undef(&b, 1, 32),
+                                s ? s : nir_undef(&b, 1, 32));
          if (tex_rgbaz) out_mask |= TGSI_WRITEMASK_X;
          if (tex_s)     out_mask |= TGSI_WRITEMASK_Y;
       }
index f670137..af7e889 100644 (file)
@@ -168,12 +168,12 @@ lower(nir_builder *b, nir_instr *instr, void *data)
    unsigned ubo_offset = (vec4_index * 16) + offset;
 
    b->cursor = nir_after_instr(instr);
-   nir_ssa_def *val =
+   nir_def *val =
       nir_load_ubo(b, nir_dest_num_components(*dest), nir_dest_bit_size(*dest),
                    nir_imm_int(b, ctx->sysval_ubo), nir_imm_int(b, ubo_offset),
                    .align_mul = nir_dest_bit_size(*dest) / 8, .align_offset = 0,
                    .range_base = offset, .range = nir_dest_bit_size(*dest) / 8);
-   nir_ssa_def_rewrite_uses(&dest->ssa, val);
+   nir_def_rewrite_uses(&dest->ssa, val);
    return true;
 }
 
index e580928..1339380 100644 (file)
@@ -949,27 +949,27 @@ public:
 
 private:
    bool filter(const nir_instr *instr) const override;
-   nir_ssa_def *lower(nir_instr *instr) override;
+   nir_def *lower(nir_instr *instr) override;
 
-   nir_ssa_def *lower_tex(nir_tex_instr *tex);
-   nir_ssa_def *lower_txf(nir_tex_instr *tex);
-   nir_ssa_def *lower_tg4(nir_tex_instr *tex);
-   nir_ssa_def *lower_txf_ms(nir_tex_instr *tex);
-   nir_ssa_def *lower_txf_ms_direct(nir_tex_instr *tex);
+   nir_def *lower_tex(nir_tex_instr *tex);
+   nir_def *lower_txf(nir_tex_instr *tex);
+   nir_def *lower_tg4(nir_tex_instr *tex);
+   nir_def *lower_txf_ms(nir_tex_instr *tex);
+   nir_def *lower_txf_ms_direct(nir_tex_instr *tex);
 
-   nir_ssa_def *
+   nir_def *
    prepare_coord(nir_tex_instr *tex, int& unnormalized_mask, int& used_coord_mask);
    int get_src_coords(nir_tex_instr *tex,
-                      std::array<nir_ssa_def *, 4>& coord,
+                      std::array<nir_def *, 4>& coord,
                       bool round_array_index);
-   nir_ssa_def *prep_src(std::array<nir_ssa_def *, 4>& coord, int& used_coord_mask);
-   nir_ssa_def *
-   finalize(nir_tex_instr *tex, nir_ssa_def *backend1, nir_ssa_def *backend2);
+   nir_def *prep_src(std::array<nir_def *, 4>& coord, int& used_coord_mask);
+   nir_def *
+   finalize(nir_tex_instr *tex, nir_def *backend1, nir_def *backend2);
 
-   nir_ssa_def *get_undef();
+   nir_def *get_undef();
 
    amd_gfx_level m_chip_class;
-   nir_ssa_def *m_undef {nullptr};
+   nir_def *m_undef {nullptr};
 };
 
 bool
@@ -1008,14 +1008,14 @@ LowerTexToBackend::filter(const nir_instr *instr) const
    return nir_tex_instr_src_index(tex, nir_tex_src_backend1) == -1;
 }
 
-nir_ssa_def *LowerTexToBackend::get_undef()
+nir_def *LowerTexToBackend::get_undef()
 {
    if (!m_undef)
-      m_undef = nir_ssa_undef(b, 1, 32);
+      m_undef = nir_undef(b, 1, 32);
    return m_undef;
 }
 
-nir_ssa_def *
+nir_def *
 LowerTexToBackend::lower(nir_instr *instr)
 {
    b->cursor = nir_before_instr(instr);
@@ -1041,23 +1041,23 @@ LowerTexToBackend::lower(nir_instr *instr)
    }
 }
 
-nir_ssa_def *
+nir_def *
 LowerTexToBackend::lower_tex(nir_tex_instr *tex)
 {
    int unnormalized_mask = 0;
    int used_coord_mask = 0;
 
-   nir_ssa_def *backend1 = prepare_coord(tex, unnormalized_mask, used_coord_mask);
+   nir_def *backend1 = prepare_coord(tex, unnormalized_mask, used_coord_mask);
 
-   nir_ssa_def *backend2 = nir_imm_ivec4(b, used_coord_mask, unnormalized_mask, 0, 0);
+   nir_def *backend2 = nir_imm_ivec4(b, used_coord_mask, unnormalized_mask, 0, 0);
 
    return finalize(tex, backend1, backend2);
 }
 
-nir_ssa_def *
+nir_def *
 LowerTexToBackend::lower_txf(nir_tex_instr *tex)
 {
-   std::array<nir_ssa_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
+   std::array<nir_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
 
    get_src_coords(tex, new_coord, false);
 
@@ -1065,17 +1065,17 @@ LowerTexToBackend::lower_txf(nir_tex_instr *tex)
    new_coord[3] = tex->src[lod_idx].src.ssa;
 
    int used_coord_mask = 0;
-   nir_ssa_def *backend1 = prep_src(new_coord, used_coord_mask);
-   nir_ssa_def *backend2 =
+   nir_def *backend1 = prep_src(new_coord, used_coord_mask);
+   nir_def *backend2 =
       nir_imm_ivec4(b, used_coord_mask, tex->is_array ? 0x4 : 0, 0, 0);
 
    return finalize(tex, backend1, backend2);
 }
 
-nir_ssa_def *
+nir_def *
 LowerTexToBackend::lower_tg4(nir_tex_instr *tex)
 {
-   std::array<nir_ssa_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
+   std::array<nir_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
 
    get_src_coords(tex, new_coord, false);
    uint32_t dest_swizzle =
@@ -1083,17 +1083,17 @@ LowerTexToBackend::lower_tg4(nir_tex_instr *tex)
 
    int used_coord_mask = 0;
    int unnormalized_mask = 0;
-   nir_ssa_def *backend1 = prepare_coord(tex, unnormalized_mask, used_coord_mask);
+   nir_def *backend1 = prepare_coord(tex, unnormalized_mask, used_coord_mask);
 
-   nir_ssa_def *backend2 =
+   nir_def *backend2 =
       nir_imm_ivec4(b, used_coord_mask, unnormalized_mask, tex->component, dest_swizzle);
    return finalize(tex, backend1, backend2);
 }
 
-nir_ssa_def *
+nir_def *
 LowerTexToBackend::lower_txf_ms(nir_tex_instr *tex)
 {
-   std::array<nir_ssa_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
+   std::array<nir_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
 
    get_src_coords(tex, new_coord, false);
 
@@ -1112,8 +1112,8 @@ LowerTexToBackend::lower_txf_ms(nir_tex_instr *tex)
    nir_ssa_dest_init(&fetch_sample->instr, &fetch_sample->dest, 4, 32);
 
    int used_coord_mask = 0;
-   nir_ssa_def *backend1 = prep_src(new_coord, used_coord_mask);
-   nir_ssa_def *backend2 = nir_imm_ivec4(b, used_coord_mask, 0xf, 1, 0);
+   nir_def *backend1 = prep_src(new_coord, used_coord_mask);
+   nir_def *backend2 = nir_imm_ivec4(b, used_coord_mask, 0xf, 1, 0);
 
    nir_builder_instr_insert(b, &fetch_sample->instr);
    finalize(fetch_sample, backend1, backend2);
@@ -1124,15 +1124,15 @@ LowerTexToBackend::lower_txf_ms(nir_tex_instr *tex)
                                         nir_ishl_imm(b, new_coord[3], 2)),
                                15);
 
-   nir_ssa_def *backend1b = prep_src(new_coord, used_coord_mask);
-   nir_ssa_def *backend2b = nir_imm_ivec4(b, used_coord_mask, 0, 0, 0);
+   nir_def *backend1b = prep_src(new_coord, used_coord_mask);
+   nir_def *backend2b = nir_imm_ivec4(b, used_coord_mask, 0, 0, 0);
    return finalize(tex, backend1b, backend2b);
 }
 
-nir_ssa_def *
+nir_def *
 LowerTexToBackend::lower_txf_ms_direct(nir_tex_instr *tex)
 {
-   std::array<nir_ssa_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
+   std::array<nir_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
 
    get_src_coords(tex, new_coord, false);
 
@@ -1140,16 +1140,16 @@ LowerTexToBackend::lower_txf_ms_direct(nir_tex_instr *tex)
    new_coord[3] = tex->src[ms_index].src.ssa;
 
    int used_coord_mask = 0;
-   nir_ssa_def *backend1 = prep_src(new_coord, used_coord_mask);
-   nir_ssa_def *backend2 = nir_imm_ivec4(b, used_coord_mask, 0, 0, 0);
+   nir_def *backend1 = prep_src(new_coord, used_coord_mask);
+   nir_def *backend2 = nir_imm_ivec4(b, used_coord_mask, 0, 0, 0);
 
    return finalize(tex, backend1, backend2);
 }
 
-nir_ssa_def *
+nir_def *
 LowerTexToBackend::finalize(nir_tex_instr *tex,
-                            nir_ssa_def *backend1,
-                            nir_ssa_def *backend2)
+                            nir_def *backend1,
+                            nir_def *backend2)
 {
    nir_tex_instr_add_src(tex, nir_tex_src_backend1, nir_src_for_ssa(backend1));
    nir_tex_instr_add_src(tex, nir_tex_src_backend2, nir_src_for_ssa(backend2));
@@ -1169,8 +1169,8 @@ LowerTexToBackend::finalize(nir_tex_instr *tex,
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
-nir_ssa_def *
-LowerTexToBackend::prep_src(std::array<nir_ssa_def *, 4>& coord, int& used_coord_mask)
+nir_def *
+LowerTexToBackend::prep_src(std::array<nir_def *, 4>& coord, int& used_coord_mask)
 {
    int max_coord = 0;
    for (int i = 0; i < 4; ++i) {
@@ -1184,12 +1184,12 @@ LowerTexToBackend::prep_src(std::array<nir_ssa_def *, 4>& coord, int& used_coord
    return nir_vec(b, coord.data(), max_coord + 1);
 }
 
-nir_ssa_def *
+nir_def *
 LowerTexToBackend::prepare_coord(nir_tex_instr *tex,
                                  int& unnormalized_mask,
                                  int& used_coord_mask)
 {
-   std::array<nir_ssa_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
+   std::array<nir_def *, 4> new_coord = {nullptr, nullptr, nullptr, nullptr};
 
    unnormalized_mask = get_src_coords(tex, new_coord, true);
    used_coord_mask = 0;
@@ -1213,7 +1213,7 @@ LowerTexToBackend::prepare_coord(nir_tex_instr *tex,
 
 int
 LowerTexToBackend::get_src_coords(nir_tex_instr *tex,
-                                  std::array<nir_ssa_def *, 4>& coord,
+                                  std::array<nir_def *, 4>& coord,
                                   bool round_array_index)
 {
    int unnormalized_mask = 0;
index ee326fe..95780cf 100644 (file)
@@ -211,7 +211,7 @@ InstrFactory::process_jump(nir_jump_instr *instr, Shader& shader)
 }
 
 bool
-InstrFactory::process_undef(nir_ssa_undef_instr *undef, Shader& shader)
+InstrFactory::process_undef(nir_undef_instr *undef, Shader& shader)
 {
    for (int i = 0; i < undef->def.num_components; ++i) {
       auto dest = shader.value_factory().undef(undef->def.index, i);
index 2f57511..40967a3 100644 (file)
@@ -46,7 +46,7 @@ public:
 private:
    bool load_const(nir_load_const_instr *lc, Shader& shader);
    bool process_jump(nir_jump_instr *instr, Shader& shader);
-   bool process_undef(nir_ssa_undef_instr *undef, Shader& shader);
+   bool process_undef(nir_undef_instr *undef, Shader& shader);
 
    Instr::Pointer export_from_string(std::istream& is, bool is_last);
 
index 6ac0c41..15392a9 100644 (file)
@@ -64,7 +64,7 @@ NirLowerInstruction::filter_instr(const nir_instr *instr, const void *data)
    return me->filter(instr);
 }
 
-nir_ssa_def *
+nir_def *
 NirLowerInstruction::lower_instr(nir_builder *b, nir_instr *instr, void *data)
 {
    auto me = reinterpret_cast<NirLowerInstruction *>(data);
@@ -101,8 +101,8 @@ r600_nir_lower_scratch_address_impl(nir_builder *b, nir_intrinsic_instr *instr)
       align = instr->dest.ssa.num_components;
    }
 
-   nir_ssa_def *address = instr->src[address_index].ssa;
-   nir_ssa_def *new_address = nir_ishr_imm(b, address, 4 * align);
+   nir_def *address = instr->src[address_index].ssa;
+   nir_def *new_address = nir_ishr_imm(b, address, 4 * align);
 
    nir_instr_rewrite_src(&instr->instr,
                          &instr->src[address_index],
@@ -235,11 +235,11 @@ private:
       return nir_intrinsic_io_semantics(intr).location == VARYING_SLOT_CLIP_VERTEX;
    }
 
-   nir_ssa_def *lower(nir_instr *instr) override
+   nir_def *lower(nir_instr *instr) override
    {
 
       auto intr = nir_instr_as_intrinsic(instr);
-      nir_ssa_def *output[8] = {nullptr};
+      nir_def *output[8] = {nullptr};
 
       auto buf_id = nir_imm_int(b, R600_BUFFER_INFO_CONST_BUFFER);
 
@@ -269,7 +269,7 @@ private:
       }
       nir_intrinsic_set_base(intr, m_clipvtx);
 
-      nir_ssa_def *result = NIR_LOWER_INSTR_PROGRESS_REPLACE;
+      nir_def *result = NIR_LOWER_INSTR_PROGRESS_REPLACE;
       for (unsigned i = 0; i < m_so_info.num_outputs; ++i) {
          if (m_so_info.output[i].register_index == clip_vertex_index) {
             m_so_info.output[i].register_index = m_clipvtx;
@@ -307,7 +307,7 @@ private:
       return nir_intrinsic_base(intr) == 0;
    }
 
-   nir_ssa_def *lower(nir_instr *instr) override
+   nir_def *lower(nir_instr *instr) override
    {
       auto intr = nir_instr_as_intrinsic(instr);
       assert(intr->intrinsic == nir_intrinsic_load_ubo_vec4);
@@ -399,7 +399,7 @@ r600_lower_deref_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
 
    b->cursor = nir_before_instr(&instr->instr);
 
-   nir_ssa_def *offset = nir_imm_int(b, 0);
+   nir_def *offset = nir_imm_int(b, 0);
    for (nir_deref_instr *d = deref; d->deref_type != nir_deref_type_var;
         d = nir_deref_instr_parent(d)) {
       assert(d->deref_type == nir_deref_type_array);
@@ -519,7 +519,7 @@ r600_lower_shared_io_impl(nir_function_impl *impl)
          b.cursor = nir_before_instr(instr);
 
          if (op->intrinsic == nir_intrinsic_load_shared) {
-            nir_ssa_def *addr = op->src[0].ssa;
+            nir_def *addr = op->src[0].ssa;
 
             switch (nir_dest_num_components(op->dest)) {
             case 2: {
@@ -545,10 +545,10 @@ r600_lower_shared_io_impl(nir_function_impl *impl)
             load->src[0] = nir_src_for_ssa(addr);
             nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
                               32);
-            nir_ssa_def_rewrite_uses(&op->dest.ssa, &load->dest.ssa);
+            nir_def_rewrite_uses(&op->dest.ssa, &load->dest.ssa);
             nir_builder_instr_insert(&b, &load->instr);
          } else {
-            nir_ssa_def *addr = op->src[1].ssa;
+            nir_def *addr = op->src[1].ssa;
             for (int i = 0; i < 2; ++i) {
                unsigned test_mask = (0x3 << 2 * i);
                if (!(nir_intrinsic_write_mask(op) & test_mask))
@@ -589,7 +589,7 @@ r600_lower_shared_io(nir_shader *nir)
    return progress;
 }
 
-static nir_ssa_def *
+static nir_def *
 r600_lower_fs_pos_input_impl(nir_builder *b, nir_instr *instr, void *_options)
 {
    (void)_options;
index 97e2742..06744f7 100644 (file)
@@ -45,12 +45,12 @@ public:
 
 private:
    static bool filter_instr(const nir_instr *instr, const void *data);
-   static nir_ssa_def *lower_instr(nir_builder *b, nir_instr *instr, void *data);
+   static nir_def *lower_instr(nir_builder *b, nir_instr *instr, void *data);
 
    void set_builder(nir_builder *_b) { b = _b; }
 
    virtual bool filter(const nir_instr *instr) const = 0;
-   virtual nir_ssa_def *lower(nir_instr *instr) = 0;
+   virtual nir_def *lower(nir_instr *instr) = 0;
 
 protected:
    nir_builder *b;
@@ -91,7 +91,7 @@ private:
 
 } // namespace r600
 
-static inline nir_ssa_def *
+static inline nir_def *
 r600_imm_ivec3(nir_builder *build, int x, int y, int z)
 {
    nir_const_value v[3] = {
index 3a28f25..3ef65dc 100644 (file)
@@ -30,7 +30,7 @@
 #include "nir_intrinsics_indices.h"
 #include "sfn_nir.h"
 
-static nir_ssa_def *
+static nir_def *
 r600_legalize_image_load_store_impl(nir_builder *b,
                                     nir_instr *instr,
                                     UNUSED void *_options)
@@ -38,9 +38,9 @@ r600_legalize_image_load_store_impl(nir_builder *b,
    b->cursor = nir_before_instr(instr);
    auto ir = nir_instr_as_intrinsic(instr);
 
-   nir_ssa_def *default_value = nir_imm_vec4(b, 0.0, 0.0, 0.0, 0.0);
+   nir_def *default_value = nir_imm_vec4(b, 0.0, 0.0, 0.0, 0.0);
 
-   nir_ssa_def *result = NIR_LOWER_INSTR_PROGRESS_REPLACE;
+   nir_def *result = NIR_LOWER_INSTR_PROGRESS_REPLACE;
 
    bool load_value = ir->intrinsic != nir_intrinsic_image_store;
 
index 4db01ba..11b7436 100644 (file)
@@ -45,50 +45,50 @@ public:
    using VarSplit = pair<nir_variable *, nir_variable *>;
    using VarMap = map<unsigned, VarSplit>;
 
-   nir_ssa_def *split_double_load_deref(nir_intrinsic_instr *intr);
+   nir_def *split_double_load_deref(nir_intrinsic_instr *intr);
 
-   nir_ssa_def *split_double_store_deref(nir_intrinsic_instr *intr);
+   nir_def *split_double_store_deref(nir_intrinsic_instr *intr);
 
 private:
-   nir_ssa_def *split_load_deref_array(nir_intrinsic_instr *intr, nir_src& index);
+   nir_def *split_load_deref_array(nir_intrinsic_instr *intr, nir_src& index);
 
-   nir_ssa_def *split_load_deref_var(nir_intrinsic_instr *intr);
+   nir_def *split_load_deref_var(nir_intrinsic_instr *intr);
 
-   nir_ssa_def *split_store_deref_array(nir_intrinsic_instr *intr,
+   nir_def *split_store_deref_array(nir_intrinsic_instr *intr,
                                     nir_deref_instr *deref);
 
-   nir_ssa_def *split_store_deref_var(nir_intrinsic_instr *intr, nir_deref_instr *deref1);
+   nir_def *split_store_deref_var(nir_intrinsic_instr *intr, nir_deref_instr *deref1);
 
    VarSplit get_var_pair(nir_variable *old_var);
 
-   nir_ssa_def *
-   merge_64bit_loads(nir_ssa_def *load1, nir_ssa_def *load2, bool out_is_vec3);
+   nir_def *
+   merge_64bit_loads(nir_def *load1, nir_def *load2, bool out_is_vec3);
 
-   nir_ssa_def *split_double_load(nir_intrinsic_instr *load1);
+   nir_def *split_double_load(nir_intrinsic_instr *load1);
 
-   nir_ssa_def *split_store_output(nir_intrinsic_instr *store1);
+   nir_def *split_store_output(nir_intrinsic_instr *store1);
 
-   nir_ssa_def *split_double_load_uniform(nir_intrinsic_instr *intr);
+   nir_def *split_double_load_uniform(nir_intrinsic_instr *intr);
 
-   nir_ssa_def *split_double_load_ssbo(nir_intrinsic_instr *intr);
+   nir_def *split_double_load_ssbo(nir_intrinsic_instr *intr);
 
-   nir_ssa_def *split_double_load_ubo(nir_intrinsic_instr *intr);
+   nir_def *split_double_load_ubo(nir_intrinsic_instr *intr);
 
-   nir_ssa_def *
-   split_reduction(nir_ssa_def *src[2][2], nir_op op1, nir_op op2, nir_op reduction);
+   nir_def *
+   split_reduction(nir_def *src[2][2], nir_op op1, nir_op op2, nir_op reduction);
 
-   nir_ssa_def *
+   nir_def *
    split_reduction3(nir_alu_instr *alu, nir_op op1, nir_op op2, nir_op reduction);
 
-   nir_ssa_def *
+   nir_def *
    split_reduction4(nir_alu_instr *alu, nir_op op1, nir_op op2, nir_op reduction);
 
-   nir_ssa_def *split_bcsel(nir_alu_instr *alu);
+   nir_def *split_bcsel(nir_alu_instr *alu);
 
-   nir_ssa_def *split_load_const(nir_load_const_instr *lc);
+   nir_def *split_load_const(nir_load_const_instr *lc);
 
    bool filter(const nir_instr *instr) const override;
-   nir_ssa_def *lower(nir_instr *instr) override;
+   nir_def *lower(nir_instr *instr) override;
 
    VarMap m_varmap;
    vector<nir_variable *> m_old_vars;
@@ -97,7 +97,7 @@ private:
 
 class LowerLoad64Uniform : public NirLowerInstruction {
    bool filter(const nir_instr *instr) const override;
-   nir_ssa_def *lower(nir_instr *instr) override;
+   nir_def *lower(nir_instr *instr) override;
 };
 
 bool
@@ -115,7 +115,7 @@ LowerLoad64Uniform::filter(const nir_instr *instr) const
    return nir_dest_bit_size(intr->dest) == 64;
 }
 
-nir_ssa_def *
+nir_def *
 LowerLoad64Uniform::lower(nir_instr *instr)
 {
    auto intr = nir_instr_as_intrinsic(instr);
@@ -129,7 +129,7 @@ LowerLoad64Uniform::lower(nir_instr *instr)
        intr->intrinsic == nir_intrinsic_load_ubo_vec4)
       nir_intrinsic_set_component(intr, 2 * nir_intrinsic_component(intr));
 
-   nir_ssa_def *result_vec[2] = {nullptr, nullptr};
+   nir_def *result_vec[2] = {nullptr, nullptr};
 
    for (int i = 0; i < old_components; ++i) {
       result_vec[i] = nir_pack_64_2x32_split(b,
@@ -177,7 +177,7 @@ class LowerSplit64op : public NirLowerInstruction {
       }
    }
 
-   nir_ssa_def *lower(nir_instr *instr) override
+   nir_def *lower(nir_instr *instr) override
    {
 
       switch (instr->type) {
@@ -333,9 +333,9 @@ LowerSplit64BitVar::filter(const nir_instr *instr) const
    }
 }
 
-nir_ssa_def *
-LowerSplit64BitVar::merge_64bit_loads(nir_ssa_def *load1,
-                                      nir_ssa_def *load2,
+nir_def *
+LowerSplit64BitVar::merge_64bit_loads(nir_def *load1,
+                                      nir_def *load2,
                                       bool out_is_vec3)
 {
    if (out_is_vec3)
@@ -360,7 +360,7 @@ LowerSplit64BitVar::~LowerSplit64BitVar()
       nir_instr_remove(v);
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_double_store_deref(nir_intrinsic_instr *intr)
 {
    auto deref = nir_instr_as_deref(intr->src[0].ssa->parent_instr);
@@ -373,7 +373,7 @@ LowerSplit64BitVar::split_double_store_deref(nir_intrinsic_instr *intr)
    }
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_double_load_deref(nir_intrinsic_instr *intr)
 {
    auto deref = nir_instr_as_deref(intr->src[0].ssa->parent_instr);
@@ -387,7 +387,7 @@ LowerSplit64BitVar::split_double_load_deref(nir_intrinsic_instr *intr)
    m_old_stores.push_back(&intr->instr);
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_load_deref_array(nir_intrinsic_instr *intr, nir_src& index)
 {
    auto old_var = nir_intrinsic_get_var(intr, 0);
@@ -411,7 +411,7 @@ LowerSplit64BitVar::split_load_deref_array(nir_intrinsic_instr *intr, nir_src& i
    return merge_64bit_loads(load1, load2, old_components == 3);
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_store_deref_array(nir_intrinsic_instr *intr,
                                             nir_deref_instr *deref)
 {
@@ -448,7 +448,7 @@ LowerSplit64BitVar::split_store_deref_array(nir_intrinsic_instr *intr,
    return NIR_LOWER_INSTR_PROGRESS_REPLACE;
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_store_deref_var(nir_intrinsic_instr *intr,
                                           UNUSED nir_deref_instr *deref)
 {
@@ -476,7 +476,7 @@ LowerSplit64BitVar::split_store_deref_var(nir_intrinsic_instr *intr,
    return NIR_LOWER_INSTR_PROGRESS_REPLACE;
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_load_deref_var(nir_intrinsic_instr *intr)
 {
    auto old_var = nir_intrinsic_get_var(intr, 0);
@@ -529,7 +529,7 @@ LowerSplit64BitVar::get_var_pair(nir_variable *old_var)
    return m_varmap[old_var->data.driver_location];
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_double_load(nir_intrinsic_instr *load1)
 {
    unsigned old_components = nir_dest_num_components(load1->dest);
@@ -549,7 +549,7 @@ LowerSplit64BitVar::split_double_load(nir_intrinsic_instr *load1)
    return merge_64bit_loads(&load1->dest.ssa, &load2->dest.ssa, old_components == 3);
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_store_output(nir_intrinsic_instr *store1)
 {
    auto src = store1->src[0];
@@ -577,7 +577,7 @@ LowerSplit64BitVar::split_store_output(nir_intrinsic_instr *store1)
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_double_load_uniform(nir_intrinsic_instr *intr)
 {
    unsigned second_components = nir_dest_num_components(intr->dest) - 2;
@@ -607,7 +607,7 @@ LowerSplit64BitVar::split_double_load_uniform(nir_intrinsic_instr *intr)
                       nir_channel(b, &load2->dest.ssa, 1));
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_double_load_ssbo(nir_intrinsic_instr *intr)
 {
    unsigned second_components = nir_dest_num_components(intr->dest) - 2;
@@ -627,7 +627,7 @@ LowerSplit64BitVar::split_double_load_ssbo(nir_intrinsic_instr *intr)
    return merge_64bit_loads(&intr->dest.ssa, &load2->dest.ssa, second_components == 1);
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_double_load_ubo(nir_intrinsic_instr *intr)
 {
    unsigned second_components = nir_dest_num_components(intr->dest) - 2;
@@ -651,8 +651,8 @@ LowerSplit64BitVar::split_double_load_ubo(nir_intrinsic_instr *intr)
    return merge_64bit_loads(&intr->dest.ssa, &load2->dest.ssa, second_components == 1);
 }
 
-nir_ssa_def *
-LowerSplit64BitVar::split_reduction(nir_ssa_def *src[2][2],
+nir_def *
+LowerSplit64BitVar::split_reduction(nir_def *src[2][2],
                                     nir_op op1,
                                     nir_op op2,
                                     nir_op reduction)
@@ -662,13 +662,13 @@ LowerSplit64BitVar::split_reduction(nir_ssa_def *src[2][2],
    return nir_build_alu(b, reduction, cmp0, cmp1, nullptr, nullptr);
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_reduction3(nir_alu_instr *alu,
                                      nir_op op1,
                                      nir_op op2,
                                      nir_op reduction)
 {
-   nir_ssa_def *src[2][2];
+   nir_def *src[2][2];
 
    src[0][0] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[0].src, 2), 2);
    src[0][1] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[1].src, 2), 2);
@@ -679,13 +679,13 @@ LowerSplit64BitVar::split_reduction3(nir_alu_instr *alu,
    return split_reduction(src, op1, op2, reduction);
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_reduction4(nir_alu_instr *alu,
                                      nir_op op1,
                                      nir_op op2,
                                      nir_op reduction)
 {
-   nir_ssa_def *src[2][2];
+   nir_def *src[2][2];
 
    src[0][0] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[0].src, 2), 2);
    src[0][1] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[1].src, 2), 2);
@@ -696,10 +696,10 @@ LowerSplit64BitVar::split_reduction4(nir_alu_instr *alu,
    return split_reduction(src, op1, op2, reduction);
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_bcsel(nir_alu_instr *alu)
 {
-   static nir_ssa_def *dest[4];
+   static nir_def *dest[4];
    for (unsigned i = 0; i < nir_dest_num_components(alu->dest.dest); ++i) {
       dest[i] = nir_bcsel(b,
                           nir_channel(b, alu->src[0].src.ssa, i),
@@ -709,17 +709,17 @@ LowerSplit64BitVar::split_bcsel(nir_alu_instr *alu)
    return nir_vec(b, dest, nir_dest_num_components(alu->dest.dest));
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::split_load_const(nir_load_const_instr *lc)
 {
-   nir_ssa_def *ir[4];
+   nir_def *ir[4];
    for (unsigned i = 0; i < lc->def.num_components; ++i)
       ir[i] = nir_imm_double(b, lc->value[i].f64);
 
    return nir_vec(b, ir, lc->def.num_components);
 }
 
-nir_ssa_def *
+nir_def *
 LowerSplit64BitVar::lower(nir_instr *instr)
 {
    switch (instr->type) {
@@ -809,13 +809,13 @@ class Lower64BitToVec2 : public NirLowerInstruction {
 
 private:
    bool filter(const nir_instr *instr) const override;
-   nir_ssa_def *lower(nir_instr *instr) override;
+   nir_def *lower(nir_instr *instr) override;
 
-   nir_ssa_def *load_deref_64_to_vec2(nir_intrinsic_instr *intr);
-   nir_ssa_def *load_uniform_64_to_vec2(nir_intrinsic_instr *intr);
-   nir_ssa_def *load_ssbo_64_to_vec2(nir_intrinsic_instr *intr);
-   nir_ssa_def *load_64_to_vec2(nir_intrinsic_instr *intr);
-   nir_ssa_def *store_64_to_vec2(nir_intrinsic_instr *intr);
+   nir_def *load_deref_64_to_vec2(nir_intrinsic_instr *intr);
+   nir_def *load_uniform_64_to_vec2(nir_intrinsic_instr *intr);
+   nir_def *load_ssbo_64_to_vec2(nir_intrinsic_instr *intr);
+   nir_def *load_64_to_vec2(nir_intrinsic_instr *intr);
+   nir_def *store_64_to_vec2(nir_intrinsic_instr *intr);
 };
 
 bool
@@ -869,7 +869,7 @@ Lower64BitToVec2::filter(const nir_instr *instr) const
    }
 }
 
-nir_ssa_def *
+nir_def *
 Lower64BitToVec2::lower(nir_instr *instr)
 {
    switch (instr->type) {
@@ -945,7 +945,7 @@ Lower64BitToVec2::lower(nir_instr *instr)
    }
 }
 
-nir_ssa_def *
+nir_def *
 Lower64BitToVec2::load_deref_64_to_vec2(nir_intrinsic_instr *intr)
 {
    auto deref = nir_instr_as_deref(intr->src[0].ssa->parent_instr);
@@ -978,7 +978,7 @@ Lower64BitToVec2::load_deref_64_to_vec2(nir_intrinsic_instr *intr)
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
-nir_ssa_def *
+nir_def *
 Lower64BitToVec2::store_64_to_vec2(nir_intrinsic_instr *intr)
 {
    auto deref = nir_instr_as_deref(intr->src[0].ssa->parent_instr);
@@ -1009,7 +1009,7 @@ Lower64BitToVec2::store_64_to_vec2(nir_intrinsic_instr *intr)
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
-nir_ssa_def *
+nir_def *
 Lower64BitToVec2::load_uniform_64_to_vec2(nir_intrinsic_instr *intr)
 {
    intr->num_components *= 2;
@@ -1019,7 +1019,7 @@ Lower64BitToVec2::load_uniform_64_to_vec2(nir_intrinsic_instr *intr)
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
-nir_ssa_def *
+nir_def *
 Lower64BitToVec2::load_64_to_vec2(nir_intrinsic_instr *intr)
 {
    intr->num_components *= 2;
@@ -1029,7 +1029,7 @@ Lower64BitToVec2::load_64_to_vec2(nir_intrinsic_instr *intr)
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
-nir_ssa_def *
+nir_def *
 Lower64BitToVec2::load_ssbo_64_to_vec2(nir_intrinsic_instr *intr)
 {
    intr->num_components *= 2;
@@ -1219,7 +1219,7 @@ StoreMerger::combine()
 void
 StoreMerger::combine_one_slot(vector<nir_intrinsic_instr *>& stores)
 {
-   nir_ssa_def *srcs[4] = {nullptr};
+   nir_def *srcs[4] = {nullptr};
 
    auto last_store = *stores.rbegin();
 
@@ -1329,20 +1329,20 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
 
    if (has_dest) {
       /* Merge the two loads' results back into a vector. */
-      nir_ssa_scalar channels[4] = {
+      nir_scalar channels[4] = {
          nir_get_ssa_scalar(&first->dest.ssa, 0),
          nir_get_ssa_scalar(&first->dest.ssa, 1),
          nir_get_ssa_scalar(&second->dest.ssa, 0),
          nir_get_ssa_scalar(&second->dest.ssa, second->num_components > 1 ? 1 : 0),
       };
-      nir_ssa_def *new_ir = nir_vec_scalars(b, channels, instr->num_components);
-      nir_ssa_def_rewrite_uses(&instr->dest.ssa, new_ir);
+      nir_def *new_ir = nir_vec_scalars(b, channels, instr->num_components);
+      nir_def_rewrite_uses(&instr->dest.ssa, new_ir);
    } else {
       /* Split the src value across the two stores. */
       b->cursor = nir_before_instr(&instr->instr);
 
-      nir_ssa_def *src0 = instr->src[0].ssa;
-      nir_ssa_scalar channels[4] = {{0}};
+      nir_def *src0 = instr->src[0].ssa;
+      nir_scalar channels[4] = {{0}};
       for (int i = 0; i < instr->num_components; i++)
          channels[i] = nir_get_ssa_scalar(src0, i);
 
@@ -1379,7 +1379,7 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
    }
    if (offset_src != -1) {
       b->cursor = nir_before_instr(&second->instr);
-      nir_ssa_def *second_offset =
+      nir_def *second_offset =
          nir_iadd_imm(b, second->src[offset_src].ssa, offset_amount);
       nir_instr_rewrite_src(&second->instr,
                             &second->src[offset_src],
@@ -1424,14 +1424,14 @@ r600_lower_64bit_load_const(nir_builder *b, nir_load_const_instr *instr)
    nir_builder_instr_insert(b, &first->instr);
    nir_builder_instr_insert(b, &second->instr);
 
-   nir_ssa_def *channels[4] = {
+   nir_def *channels[4] = {
       nir_channel(b, &first->def, 0),
       nir_channel(b, &first->def, 1),
       nir_channel(b, &second->def, 0),
       num_components == 4 ? nir_channel(b, &second->def, 1) : NULL,
    };
-   nir_ssa_def *new_ir = nir_vec(b, channels, num_components);
-   nir_ssa_def_rewrite_uses(&instr->def, new_ir);
+   nir_def *new_ir = nir_vec(b, channels, num_components);
+   nir_def_rewrite_uses(&instr->def, new_ir);
    nir_instr_remove(&instr->instr);
 
    return true;
index 4a8710e..002aaf1 100644 (file)
@@ -33,7 +33,7 @@ namespace r600 {
 class Lower2x16 : public NirLowerInstruction {
 private:
    bool filter(const nir_instr *instr) const override;
-   nir_ssa_def *lower(nir_instr *instr) override;
+   nir_def *lower(nir_instr *instr) override;
 };
 
 bool
@@ -51,20 +51,20 @@ Lower2x16::filter(const nir_instr *instr) const
    }
 }
 
-nir_ssa_def *
+nir_def *
 Lower2x16::lower(nir_instr *instr)
 {
    nir_alu_instr *alu = nir_instr_as_alu(instr);
 
    switch (alu->op) {
    case nir_op_unpack_half_2x16: {
-      nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);
+      nir_def *packed = nir_ssa_for_alu_src(b, alu, 0);
       return nir_vec2(b,
                       nir_unpack_half_2x16_split_x(b, packed),
                       nir_unpack_half_2x16_split_y(b, packed));
    }
    case nir_op_pack_half_2x16: {
-      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
+      nir_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
       return nir_pack_half_2x16_split(b,
                                       nir_channel(b, src_vec2, 0),
                                       nir_channel(b, src_vec2, 1));
@@ -83,7 +83,7 @@ public:
 
 private:
    bool filter(const nir_instr *instr) const override;
-   nir_ssa_def *lower(nir_instr *instr) override;
+   nir_def *lower(nir_instr *instr) override;
    amd_gfx_level m_gxf_level;
 };
 
@@ -103,7 +103,7 @@ LowerSinCos::filter(const nir_instr *instr) const
    }
 }
 
-nir_ssa_def *
+nir_def *
 LowerSinCos::lower(nir_instr *instr)
 {
    auto alu = nir_instr_as_alu(instr);
index 2f0693e..e9b5b26 100644 (file)
@@ -92,7 +92,7 @@ private:
    virtual void create_new_io(nir_builder *b,
                               nir_intrinsic_instr *intr,
                               nir_variable *var,
-                              nir_ssa_def **srcs,
+                              nir_def **srcs,
                               unsigned first_comp,
                               unsigned num_comps) = 0;
 
@@ -109,13 +109,13 @@ private:
    void create_new_io(nir_builder *b,
                       nir_intrinsic_instr *intr,
                       nir_variable *var,
-                      nir_ssa_def **srcs,
+                      nir_def **srcs,
                       unsigned first_comp,
                       unsigned num_comps) override;
    bool instr_can_rewrite_type(nir_intrinsic_instr *intr) const override;
 
-   nir_ssa_def *create_combined_vector(nir_builder *b,
-                                       nir_ssa_def **srcs,
+   nir_def *create_combined_vector(nir_builder *b,
+                                   nir_def **srcs,
                                    int first_comp,
                                    int num_comp);
 };
@@ -372,10 +372,10 @@ NirLowerIOToVector::vec_instr_stack_pop(nir_builder *b,
    }
 
    b->cursor = nir_after_instr(&intr->instr);
-   nir_ssa_undef_instr *instr_undef = nir_ssa_undef_instr_create(b->shader, 1, 32);
+   nir_undef_instr *instr_undef = nir_undef_instr_create(b->shader, 1, 32);
    nir_builder_instr_insert(b, &instr_undef->instr);
 
-   nir_ssa_def *srcs[4];
+   nir_def *srcs[4];
    for (int i = 0; i < 4; i++) {
       srcs[i] = &instr_undef->def;
    }
@@ -414,7 +414,7 @@ void
 NirLowerFSOutToVector::create_new_io(nir_builder *b,
                                      nir_intrinsic_instr *intr,
                                      nir_variable *var,
-                                     nir_ssa_def **srcs,
+                                     nir_def **srcs,
                                      unsigned first_comp,
                                      unsigned num_comps)
 {
@@ -451,9 +451,9 @@ NirLowerFSOutToVector::instr_can_rewrite_type(nir_intrinsic_instr *intr) const
    return var_can_rewrite(nir_deref_instr_get_variable(deref));
 }
 
-nir_ssa_def *
+nir_def *
 NirLowerFSOutToVector::create_combined_vector(nir_builder *b,
-                                              nir_ssa_def **srcs,
+                                              nir_def **srcs,
                                               int first_comp,
                                               int num_comp)
 {
@@ -477,7 +477,7 @@ NirLowerFSOutToVector::create_combined_vector(nir_builder *b,
    int i = 0;
    unsigned k = 0;
    while (i < num_comp) {
-      nir_ssa_def *s = srcs[first_comp + k];
+      nir_def *s = srcs[first_comp + k];
       for (uint8_t kk = 0; kk < s->num_components && i < num_comp; ++kk) {
          instr->src[i].src = nir_src_for_ssa(s);
          instr->src[i].swizzle[0] = kk;
index 54e8525..7edb1a4 100644 (file)
@@ -51,7 +51,7 @@ r600_lower_tess_io_filter(const nir_instr *instr, gl_shader_stage stage)
    return false;
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_load_param_base(nir_builder *b, nir_intrinsic_op op)
 {
    nir_intrinsic_instr *result = nir_intrinsic_instr_create(b->shader, op);
@@ -99,14 +99,14 @@ get_tcs_varying_offset(nir_intrinsic_instr *op)
    return 0;
 }
 
-static inline nir_ssa_def *
-r600_umad_24(nir_builder *b, nir_ssa_def *op1, nir_ssa_def *op2, nir_ssa_def *op3)
+static inline nir_def *
+r600_umad_24(nir_builder *b, nir_def *op1, nir_def *op2, nir_def *op3)
 {
    return nir_build_alu(b, nir_op_umad24, op1, op2, op3, NULL);
 }
 
-static inline nir_ssa_def *
-r600_tcs_base_address(nir_builder *b, nir_ssa_def *param_base, nir_ssa_def *rel_patch_id)
+static inline nir_def *
+r600_tcs_base_address(nir_builder *b, nir_def *param_base, nir_def *rel_patch_id)
 {
    return r600_umad_24(b,
                        nir_channel(b, param_base, 0),
@@ -114,13 +114,13 @@ r600_tcs_base_address(nir_builder *b, nir_ssa_def *param_base, nir_ssa_def *rel_
                        nir_channel(b, param_base, 3));
 }
 
-static nir_ssa_def *
+static nir_def *
 emil_lsd_in_addr(nir_builder *b,
-                 nir_ssa_def *base,
-                 nir_ssa_def *patch_id,
+                 nir_def *base,
+                 nir_def *patch_id,
                  nir_intrinsic_instr *op)
 {
-   nir_ssa_def *addr =
+   nir_def *addr =
       nir_build_alu(b, nir_op_umul24, nir_channel(b, base, 0), patch_id, NULL, NULL);
 
    auto idx1 = nir_src_as_const_value(op->src[0]);
@@ -136,18 +136,18 @@ emil_lsd_in_addr(nir_builder *b,
    return nir_iadd(b, addr, offset);
 }
 
-static nir_ssa_def *
+static nir_def *
 emil_lsd_out_addr(nir_builder *b,
-                  nir_ssa_def *base,
-                  nir_ssa_def *patch_id,
+                  nir_def *base,
+                  nir_def *patch_id,
                   nir_intrinsic_instr *op,
                   UNUSED nir_variable_mode mode,
                   int src_offset)
 {
 
-   nir_ssa_def *addr1 =
+   nir_def *addr1 =
       r600_umad_24(b, nir_channel(b, base, 0), patch_id, nir_channel(b, base, 2));
-   nir_ssa_def *addr2 =
+   nir_def *addr2 =
       r600_umad_24(b, nir_channel(b, base, 1), op->src[src_offset].ssa, addr1);
    int offset = get_tcs_varying_offset(op);
    return nir_iadd_imm(b,
@@ -157,7 +157,7 @@ emil_lsd_out_addr(nir_builder *b,
                        offset);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_offset_group(nir_builder *b, int ncomponents)
 {
    switch (ncomponents) {
@@ -181,7 +181,7 @@ load_offset_group(nir_builder *b, int ncomponents)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 load_offset_group_from_mask(nir_builder *b, uint32_t mask)
 {
    auto full_mask = nir_imm_ivec4(b, 0, 4, 8, 12);
@@ -252,20 +252,20 @@ get_dest_usee_mask(nir_intrinsic_instr *op)
 }
 
 static void
-replace_load_instr(nir_builder *b, nir_intrinsic_instr *op, nir_ssa_def *addr)
+replace_load_instr(nir_builder *b, nir_intrinsic_instr *op, nir_def *addr)
 {
    uint32_t mask = get_dest_usee_mask(op);
    if (mask) {
-      nir_ssa_def *addr_outer = nir_iadd(b, addr, load_offset_group_from_mask(b, mask));
+      nir_def *addr_outer = nir_iadd(b, addr, load_offset_group_from_mask(b, mask));
       if (nir_intrinsic_component(op))
          addr_outer =
             nir_iadd_imm(b, addr_outer, 4 * nir_intrinsic_component(op));
 
       auto new_load = nir_load_local_shared_r600(b, 32, addr_outer);
 
-      auto undef = nir_ssa_undef(b, 1, 32);
+      auto undef = nir_undef(b, 1, 32);
       int comps = nir_dest_num_components(op->dest);
-      nir_ssa_def *remix[4] = {undef, undef, undef, undef};
+      nir_def *remix[4] = {undef, undef, undef, undef};
 
       int chan = 0;
       for (int i = 0; i < comps; ++i) {
@@ -274,12 +274,12 @@ replace_load_instr(nir_builder *b, nir_intrinsic_instr *op, nir_ssa_def *addr)
          }
       }
       auto new_load_remixed = nir_vec(b, remix, comps);
-      nir_ssa_def_rewrite_uses(&op->dest.ssa, new_load_remixed);
+      nir_def_rewrite_uses(&op->dest.ssa, new_load_remixed);
    }
    nir_instr_remove(&op->instr);
 }
 
-static nir_ssa_def *
+static nir_def *
 r600_load_rel_patch_id(nir_builder *b)
 {
    auto patch_id =
@@ -290,7 +290,7 @@ r600_load_rel_patch_id(nir_builder *b)
 }
 
 static void
-emit_store_lds(nir_builder *b, nir_intrinsic_instr *op, nir_ssa_def *addr)
+emit_store_lds(nir_builder *b, nir_intrinsic_instr *op, nir_def *addr)
 {
    uint32_t orig_writemask = nir_intrinsic_write_mask(op) << nir_intrinsic_component(op);
 
@@ -316,9 +316,9 @@ emit_store_lds(nir_builder *b, nir_intrinsic_instr *op, nir_ssa_def *addr)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 emil_tcs_io_offset(nir_builder *b,
-                   nir_ssa_def *addr,
+                   nir_def *addr,
                    nir_intrinsic_instr *op,
                    int src_offset)
 {
@@ -348,8 +348,8 @@ outer_tf_components(mesa_prim prim_type)
 static bool
 r600_lower_tess_io_impl(nir_builder *b, nir_instr *instr, enum mesa_prim prim_type)
 {
-   static nir_ssa_def *load_in_param_base = nullptr;
-   static nir_ssa_def *load_out_param_base = nullptr;
+   static nir_def *load_in_param_base = nullptr;
+   static nir_def *load_out_param_base = nullptr;
 
    b->cursor = nir_before_instr(instr);
    nir_intrinsic_instr *op = nir_instr_as_intrinsic(instr);
@@ -374,19 +374,19 @@ r600_lower_tess_io_impl(nir_builder *b, nir_instr *instr, enum mesa_prim prim_ty
 
    switch (op->intrinsic) {
    case nir_intrinsic_load_patch_vertices_in: {
-      nir_ssa_def *vertices_in;
+      nir_def *vertices_in;
       if (b->shader->info.stage == MESA_SHADER_TESS_CTRL)
          vertices_in = nir_channel(b, load_in_param_base, 2);
       else {
          auto base = emit_load_param_base(b, nir_intrinsic_load_tcs_in_param_base_r600);
          vertices_in = nir_channel(b, base, 2);
       }
-      nir_ssa_def_rewrite_uses(&op->dest.ssa, vertices_in);
+      nir_def_rewrite_uses(&op->dest.ssa, vertices_in);
       nir_instr_remove(&op->instr);
       return true;
    }
    case nir_intrinsic_load_per_vertex_input: {
-      nir_ssa_def *addr =
+      nir_def *addr =
          b->shader->info.stage == MESA_SHADER_TESS_CTRL
             ? emil_lsd_in_addr(b, load_in_param_base, rel_patch_id, op)
             : emil_lsd_out_addr(
@@ -395,20 +395,20 @@ r600_lower_tess_io_impl(nir_builder *b, nir_instr *instr, enum mesa_prim prim_ty
       return true;
    }
    case nir_intrinsic_store_per_vertex_output: {
-      nir_ssa_def *addr = emil_lsd_out_addr(
+      nir_def *addr = emil_lsd_out_addr(
          b, load_out_param_base, rel_patch_id, op, nir_var_shader_out, 1);
       emit_store_lds(b, op, addr);
       nir_instr_remove(instr);
       return true;
    }
    case nir_intrinsic_load_per_vertex_output: {
-      nir_ssa_def *addr = emil_lsd_out_addr(
+      nir_def *addr = emil_lsd_out_addr(
          b, load_out_param_base, rel_patch_id, op, nir_var_shader_out, 0);
       replace_load_instr(b, op, addr);
       return true;
    }
    case nir_intrinsic_store_output: {
-      nir_ssa_def *addr = (b->shader->info.stage == MESA_SHADER_TESS_CTRL)
+      nir_def *addr = (b->shader->info.stage == MESA_SHADER_TESS_CTRL)
                              ? r600_tcs_base_address(b, load_out_param_base, rel_patch_id)
                              : nir_build_alu(b,
                                              nir_op_umul24,
@@ -422,13 +422,13 @@ r600_lower_tess_io_impl(nir_builder *b, nir_instr *instr, enum mesa_prim prim_ty
       return true;
    }
    case nir_intrinsic_load_output: {
-      nir_ssa_def *addr = r600_tcs_base_address(b, load_out_param_base, rel_patch_id);
+      nir_def *addr = r600_tcs_base_address(b, load_out_param_base, rel_patch_id);
       addr = emil_tcs_io_offset(b, addr, op, 0);
       replace_load_instr(b, op, addr);
       return true;
    }
    case nir_intrinsic_load_input: {
-      nir_ssa_def *addr = r600_tcs_base_address(b, load_in_param_base, rel_patch_id);
+      nir_def *addr = r600_tcs_base_address(b, load_in_param_base, rel_patch_id);
       addr = emil_tcs_io_offset(b, addr, op, 0);
       replace_load_instr(b, op, addr);
       return true;
@@ -444,8 +444,8 @@ r600_lower_tess_io_impl(nir_builder *b, nir_instr *instr, enum mesa_prim prim_ty
       ncomps -= ncomps_correct;
       auto base = emit_load_param_base(b, nir_intrinsic_load_tcs_out_param_base_r600);
       auto rel_patch_id = r600_load_rel_patch_id(b);
-      nir_ssa_def *addr0 = r600_tcs_base_address(b, base, rel_patch_id);
-      nir_ssa_def *addr_outer =
+      nir_def *addr0 = r600_tcs_base_address(b, base, rel_patch_id);
+      nir_def *addr_outer =
          nir_iadd(b, addr0, load_offset_group(b, tf_inner_address_offset + ncomps));
 
       auto tf =
@@ -455,14 +455,14 @@ r600_lower_tess_io_impl(nir_builder *b, nir_instr *instr, enum mesa_prim prim_ty
       nir_ssa_dest_init(&tf->instr, &tf->dest, tf->num_components, 32);
       nir_builder_instr_insert(b, &tf->instr);
       if (ncomps < 4 && b->shader->info.stage != MESA_SHADER_TESS_EVAL) {
-         auto undef = nir_ssa_undef(b, 1, 32);
-         nir_ssa_def *srcs[4] = {undef, undef, undef, undef};
+         auto undef = nir_undef(b, 1, 32);
+         nir_def *srcs[4] = {undef, undef, undef, undef};
          for (unsigned i = 0; i < ncomps; ++i)
             srcs[i] = nir_channel(b, &tf->dest.ssa, i);
          auto help = nir_vec(b, srcs, 4);
-         nir_ssa_def_rewrite_uses(&op->dest.ssa, help);
+         nir_def_rewrite_uses(&op->dest.ssa, help);
       } else {
-         nir_ssa_def_rewrite_uses(&op->dest.ssa, &tf->dest.ssa);
+         nir_def_rewrite_uses(&op->dest.ssa, &tf->dest.ssa);
       }
       nir_instr_remove(instr);
       return true;
@@ -497,7 +497,7 @@ r600_lower_tess_io(nir_shader *shader, enum mesa_prim prim_type)
 }
 
 bool
-r600_emit_tf(nir_builder *b, nir_ssa_def *val)
+r600_emit_tf(nir_builder *b, nir_def *val)
 {
    nir_intrinsic_instr *store_tf =
       nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_tf_r600);
@@ -552,9 +552,9 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum mesa_prim prim_type)
    auto base = emit_load_param_base(b, nir_intrinsic_load_tcs_out_param_base_r600);
    auto rel_patch_id = r600_load_rel_patch_id(b);
 
-   nir_ssa_def *addr0 = r600_tcs_base_address(b, base, rel_patch_id);
+   nir_def *addr0 = r600_tcs_base_address(b, base, rel_patch_id);
 
-   nir_ssa_def *addr_outer = nir_iadd(b, addr0, load_offset_group(b, outer_comps));
+   nir_def *addr_outer = nir_iadd(b, addr0, load_offset_group(b, outer_comps));
    auto tf_outer =
       nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_local_shared_r600);
    tf_outer->num_components = outer_comps;
@@ -563,7 +563,7 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum mesa_prim prim_type)
       &tf_outer->instr, &tf_outer->dest, tf_outer->num_components, 32);
    nir_builder_instr_insert(b, &tf_outer->instr);
 
-   std::vector<nir_ssa_def *> tf_out;
+   std::vector<nir_def *> tf_out;
 
    auto tf_out_base =
       nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_tcs_tess_factor_base_r600);
@@ -607,7 +607,7 @@ r600_append_tcs_TF_emission(nir_shader *shader, enum mesa_prim prim_type)
    }
 
    if (inner_comps) {
-      nir_ssa_def *addr1 = nir_iadd(b, addr0, load_offset_group(b, 4 + inner_comps));
+      nir_def *addr1 = nir_iadd(b, addr0, load_offset_group(b, 4 + inner_comps));
       auto tf_inner =
          nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_local_shared_r600);
       tf_inner->num_components = inner_comps;
index 6369cfc..be36d94 100644
@@ -35,11 +35,11 @@ lower_coord_shift_normalized(nir_builder *b, nir_tex_instr *tex)
 {
    b->cursor = nir_before_instr(&tex->instr);
 
-   nir_ssa_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
-   nir_ssa_def *scale = nir_frcp(b, size);
+   nir_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
+   nir_def *scale = nir_frcp(b, size);
 
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
-   nir_ssa_def *corr = nullptr;
+   nir_def *corr = nullptr;
    if (unlikely(tex->array_is_lowered_cube)) {
       auto corr2 = nir_fadd(b,
                             nir_trim_vector(b, tex->src[coord_index].src.ssa, 2),
@@ -63,7 +63,7 @@ lower_coord_shift_unnormalized(nir_builder *b, nir_tex_instr *tex)
 {
    b->cursor = nir_before_instr(&tex->instr);
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
-   nir_ssa_def *corr = nullptr;
+   nir_def *corr = nullptr;
    if (unlikely(tex->array_is_lowered_cube)) {
       auto corr2 = nir_fadd_imm(b,
                                 nir_trim_vector(b, tex->src[coord_index].src.ssa, 2),
@@ -158,8 +158,8 @@ lower_txl_txf_array_or_cube(nir_builder *b, nir_tex_instr *tex)
    int min_lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_min_lod);
    assert(lod_idx >= 0 || bias_idx >= 0);
 
-   nir_ssa_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
-   nir_ssa_def *lod = (lod_idx >= 0) ? nir_ssa_for_src(b, tex->src[lod_idx].src, 1)
+   nir_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
+   nir_def *lod = (lod_idx >= 0) ? nir_ssa_for_src(b, tex->src[lod_idx].src, 1)
                                      : nir_get_texture_lod(b, tex);
 
    if (bias_idx >= 0)
@@ -170,8 +170,8 @@ lower_txl_txf_array_or_cube(nir_builder *b, nir_tex_instr *tex)
 
    /* max lod? */
 
-   nir_ssa_def *lambda_exp = nir_fexp2(b, lod);
-   nir_ssa_def *scale = NULL;
+   nir_def *lambda_exp = nir_fexp2(b, lod);
+   nir_def *scale = NULL;
 
    if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
       unsigned int swizzle[NIR_MAX_VEC_COMPONENTS] = {0, 0, 0, 0};
@@ -182,7 +182,7 @@ lower_txl_txf_array_or_cube(nir_builder *b, nir_tex_instr *tex)
       scale = nir_frcp(b, nir_channels(b, size, (nir_component_mask_t)cmp_mask));
    }
 
-   nir_ssa_def *grad = nir_fmul(b, lambda_exp, scale);
+   nir_def *grad = nir_fmul(b, lambda_exp, scale);
 
    if (lod_idx >= 0)
       nir_tex_instr_remove_src(tex, lod_idx);
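
Annotation: on the grad = exp2(lod) * rcp(size) expression above. Assuming the standard mip-LOD relation lambda = log2(rho) with rho roughly |grad| * size in texels (standard texturing math, not stated in this patch), gradients of magnitude 2^lod / size make the sampler re-derive the requested lod. A minimal scalar check under that assumption:

   #include <math.h>

   /* Lambda as the sampler would derive it from a gradient, modeled as
    * log2(|grad| * size). derived_lambda(exp2f(lod) / size, size) == lod
    * up to rounding. */
   static float derived_lambda(float grad, float size)
   {
      return log2f(grad * size);
   }
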
@@ -256,7 +256,7 @@ r600_nir_lower_cube_to_2darray_filer(const nir_instr *instr, const void *_option
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 r600_nir_lower_cube_to_2darray_impl(nir_builder *b, nir_instr *instr, void *_options)
 {
    b->cursor = nir_before_instr(instr);
@@ -272,7 +272,7 @@ r600_nir_lower_cube_to_2darray_impl(nir_builder *b, nir_instr *instr, void *_opt
                       nir_frcp(b, nir_fabs(b, nir_channel(b, cubed, 2))),
                       nir_imm_float(b, 1.5));
 
-   nir_ssa_def *z = nir_channel(b, cubed, 3);
+   nir_def *z = nir_channel(b, cubed, 3);
    if (tex->is_array && tex->op != nir_texop_lod) {
       auto slice = nir_fround_even(b, nir_channel(b, tex->src[coord_idx].src.ssa, 3));
       z =
index d12e961..91fe44d 100644
@@ -164,8 +164,8 @@ r600_create_new_load(nir_builder *b,
 
    for (unsigned i = 0; i < old_num_comps; ++i)
       channels[i] = comp - var->data.location_frac + i;
-   nir_ssa_def *load = nir_swizzle(b, &new_intr->dest.ssa, channels, old_num_comps);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, load);
+   nir_def *load = nir_swizzle(b, &new_intr->dest.ssa, channels, old_num_comps);
+   nir_def_rewrite_uses(&intr->dest.ssa, load);
 
    /* Remove the old load intrinsic */
    nir_instr_remove(&intr->instr);
index d995ffe..0e8ad0d 100644
@@ -349,7 +349,7 @@ ValueFactory::dummy_dest(unsigned chan)
 }
 
 PRegister
-ValueFactory::dest(const nir_ssa_def& ssa, int chan, Pin pin_channel, uint8_t chan_mask)
+ValueFactory::dest(const nir_def& ssa, int chan, Pin pin_channel, uint8_t chan_mask)
 {
    RegisterKey key(ssa.index, chan, vp_ssa);
 
@@ -410,7 +410,7 @@ ValueFactory::undef(int index, int chan)
 }
 
 PVirtualValue
-ValueFactory::ssa_src(const nir_ssa_def& ssa, int chan)
+ValueFactory::ssa_src(const nir_def& ssa, int chan)
 {
    RegisterKey key(ssa.index, chan, vp_ssa);
    sfn_log << SfnLog::reg << "search src with key" << key << "\n";
index 953fce7..274fbd9 100644
@@ -242,7 +242,7 @@ public:
    dest(const nir_dest& dest, int chan, Pin pin_channel, uint8_t chan_mask = 0xf);
 
    PRegister
-   dest(const nir_ssa_def& dest, int chan, Pin pin_channel, uint8_t chan_mask = 0xf);
+   dest(const nir_def& dest, int chan, Pin pin_channel, uint8_t chan_mask = 0xf);
 
    RegisterVec4 dest_vec4(const nir_dest& dest, Pin pin);
 
@@ -303,7 +303,7 @@ public:
    PRegister idx_reg(unsigned idx);
 
 private:
-   PVirtualValue ssa_src(const nir_ssa_def& dest, int chan);
+   PVirtualValue ssa_src(const nir_def& dest, int chan);
 
    int m_next_register_index;
    int m_next_temp_channel{0};
index 4cc5247..c97a4ba 100644
@@ -17,23 +17,23 @@ struct lower_abi_state {
    struct si_shader *shader;
    struct si_shader_args *args;
 
-   nir_ssa_def *esgs_ring;
-   nir_ssa_def *tess_offchip_ring;
-   nir_ssa_def *gsvs_ring[4];
+   nir_def *esgs_ring;
+   nir_def *tess_offchip_ring;
+   nir_def *gsvs_ring[4];
 };
 
 #define GET_FIELD_NIR(field) \
    ac_nir_unpack_arg(b, &args->ac, args->vs_state_bits, \
                      field##__SHIFT, util_bitcount(field##__MASK))
 
-nir_ssa_def *si_nir_load_internal_binding(nir_builder *b, struct si_shader_args *args,
+nir_def *si_nir_load_internal_binding(nir_builder *b, struct si_shader_args *args,
                                           unsigned slot, unsigned num_components)
 {
-   nir_ssa_def *addr = ac_nir_load_arg(b, &args->ac, args->internal_bindings);
+   nir_def *addr = ac_nir_load_arg(b, &args->ac, args->internal_bindings);
    return nir_load_smem_amd(b, num_components, addr, nir_imm_int(b, slot * 16));
 }
 
-static nir_ssa_def *get_num_vert_per_prim(nir_builder *b, struct si_shader *shader,
+static nir_def *get_num_vert_per_prim(nir_builder *b, struct si_shader *shader,
                                           struct si_shader_args *args)
 {
    const struct si_shader_info *info = &shader->selector->info;
@@ -49,7 +49,7 @@ static nir_ssa_def *get_num_vert_per_prim(nir_builder *b, struct si_shader *shad
          num_vertices = 2;
       else {
          /* Extract OUTPRIM field. */
-         nir_ssa_def *num = GET_FIELD_NIR(GS_STATE_OUTPRIM);
+         nir_def *num = GET_FIELD_NIR(GS_STATE_OUTPRIM);
          return nir_iadd_imm(b, num, 1);
       }
    } else {
@@ -65,18 +65,18 @@ static nir_ssa_def *get_num_vert_per_prim(nir_builder *b, struct si_shader *shad
    return nir_imm_int(b, num_vertices);
 }
 
-static nir_ssa_def *build_attr_ring_desc(nir_builder *b, struct si_shader *shader,
+static nir_def *build_attr_ring_desc(nir_builder *b, struct si_shader *shader,
                                          struct si_shader_args *args)
 {
    struct si_shader_selector *sel = shader->selector;
 
-   nir_ssa_def *attr_address =
+   nir_def *attr_address =
       sel->stage == MESA_SHADER_VERTEX && sel->info.base.vs.blit_sgprs_amd ?
       si_nir_load_internal_binding(b, args, SI_GS_ATTRIBUTE_RING, 4) :
       ac_nir_load_arg(b, &args->ac, args->gs_attr_address);
 
    unsigned stride = 16 * shader->info.nr_param_exports;
-   nir_ssa_def *comp[] = {
+   nir_def *comp[] = {
       attr_address,
       nir_imm_int(b, S_008F04_BASE_ADDRESS_HI(sel->screen->info.address32_hi) |
                   S_008F04_STRIDE(stride) |
@@ -93,7 +93,7 @@ static nir_ssa_def *build_attr_ring_desc(nir_builder *b, struct si_shader *shade
    return nir_vec(b, comp, 4);
 }
 
-static nir_ssa_def *
+static nir_def *
 fetch_framebuffer(nir_builder *b, struct si_shader_args *args,
                   struct si_shader_selector *sel, union si_shader_key *key)
 {
@@ -101,11 +101,11 @@ fetch_framebuffer(nir_builder *b, struct si_shader_args *args,
    STATIC_ASSERT(SI_PS_IMAGE_COLORBUF0 % 2 == 0);
    STATIC_ASSERT(SI_PS_IMAGE_COLORBUF0_FMASK % 2 == 0);
 
-   nir_ssa_def *zero = nir_imm_zero(b, 1, 32);
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
+   nir_def *zero = nir_imm_zero(b, 1, 32);
+   nir_def *undef = nir_undef(b, 1, 32);
 
    unsigned chan = 0;
-   nir_ssa_def *vec[4] = {undef, undef, undef, undef};
+   nir_def *vec[4] = {undef, undef, undef, undef};
 
    vec[chan++] = ac_nir_unpack_arg(b, &args->ac, args->pos_fixed_pt, 0, 16);
 
@@ -116,7 +116,7 @@ fetch_framebuffer(nir_builder *b, struct si_shader_args *args,
    if (key->ps.mono.fbfetch_layered)
       vec[chan++] = ac_nir_unpack_arg(b, &args->ac, args->ac.ancillary, 16, 11);
 
-   nir_ssa_def *coords = nir_vec(b, vec, 4);
+   nir_def *coords = nir_vec(b, vec, 4);
 
    enum glsl_sampler_dim dim;
    if (key->ps.mono.fbfetch_msaa)
@@ -126,32 +126,32 @@ fetch_framebuffer(nir_builder *b, struct si_shader_args *args,
    else
       dim = GLSL_SAMPLER_DIM_2D;
 
-   nir_ssa_def *sample_id;
+   nir_def *sample_id;
    if (key->ps.mono.fbfetch_msaa) {
       sample_id = ac_nir_unpack_arg(b, &args->ac, args->ac.ancillary, 8, 4);
 
       if (sel->screen->info.gfx_level < GFX11 &&
           !(sel->screen->debug_flags & DBG(NO_FMASK))) {
-         nir_ssa_def *desc =
+         nir_def *desc =
             si_nir_load_internal_binding(b, args, SI_PS_IMAGE_COLORBUF0_FMASK, 8);
 
-         nir_ssa_def *fmask =
+         nir_def *fmask =
             nir_bindless_image_fragment_mask_load_amd(
                b, desc, coords,
                .image_dim = dim,
                .image_array = key->ps.mono.fbfetch_layered,
                .access = ACCESS_CAN_REORDER);
 
-         nir_ssa_def *offset = nir_ishl_imm(b, sample_id, 2);
+         nir_def *offset = nir_ishl_imm(b, sample_id, 2);
          /* 3 for EQAA handling, see lower_image_to_fragment_mask_load() */
-         nir_ssa_def *width = nir_imm_int(b, 3);
+         nir_def *width = nir_imm_int(b, 3);
          sample_id = nir_ubfe(b, fmask, offset, width);
       }
    } else {
       sample_id = zero;
    }
 
-   nir_ssa_def *desc = si_nir_load_internal_binding(b, args, SI_PS_IMAGE_COLORBUF0, 8);
+   nir_def *desc = si_nir_load_internal_binding(b, args, SI_PS_IMAGE_COLORBUF0, 8);
 
    return nir_bindless_image_load(b, 4, 32, desc, coords, sample_id, zero,
                                   .image_dim = dim,
@@ -159,10 +159,10 @@ fetch_framebuffer(nir_builder *b, struct si_shader_args *args,
                                   .access = ACCESS_CAN_REORDER);
 }
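
Annotation: the FMASK lookup in fetch_framebuffer reads a 3-bit field at bit offset sample_id * 4 (the ishl_imm by 2 produces a bit offset): the fragment mask packs one 4-bit slot per sample holding the index of the color fragment covering it, and only the low 3 bits are consumed per the EQAA comment. Scalar model of that remap (helper name is illustrative):

   #include <stdint.h>

   /* Remap a sample index through FMASK: 4-bit slot per sample, low 3
    * bits read, matching ubfe(fmask, sample_id << 2, 3) above. */
   static uint32_t fmask_remap_sample(uint32_t fmask, uint32_t sample_id)
   {
      return (fmask >> (sample_id * 4)) & 0x7;
   }
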
 
-static nir_ssa_def *build_tess_ring_desc(nir_builder *b, struct si_screen *screen,
+static nir_def *build_tess_ring_desc(nir_builder *b, struct si_screen *screen,
                                          struct si_shader_args *args)
 {
-   nir_ssa_def *addr = ac_nir_load_arg(b, &args->ac, args->tes_offchip_addr);
+   nir_def *addr = ac_nir_load_arg(b, &args->ac, args->tes_offchip_addr);
 
    uint32_t rsrc3 =
       S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
@@ -182,7 +182,7 @@ static nir_ssa_def *build_tess_ring_desc(nir_builder *b, struct si_screen *scree
                S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
    }
 
-   nir_ssa_def *comp[4] = {
+   nir_def *comp[4] = {
       addr,
       nir_imm_int(b, S_008F04_BASE_ADDRESS_HI(screen->info.address32_hi)),
       nir_imm_int(b, 0xffffffff),
@@ -192,15 +192,15 @@ static nir_ssa_def *build_tess_ring_desc(nir_builder *b, struct si_screen *scree
    return nir_vec(b, comp, 4);
 }
 
-static nir_ssa_def *build_esgs_ring_desc(nir_builder *b, enum amd_gfx_level gfx_level,
+static nir_def *build_esgs_ring_desc(nir_builder *b, enum amd_gfx_level gfx_level,
                                          struct si_shader_args *args)
 {
-   nir_ssa_def *desc = si_nir_load_internal_binding(b, args, SI_RING_ESGS, 4);
+   nir_def *desc = si_nir_load_internal_binding(b, args, SI_RING_ESGS, 4);
 
    if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
       return desc;
 
-   nir_ssa_def *vec[4];
+   nir_def *vec[4];
    for (int i = 0; i < 4; i++)
       vec[i] = nir_channel(b, desc, i);
 
@@ -225,7 +225,7 @@ static void build_gsvs_ring_desc(nir_builder *b, struct lower_abi_state *s)
    if (s->shader->is_gs_copy_shader) {
       s->gsvs_ring[0] = si_nir_load_internal_binding(b, s->args, SI_RING_GSVS, 4);
    } else if (sel->stage == MESA_SHADER_GEOMETRY && !key->ge.as_ngg) {
-      nir_ssa_def *base_addr = si_nir_load_internal_binding(b, s->args, SI_RING_GSVS, 2);
+      nir_def *base_addr = si_nir_load_internal_binding(b, s->args, SI_RING_GSVS, 2);
       base_addr = nir_pack_64_2x32(b, base_addr);
 
       /* The conceptual layout of the GSVS ring is
@@ -242,7 +242,7 @@ static void build_gsvs_ring_desc(nir_builder *b, struct lower_abi_state *s)
          if (!num_components)
             continue;
 
-         nir_ssa_def *desc[4];
+         nir_def *desc[4];
          desc[0] = nir_unpack_64_2x32_split_x(b, base_addr);
          desc[1] = nir_unpack_64_2x32_split_y(b, base_addr);
 
@@ -321,25 +321,25 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *replacement = NULL;
+   nir_def *replacement = NULL;
 
    switch (intrin->intrinsic) {
    case nir_intrinsic_load_first_vertex:
       replacement = ac_nir_load_arg(b, &args->ac, args->ac.base_vertex);
       break;
    case nir_intrinsic_load_base_vertex: {
-      nir_ssa_def *indexed = GET_FIELD_NIR(VS_STATE_INDEXED);
+      nir_def *indexed = GET_FIELD_NIR(VS_STATE_INDEXED);
       indexed = nir_i2b(b, indexed);
 
-      nir_ssa_def *base_vertex = ac_nir_load_arg(b, &args->ac, args->ac.base_vertex);
+      nir_def *base_vertex = ac_nir_load_arg(b, &args->ac, args->ac.base_vertex);
       replacement = nir_bcsel(b, indexed, base_vertex, nir_imm_int(b, 0));
       break;
    }
    case nir_intrinsic_load_workgroup_size: {
       assert(sel->info.base.workgroup_size_variable && sel->info.uses_variable_block_size);
 
-      nir_ssa_def *block_size = ac_nir_load_arg(b, &args->ac, args->block_size);
-      nir_ssa_def *comp[] = {
+      nir_def *block_size = ac_nir_load_arg(b, &args->ac, args->block_size);
+      nir_def *comp[] = {
          nir_ubfe_imm(b, block_size, 0, 10),
          nir_ubfe_imm(b, block_size, 10, 10),
          nir_ubfe_imm(b, block_size, 20, 10),
@@ -349,7 +349,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
    }
    case nir_intrinsic_load_tess_level_outer_default:
    case nir_intrinsic_load_tess_level_inner_default: {
-      nir_ssa_def *buf = si_nir_load_internal_binding(b, args, SI_HS_CONST_DEFAULT_TESS_LEVELS, 4);
+      nir_def *buf = si_nir_load_internal_binding(b, args, SI_HS_CONST_DEFAULT_TESS_LEVELS, 4);
       unsigned num_components = intrin->dest.ssa.num_components;
       unsigned offset =
          intrin->intrinsic == nir_intrinsic_load_tess_level_inner_default ? 16 : 0;
@@ -386,7 +386,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
          GET_FIELD_NIR(GS_STATE_ESGS_VERTEX_STRIDE);
       break;
    case nir_intrinsic_load_tcs_num_patches_amd: {
-      nir_ssa_def *tmp = ac_nir_unpack_arg(b, &args->ac, args->tcs_offchip_layout, 0, 6);
+      nir_def *tmp = ac_nir_unpack_arg(b, &args->ac, args->tcs_offchip_layout, 0, 6);
       replacement = nir_iadd_imm(b, tmp, 1);
       break;
    }
@@ -400,13 +400,13 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
       replacement = ac_nir_load_arg(b, &args->ac, args->ac.es2gs_offset);
       break;
    case nir_intrinsic_load_clip_half_line_width_amd: {
-      nir_ssa_def *addr = ac_nir_load_arg(b, &args->ac, args->small_prim_cull_info);
+      nir_def *addr = ac_nir_load_arg(b, &args->ac, args->small_prim_cull_info);
       replacement = nir_load_smem_amd(b, 2, addr, nir_imm_int(b, 32));
       break;
    }
    case nir_intrinsic_load_viewport_xy_scale_and_offset: {
       bool prim_is_lines = key->ge.opt.ngg_culling & SI_NGG_CULL_LINES;
-      nir_ssa_def *addr = ac_nir_load_arg(b, &args->ac, args->small_prim_cull_info);
+      nir_def *addr = ac_nir_load_arg(b, &args->ac, args->small_prim_cull_info);
       unsigned offset = prim_is_lines ? 16 : 0;
       replacement = nir_load_smem_amd(b, 4, addr, nir_imm_int(b, offset));
       break;
@@ -428,7 +428,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
       replacement = nir_imm_bool(b, key->ge.opt.ngg_culling & SI_NGG_CULL_FRONT_FACE);
       break;
    case nir_intrinsic_load_cull_small_prim_precision_amd: {
-      nir_ssa_def *small_prim_precision =
+      nir_def *small_prim_precision =
          key->ge.opt.ngg_culling & SI_NGG_CULL_LINES ?
          GET_FIELD_NIR(GS_STATE_SMALL_PRIM_PRECISION_NO_AA) :
          GET_FIELD_NIR(GS_STATE_SMALL_PRIM_PRECISION);
@@ -457,7 +457,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
       replacement = nir_i2b(b, GET_FIELD_NIR(VS_STATE_CLAMP_VERTEX_COLOR));
       break;
    case nir_intrinsic_load_user_clip_plane: {
-      nir_ssa_def *buf = si_nir_load_internal_binding(b, args, SI_VS_CONST_CLIP_PLANES, 4);
+      nir_def *buf = si_nir_load_internal_binding(b, args, SI_VS_CONST_CLIP_PLANES, 4);
       unsigned offset = nir_intrinsic_ucp_id(intrin) * 16;
       replacement = nir_load_ubo(b, 4, 32, buf, nir_imm_int(b, offset),
                                  .range = ~0);
@@ -470,7 +470,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
    }
    case nir_intrinsic_atomic_add_gs_emit_prim_count_amd:
    case nir_intrinsic_atomic_add_gs_invocation_count_amd: {
-      nir_ssa_def *buf =
+      nir_def *buf =
          si_nir_load_internal_binding(b, args, SI_GS_QUERY_EMULATED_COUNTERS_BUF, 4);
 
       enum pipe_statistics_query_index index =
@@ -478,21 +478,21 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
          PIPE_STAT_QUERY_GS_PRIMITIVES : PIPE_STAT_QUERY_GS_INVOCATIONS;
       unsigned offset = si_query_pipestat_end_dw_offset(sel->screen, index) * 4;
 
-      nir_ssa_def *count = intrin->src[0].ssa;
+      nir_def *count = intrin->src[0].ssa;
       nir_ssbo_atomic(b, 32, buf, nir_imm_int(b, offset), count,
                       .atomic_op = nir_atomic_op_iadd);
       break;
    }
    case nir_intrinsic_atomic_add_gen_prim_count_amd:
    case nir_intrinsic_atomic_add_xfb_prim_count_amd: {
-      nir_ssa_def *buf = si_nir_load_internal_binding(b, args, SI_GS_QUERY_BUF, 4);
+      nir_def *buf = si_nir_load_internal_binding(b, args, SI_GS_QUERY_BUF, 4);
 
       unsigned stream = nir_intrinsic_stream_id(intrin);
       unsigned offset = intrin->intrinsic == nir_intrinsic_atomic_add_gen_prim_count_amd ?
          offsetof(struct gfx11_sh_query_buffer_mem, stream[stream].generated_primitives) :
          offsetof(struct gfx11_sh_query_buffer_mem, stream[stream].emitted_primitives);
 
-      nir_ssa_def *prim_count = intrin->src[0].ssa;
+      nir_def *prim_count = intrin->src[0].ssa;
       nir_ssbo_atomic(b, 32, buf, nir_imm_int(b, offset), prim_count,
                       .atomic_op = nir_atomic_op_iadd);
       break;
@@ -501,7 +501,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
       replacement = build_attr_ring_desc(b, shader, args);
       break;
    case nir_intrinsic_load_ring_attr_offset_amd: {
-      nir_ssa_def *offset = ac_nir_unpack_arg(b, &args->ac, args->ac.gs_attr_offset, 0, 15);
+      nir_def *offset = ac_nir_unpack_arg(b, &args->ac, args->ac.gs_attr_offset, 0, 15);
       replacement = nir_ishl_imm(b, offset, 9);
       break;
    }
@@ -549,12 +549,12 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
       if (key->ps.mono.interpolate_at_sample_force_center) {
          replacement = nir_load_barycentric_pixel(b, 32, .interp_mode = mode);
       } else {
-         nir_ssa_def *sample_id = intrin->src[0].ssa;
+         nir_def *sample_id = intrin->src[0].ssa;
          /* offset = sample_id * 8  (8 = 2 floats containing samplepos.xy) */
-         nir_ssa_def *offset = nir_ishl_imm(b, sample_id, 3);
+         nir_def *offset = nir_ishl_imm(b, sample_id, 3);
 
-         nir_ssa_def *buf = si_nir_load_internal_binding(b, args, SI_PS_CONST_SAMPLE_POSITIONS, 4);
-         nir_ssa_def *sample_pos = nir_load_ubo(b, 2, 32, buf, offset, .range = ~0);
+         nir_def *buf = si_nir_load_internal_binding(b, args, SI_PS_CONST_SAMPLE_POSITIONS, 4);
+         nir_def *sample_pos = nir_load_ubo(b, 2, 32, buf, offset, .range = ~0);
 
          sample_pos = nir_fadd_imm(b, sample_pos, -0.5);
 
@@ -576,7 +576,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
    }
    case nir_intrinsic_load_ring_tess_factors_amd: {
       assert(s->tess_offchip_ring);
-      nir_ssa_def *addr = nir_channel(b, s->tess_offchip_ring, 0);
+      nir_def *addr = nir_channel(b, s->tess_offchip_ring, 0);
       addr = nir_iadd_imm(b, addr, sel->screen->hs.tess_offchip_ring_size);
       replacement = nir_vector_insert_imm(b, s->tess_offchip_ring, addr, 0);
       break;
@@ -588,7 +588,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
       replacement = ac_nir_load_arg(b, &args->ac, args->alpha_reference);
       break;
    case nir_intrinsic_load_barycentric_optimize_amd: {
-      nir_ssa_def *prim_mask = ac_nir_load_arg(b, &args->ac, args->ac.prim_mask);
+      nir_def *prim_mask = ac_nir_load_arg(b, &args->ac, args->ac.prim_mask);
       /* enabled when bit 31 is set */
       replacement = nir_ilt_imm(b, prim_mask, 0);
       break;
@@ -606,18 +606,18 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
          offset = util_bitcount(colors_read & 0xf);
       }
 
-      nir_ssa_def *color[4];
+      nir_def *color[4];
       for (int i = 0; i < 4; i++) {
          color[i] = colors_read & BITFIELD_BIT(start + i) ?
             ac_nir_load_arg_at_offset(b, &args->ac, args->color_start, offset++) :
-            nir_ssa_undef(b, 1, 32);
+            nir_undef(b, 1, 32);
       }
 
       replacement = nir_vec(b, color, 4);
       break;
    }
    case nir_intrinsic_load_point_coord_maybe_flipped: {
-      nir_ssa_def *interp_param =
+      nir_def *interp_param =
          nir_load_barycentric_pixel(b, 32, .interp_mode = INTERP_MODE_NONE);
 
       /* Load point coordinates (x, y) which are written by the hw after the interpolated inputs */
@@ -655,7 +655,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
           *   shifted = v_mul_u32_u24 extracted, 0x80402u   ; shift the bits: 8->9, 9->19, 10->29
           *   result = v_and_b32 shifted, 0x20080200        ; remove garbage
           */
-         nir_ssa_def *tmp = ac_nir_load_arg(b, &args->ac, args->ac.gs_invocation_id);
+         nir_def *tmp = ac_nir_load_arg(b, &args->ac, args->ac.gs_invocation_id);
          tmp = nir_iand_imm(b, tmp, 0x700);
          tmp = nir_imul_imm(b, tmp, 0x80402);
          replacement = nir_iand_imm(b, tmp, 0x20080200);
@@ -697,7 +697,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
       unsigned stream_id = nir_intrinsic_stream_id(intrin);
       /* Unused nir_load_ring_gsvs_amd may not be eliminated yet. */
       replacement = s->gsvs_ring[stream_id] ?
-         s->gsvs_ring[stream_id] : nir_ssa_undef(b, 4, 32);
+         s->gsvs_ring[stream_id] : nir_undef(b, 4, 32);
       break;
    }
    case nir_intrinsic_load_user_data_amd:
@@ -709,7 +709,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s
    }
 
    if (replacement)
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, replacement);
+      nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
 
    nir_instr_remove(instr);
    nir_instr_free(instr);
@@ -744,15 +744,15 @@ static bool lower_tex(nir_builder *b, nir_instr *instr, struct lower_abi_state *
       int comp_index = nir_tex_instr_src_index(tex, nir_tex_src_comparator);
       assert(samp_index >= 0 && comp_index >= 0);
 
-      nir_ssa_def *sampler = tex->src[samp_index].src.ssa;
-      nir_ssa_def *compare = tex->src[comp_index].src.ssa;
+      nir_def *sampler = tex->src[samp_index].src.ssa;
+      nir_def *compare = tex->src[comp_index].src.ssa;
       /* Must have been lowered to descriptor. */
       assert(sampler->num_components > 1);
 
-      nir_ssa_def *upgraded = nir_channel(b, sampler, 3);
+      nir_def *upgraded = nir_channel(b, sampler, 3);
       upgraded = nir_i2b(b, nir_ubfe_imm(b, upgraded, 29, 1));
 
-      nir_ssa_def *clamped = nir_fsat(b, compare);
+      nir_def *clamped = nir_fsat(b, compare);
       compare = nir_bcsel(b, upgraded, clamped, compare);
 
       nir_instr_rewrite_src_ssa(instr, &tex->src[comp_index].src, compare);
index 6e721ad..66c000f 100644
@@ -25,10 +25,10 @@ struct lower_resource_state {
    struct si_shader_args *args;
 };
 
-static nir_ssa_def *load_ubo_desc_fast_path(nir_builder *b, nir_ssa_def *addr_lo,
+static nir_def *load_ubo_desc_fast_path(nir_builder *b, nir_def *addr_lo,
                                             struct si_shader_selector *sel)
 {
-   nir_ssa_def *addr_hi =
+   nir_def *addr_hi =
       nir_imm_int(b, S_008F04_BASE_ADDRESS_HI(sel->screen->info.address32_hi));
 
    uint32_t rsrc3 =
@@ -49,23 +49,23 @@ static nir_ssa_def *load_ubo_desc_fast_path(nir_builder *b, nir_ssa_def *addr_lo
                    nir_imm_int(b, rsrc3));
 }
 
-static nir_ssa_def *clamp_index(nir_builder *b, nir_ssa_def *index, unsigned max)
+static nir_def *clamp_index(nir_builder *b, nir_def *index, unsigned max)
 {
    if (util_is_power_of_two_or_zero(max))
       return nir_iand_imm(b, index, max - 1);
    else {
-      nir_ssa_def *clamp = nir_imm_int(b, max - 1);
-      nir_ssa_def *cond = nir_uge(b, clamp, index);
+      nir_def *clamp = nir_imm_int(b, max - 1);
+      nir_def *cond = nir_uge(b, clamp, index);
       return nir_bcsel(b, cond, index, clamp);
    }
 }
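
Annotation: clamp_index computes min(index, max - 1); power-of-two (or zero) slot counts wrap with a single AND, everything else pays a compare plus select. Scalar equivalent (for max == 0 the AND with ~0u passes index through, matching util_is_power_of_two_or_zero in the NIR version):

   /* min(index, max - 1), cheap when max is a power of two. */
   static unsigned clamp_index_scalar(unsigned index, unsigned max)
   {
      if ((max & (max - 1)) == 0) /* power of two or zero */
         return index & (max - 1);
      unsigned clamp = max - 1;
      return clamp >= index ? index : clamp;
   }
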
 
-static nir_ssa_def *load_ubo_desc(nir_builder *b, nir_ssa_def *index,
+static nir_def *load_ubo_desc(nir_builder *b, nir_def *index,
                                   struct lower_resource_state *s)
 {
    struct si_shader_selector *sel = s->shader->selector;
 
-   nir_ssa_def *addr = ac_nir_load_arg(b, &s->args->ac, s->args->const_and_shader_buffers);
+   nir_def *addr = ac_nir_load_arg(b, &s->args->ac, s->args->const_and_shader_buffers);
 
    if (sel->info.base.num_ubos == 1 && sel->info.base.num_ssbos == 0)
       return load_ubo_desc_fast_path(b, addr, sel);
@@ -73,11 +73,11 @@ static nir_ssa_def *load_ubo_desc(nir_builder *b, nir_ssa_def *index,
    index = clamp_index(b, index, sel->info.base.num_ubos);
    index = nir_iadd_imm(b, index, SI_NUM_SHADER_BUFFERS);
 
-   nir_ssa_def *offset = nir_ishl_imm(b, index, 4);
+   nir_def *offset = nir_ishl_imm(b, index, 4);
    return nir_load_smem_amd(b, 4, addr, offset);
 }
 
-static nir_ssa_def *load_ssbo_desc(nir_builder *b, nir_src *index,
+static nir_def *load_ssbo_desc(nir_builder *b, nir_src *index,
                                    struct lower_resource_state *s)
 {
    struct si_shader_selector *sel = s->shader->selector;
@@ -89,15 +89,15 @@ static nir_ssa_def *load_ssbo_desc(nir_builder *b, nir_src *index,
          return ac_nir_load_arg(b, &s->args->ac, s->args->cs_shaderbuf[slot]);
    }
 
-   nir_ssa_def *addr = ac_nir_load_arg(b, &s->args->ac, s->args->const_and_shader_buffers);
-   nir_ssa_def *slot = clamp_index(b, index->ssa, sel->info.base.num_ssbos);
+   nir_def *addr = ac_nir_load_arg(b, &s->args->ac, s->args->const_and_shader_buffers);
+   nir_def *slot = clamp_index(b, index->ssa, sel->info.base.num_ssbos);
    slot = nir_isub_imm(b, SI_NUM_SHADER_BUFFERS - 1, slot);
 
-   nir_ssa_def *offset = nir_ishl_imm(b, slot, 4);
+   nir_def *offset = nir_ishl_imm(b, slot, 4);
    return nir_load_smem_amd(b, 4, addr, offset);
 }
 
-static nir_ssa_def *fixup_image_desc(nir_builder *b, nir_ssa_def *rsrc, bool uses_store,
+static nir_def *fixup_image_desc(nir_builder *b, nir_def *rsrc, bool uses_store,
                                      struct lower_resource_state *s)
 {
    struct si_shader_selector *sel = s->shader->selector;
@@ -117,7 +117,7 @@ static nir_ssa_def *fixup_image_desc(nir_builder *b, nir_ssa_def *rsrc, bool use
    if (uses_store &&
        screen->info.gfx_level <= GFX9 &&
        screen->info.gfx_level >= GFX8) {
-      nir_ssa_def *tmp = nir_channel(b, rsrc, 6);
+      nir_def *tmp = nir_channel(b, rsrc, 6);
       tmp = nir_iand_imm(b, tmp, C_008F28_COMPRESSION_EN);
       rsrc = nir_vector_insert_imm(b, rsrc, tmp, 6);
    }
@@ -125,7 +125,7 @@ static nir_ssa_def *fixup_image_desc(nir_builder *b, nir_ssa_def *rsrc, bool use
    if (!uses_store &&
        screen->info.has_image_load_dcc_bug &&
        screen->always_allow_dcc_stores) {
-      nir_ssa_def *tmp = nir_channel(b, rsrc, 6);
+      nir_def *tmp = nir_channel(b, rsrc, 6);
       tmp = nir_iand_imm(b, tmp, C_00A018_WRITE_COMPRESS_ENABLE);
       rsrc = nir_vector_insert_imm(b, rsrc, tmp, 6);
    }
@@ -136,12 +136,12 @@ static nir_ssa_def *fixup_image_desc(nir_builder *b, nir_ssa_def *rsrc, bool use
 /* AC_DESC_FMASK is handled exactly like AC_DESC_IMAGE. The caller should
  * adjust "index" to point to FMASK.
  */
-static nir_ssa_def *load_image_desc(nir_builder *b, nir_ssa_def *list, nir_ssa_def *index,
+static nir_def *load_image_desc(nir_builder *b, nir_def *list, nir_def *index,
                                     enum ac_descriptor_type desc_type, bool uses_store,
                                     struct lower_resource_state *s)
 {
    /* index is in uvec8 unit, convert to offset in bytes */
-   nir_ssa_def *offset = nir_ishl_imm(b, index, 5);
+   nir_def *offset = nir_ishl_imm(b, index, 5);
 
    unsigned num_channels;
    if (desc_type == AC_DESC_BUFFER) {
@@ -152,7 +152,7 @@ static nir_ssa_def *load_image_desc(nir_builder *b, nir_ssa_def *list, nir_ssa_d
       num_channels = 8;
    }
 
-   nir_ssa_def *rsrc = nir_load_smem_amd(b, num_channels, list, offset);
+   nir_def *rsrc = nir_load_smem_amd(b, num_channels, list, offset);
 
    if (desc_type == AC_DESC_IMAGE)
       rsrc = fixup_image_desc(b, rsrc, uses_store, s);
@@ -160,14 +160,14 @@ static nir_ssa_def *load_image_desc(nir_builder *b, nir_ssa_def *list, nir_ssa_d
    return rsrc;
 }
 
-static nir_ssa_def *deref_to_index(nir_builder *b,
+static nir_def *deref_to_index(nir_builder *b,
                                    nir_deref_instr *deref,
                                    unsigned max_slots,
-                                   nir_ssa_def **dynamic_index_ret,
+                                   nir_def **dynamic_index_ret,
                                    unsigned *const_index_ret)
 {
    unsigned const_index = 0;
-   nir_ssa_def *dynamic_index = NULL;
+   nir_def *dynamic_index = NULL;
    while (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
       unsigned array_size = MAX2(glsl_get_aoa_size(deref->type), 1);
@@ -175,7 +175,7 @@ static nir_ssa_def *deref_to_index(nir_builder *b,
       if (nir_src_is_const(deref->arr.index)) {
          const_index += array_size * nir_src_as_uint(deref->arr.index);
       } else {
-         nir_ssa_def *tmp = nir_imul_imm(b, deref->arr.index.ssa, array_size);
+         nir_def *tmp = nir_imul_imm(b, deref->arr.index.ssa, array_size);
          dynamic_index = dynamic_index ? nir_iadd(b, dynamic_index, tmp) : tmp;
       }
 
@@ -189,7 +189,7 @@ static nir_ssa_def *deref_to_index(nir_builder *b,
    if (const_index >= max_slots)
       const_index = base_index;
 
-   nir_ssa_def *index = nir_imm_int(b, const_index);
+   nir_def *index = nir_imm_int(b, const_index);
    if (dynamic_index) {
       index = nir_iadd(b, dynamic_index, index);
 
@@ -213,16 +213,16 @@ static nir_ssa_def *deref_to_index(nir_builder *b,
    return index;
 }
 
-static nir_ssa_def *load_deref_image_desc(nir_builder *b, nir_deref_instr *deref,
+static nir_def *load_deref_image_desc(nir_builder *b, nir_deref_instr *deref,
                                           enum ac_descriptor_type desc_type, bool is_load,
                                           struct lower_resource_state *s)
 {
    unsigned const_index;
-   nir_ssa_def *dynamic_index;
-   nir_ssa_def *index = deref_to_index(b, deref, s->shader->selector->info.base.num_images,
+   nir_def *dynamic_index;
+   nir_def *index = deref_to_index(b, deref, s->shader->selector->info.base.num_images,
                                        &dynamic_index, &const_index);
 
-   nir_ssa_def *desc;
+   nir_def *desc;
    if (!dynamic_index && desc_type != AC_DESC_FMASK &&
        const_index < s->shader->selector->cs_num_images_in_user_sgprs) {
       /* Fast path if the image is in user SGPRs. */
@@ -237,14 +237,14 @@ static nir_ssa_def *load_deref_image_desc(nir_builder *b, nir_deref_instr *deref
 
       index = nir_isub_imm(b, SI_NUM_IMAGE_SLOTS - 1, index);
 
-      nir_ssa_def *list = ac_nir_load_arg(b, &s->args->ac, s->args->samplers_and_images);
+      nir_def *list = ac_nir_load_arg(b, &s->args->ac, s->args->samplers_and_images);
       desc = load_image_desc(b, list, index, desc_type, !is_load, s);
    }
 
    return desc;
 }
 
-static nir_ssa_def *load_bindless_image_desc(nir_builder *b, nir_ssa_def *index,
+static nir_def *load_bindless_image_desc(nir_builder *b, nir_def *index,
                                              enum ac_descriptor_type desc_type, bool is_load,
                                              struct lower_resource_state *s)
 {
@@ -255,7 +255,7 @@ static nir_ssa_def *load_bindless_image_desc(nir_builder *b, nir_ssa_def *index,
    if (desc_type == AC_DESC_FMASK)
       index = nir_iadd_imm(b, index, 1);
 
-   nir_ssa_def *list = ac_nir_load_arg(b, &s->args->ac, s->args->bindless_samplers_and_images);
+   nir_def *list = ac_nir_load_arg(b, &s->args->ac, s->args->bindless_samplers_and_images);
    return load_image_desc(b, list, index, desc_type, !is_load, s);
 }
 
@@ -266,7 +266,7 @@ static bool lower_resource_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
    case nir_intrinsic_load_ubo: {
       assert(!(nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM));
 
-      nir_ssa_def *desc = load_ubo_desc(b, intrin->src[0].ssa, s);
+      nir_def *desc = load_ubo_desc(b, intrin->src[0].ssa, s);
       nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[0], desc);
       break;
    }
@@ -275,23 +275,23 @@ static bool lower_resource_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
    case nir_intrinsic_ssbo_atomic_swap: {
       assert(!(nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM));
 
-      nir_ssa_def *desc = load_ssbo_desc(b, &intrin->src[0], s);
+      nir_def *desc = load_ssbo_desc(b, &intrin->src[0], s);
       nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[0], desc);
       break;
    }
    case nir_intrinsic_store_ssbo: {
       assert(!(nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM));
 
-      nir_ssa_def *desc = load_ssbo_desc(b, &intrin->src[1], s);
+      nir_def *desc = load_ssbo_desc(b, &intrin->src[1], s);
       nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[1], desc);
       break;
    }
    case nir_intrinsic_get_ssbo_size: {
       assert(!(nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM));
 
-      nir_ssa_def *desc = load_ssbo_desc(b, &intrin->src[0], s);
-      nir_ssa_def *size = nir_channel(b, desc, 2);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, size);
+      nir_def *desc = load_ssbo_desc(b, &intrin->src[0], s);
+      nir_def *size = nir_channel(b, desc, 2);
+      nir_def_rewrite_uses(&intrin->dest.ssa, size);
       nir_instr_remove(&intrin->instr);
       break;
    }
@@ -320,10 +320,10 @@ static bool lower_resource_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
          intrin->intrinsic == nir_intrinsic_image_deref_fragment_mask_load_amd ||
          intrin->intrinsic == nir_intrinsic_image_deref_descriptor_amd;
 
-      nir_ssa_def *desc = load_deref_image_desc(b, deref, desc_type, is_load, s);
+      nir_def *desc = load_deref_image_desc(b, deref, desc_type, is_load, s);
 
       if (intrin->intrinsic == nir_intrinsic_image_deref_descriptor_amd) {
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
+         nir_def_rewrite_uses(&intrin->dest.ssa, desc);
          nir_instr_remove(&intrin->instr);
       } else {
          nir_intrinsic_set_image_dim(intrin, glsl_get_sampler_dim(deref->type));
@@ -354,12 +354,12 @@ static bool lower_resource_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
          intrin->intrinsic == nir_intrinsic_bindless_image_fragment_mask_load_amd ||
          intrin->intrinsic == nir_intrinsic_bindless_image_descriptor_amd;
 
-      nir_ssa_def *index = nir_u2u32(b, intrin->src[0].ssa);
+      nir_def *index = nir_u2u32(b, intrin->src[0].ssa);
 
-      nir_ssa_def *desc = load_bindless_image_desc(b, index, desc_type, is_load, s);
+      nir_def *desc = load_bindless_image_desc(b, index, desc_type, is_load, s);
 
       if (intrin->intrinsic == nir_intrinsic_bindless_image_descriptor_amd) {
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
+         nir_def_rewrite_uses(&intrin->dest.ssa, desc);
          nir_instr_remove(&intrin->instr);
       } else {
          nir_instr_rewrite_src(&intrin->instr, &intrin->src[0], nir_src_for_ssa(desc));
@@ -373,11 +373,11 @@ static bool lower_resource_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
    return true;
 }
 
-static nir_ssa_def *load_sampler_desc(nir_builder *b, nir_ssa_def *list, nir_ssa_def *index,
+static nir_def *load_sampler_desc(nir_builder *b, nir_def *list, nir_def *index,
                                       enum ac_descriptor_type desc_type)
 {
    /* index is in 16 dword unit, convert to offset in bytes */
-   nir_ssa_def *offset = nir_ishl_imm(b, index, 6);
+   nir_def *offset = nir_ishl_imm(b, index, 6);
 
    unsigned num_channels = 0;
    switch (desc_type) {
@@ -408,18 +408,18 @@ static nir_ssa_def *load_sampler_desc(nir_builder *b, nir_ssa_def *list, nir_ssa
    return nir_load_smem_amd(b, num_channels, list, offset);
 }
 
-static nir_ssa_def *load_deref_sampler_desc(nir_builder *b, nir_deref_instr *deref,
+static nir_def *load_deref_sampler_desc(nir_builder *b, nir_deref_instr *deref,
                                             enum ac_descriptor_type desc_type,
                                             struct lower_resource_state *s,
                                             bool return_descriptor)
 {
    unsigned max_slots = BITSET_LAST_BIT(b->shader->info.textures_used);
-   nir_ssa_def *index = deref_to_index(b, deref, max_slots, NULL, NULL);
+   nir_def *index = deref_to_index(b, deref, max_slots, NULL, NULL);
    index = nir_iadd_imm(b, index, SI_NUM_IMAGE_SLOTS / 2);
 
    /* return actual desc when required by caller */
    if (return_descriptor) {
-      nir_ssa_def *list = ac_nir_load_arg(b, &s->args->ac, s->args->samplers_and_images);
+      nir_def *list = ac_nir_load_arg(b, &s->args->ac, s->args->samplers_and_images);
       return load_sampler_desc(b, list, index, desc_type);
    }
 
@@ -430,11 +430,11 @@ static nir_ssa_def *load_deref_sampler_desc(nir_builder *b, nir_deref_instr *der
    return index;
 }
 
-static nir_ssa_def *load_bindless_sampler_desc(nir_builder *b, nir_ssa_def *index,
+static nir_def *load_bindless_sampler_desc(nir_builder *b, nir_def *index,
                                                enum ac_descriptor_type desc_type,
                                                struct lower_resource_state *s)
 {
-   nir_ssa_def *list = ac_nir_load_arg(b, &s->args->ac, s->args->bindless_samplers_and_images);
+   nir_def *list = ac_nir_load_arg(b, &s->args->ac, s->args->bindless_samplers_and_images);
 
    /* 64 bit to 32 bit */
    index = nir_u2u32(b, index);
@@ -442,9 +442,9 @@ static nir_ssa_def *load_bindless_sampler_desc(nir_builder *b, nir_ssa_def *inde
    return load_sampler_desc(b, list, index, desc_type);
 }
 
-static nir_ssa_def *fixup_sampler_desc(nir_builder *b,
+static nir_def *fixup_sampler_desc(nir_builder *b,
                                        nir_tex_instr *tex,
-                                       nir_ssa_def *sampler,
+                                       nir_def *sampler,
                                        struct lower_resource_state *s)
 {
    const struct si_shader_selector *sel = s->shader->selector;
@@ -453,7 +453,7 @@ static nir_ssa_def *fixup_sampler_desc(nir_builder *b,
       return sampler;
 
    /* Set TRUNC_COORD=0 for textureGather(). */
-   nir_ssa_def *dword0 = nir_channel(b, sampler, 0);
+   nir_def *dword0 = nir_channel(b, sampler, 0);
    dword0 = nir_iand_imm(b, dword0, C_008F30_TRUNC_COORD);
    sampler = nir_vector_insert_imm(b, sampler, dword0, 0);
    return sampler;
@@ -464,8 +464,8 @@ static bool lower_resource_tex(nir_builder *b, nir_tex_instr *tex,
 {
    nir_deref_instr *texture_deref = NULL;
    nir_deref_instr *sampler_deref = NULL;
-   nir_ssa_def *texture_handle = NULL;
-   nir_ssa_def *sampler_handle = NULL;
+   nir_def *texture_handle = NULL;
+   nir_def *sampler_handle = NULL;
 
    for (unsigned i = 0; i < tex->num_srcs; i++) {
       switch (tex->src[i].src_type) {
@@ -493,32 +493,32 @@ static bool lower_resource_tex(nir_builder *b, nir_tex_instr *tex,
       desc_type = tex->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;
 
    if (tex->op == nir_texop_descriptor_amd) {
-      nir_ssa_def *image;
+      nir_def *image;
       if (texture_deref)
          image = load_deref_sampler_desc(b, texture_deref, desc_type, s, true);
       else
          image = load_bindless_sampler_desc(b, texture_handle, desc_type, s);
-      nir_ssa_def_rewrite_uses(&tex->dest.ssa, image);
+      nir_def_rewrite_uses(&tex->dest.ssa, image);
       nir_instr_remove(&tex->instr);
       return true;
    }
 
    if (tex->op == nir_texop_sampler_descriptor_amd) {
-      nir_ssa_def *sampler;
+      nir_def *sampler;
       if (sampler_deref)
          sampler = load_deref_sampler_desc(b, sampler_deref, AC_DESC_SAMPLER, s, true);
       else
          sampler = load_bindless_sampler_desc(b, sampler_handle, AC_DESC_SAMPLER, s);
-      nir_ssa_def_rewrite_uses(&tex->dest.ssa, sampler);
+      nir_def_rewrite_uses(&tex->dest.ssa, sampler);
       nir_instr_remove(&tex->instr);
       return true;
    }
 
-   nir_ssa_def *image = texture_deref ?
+   nir_def *image = texture_deref ?
       load_deref_sampler_desc(b, texture_deref, desc_type, s, !tex->texture_non_uniform) :
       load_bindless_sampler_desc(b, texture_handle, desc_type, s);
 
-   nir_ssa_def *sampler = NULL;
+   nir_def *sampler = NULL;
    if (sampler_deref)
       sampler = load_deref_sampler_desc(b, sampler_deref, AC_DESC_SAMPLER, s, !tex->sampler_non_uniform);
    else if (sampler_handle)
index 06c8cd4..c82da81 100644
@@ -15,20 +15,20 @@ struct lower_vs_inputs_state {
    struct si_shader *shader;
    struct si_shader_args *args;
 
-   nir_ssa_def *instance_divisor_constbuf;
-   nir_ssa_def *vertex_index[16];
+   nir_def *instance_divisor_constbuf;
+   nir_def *vertex_index[16];
 };
 
 /* See fast_idiv_by_const.h. */
 /* If num != UINT_MAX, this more efficient version can be used. */
 /* Set: increment = util_fast_udiv_info::increment; */
-static nir_ssa_def *
-fast_udiv_nuw(nir_builder *b, nir_ssa_def *num, nir_ssa_def *divisor)
+static nir_def *
+fast_udiv_nuw(nir_builder *b, nir_def *num, nir_def *divisor)
 {
-   nir_ssa_def *multiplier = nir_channel(b, divisor, 0);
-   nir_ssa_def *pre_shift = nir_channel(b, divisor, 1);
-   nir_ssa_def *post_shift = nir_channel(b, divisor, 2);
-   nir_ssa_def *increment = nir_channel(b, divisor, 3);
+   nir_def *multiplier = nir_channel(b, divisor, 0);
+   nir_def *pre_shift = nir_channel(b, divisor, 1);
+   nir_def *post_shift = nir_channel(b, divisor, 2);
+   nir_def *increment = nir_channel(b, divisor, 3);
 
    num = nir_ushr(b, num, pre_shift);
    num = nir_iadd_nuw(b, num, increment);
@@ -36,7 +36,7 @@ fast_udiv_nuw(nir_builder *b, nir_ssa_def *num, nir_ssa_def *divisor)
    return nir_ushr(b, num, post_shift);
 }
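
Annotation: divisor here is a uvec4 of precomputed util_fast_udiv_info fields, and the elided middle of the function is the 32x32->64 high multiply. A scalar reference, assuming it mirrors util_fast_udiv32_nuw from util/fast_idiv_by_const.h; as the comment above says, it is only valid when num + increment cannot wrap, hence the UINT_MAX caveat:

   #include <stdint.h>

   /* Divide by a constant with one high multiply instead of a udiv. */
   static uint32_t fast_udiv32_nuw(uint32_t num, uint32_t multiplier,
                                   uint32_t pre_shift, uint32_t increment,
                                   uint32_t post_shift)
   {
      num >>= pre_shift;
      num += increment; /* assumed not to wrap (num != UINT_MAX) */
      num = (uint32_t)(((uint64_t)num * multiplier) >> 32);
      return num >> post_shift;
   }
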
 
-static nir_ssa_def *
+static nir_def *
 get_vertex_index_for_mono_shader(nir_builder *b, int input_index,
                                  struct lower_vs_inputs_state *s)
 {
@@ -48,17 +48,17 @@ get_vertex_index_for_mono_shader(nir_builder *b, int input_index,
       key->ge.part.vs.prolog.instance_divisor_is_fetched & (1u << input_index);
 
    if (divisor_is_one || divisor_is_fetched) {
-      nir_ssa_def *instance_id = nir_load_instance_id(b);
+      nir_def *instance_id = nir_load_instance_id(b);
 
       /* This is used to determine vs vgpr count in si_get_vs_vgpr_comp_cnt(). */
       s->shader->info.uses_instanceid = true;
 
-      nir_ssa_def *index = NULL;
+      nir_def *index = NULL;
       if (divisor_is_one) {
          index = instance_id;
       } else {
-         nir_ssa_def *offset = nir_imm_int(b, input_index * 16);
-         nir_ssa_def *divisor = nir_load_ubo(b, 4, 32, s->instance_divisor_constbuf, offset,
+         nir_def *offset = nir_imm_int(b, input_index * 16);
+         nir_def *divisor = nir_load_ubo(b, 4, 32, s->instance_divisor_constbuf, offset,
                                              .range = ~0);
 
          /* The faster NUW version doesn't work when InstanceID == UINT_MAX.
@@ -67,17 +67,17 @@ get_vertex_index_for_mono_shader(nir_builder *b, int input_index,
          index = fast_udiv_nuw(b, instance_id, divisor);
       }
 
-      nir_ssa_def *start_instance = nir_load_base_instance(b);
+      nir_def *start_instance = nir_load_base_instance(b);
       return nir_iadd(b, index, start_instance);
    } else {
-      nir_ssa_def *vertex_id = nir_load_vertex_id_zero_base(b);
-      nir_ssa_def *base_vertex = nir_load_first_vertex(b);
+      nir_def *vertex_id = nir_load_vertex_id_zero_base(b);
+      nir_def *base_vertex = nir_load_first_vertex(b);
 
       return nir_iadd(b, vertex_id, base_vertex);
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 get_vertex_index_for_part_shader(nir_builder *b, int input_index,
                                  struct lower_vs_inputs_state *s)
 {
@@ -110,27 +110,27 @@ get_vertex_index_for_all_inputs(nir_shader *nir, struct lower_vs_inputs_state *s
 static void
 load_vs_input_from_blit_sgpr(nir_builder *b, unsigned input_index,
                              struct lower_vs_inputs_state *s,
-                             nir_ssa_def *out[4])
+                             nir_def *out[4])
 {
-   nir_ssa_def *vertex_id = nir_load_vertex_id_zero_base(b);
-   nir_ssa_def *sel_x1 = nir_ule_imm(b, vertex_id, 1);
+   nir_def *vertex_id = nir_load_vertex_id_zero_base(b);
+   nir_def *sel_x1 = nir_ule_imm(b, vertex_id, 1);
    /* Use nir_ine, because we have 3 vertices and only
     * the middle one should use y2.
     */
-   nir_ssa_def *sel_y1 = nir_ine_imm(b, vertex_id, 1);
+   nir_def *sel_y1 = nir_ine_imm(b, vertex_id, 1);
 
    if (input_index == 0) {
       /* Position: */
-      nir_ssa_def *x1y1 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 0);
-      nir_ssa_def *x2y2 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 1);
+      nir_def *x1y1 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 0);
+      nir_def *x2y2 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 1);
 
       x1y1 = nir_i2i32(b, nir_unpack_32_2x16(b, x1y1));
       x2y2 = nir_i2i32(b, nir_unpack_32_2x16(b, x2y2));
 
-      nir_ssa_def *x1 = nir_channel(b, x1y1, 0);
-      nir_ssa_def *y1 = nir_channel(b, x1y1, 1);
-      nir_ssa_def *x2 = nir_channel(b, x2y2, 0);
-      nir_ssa_def *y2 = nir_channel(b, x2y2, 1);
+      nir_def *x1 = nir_channel(b, x1y1, 0);
+      nir_def *y1 = nir_channel(b, x1y1, 1);
+      nir_def *x2 = nir_channel(b, x2y2, 0);
+      nir_def *y2 = nir_channel(b, x2y2, 1);
 
       out[0] = nir_i2f32(b, nir_bcsel(b, sel_x1, x1, x2));
       out[1] = nir_i2f32(b, nir_bcsel(b, sel_y1, y1, y2));
@@ -147,10 +147,10 @@ load_vs_input_from_blit_sgpr(nir_builder *b, unsigned input_index,
       } else {
          assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
 
-         nir_ssa_def *x1 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 3);
-         nir_ssa_def *y1 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 4);
-         nir_ssa_def *x2 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 5);
-         nir_ssa_def *y2 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 6);
+         nir_def *x1 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 3);
+         nir_def *y1 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 4);
+         nir_def *x2 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 5);
+         nir_def *y2 = ac_nir_load_arg_at_offset(b, &s->args->ac, s->args->vs_blit_inputs, 6);
 
          out[0] = nir_bcsel(b, sel_x1, x1, x2);
          out[1] = nir_bcsel(b, sel_y1, y1, y2);
@@ -166,37 +166,37 @@ load_vs_input_from_blit_sgpr(nir_builder *b, unsigned input_index,
  * The input exponent is expected to be biased analogous to IEEE-754, i.e. by
  * 2^(exp_bits-1) - 1 (as defined in OpenGL and other graphics APIs).
  */
-static nir_ssa_def *
-ufN_to_float(nir_builder *b, nir_ssa_def *src, unsigned exp_bits, unsigned mant_bits)
+static nir_def *
+ufN_to_float(nir_builder *b, nir_def *src, unsigned exp_bits, unsigned mant_bits)
 {
    assert(src->bit_size == 32);
 
-   nir_ssa_def *mantissa = nir_iand_imm(b, src, (1 << mant_bits) - 1);
+   nir_def *mantissa = nir_iand_imm(b, src, (1 << mant_bits) - 1);
 
    /* Converting normal numbers is just a shift + correcting the exponent bias */
    unsigned normal_shift = 23 - mant_bits;
    unsigned bias_shift = 127 - ((1 << (exp_bits - 1)) - 1);
 
-   nir_ssa_def *shifted = nir_ishl_imm(b, src, normal_shift);
-   nir_ssa_def *normal = nir_iadd_imm(b, shifted, bias_shift << 23);
+   nir_def *shifted = nir_ishl_imm(b, src, normal_shift);
+   nir_def *normal = nir_iadd_imm(b, shifted, bias_shift << 23);
 
    /* Converting nan/inf numbers is the same, but with a different exponent update */
-   nir_ssa_def *naninf = nir_ior_imm(b, normal, 0xff << 23);
+   nir_def *naninf = nir_ior_imm(b, normal, 0xff << 23);
 
    /* Converting denormals is the complex case: determine the leading zeros of the
     * mantissa to obtain the correct shift for the mantissa and exponent correction.
     */
-   nir_ssa_def *ctlz = nir_uclz(b, mantissa);
+   nir_def *ctlz = nir_uclz(b, mantissa);
    /* Shift such that the leading 1 ends up as the LSB of the exponent field. */
-   nir_ssa_def *denormal = nir_ishl(b, mantissa, nir_iadd_imm(b, ctlz, -8));
+   nir_def *denormal = nir_ishl(b, mantissa, nir_iadd_imm(b, ctlz, -8));
 
    unsigned denormal_exp = bias_shift + (32 - mant_bits) - 1;
-   nir_ssa_def *tmp = nir_isub_imm(b, denormal_exp, ctlz);
+   nir_def *tmp = nir_isub_imm(b, denormal_exp, ctlz);
    denormal = nir_iadd(b, denormal, nir_ishl_imm(b, tmp, 23));
 
    /* Select the final result. */
-   nir_ssa_def *cond = nir_uge_imm(b, src, ((1ULL << exp_bits) - 1) << mant_bits);
-   nir_ssa_def *result = nir_bcsel(b, cond, naninf, normal);
+   nir_def *cond = nir_uge_imm(b, src, ((1ULL << exp_bits) - 1) << mant_bits);
+   nir_def *result = nir_bcsel(b, cond, naninf, normal);
 
    cond = nir_uge_imm(b, src, 1ULL << mant_bits);
    result = nir_bcsel(b, cond, result, denormal);
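For cross-checking, a host-side C sketch of the same conversion (a hypothetical helper, not part of the patch; the zero case is made explicit here, while the pass presumably selects 0 for a zero input in the rest of the function, which this hunk elides):

   #include <stdint.h>

   static uint32_t ufN_to_f32_bits(uint32_t v, unsigned exp_bits, unsigned mant_bits)
   {
      uint32_t mantissa = v & ((1u << mant_bits) - 1);
      unsigned normal_shift = 23 - mant_bits;
      unsigned bias_shift = 127 - ((1u << (exp_bits - 1)) - 1);

      /* Normal numbers: shift into place, then correct the exponent bias. */
      uint32_t normal = (v << normal_shift) + (bias_shift << 23);
      /* NaN/Inf: same shift, but force the IEEE-754 exponent to all-ones. */
      uint32_t naninf = normal | (0xffu << 23);

      if (v >= (((1u << exp_bits) - 1) << mant_bits))
         return naninf;
      if (v >= (1u << mant_bits))
         return normal;
      if (mantissa == 0)
         return 0;

      /* Denormal: move the leading 1 to bit 23 (the exponent LSB) and
       * compensate in the exponent, mirroring the nir_uclz path above. */
      unsigned lz = __builtin_clz(mantissa);            /* mantissa != 0 here */
      uint32_t denormal = mantissa << (lz - 8);
      denormal += (bias_shift + (32 - mant_bits) - 1 - lz) << 23;
      return denormal;
   }

Sanity check with an 11-bit float (5 exponent, 6 mantissa bits): ufN_to_f32_bits(1, 5, 6) returns 0x35800000, i.e. 2^-20, the smallest denormal of that format.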
@@ -216,9 +216,9 @@ ufN_to_float(nir_builder *b, nir_ssa_def *src, unsigned exp_bits, unsigned mant_
  * - size = 8 bytes, format != {float,fixed} indicates a 2_10_10_10 data format
  */
 static void
-opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
+opencoded_load_format(nir_builder *b, nir_def *rsrc, nir_def *vindex,
                       union si_vs_fix_fetch fix_fetch, bool known_aligned,
-                      enum amd_gfx_level gfx_level, nir_ssa_def *out[4])
+                      enum amd_gfx_level gfx_level, nir_def *out[4])
 {
    unsigned log_size = fix_fetch.u.log_size;
    unsigned num_channels = fix_fetch.u.num_channels_m1 + 1;
@@ -248,12 +248,12 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
       load_log_size += -log_recombine;
    }
 
-   nir_ssa_def *loads[32]; /* up to 32 bytes */
+   nir_def *loads[32]; /* up to 32 bytes */
    for (unsigned i = 0; i < load_num_channels; ++i) {
-      nir_ssa_def *soffset = nir_imm_int(b, i << load_log_size);
+      nir_def *soffset = nir_imm_int(b, i << load_log_size);
       unsigned num_channels = 1 << (MAX2(load_log_size, 2) - 2);
       unsigned bit_size = 8 << MIN2(load_log_size, 2);
-      nir_ssa_def *zero = nir_imm_int(b, 0);
+      nir_def *zero = nir_imm_int(b, 0);
 
       loads[i] = nir_load_buffer_amd(b, num_channels, bit_size, rsrc, zero, soffset, vindex);
    }
@@ -263,9 +263,9 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
       unsigned dst_bitsize = log_recombine == 2 ? 32 : 16;
 
       for (unsigned src = 0, dst = 0; src < load_num_channels; ++dst) {
-         nir_ssa_def *accum = NULL;
+         nir_def *accum = NULL;
          for (unsigned i = 0; i < (1 << log_recombine); ++i, ++src) {
-            nir_ssa_def *tmp = nir_u2uN(b, loads[src], dst_bitsize);
+            nir_def *tmp = nir_u2uN(b, loads[src], dst_bitsize);
             if (i == 0) {
                accum = tmp;
             } else {
@@ -279,7 +279,7 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
       /* Split vectors of dwords */
       if (load_log_size > 2) {
          assert(load_num_channels == 1);
-         nir_ssa_def *loaded = loads[0];
+         nir_def *loaded = loads[0];
          unsigned log_split = load_log_size - 2;
          log_recombine += log_split;
          load_num_channels = 1 << log_split;
@@ -293,9 +293,9 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
          for (unsigned src = load_num_channels, dst = load_num_channels << -log_recombine;
               src > 0; --src) {
             unsigned dst_bits = 1 << (3 + load_log_size + log_recombine);
-            nir_ssa_def *loaded = loads[src - 1];
+            nir_def *loaded = loads[src - 1];
             for (unsigned i = 1 << -log_recombine; i > 0; --i, --dst) {
-               nir_ssa_def *tmp = nir_ushr_imm(b, loaded, dst_bits * (i - 1));
+               nir_def *tmp = nir_ushr_imm(b, loaded, dst_bits * (i - 1));
                loads[dst - 1] = nir_u2uN(b, tmp, dst_bits);
             }
          }
@@ -311,10 +311,10 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
       }
       case AC_FETCH_FORMAT_FIXED: {
          /* 10_11_11_FLOAT */
-         nir_ssa_def *data = loads[0];
-         nir_ssa_def *red = nir_iand_imm(b, data, 2047);
-         nir_ssa_def *green = nir_iand_imm(b, nir_ushr_imm(b, data, 11), 2047);
-         nir_ssa_def *blue = nir_ushr_imm(b, data, 22);
+         nir_def *data = loads[0];
+         nir_def *red = nir_iand_imm(b, data, 2047);
+         nir_def *green = nir_iand_imm(b, nir_ushr_imm(b, data, 11), 2047);
+         nir_def *blue = nir_ushr_imm(b, data, 22);
 
          loads[0] = ufN_to_float(b, red, 5, 6);
          loads[1] = ufN_to_float(b, green, 5, 6);
@@ -329,7 +329,7 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
       case AC_FETCH_FORMAT_UNORM:
       case AC_FETCH_FORMAT_USCALED: {
          /* 2_10_10_10 data formats */
-         nir_ssa_def *data = loads[0];
+         nir_def *data = loads[0];
 
          loads[0] = nir_ubfe_imm(b, data, 0, 10);
          loads[1] = nir_ubfe_imm(b, data, 10, 10);
@@ -343,7 +343,7 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
       case AC_FETCH_FORMAT_SNORM:
       case AC_FETCH_FORMAT_SSCALED: {
          /* 2_10_10_10 data formats */
-         nir_ssa_def *data = loads[0];
+         nir_def *data = loads[0];
 
          loads[0] = nir_ibfe_imm(b, data, 0, 10);
          loads[1] = nir_ibfe_imm(b, data, 10, 10);
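As a plain-C illustration (hypothetical names, not part of the patch) of the 2_10_10_10 layout these nir_ubfe_imm/nir_ibfe_imm sequences decode:

   /* dword layout: a[31:30] b[29:20] g[19:10] r[9:0] */
   uint32_t r = (data >>  0) & 0x3ff;
   uint32_t g = (data >> 10) & 0x3ff;
   uint32_t b = (data >> 20) & 0x3ff;
   uint32_t a = (data >> 30) & 0x3;

nir_ubfe_imm zero-extends each field, while nir_ibfe_imm sign-extends it for the signed (SNORM/SSCALED) cases.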
@@ -388,7 +388,7 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
       break;
    case AC_FETCH_FORMAT_FIXED:
       for (unsigned chan = 0; chan < num_channels; ++chan) {
-         nir_ssa_def *tmp = nir_i2f32(b, loads[chan]);
+         nir_def *tmp = nir_i2f32(b, loads[chan]);
          loads[chan] = nir_fmul_imm(b, tmp, 1.0 / 0x10000);
       }
       break;
@@ -396,7 +396,7 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
       for (unsigned chan = 0; chan < num_channels; ++chan) {
          /* 2_10_10_10 data formats */
          unsigned bits = log_size == 3 ? (chan == 3 ? 2 : 10) : (8 << log_size);
-         nir_ssa_def *tmp = nir_u2f32(b, loads[chan]);
+         nir_def *tmp = nir_u2f32(b, loads[chan]);
          loads[chan] = nir_fmul_imm(b, tmp, 1.0 / BITFIELD64_MASK(bits));
       }
       break;
@@ -404,7 +404,7 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
       for (unsigned chan = 0; chan < num_channels; ++chan) {
          /* 2_10_10_10 data formats */
          unsigned bits = log_size == 3 ? (chan == 3 ? 2 : 10) : (8 << log_size);
-         nir_ssa_def *tmp = nir_i2f32(b, loads[chan]);
+         nir_def *tmp = nir_i2f32(b, loads[chan]);
          tmp = nir_fmul_imm(b, tmp, 1.0 / BITFIELD64_MASK(bits - 1));
          /* Clamp to [-1, 1] */
          tmp = nir_fmax(b, tmp, nir_imm_float(b, -1));
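In formula form: a b-bit snorm channel becomes f = max(v / (2^(b-1) - 1), -1). Only the low side needs the clamp, because the largest positive code, 2^(b-1) - 1, already maps exactly to 1.0, while the most negative code, -2^(b-1), would otherwise land slightly below -1.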
@@ -425,7 +425,7 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
    }
 
    if (reverse) {
-      nir_ssa_def *tmp = loads[0];
+      nir_def *tmp = loads[0];
       loads[0] = loads[2];
       loads[2] = tmp;
    }
@@ -436,21 +436,21 @@ opencoded_load_format(nir_builder *b, nir_ssa_def *rsrc, nir_ssa_def *vindex,
 static void
 load_vs_input_from_vertex_buffer(nir_builder *b, unsigned input_index,
                                  struct lower_vs_inputs_state *s,
-                                 unsigned bit_size, nir_ssa_def *out[4])
+                                 unsigned bit_size, nir_def *out[4])
 {
    const struct si_shader_selector *sel = s->shader->selector;
    const union si_shader_key *key = &s->shader->key;
 
-   nir_ssa_def *vb_desc;
+   nir_def *vb_desc;
    if (input_index < sel->info.num_vbos_in_user_sgprs) {
       vb_desc = ac_nir_load_arg(b, &s->args->ac, s->args->vb_descriptors[input_index]);
    } else {
       unsigned index = input_index - sel->info.num_vbos_in_user_sgprs;
-      nir_ssa_def *addr = ac_nir_load_arg(b, &s->args->ac, s->args->ac.vertex_buffers);
+      nir_def *addr = ac_nir_load_arg(b, &s->args->ac, s->args->ac.vertex_buffers);
       vb_desc = nir_load_smem_amd(b, 4, addr, nir_imm_int(b, index * 16));
    }
 
-   nir_ssa_def *vertex_index = s->vertex_index[input_index];
+   nir_def *vertex_index = s->vertex_index[input_index];
 
    /* Use the open-coded implementation for all loads of doubles and
     * of dword-sized data that needs fixups. We need to insert conversion
@@ -480,12 +480,12 @@ load_vs_input_from_vertex_buffer(nir_builder *b, unsigned input_index,
    unsigned required_channels = util_last_bit(sel->info.input[input_index].usage_mask);
    if (required_channels == 0) {
       for (unsigned i = 0; i < 4; ++i)
-         out[i] = nir_ssa_undef(b, 1, bit_size);
+         out[i] = nir_undef(b, 1, bit_size);
       return;
    }
 
    /* Do multiple loads for special formats. */
-   nir_ssa_def *fetches[4];
+   nir_def *fetches[4];
    unsigned num_fetches;
    unsigned fetch_stride;
    unsigned channels_per_fetch;
@@ -501,7 +501,7 @@ load_vs_input_from_vertex_buffer(nir_builder *b, unsigned input_index,
    }
 
    for (unsigned i = 0; i < num_fetches; ++i) {
-      nir_ssa_def *zero = nir_imm_int(b, 0);
+      nir_def *zero = nir_imm_int(b, 0);
       fetches[i] = nir_load_buffer_amd(b, channels_per_fetch, bit_size, vb_desc,
                                        zero, zero, vertex_index,
                                        .base = fetch_stride * i,
@@ -509,7 +509,7 @@ load_vs_input_from_vertex_buffer(nir_builder *b, unsigned input_index,
    }
 
    if (num_fetches == 1 && channels_per_fetch > 1) {
-      nir_ssa_def *fetch = fetches[0];
+      nir_def *fetch = fetches[0];
       for (unsigned i = 0; i < channels_per_fetch; ++i)
          fetches[i] = nir_channel(b, fetch, i);
 
@@ -518,7 +518,7 @@ load_vs_input_from_vertex_buffer(nir_builder *b, unsigned input_index,
    }
 
    for (unsigned i = num_fetches; i < 4; ++i)
-      fetches[i] = nir_ssa_undef(b, 1, bit_size);
+      fetches[i] = nir_undef(b, 1, bit_size);
 
    if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 && required_channels == 4) {
       if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
@@ -534,7 +534,7 @@ load_vs_input_from_vertex_buffer(nir_builder *b, unsigned input_index,
       /* For 2_10_10_10, the hardware returns an unsigned value;
        * convert it to a signed one.
        */
-      nir_ssa_def *tmp = fetches[3];
+      nir_def *tmp = fetches[3];
 
       /* First, recover the sign-extended signed integer value. */
       if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
@@ -583,15 +583,15 @@ lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state)
    unsigned component = nir_intrinsic_component(intrin);
    unsigned num_components = intrin->dest.ssa.num_components;
 
-   nir_ssa_def *comp[4];
+   nir_def *comp[4];
    if (s->shader->selector->info.base.vs.blit_sgprs_amd)
       load_vs_input_from_blit_sgpr(b, input_index, s, comp);
    else
       load_vs_input_from_vertex_buffer(b, input_index, s, intrin->dest.ssa.bit_size, comp);
 
-   nir_ssa_def *replacement = nir_vec(b, &comp[component], num_components);
+   nir_def *replacement = nir_vec(b, &comp[component], num_components);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, replacement);
+   nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
    nir_instr_remove(instr);
    nir_instr_free(instr);
 
index eda0cdf..7a76a00 100644
@@ -135,8 +135,8 @@ replace_tex_by_imm(nir_builder *b, nir_instr *instr, void *state)
       return false;
 
    b->cursor = nir_instr_remove(&tex->instr);
-   nir_ssa_def *imm = nir_imm_vec4(b, p->value[0], p->value[1], p->value[2], p->value[3]);
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, imm);
+   nir_def *imm = nir_imm_vec4(b, p->value[0], p->value[1], p->value[2], p->value[3]);
+   nir_def_rewrite_uses(&tex->dest.ssa, imm);
    return true;
 }
 
index 8951a59..2acf1e9 100644
@@ -1618,9 +1618,9 @@ static bool clamp_vertex_color_instr(nir_builder *b, nir_instr *instr, void *sta
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *color = intrin->src[0].ssa;
-   nir_ssa_def *clamp = nir_load_clamp_vertex_color_amd(b);
-   nir_ssa_def *new_color = nir_bcsel(b, clamp, nir_fsat(b, color), color);
+   nir_def *color = intrin->src[0].ssa;
+   nir_def *clamp = nir_load_clamp_vertex_color_amd(b);
+   nir_def *new_color = nir_bcsel(b, clamp, nir_fsat(b, color), color);
    nir_instr_rewrite_src_ssa(instr, &intrin->src[0], new_color);
 
    return true;
@@ -1903,7 +1903,7 @@ static unsigned si_get_nr_pos_exports(const struct si_shader_selector *sel,
 
 static bool lower_ps_load_color_intrinsic(nir_builder *b, nir_instr *instr, void *state)
 {
-   nir_ssa_def **colors = (nir_ssa_def **)state;
+   nir_def **colors = (nir_def **)state;
 
    if (instr->type != nir_instr_type_intrinsic)
       return false;
@@ -1917,7 +1917,7 @@ static bool lower_ps_load_color_intrinsic(nir_builder *b, nir_instr *instr, void
    unsigned index = intrin->intrinsic == nir_intrinsic_load_color0 ? 0 : 1;
    assert(colors[index]);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, colors[index]);
+   nir_def_rewrite_uses(&intrin->dest.ssa, colors[index]);
 
    nir_instr_remove(&intrin->instr);
    return true;
@@ -1934,7 +1934,7 @@ static void si_nir_lower_ps_color_input(nir_shader *nir, struct si_shader *shade
    const union si_shader_key *key = &shader->key;
 
    /* Build ready to be used colors at the beginning of the shader. */
-   nir_ssa_def *colors[2] = {0};
+   nir_def *colors[2] = {0};
    for (int i = 0; i < 2; i++) {
       if (!(sel->info.colors_read & (0xf << (i * 4))))
          continue;
@@ -1953,7 +1953,7 @@ static void si_nir_lower_ps_color_input(nir_shader *nir, struct si_shader *shade
             INTERP_MODE_FLAT : INTERP_MODE_SMOOTH;
       }
 
-      nir_ssa_def *back_color = NULL;
+      nir_def *back_color = NULL;
       if (interp_mode == INTERP_MODE_FLAT) {
          colors[i] = nir_load_input(b, 4, 32, nir_imm_int(b, 0),
                                    .base = color_base);
@@ -1979,7 +1979,7 @@ static void si_nir_lower_ps_color_input(nir_shader *nir, struct si_shader *shade
             break;
          }
 
-         nir_ssa_def *barycentric = nir_load_barycentric(b, op, interp_mode);
+         nir_def *barycentric = nir_load_barycentric(b, op, interp_mode);
 
          colors[i] =
             nir_load_interpolated_input(b, 4, 32, barycentric, nir_imm_int(b, 0),
@@ -1993,7 +1993,7 @@ static void si_nir_lower_ps_color_input(nir_shader *nir, struct si_shader *shade
       }
 
       if (back_color) {
-         nir_ssa_def *is_front_face = nir_load_front_face(b, 1);
+         nir_def *is_front_face = nir_load_front_face(b, 1);
          colors[i] = nir_bcsel(b, is_front_face, colors[i], back_color);
       }
    }
@@ -2012,23 +2012,23 @@ static void si_nir_emit_polygon_stipple(nir_shader *nir, struct si_shader_args *
    nir_builder *b = &builder;
 
    /* Load the buffer descriptor. */
-   nir_ssa_def *desc =
+   nir_def *desc =
       si_nir_load_internal_binding(b, args, SI_PS_CONST_POLY_STIPPLE, 4);
 
    /* Use the fixed-point gl_FragCoord input.
     * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
     * per coordinate to get the repeating effect.
     */
-   nir_ssa_def *pos_x = ac_nir_unpack_arg(b, &args->ac, args->pos_fixed_pt, 0, 5);
-   nir_ssa_def *pos_y = ac_nir_unpack_arg(b, &args->ac, args->pos_fixed_pt, 16, 5);
+   nir_def *pos_x = ac_nir_unpack_arg(b, &args->ac, args->pos_fixed_pt, 0, 5);
+   nir_def *pos_y = ac_nir_unpack_arg(b, &args->ac, args->pos_fixed_pt, 16, 5);
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
    /* The stipple pattern is 32x32, each row has 32 bits. */
-   nir_ssa_def *offset = nir_ishl_imm(b, pos_y, 2);
-   nir_ssa_def *row = nir_load_buffer_amd(b, 1, 32, desc, offset, zero, zero);
-   nir_ssa_def *bit = nir_ubfe(b, row, pos_x, nir_imm_int(b, 1));
+   nir_def *offset = nir_ishl_imm(b, pos_y, 2);
+   nir_def *row = nir_load_buffer_amd(b, 1, 32, desc, offset, zero, zero);
+   nir_def *bit = nir_ubfe(b, row, pos_x, nir_imm_int(b, 1));
 
-   nir_ssa_def *pass = nir_i2b(b, bit);
+   nir_def *pass = nir_i2b(b, bit);
    nir_discard_if(b, nir_inot(b, pass));
 }
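On the CPU, the same test would read (pattern[] holding the 32 row words of the 32x32 stipple; names hypothetical):

   uint32_t row  = pattern[y & 31];        /* byte offset (y & 31) * 4 */
   bool pass     = (row >> (x & 31)) & 1;  /* nir_ubfe(row, x, 1)      */
   if (!pass)
      discard_fragment();                  /* nir_discard_if(!pass)    */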
 
index 666511f..8e53860 100644
@@ -230,7 +230,7 @@ static void scan_io_usage(const nir_shader *nir, struct si_shader_info *info,
       bit_size = nir_src_bit_size(intr->src[0]);
       is_output_load = false;
    } else {
-      mask = nir_ssa_def_components_read(&intr->dest.ssa); /* load */
+      mask = nir_def_components_read(&intr->dest.ssa); /* load */
       bit_size = intr->dest.ssa.bit_size;
       is_output_load = !is_input;
    }
@@ -478,7 +478,7 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
          break;
       case nir_intrinsic_load_local_invocation_id:
       case nir_intrinsic_load_workgroup_id: {
-         unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
+         unsigned mask = nir_def_components_read(&intr->dest.ssa);
          while (mask) {
             unsigned i = u_bit_scan(&mask);
 
@@ -492,7 +492,7 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
       case nir_intrinsic_load_color0:
       case nir_intrinsic_load_color1: {
          unsigned index = intr->intrinsic == nir_intrinsic_load_color1;
-         uint8_t mask = nir_ssa_def_components_read(&intr->dest.ssa);
+         uint8_t mask = nir_def_components_read(&intr->dest.ssa);
          info->colors_read |= mask << (index * 4);
 
          switch (info->color_interpolate[index]) {
@@ -541,10 +541,10 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
             info->uses_interp_at_sample = true;
          break;
       case nir_intrinsic_load_frag_coord:
-         info->reads_frag_coord_mask |= nir_ssa_def_components_read(&intr->dest.ssa);
+         info->reads_frag_coord_mask |= nir_def_components_read(&intr->dest.ssa);
          break;
       case nir_intrinsic_load_sample_pos:
-         info->reads_sample_pos_mask |= nir_ssa_def_components_read(&intr->dest.ssa);
+         info->reads_sample_pos_mask |= nir_def_components_read(&intr->dest.ssa);
          break;
       case nir_intrinsic_load_input:
       case nir_intrinsic_load_per_vertex_input:
index 0f2af78..ebf3b5c 100644
@@ -143,7 +143,7 @@ unsigned gfx10_ngg_get_scratch_dw_size(struct si_shader *shader);
 bool gfx10_ngg_calculate_subgroup_info(struct si_shader *shader);
 
 /* si_nir_lower_abi.c */
-nir_ssa_def *si_nir_load_internal_binding(nir_builder *b, struct si_shader_args *args,
+nir_def *si_nir_load_internal_binding(nir_builder *b, struct si_shader_args *args,
-                                          unsigned slot, unsigned num_components);
+                                      unsigned slot, unsigned num_components);
 bool si_nir_lower_abi(nir_shader *nir, struct si_shader *shader, struct si_shader_args *args);
 
index 70fa288..236dffa 100644
@@ -222,7 +222,7 @@ lower_intrinsic_filter(const nir_instr *instr, const void *dummy)
    return instr->type == nir_instr_type_intrinsic;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_intrinsic_instr(nir_builder *b, nir_instr *instr, void *dummy)
 {
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
index 1c39631..7413728 100644
@@ -39,29 +39,29 @@ static void *create_shader_state(struct si_context *sctx, nir_shader *nir)
    }
 }
 
-static nir_ssa_def *get_global_ids(nir_builder *b, unsigned num_components)
+static nir_def *get_global_ids(nir_builder *b, unsigned num_components)
 {
    unsigned mask = BITFIELD_MASK(num_components);
 
-   nir_ssa_def *local_ids = nir_channels(b, nir_load_local_invocation_id(b), mask);
-   nir_ssa_def *block_ids = nir_channels(b, nir_load_workgroup_id(b, 32), mask);
-   nir_ssa_def *block_size = nir_channels(b, nir_load_workgroup_size(b), mask);
+   nir_def *local_ids = nir_channels(b, nir_load_local_invocation_id(b), mask);
+   nir_def *block_ids = nir_channels(b, nir_load_workgroup_id(b, 32), mask);
+   nir_def *block_size = nir_channels(b, nir_load_workgroup_size(b), mask);
    return nir_iadd(b, nir_imul(b, block_ids, block_size), local_ids);
 }
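For reference, this is the standard global-invocation-ID construction, per enabled component: global_id = workgroup_id * workgroup_size + local_invocation_id.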
 
-static void unpack_2x16(nir_builder *b, nir_ssa_def *src, nir_ssa_def **x, nir_ssa_def **y)
+static void unpack_2x16(nir_builder *b, nir_def *src, nir_def **x, nir_def **y)
 {
    *x = nir_iand_imm(b, src, 0xffff);
    *y = nir_ushr_imm(b, src, 16);
 }
 
-static void unpack_2x16_signed(nir_builder *b, nir_ssa_def *src, nir_ssa_def **x, nir_ssa_def **y)
+static void unpack_2x16_signed(nir_builder *b, nir_def *src, nir_def **x, nir_def **y)
 {
    *x = nir_i2i32(b, nir_u2u16(b, src));
    *y = nir_ishr_imm(b, src, 16);
 }
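In plain C, the signed variant is equivalent to (illustrative):

   int32_t x = (int16_t)(src & 0xffff);   /* nir_i2i32(nir_u2u16(src)): sign-extend low half */
   int32_t y = (int32_t)src >> 16;        /* nir_ishr_imm: arithmetic shift keeps the sign   */

whereas the unsigned variant above masks and logically shifts.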
 
-static nir_ssa_def *
+static nir_def *
 deref_ssa(nir_builder *b, nir_variable *var)
 {
    return &nir_build_deref_var(b, var)->dest.ssa;
@@ -87,9 +87,9 @@ void *si_create_copy_image_cs(struct si_context *sctx, bool src_is_1d_array, boo
    b.shader->info.workgroup_size_variable = true;
 
    b.shader->info.cs.user_data_components_amd = 3;
-   nir_ssa_def *ids = get_global_ids(&b, 3);
+   nir_def *ids = get_global_ids(&b, 3);
 
-   nir_ssa_def *coord_src = NULL, *coord_dst = NULL;
+   nir_def *coord_src = NULL, *coord_dst = NULL;
    unpack_2x16(&b, nir_load_user_data_amd(&b), &coord_src, &coord_dst);
 
    coord_src = nir_iadd(&b, coord_src, ids);
@@ -115,10 +115,10 @@ void *si_create_copy_image_cs(struct si_context *sctx, bool src_is_1d_array, boo
    nir_variable *img_dst = nir_variable_create(b.shader, nir_var_image, dst_img_type, "img_dst");
    img_dst->data.binding = 1;
 
-   nir_ssa_def *undef32 = nir_ssa_undef(&b, 1, 32);
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
+   nir_def *undef32 = nir_undef(&b, 1, 32);
+   nir_def *zero = nir_imm_int(&b, 0);
 
-   nir_ssa_def *data = nir_image_deref_load(&b, /*num_components*/ 4, /*bit_size*/ 32,
+   nir_def *data = nir_image_deref_load(&b, /*num_components*/ 4, /*bit_size*/ 32,
       deref_ssa(&b, img_src), coord_src, undef32, zero);
 
    nir_image_deref_store(&b, deref_ssa(&b, img_dst), coord_dst, undef32, data, zero);
@@ -139,32 +139,32 @@ void *si_create_dcc_retile_cs(struct si_context *sctx, struct radeon_surf *surf)
    b.shader->info.num_ssbos = 1;
 
    /* Get user data SGPRs. */
-   nir_ssa_def *user_sgprs = nir_load_user_data_amd(&b);
+   nir_def *user_sgprs = nir_load_user_data_amd(&b);
 
    /* Relative offset from the displayable DCC to the non-displayable DCC in the same buffer. */
-   nir_ssa_def *src_dcc_offset = nir_channel(&b, user_sgprs, 0);
+   nir_def *src_dcc_offset = nir_channel(&b, user_sgprs, 0);
 
-   nir_ssa_def *src_dcc_pitch, *dst_dcc_pitch, *src_dcc_height, *dst_dcc_height;
+   nir_def *src_dcc_pitch, *dst_dcc_pitch, *src_dcc_height, *dst_dcc_height;
    unpack_2x16(&b, nir_channel(&b, user_sgprs, 1), &src_dcc_pitch, &src_dcc_height);
    unpack_2x16(&b, nir_channel(&b, user_sgprs, 2), &dst_dcc_pitch, &dst_dcc_height);
 
    /* Get the 2D coordinates. */
-   nir_ssa_def *coord = get_global_ids(&b, 2);
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
+   nir_def *coord = get_global_ids(&b, 2);
+   nir_def *zero = nir_imm_int(&b, 0);
 
    /* Multiply the coordinates by the DCC block size (they are DCC block coordinates). */
    coord = nir_imul(&b, coord, nir_imm_ivec2(&b, surf->u.gfx9.color.dcc_block_width,
                                              surf->u.gfx9.color.dcc_block_height));
 
-   nir_ssa_def *src_offset =
+   nir_def *src_offset =
       ac_nir_dcc_addr_from_coord(&b, &sctx->screen->info, surf->bpe, &surf->u.gfx9.color.dcc_equation,
                                  src_dcc_pitch, src_dcc_height, zero, /* DCC slice size */
                                  nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), /* x, y */
                                  zero, zero, zero); /* z, sample, pipe_xor */
    src_offset = nir_iadd(&b, src_offset, src_dcc_offset);
-   nir_ssa_def *value = nir_load_ssbo(&b, 1, 8, zero, src_offset, .align_mul=1);
+   nir_def *value = nir_load_ssbo(&b, 1, 8, zero, src_offset, .align_mul=1);
 
-   nir_ssa_def *dst_offset =
+   nir_def *dst_offset =
       ac_nir_dcc_addr_from_coord(&b, &sctx->screen->info, surf->bpe, &surf->u.gfx9.color.display_dcc_equation,
                                  dst_dcc_pitch, dst_dcc_height, zero, /* DCC slice size */
                                  nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), /* x, y */
@@ -187,15 +187,15 @@ void *gfx9_create_clear_dcc_msaa_cs(struct si_context *sctx, struct si_texture *
    b.shader->info.num_ssbos = 1;
 
    /* Get user data SGPRs. */
-   nir_ssa_def *user_sgprs = nir_load_user_data_amd(&b);
-   nir_ssa_def *dcc_pitch, *dcc_height, *clear_value, *pipe_xor;
+   nir_def *user_sgprs = nir_load_user_data_amd(&b);
+   nir_def *dcc_pitch, *dcc_height, *clear_value, *pipe_xor;
    unpack_2x16(&b, nir_channel(&b, user_sgprs, 0), &dcc_pitch, &dcc_height);
    unpack_2x16(&b, nir_channel(&b, user_sgprs, 1), &clear_value, &pipe_xor);
    clear_value = nir_u2u16(&b, clear_value);
 
    /* Get the 2D coordinates. */
-   nir_ssa_def *coord = get_global_ids(&b, 3);
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
+   nir_def *coord = get_global_ids(&b, 3);
+   nir_def *zero = nir_imm_int(&b, 0);
 
    /* Multiply the coordinates by the DCC block size (they are DCC block coordinates). */
    coord = nir_imul(&b, coord,
@@ -203,7 +203,7 @@ void *gfx9_create_clear_dcc_msaa_cs(struct si_context *sctx, struct si_texture *
                                       tex->surface.u.gfx9.color.dcc_block_height,
                                       tex->surface.u.gfx9.color.dcc_block_depth));
 
-   nir_ssa_def *offset =
+   nir_def *offset =
       ac_nir_dcc_addr_from_coord(&b, &sctx->screen->info, tex->surface.bpe,
                                  &tex->surface.u.gfx9.color.dcc_equation,
                                  dcc_pitch, dcc_height, zero, /* DCC slice size */
@@ -235,16 +235,16 @@ void *si_create_clear_buffer_rmw_cs(struct si_context *sctx)
    b.shader->info.num_ssbos = 1;
 
    /* address = blockID * 64 + threadID; */
-   nir_ssa_def *address = get_global_ids(&b, 1);
+   nir_def *address = get_global_ids(&b, 1);
 
    /* address = address * 16; (byte offset, loading one vec4 per thread) */
    address = nir_ishl_imm(&b, address, 4);
    
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
-   nir_ssa_def *data = nir_load_ssbo(&b, 4, 32, zero, address, .align_mul = 4);
+   nir_def *zero = nir_imm_int(&b, 0);
+   nir_def *data = nir_load_ssbo(&b, 4, 32, zero, address, .align_mul = 4);
 
    /* Get user data SGPRs. */
-   nir_ssa_def *user_sgprs = nir_load_user_data_amd(&b);
+   nir_def *user_sgprs = nir_load_user_data_amd(&b);
 
    /* data &= inverted_writemask; */
    data = nir_iand(&b, data, nir_channel(&b, user_sgprs, 1));
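The step that follows (elided by this hunk) presumably ORs in a pre-masked clear value, completing the usual read-modify-write clear: new = (old & ~writemask) | (clear_value & writemask), with both operands supplied through the user-data SGPRs.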
@@ -282,13 +282,13 @@ void *si_create_passthrough_tcs(struct si_context *sctx)
    return create_shader_state(sctx, tcs);
 }
 
-static nir_ssa_def *convert_linear_to_srgb(nir_builder *b, nir_ssa_def *input)
+static nir_def *convert_linear_to_srgb(nir_builder *b, nir_def *input)
 {
    /* There are small precision differences compared to CB, so the gfx blit will return slightly
     * different results.
     */
 
-   nir_ssa_def *comp[4];
+   nir_def *comp[4];
    for (unsigned i = 0; i < 3; i++)
       comp[i] = nir_format_linear_to_srgb(b, nir_channel(b, input, i));
    comp[3] = nir_channel(b, input, 3);
@@ -296,7 +296,7 @@ static nir_ssa_def *convert_linear_to_srgb(nir_builder *b, nir_ssa_def *input)
    return nir_vec(b, comp, 4);
 }
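nir_format_linear_to_srgb applies the standard sRGB encode, c' = (c <= 0.0031308) ? 12.92 * c : 1.055 * pow(c, 1/2.4) - 0.055; alpha is linear in sRGB formats, which is why the loop above converts only the first three channels.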
 
-static nir_ssa_def *average_samples(nir_builder *b, nir_ssa_def **samples, unsigned num_samples)
+static nir_def *average_samples(nir_builder *b, nir_def **samples, unsigned num_samples)
 {
    /* This works like add-reduce by computing the sum of each pair independently, and then
     * computing the sum of each pair of sums, and so on, to get better instruction-level
@@ -320,11 +320,11 @@ static nir_ssa_def *average_samples(nir_builder *b, nir_ssa_def **samples, unsig
    return nir_fmul_imm(b, samples[0], 1.0 / num_samples); /* average the sum */
 }
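Schematically, for num_samples = 4 the (elided) loop computes t0 = s0 + s1 and t1 = s2 + s3 independently, then sum = t0 + t1, and finally the single multiply by 1.0/4: log2(n) dependent steps instead of n - 1 serial adds.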
 
-static nir_ssa_def *image_resolve_msaa(nir_builder *b, nir_variable *img, unsigned num_samples,
-                                       nir_ssa_def *coord, enum amd_gfx_level gfx_level)
+static nir_def *image_resolve_msaa(nir_builder *b, nir_variable *img, unsigned num_samples,
+                                   nir_def *coord, enum amd_gfx_level gfx_level)
 {
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *result = NULL;
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *result = NULL;
    nir_variable *var = NULL;
 
    /* Gfx11 doesn't support samples_identical, so we can't use it. */
@@ -345,12 +345,12 @@ static nir_ssa_def *image_resolve_msaa(nir_builder *b, nir_variable *img, unsign
     *
     * TODO: nir_group_loads could do this.
     */
-   nir_ssa_def *sample_index[16];
+   nir_def *sample_index[16];
    for (unsigned i = 0; i < num_samples; i++)
       sample_index[i] = nir_optimization_barrier_vgpr_amd(b, 32, nir_imm_int(b, i));
 
    /* Load all samples. */
-   nir_ssa_def *samples[16];
+   nir_def *samples[16];
    for (unsigned i = 0; i < num_samples; i++) {
       samples[i] = nir_image_deref_load(b, 4, 32, deref_ssa(b, img),
                                         coord, sample_index[i], zero);
@@ -368,7 +368,7 @@ static nir_ssa_def *image_resolve_msaa(nir_builder *b, nir_variable *img, unsign
    return result;
 }
 
-static nir_ssa_def *apply_blit_output_modifiers(nir_builder *b, nir_ssa_def *color,
+static nir_def *apply_blit_output_modifiers(nir_builder *b, nir_def *color,
-                                                const union si_compute_blit_shader_key *options)
+                                            const union si_compute_blit_shader_key *options)
 {
    if (options->sint_to_uint)
@@ -380,8 +380,8 @@ static nir_ssa_def *apply_blit_output_modifiers(nir_builder *b, nir_ssa_def *col
    if (options->dst_is_srgb)
       color = convert_linear_to_srgb(b, color);
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *one = options->use_integer_one ? nir_imm_int(b, 1) : nir_imm_float(b, 1);
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *one = options->use_integer_one ? nir_imm_int(b, 1) : nir_imm_float(b, 1);
 
    /* Set channels not present in src to 0 or 1. This will eliminate code loading and resolving
     * those channels.
@@ -452,12 +452,12 @@ void *si_create_blit_cs(struct si_context *sctx, const union si_compute_blit_sha
    nir_variable *img_dst = nir_variable_create(b.shader, nir_var_uniform, img_type[1], "img1");
    img_dst->data.binding = 1;
 
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
+   nir_def *zero = nir_imm_int(&b, 0);
 
    /* Instructions. */
    /* Let's work with 0-based src and dst coordinates (thread IDs) first. */
-   nir_ssa_def *dst_xyz = get_global_ids(&b, 3);
-   nir_ssa_def *src_xyz = dst_xyz;
+   nir_def *dst_xyz = get_global_ids(&b, 3);
+   nir_def *src_xyz = dst_xyz;
 
    /* Flip src coordinates. */
    for (unsigned i = 0; i < 2; i++) {
@@ -466,14 +466,14 @@ void *si_create_blit_cs(struct si_context *sctx, const union si_compute_blit_sha
           * The flipped blit should load from -dim to -1.
           * Therefore do: x = -x - 1;
           */
-         nir_ssa_def *comp = nir_channel(&b, src_xyz, i);
+         nir_def *comp = nir_channel(&b, src_xyz, i);
          comp = nir_iadd_imm(&b, nir_ineg(&b, comp), -1);
          src_xyz = nir_vector_insert_imm(&b, src_xyz, comp, i);
       }
    }
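Worked through: with x = -x - 1, the 0-based thread range [0, dim-1] maps to [-dim, -1] (thread 0 -> -1, thread dim-1 -> -dim), matching the comment; the box.xyz addition below then shifts the coordinates into place.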
 
    /* Add box.xyz. */
-   nir_ssa_def *coord_src = NULL, *coord_dst = NULL;
+   nir_def *coord_src = NULL, *coord_dst = NULL;
    unpack_2x16_signed(&b, nir_trim_vector(&b, nir_load_user_data_amd(&b), 3),
                       &coord_src, &coord_dst);
    coord_dst = nir_iadd(&b, coord_dst, dst_xyz);
@@ -482,10 +482,10 @@ void *si_create_blit_cs(struct si_context *sctx, const union si_compute_blit_sha
    /* Clamp to edge for src, only X and Y because Z can't be out of bounds. */
    if (options->xy_clamp_to_edge) {
       unsigned src_clamp_channels = options->src_is_1d ? 0x1 : 0x3;
-      nir_ssa_def *dim = nir_image_deref_size(&b, 4, 32, deref_ssa(&b, img_src), zero);
+      nir_def *dim = nir_image_deref_size(&b, 4, 32, deref_ssa(&b, img_src), zero);
       dim = nir_channels(&b, dim, src_clamp_channels);
 
-      nir_ssa_def *coord_src_clamped = nir_channels(&b, coord_src, src_clamp_channels);
+      nir_def *coord_src_clamped = nir_channels(&b, coord_src, src_clamp_channels);
       coord_src_clamped = nir_imax(&b, coord_src_clamped, nir_imm_int(&b, 0));
       coord_src_clamped = nir_imin(&b, coord_src_clamped, nir_iadd_imm(&b, dim, -1));
 
@@ -509,7 +509,7 @@ void *si_create_blit_cs(struct si_context *sctx, const union si_compute_blit_sha
 
    /* Execute the image loads and stores. */
    unsigned num_samples = 1 << options->log2_samples;
-   nir_ssa_def *color;
+   nir_def *color;
 
    if (options->src_is_msaa && !options->dst_is_msaa && !options->sample0_only) {
       /* MSAA resolving (downsampling). */
@@ -520,7 +520,7 @@ void *si_create_blit_cs(struct si_context *sctx, const union si_compute_blit_sha
 
    } else if (options->src_is_msaa && options->dst_is_msaa) {
       /* MSAA copy. */
-      nir_ssa_def *color[16];
+      nir_def *color[16];
       assert(num_samples > 1);
       /* Group loads together and then stores. */
       for (unsigned i = 0; i < num_samples; i++) {
index 0260bdd..8b47ca7 100644
@@ -660,29 +660,29 @@ v3d_get_sand8_fs(struct pipe_context *pctx, int cpp)
         nir_variable *pos_in =
                 nir_variable_create(b.shader, nir_var_shader_in, vec4, "pos");
         pos_in->data.location = VARYING_SLOT_POS;
-        nir_ssa_def *pos = nir_load_var(&b, pos_in);
+        nir_def *pos = nir_load_var(&b, pos_in);
 
-        nir_ssa_def *zero = nir_imm_int(&b, 0);
-        nir_ssa_def *one = nir_imm_int(&b, 1);
-        nir_ssa_def *two = nir_imm_int(&b, 2);
-        nir_ssa_def *six = nir_imm_int(&b, 6);
-        nir_ssa_def *seven = nir_imm_int(&b, 7);
-        nir_ssa_def *eight = nir_imm_int(&b, 8);
+        nir_def *zero = nir_imm_int(&b, 0);
+        nir_def *one = nir_imm_int(&b, 1);
+        nir_def *two = nir_imm_int(&b, 2);
+        nir_def *six = nir_imm_int(&b, 6);
+        nir_def *seven = nir_imm_int(&b, 7);
+        nir_def *eight = nir_imm_int(&b, 8);
 
-        nir_ssa_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
-        nir_ssa_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));
+        nir_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
+        nir_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));
 
         nir_variable *stride_in =
                 nir_variable_create(b.shader, nir_var_uniform, glsl_uint,
                                     "sand8_stride");
-        nir_ssa_def *stride =
+        nir_def *stride =
                 nir_load_uniform(&b, 1, 32, zero,
                                  .base = stride_in->data.driver_location,
                                  .range = 4,
                                  .dest_type = nir_type_uint32);
 
-        nir_ssa_def *x_offset;
-        nir_ssa_def *y_offset;
+        nir_def *x_offset;
+        nir_def *y_offset;
 
         /* UIF tiled format is composed of UIF blocks. Each block has
          * four 64 byte microtiles. Inside each microtile pixels are stored
@@ -709,11 +709,11 @@ v3d_get_sand8_fs(struct pipe_context *pctx, int cpp)
          * between microtiles to deal with this issue for luma planes.
          */
         if (cpp == 1) {
-                nir_ssa_def *intra_utile_x_offset =
+                nir_def *intra_utile_x_offset =
                         nir_ishl(&b, nir_iand_imm(&b, x, 1), two);
-                nir_ssa_def *inter_utile_x_offset =
+                nir_def *inter_utile_x_offset =
                         nir_ishl(&b, nir_iand_imm(&b, x, 60), one);
-                nir_ssa_def *stripe_offset=
+                nir_def *stripe_offset =
                         nir_ishl(&b,nir_imul(&b,nir_ishr_imm(&b, x, 6),
                                              stride),
                                  seven);
@@ -725,7 +725,7 @@ v3d_get_sand8_fs(struct pipe_context *pctx, int cpp)
                                     nir_ishl(&b, nir_iand_imm(&b, x, 2), six),
                                     nir_ishl(&b, y, eight));
         } else  {
-                nir_ssa_def *stripe_offset=
+                nir_def *stripe_offset =
                         nir_ishl(&b,nir_imul(&b,nir_ishr_imm(&b, x, 5),
                                                 stride),
                                  seven);
@@ -733,15 +733,15 @@ v3d_get_sand8_fs(struct pipe_context *pctx, int cpp)
                                nir_ishl(&b, nir_iand_imm(&b, x, 31), two));
                 y_offset = nir_ishl(&b, y, seven);
         }
-        nir_ssa_def *ubo_offset = nir_iadd(&b, x_offset, y_offset);
-        nir_ssa_def *load =
+        nir_def *ubo_offset = nir_iadd(&b, x_offset, y_offset);
+        nir_def *load =
         nir_load_ubo(&b, 1, 32, zero, ubo_offset,
                     .align_mul = 4,
                     .align_offset = 0,
                     .range_base = 0,
                     .range = ~0);
 
-        nir_ssa_def *output = nir_unpack_unorm_4x8(&b, load);
+        nir_def *output = nir_unpack_unorm_4x8(&b, load);
 
         nir_store_var(&b, color_out,
                       output,
@@ -911,30 +911,30 @@ v3d_get_sand30_vs(struct pipe_context *pctx)
  * in an uvec4. The start parameter defines where the sequence of 4 values
  * begins.
  */
-static nir_ssa_def *
+static nir_def *
 extract_unorm_2xrgb10a2_component_to_4xunorm16(nir_builder *b,
-                                               nir_ssa_def *value,
-                                               nir_ssa_def *start)
+                                               nir_def *value,
+                                               nir_def *start)
 {
         const unsigned mask = BITFIELD_MASK(10);
 
-        nir_ssa_def *shiftw0 = nir_imul_imm(b, start, 10);
-        nir_ssa_def *word0 = nir_iand_imm(b, nir_channel(b, value, 0),
-                                          BITFIELD_MASK(30));
-        nir_ssa_def *finalword0 = nir_ushr(b, word0, shiftw0);
-        nir_ssa_def *word1 = nir_channel(b, value, 1);
-        nir_ssa_def *shiftw0tow1 = nir_isub_imm(b, 30, shiftw0);
-        nir_ssa_def *word1toword0 =  nir_ishl(b, word1, shiftw0tow1);
+        nir_def *shiftw0 = nir_imul_imm(b, start, 10);
+        nir_def *word0 = nir_iand_imm(b, nir_channel(b, value, 0),
+                                      BITFIELD_MASK(30));
+        nir_def *finalword0 = nir_ushr(b, word0, shiftw0);
+        nir_def *word1 = nir_channel(b, value, 1);
+        nir_def *shiftw0tow1 = nir_isub_imm(b, 30, shiftw0);
+        nir_def *word1toword0 = nir_ishl(b, word1, shiftw0tow1);
         finalword0 = nir_ior(b, finalword0, word1toword0);
-        nir_ssa_def *finalword1 = nir_ushr(b, word1, shiftw0);
+        nir_def *finalword1 = nir_ushr(b, word1, shiftw0);
 
-        nir_ssa_def *val0 = nir_ishl_imm(b, nir_iand_imm(b, finalword0,
-                                                         mask), 6);
-        nir_ssa_def *val1 = nir_ishr_imm(b, nir_iand_imm(b, finalword0,
-                                                         mask << 10), 4);
-        nir_ssa_def *val2 = nir_ishr_imm(b, nir_iand_imm(b, finalword0,
-                                                         mask << 20), 14);
-        nir_ssa_def *val3 = nir_ishl_imm(b, nir_iand_imm(b, finalword1,
-                                                         mask), 6);
+        nir_def *val0 = nir_ishl_imm(b, nir_iand_imm(b, finalword0,
+                                                     mask), 6);
+        nir_def *val1 = nir_ishr_imm(b, nir_iand_imm(b, finalword0,
+                                                     mask << 10), 4);
+        nir_def *val2 = nir_ishr_imm(b, nir_iand_imm(b, finalword0,
+                                                     mask << 20), 14);
+        nir_def *val3 = nir_ishl_imm(b, nir_iand_imm(b, finalword1,
+                                                     mask), 6);
 
         return nir_vec4(b, val0, val1, val2, val3);
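Note the expansion here is the cheap one: each 10-bit value v becomes a 16-bit unorm as v << 6, leaving the low 6 bits zero; an exact rescale would be v * 65535 / 1023, commonly approximated by bit replication as (v << 6) | (v >> 4).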
@@ -984,10 +984,10 @@ v3d_get_sand30_fs(struct pipe_context *pctx)
         nir_variable *pos_in =
                 nir_variable_create(b.shader, nir_var_shader_in, vec4, "pos");
         pos_in->data.location = VARYING_SLOT_POS;
-        nir_ssa_def *pos = nir_load_var(&b, pos_in);
+        nir_def *pos = nir_load_var(&b, pos_in);
 
-        nir_ssa_def *zero = nir_imm_int(&b, 0);
-        nir_ssa_def *three = nir_imm_int(&b, 3);
+        nir_def *zero = nir_imm_int(&b, 0);
+        nir_def *three = nir_imm_int(&b, 3);
 
         /* With a SAND128 stripe, in 128 bytes with rgb10a2 format we have 96
          * 10-bit values. So, it represents 96 pixels for Y plane and 48 pixels
@@ -996,8 +996,8 @@ v3d_get_sand30_fs(struct pipe_context *pctx)
          */
         uint32_t pixels_stripe = 24;
 
-        nir_ssa_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
-        nir_ssa_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));
+        nir_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
+        nir_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));
 
         /* UIF tiled format is composed of UIF blocks. Each block has four 64
          * byte microtiles. Inside each microtile pixels are stored in raster
@@ -1032,43 +1032,43 @@ v3d_get_sand30_fs(struct pipe_context *pctx)
         nir_variable *stride_in =
                 nir_variable_create(b.shader, nir_var_uniform,
                                     glsl_uint, "sand30_stride");
-        nir_ssa_def *stride =
+        nir_def *stride =
                 nir_load_uniform(&b, 1, 32, zero,
                                  .base = stride_in->data.driver_location,
                                  .range = 4,
                                  .dest_type = nir_type_uint32);
 
-        nir_ssa_def *real_x = nir_ior(&b, nir_iand_imm(&b, x, 1),
+        nir_def *real_x = nir_ior(&b, nir_iand_imm(&b, x, 1),
-                                      nir_ishl_imm(&b,nir_ushr_imm(&b, x, 2),
-                                      1));
+                                  nir_ishl_imm(&b, nir_ushr_imm(&b, x, 2),
+                                  1));
-        nir_ssa_def *x_pos_in_stripe = nir_umod_imm(&b, real_x, pixels_stripe);
-        nir_ssa_def *component = nir_umod(&b, real_x, three);
-        nir_ssa_def *intra_utile_x_offset = nir_ishl_imm(&b, component, 2);
+        nir_def *x_pos_in_stripe = nir_umod_imm(&b, real_x, pixels_stripe);
+        nir_def *component = nir_umod(&b, real_x, three);
+        nir_def *intra_utile_x_offset = nir_ishl_imm(&b, component, 2);
 
-        nir_ssa_def *inter_utile_x_offset =
+        nir_def *inter_utile_x_offset =
                 nir_ishl_imm(&b, nir_udiv_imm(&b, x_pos_in_stripe, 3), 4);
 
-        nir_ssa_def *stripe_offset=
+        nir_def *stripe_offset =
                 nir_ishl_imm(&b,
                              nir_imul(&b,
                                       nir_udiv_imm(&b, real_x, pixels_stripe),
                                       stride),
                              7);
 
-        nir_ssa_def *x_offset = nir_iadd(&b, stripe_offset,
+        nir_def *x_offset = nir_iadd(&b, stripe_offset,
-                                         nir_iadd(&b, intra_utile_x_offset,
-                                                  inter_utile_x_offset));
+                                     nir_iadd(&b, intra_utile_x_offset,
+                                              inter_utile_x_offset));
-        nir_ssa_def *y_offset =
+        nir_def *y_offset =
                 nir_iadd(&b, nir_ishl_imm(&b, nir_iand_imm(&b, x, 2), 6),
                          nir_ishl_imm(&b, y, 8));
-        nir_ssa_def *ubo_offset = nir_iadd(&b, x_offset, y_offset);
+        nir_def *ubo_offset = nir_iadd(&b, x_offset, y_offset);
 
-        nir_ssa_def *load = nir_load_ubo(&b, 2, 32, zero, ubo_offset,
+        nir_def *load = nir_load_ubo(&b, 2, 32, zero, ubo_offset,
-                                         .align_mul = 8,
-                                         .align_offset = 0,
-                                         .range_base = 0,
-                                         .range = ~0);
+                                     .align_mul = 8,
+                                     .align_offset = 0,
+                                     .range_base = 0,
+                                     .range = ~0);
-        nir_ssa_def *output =
+        nir_def *output =
                 extract_unorm_2xrgb10a2_component_to_4xunorm16(&b, load,
                                                                component);
         nir_store_var(&b, color_out,
index 2cf65b5..0a33704 100644
@@ -285,24 +285,24 @@ static void *vc4_get_yuv_fs(struct pipe_context *pctx, int cpp)
    nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                               vec4, "pos");
    pos_in->data.location = VARYING_SLOT_POS;
-   nir_ssa_def *pos = nir_load_var(&b, pos_in);
+   nir_def *pos = nir_load_var(&b, pos_in);
 
-   nir_ssa_def *one = nir_imm_int(&b, 1);
-   nir_ssa_def *two = nir_imm_int(&b, 2);
+   nir_def *one = nir_imm_int(&b, 1);
+   nir_def *two = nir_imm_int(&b, 2);
 
-   nir_ssa_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
-   nir_ssa_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));
+   nir_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
+   nir_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));
 
    nir_variable *stride_in = nir_variable_create(b.shader, nir_var_uniform,
                                                  glsl_int, "stride");
-   nir_ssa_def *stride = nir_load_var(&b, stride_in);
+   nir_def *stride = nir_load_var(&b, stride_in);
 
-   nir_ssa_def *x_offset;
-   nir_ssa_def *y_offset;
+   nir_def *x_offset;
+   nir_def *y_offset;
    if (cpp == 1) {
-           nir_ssa_def *intra_utile_x_offset =
+           nir_def *intra_utile_x_offset =
                    nir_ishl(&b, nir_iand(&b, x, one), two);
-           nir_ssa_def *inter_utile_x_offset =
+           nir_def *inter_utile_x_offset =
                    nir_ishl(&b, nir_iand(&b, x, nir_imm_int(&b, ~3)), one);
 
            x_offset = nir_iadd(&b,
@@ -318,7 +318,7 @@ static void *vc4_get_yuv_fs(struct pipe_context *pctx, int cpp)
            y_offset = nir_imul(&b, y, stride);
    }
 
-   nir_ssa_def *load =
+   nir_def *load =
       nir_load_ubo(&b, 1, 32, one, nir_iadd(&b, x_offset, y_offset),
                    .align_mul = 4,
                    .align_offset = 0,
index e90a57e..e4912c4 100644
@@ -54,17 +54,17 @@ blend_depends_on_dst_color(struct vc4_compile *c)
 }
 
 /** Emits a load of the previous fragment color from the tile buffer. */
-static nir_ssa_def *
+static nir_def *
 vc4_nir_get_dst_color(nir_builder *b, int sample)
 {
         return nir_load_input(b, 1, 32, nir_imm_int(b, 0),
                               .base = VC4_NIR_TLB_COLOR_READ_INPUT + sample);
 }
 
-static nir_ssa_def *
+static nir_def *
 vc4_blend_channel_f(nir_builder *b,
-                    nir_ssa_def **src,
-                    nir_ssa_def **dst,
+                    nir_def **src,
+                    nir_def **dst,
                     unsigned factor,
                     int channel)
 {
@@ -126,8 +126,8 @@ vc4_blend_channel_f(nir_builder *b,
         }
 }
 
-static nir_ssa_def *
-vc4_nir_set_packed_chan(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1,
+static nir_def *
+vc4_nir_set_packed_chan(nir_builder *b, nir_def *src0, nir_def *src1,
                         int chan)
 {
         unsigned chan_mask = 0xff << (chan * 8);
@@ -136,12 +136,12 @@ vc4_nir_set_packed_chan(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1,
                        nir_iand_imm(b, src1, chan_mask));
 }
 
-static nir_ssa_def *
+static nir_def *
 vc4_blend_channel_i(nir_builder *b,
-                    nir_ssa_def *src,
-                    nir_ssa_def *dst,
-                    nir_ssa_def *src_a,
-                    nir_ssa_def *dst_a,
+                    nir_def *src,
+                    nir_def *dst,
+                    nir_def *src_a,
+                    nir_def *dst_a,
                     unsigned factor,
                     int a_chan)
 {
@@ -195,8 +195,8 @@ vc4_blend_channel_i(nir_builder *b,
         }
 }
 
-static nir_ssa_def *
-vc4_blend_func_f(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst,
+static nir_def *
+vc4_blend_func_f(nir_builder *b, nir_def *src, nir_def *dst,
                  unsigned func)
 {
         switch (func) {
@@ -219,8 +219,8 @@ vc4_blend_func_f(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst,
         }
 }
 
-static nir_ssa_def *
-vc4_blend_func_i(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst,
+static nir_def *
+vc4_blend_func_i(nir_builder *b, nir_def *src, nir_def *dst,
                  unsigned func)
 {
         switch (func) {
@@ -244,8 +244,8 @@ vc4_blend_func_i(nir_builder *b, nir_ssa_def *src, nir_ssa_def *dst,
 }
 
 static void
-vc4_do_blending_f(struct vc4_compile *c, nir_builder *b, nir_ssa_def **result,
-                  nir_ssa_def **src_color, nir_ssa_def **dst_color)
+vc4_do_blending_f(struct vc4_compile *c, nir_builder *b, nir_def **result,
+                  nir_def **src_color, nir_def **dst_color)
 {
         struct pipe_rt_blend_state *blend = &c->fs_key->blend;
 
@@ -259,7 +259,7 @@ vc4_do_blending_f(struct vc4_compile *c, nir_builder *b, nir_ssa_def **result,
         for (int i = 0; i < 4; i++)
                 src_color[i] = nir_fsat(b, src_color[i]);
 
-        nir_ssa_def *src_blend[4], *dst_blend[4];
+        nir_def *src_blend[4], *dst_blend[4];
         for (int i = 0; i < 4; i++) {
                 int src_factor = ((i != 3) ? blend->rgb_src_factor :
                                   blend->alpha_src_factor);
@@ -282,17 +282,17 @@ vc4_do_blending_f(struct vc4_compile *c, nir_builder *b, nir_ssa_def **result,
         }
 }
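For reference, these helpers implement the standard fixed-function blend equation: result = rgb_func(src * src_factor, dst * dst_factor) per RGB channel, with alpha using the alpha factors and function; vc4_blend_channel_f supplies the per-factor values.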
 
-static nir_ssa_def *
-vc4_nir_splat(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+vc4_nir_splat(nir_builder *b, nir_def *src)
 {
-        nir_ssa_def *or1 = nir_ior(b, src, nir_ishl_imm(b, src, 8));
+        nir_def *or1 = nir_ior(b, src, nir_ishl_imm(b, src, 8));
         return nir_ior(b, or1, nir_ishl_imm(b, or1, 16));
 }
 
-static nir_ssa_def *
+static nir_def *
 vc4_do_blending_i(struct vc4_compile *c, nir_builder *b,
-                  nir_ssa_def *src_color, nir_ssa_def *dst_color,
-                  nir_ssa_def *src_float_a)
+                  nir_def *src_color, nir_def *dst_color,
+                  nir_def *src_float_a)
 {
         struct pipe_rt_blend_state *blend = &c->fs_key->blend;
 
@@ -301,8 +301,8 @@ vc4_do_blending_i(struct vc4_compile *c, nir_builder *b,
 
         enum pipe_format color_format = c->fs_key->color_format;
         const uint8_t *format_swiz = vc4_get_format_swizzle(color_format);
-        nir_ssa_def *src_a = nir_pack_unorm_4x8(b, src_float_a);
-        nir_ssa_def *dst_a;
+        nir_def *src_a = nir_pack_unorm_4x8(b, src_float_a);
+        nir_def *dst_a;
         int alpha_chan;
         for (alpha_chan = 0; alpha_chan < 4; alpha_chan++) {
                 if (format_swiz[alpha_chan] == 3)
@@ -316,12 +316,12 @@ vc4_do_blending_i(struct vc4_compile *c, nir_builder *b,
                 dst_a = nir_imm_int(b, ~0);
         }
 
-        nir_ssa_def *src_factor = vc4_blend_channel_i(b,
-                                                      src_color, dst_color,
-                                                      src_a, dst_a,
-                                                      blend->rgb_src_factor,
-                                                      alpha_chan);
-        nir_ssa_def *dst_factor = vc4_blend_channel_i(b,
-                                                      src_color, dst_color,
-                                                      src_a, dst_a,
-                                                      blend->rgb_dst_factor,
+        nir_def *src_factor = vc4_blend_channel_i(b,
+                                                  src_color, dst_color,
+                                                  src_a, dst_a,
+                                                  blend->rgb_src_factor,
+                                                  alpha_chan);
+        nir_def *dst_factor = vc4_blend_channel_i(b,
+                                                  src_color, dst_color,
+                                                  src_a, dst_a,
+                                                  blend->rgb_dst_factor,
@@ -329,7 +329,7 @@ vc4_do_blending_i(struct vc4_compile *c, nir_builder *b,
 
         if (alpha_chan != 4 &&
             blend->alpha_src_factor != blend->rgb_src_factor) {
-                nir_ssa_def *src_alpha_factor =
+                nir_def *src_alpha_factor =
                         vc4_blend_channel_i(b,
                                             src_color, dst_color,
                                             src_a, dst_a,
@@ -341,7 +341,7 @@ vc4_do_blending_i(struct vc4_compile *c, nir_builder *b,
         }
         if (alpha_chan != 4 &&
             blend->alpha_dst_factor != blend->rgb_dst_factor) {
-                nir_ssa_def *dst_alpha_factor =
+                nir_def *dst_alpha_factor =
                         vc4_blend_channel_i(b,
                                             src_color, dst_color,
                                             src_a, dst_a,
@@ -351,13 +351,13 @@ vc4_do_blending_i(struct vc4_compile *c, nir_builder *b,
                                                      dst_alpha_factor,
                                                      alpha_chan);
         }
-        nir_ssa_def *src_blend = nir_umul_unorm_4x8_vc4(b, src_color, src_factor);
-        nir_ssa_def *dst_blend = nir_umul_unorm_4x8_vc4(b, dst_color, dst_factor);
+        nir_def *src_blend = nir_umul_unorm_4x8_vc4(b, src_color, src_factor);
+        nir_def *dst_blend = nir_umul_unorm_4x8_vc4(b, dst_color, dst_factor);
 
-        nir_ssa_def *result =
+        nir_def *result =
                 vc4_blend_func_i(b, src_blend, dst_blend, blend->rgb_func);
         if (alpha_chan != 4 && blend->alpha_func != blend->rgb_func) {
-                nir_ssa_def *result_a = vc4_blend_func_i(b,
+                nir_def *result_a = vc4_blend_func_i(b,
-                                                         src_blend,
-                                                         dst_blend,
-                                                         blend->alpha_func);
+                                                     src_blend,
+                                                     dst_blend,
+                                                     blend->alpha_func);
@@ -367,9 +367,9 @@ vc4_do_blending_i(struct vc4_compile *c, nir_builder *b,
         return result;
 }
 
-static nir_ssa_def *
+static nir_def *
 vc4_logicop(nir_builder *b, int logicop_func,
-            nir_ssa_def *src, nir_ssa_def *dst)
+            nir_def *src, nir_def *dst)
 {
         switch (logicop_func) {
         case PIPE_LOGICOP_CLEAR:
@@ -410,14 +410,14 @@ vc4_logicop(nir_builder *b, int logicop_func,
         }
 }
 
-static nir_ssa_def *
+static nir_def *
 vc4_nir_swizzle_and_pack(struct vc4_compile *c, nir_builder *b,
-                         nir_ssa_def **colors)
+                         nir_def **colors)
 {
         enum pipe_format color_format = c->fs_key->color_format;
         const uint8_t *format_swiz = vc4_get_format_swizzle(color_format);
 
-        nir_ssa_def *swizzled[4];
+        nir_def *swizzled[4];
         for (int i = 0; i < 4; i++) {
                 swizzled[i] = vc4_nir_get_swizzled_channel(b, colors,
                                                            format_swiz[i]);
@@ -430,8 +430,8 @@ vc4_nir_swizzle_and_pack(struct vc4_compile *c, nir_builder *b,
 
 }
 
-static nir_ssa_def *
-vc4_nir_blend_pipeline(struct vc4_compile *c, nir_builder *b, nir_ssa_def *src,
+static nir_def *
+vc4_nir_blend_pipeline(struct vc4_compile *c, nir_builder *b, nir_def *src,
                        int sample)
 {
         enum pipe_format color_format = c->fs_key->color_format;
@@ -439,9 +439,9 @@ vc4_nir_blend_pipeline(struct vc4_compile *c, nir_builder *b, nir_ssa_def *src,
         bool srgb = util_format_is_srgb(color_format);
 
         /* Pull out the float src/dst color components. */
-        nir_ssa_def *packed_dst_color = vc4_nir_get_dst_color(b, sample);
-        nir_ssa_def *dst_vec4 = nir_unpack_unorm_4x8(b, packed_dst_color);
-        nir_ssa_def *src_color[4], *unpacked_dst_color[4];
+        nir_def *packed_dst_color = vc4_nir_get_dst_color(b, sample);
+        nir_def *dst_vec4 = nir_unpack_unorm_4x8(b, packed_dst_color);
+        nir_def *src_color[4], *unpacked_dst_color[4];
         for (unsigned i = 0; i < 4; i++) {
                 src_color[i] = nir_channel(b, src, i);
                 unpacked_dst_color[i] = nir_channel(b, dst_vec4, i);
@@ -450,10 +450,10 @@ vc4_nir_blend_pipeline(struct vc4_compile *c, nir_builder *b, nir_ssa_def *src,
         if (c->fs_key->sample_alpha_to_one && c->fs_key->msaa)
                 src_color[3] = nir_imm_float(b, 1.0);
 
-        nir_ssa_def *packed_color;
+        nir_def *packed_color;
         if (srgb) {
                 /* Unswizzle the destination color. */
-                nir_ssa_def *dst_color[4];
+                nir_def *dst_color[4];
                 for (unsigned i = 0; i < 4; i++) {
                         dst_color[i] = vc4_nir_get_swizzled_channel(b,
                                                                     unpacked_dst_color,
@@ -464,7 +464,7 @@ vc4_nir_blend_pipeline(struct vc4_compile *c, nir_builder *b, nir_ssa_def *src,
                 for (int i = 0; i < 3; i++)
                         dst_color[i] = nir_format_srgb_to_linear(b, dst_color[i]);
 
-                nir_ssa_def *blend_color[4];
+                nir_def *blend_color[4];
                 vc4_do_blending_f(c, b, blend_color, src_color, dst_color);
 
                 /* sRGB encode the output color */
@@ -473,7 +473,7 @@ vc4_nir_blend_pipeline(struct vc4_compile *c, nir_builder *b, nir_ssa_def *src,
 
                 packed_color = vc4_nir_swizzle_and_pack(c, b, blend_color);
         } else {
-                nir_ssa_def *packed_src_color =
+                nir_def *packed_src_color =
                         vc4_nir_swizzle_and_pack(c, b, src_color);
 
                 packed_color =
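
The srgb branch above is the usual linear-light blending dance: unpack the destination, linearize its RGB channels, blend, then re-encode before packing. For reference, a minimal CPU-side sketch of the two transfer functions involved (the standard sRGB piecewise curves; the helper names are illustrative, not part of this patch):

   #include <math.h>

   /* CPU analogue of nir_format_srgb_to_linear(): decode an sRGB channel. */
   static float srgb_to_linear(float c)
   {
           return c <= 0.04045f ? c / 12.92f
                                : powf((c + 0.055f) / 1.055f, 2.4f);
   }

   /* Inverse transform, applied to the blend result before packing. */
   static float linear_to_srgb(float c)
   {
           return c <= 0.0031308f ? c * 12.92f
                                  : 1.055f * powf(c, 1.0f / 2.4f) - 0.055f;
   }
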
@@ -503,7 +503,7 @@ vc4_nir_blend_pipeline(struct vc4_compile *c, nir_builder *b, nir_ssa_def *src,
 
 static void
 vc4_nir_store_sample_mask(struct vc4_compile *c, nir_builder *b,
-                          nir_ssa_def *val)
+                          nir_def *val)
 {
         nir_variable *sample_mask = nir_variable_create(c->s, nir_var_shader_out,
                                                         glsl_uint_type(),
@@ -519,16 +519,16 @@ static void
 vc4_nir_lower_blend_instr(struct vc4_compile *c, nir_builder *b,
                           nir_intrinsic_instr *intr)
 {
-        nir_ssa_def *frag_color = intr->src[0].ssa;
+        nir_def *frag_color = intr->src[0].ssa;
 
         if (c->fs_key->sample_alpha_to_coverage) {
-                nir_ssa_def *a = nir_channel(b, frag_color, 3);
+                nir_def *a = nir_channel(b, frag_color, 3);
 
                 /* XXX: We should do a nice dither based on the fragment
                  * coordinate, instead.
                  */
-                nir_ssa_def *num_bits = nir_f2i32(b, nir_fmul_imm(b, a, VC4_MAX_SAMPLES));
-                nir_ssa_def *bitmask = nir_iadd_imm(b,
+                nir_def *num_bits = nir_f2i32(b, nir_fmul_imm(b, a, VC4_MAX_SAMPLES));
+                nir_def *bitmask = nir_iadd_imm(b,
                                                     nir_ishl(b,
                                                              nir_imm_int(b, 1),
                                                              num_bits),
@@ -541,11 +541,11 @@ vc4_nir_lower_blend_instr(struct vc4_compile *c, nir_builder *b,
          * blending function separately for each destination sample value, and
          * then output the per-sample color using TLB_COLOR_MS.
          */
-        nir_ssa_def *blend_output;
+        nir_def *blend_output;
         if (c->fs_key->msaa && blend_depends_on_dst_color(c)) {
                 c->msaa_per_sample_output = true;
 
-                nir_ssa_def *samples[4];
+                nir_def *samples[4];
                 for (int i = 0; i < VC4_MAX_SAMPLES; i++)
                         samples[i] = vc4_nir_blend_pipeline(c, b, frag_color, i);
                 blend_output = nir_vec4(b,
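
The sample_alpha_to_coverage hunk above turns the fragment alpha into a contiguous coverage mask: num_bits = (int)(alpha * VC4_MAX_SAMPLES), then bitmask = (1 << num_bits) - 1 via the ishl/iadd_imm pair. A worked sketch, assuming VC4_MAX_SAMPLES == 4 (the helper name is hypothetical):

   #include <assert.h>
   #include <stdint.h>

   static uint32_t alpha_to_coverage_mask(float alpha, int max_samples)
   {
           int num_bits = (int)(alpha * max_samples); /* nir_f2i32 truncates */
           return (1u << num_bits) - 1;               /* nir_ishl + nir_iadd_imm(-1) */
   }

   int main(void)
   {
           assert(alpha_to_coverage_mask(0.0f, 4) == 0x0); /* no samples */
           assert(alpha_to_coverage_mask(0.6f, 4) == 0x3); /* 2.4 -> 2 bits */
           assert(alpha_to_coverage_mask(1.0f, 4) == 0xf); /* all samples */
           return 0;
   }
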
index 74419bf..e3e7ffc 100644
 
 static void
 replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
-                           nir_ssa_def **comps)
+                           nir_def **comps)
 {
 
         /* Batch things back together into a vector.  This will get split by
          * the later ALU scalarization pass.
          */
-        nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);
+        nir_def *vec = nir_vec(b, comps, intr->num_components);
 
         /* Replace the old intrinsic with a reference to our reconstructed
          * vector.
          */
-        nir_ssa_def_rewrite_uses(&intr->dest.ssa, vec);
+        nir_def_rewrite_uses(&intr->dest.ssa, vec);
         nir_instr_remove(&intr->instr);
 }
 
-static nir_ssa_def *
-vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src, unsigned chan)
+static nir_def *
+vc4_nir_unpack_8i(nir_builder *b, nir_def *src, unsigned chan)
 {
         return nir_ubitfield_extract(b,
                                      src,
@@ -63,8 +63,8 @@ vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src, unsigned chan)
 }
 
 /** Returns the 16 bit field as a sign-extended 32-bit value. */
-static nir_ssa_def *
-vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src, unsigned chan)
+static nir_def *
+vc4_nir_unpack_16i(nir_builder *b, nir_def *src, unsigned chan)
 {
         return nir_ibitfield_extract(b,
                                      src,
@@ -73,8 +73,8 @@ vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src, unsigned chan)
 }
 
 /** Returns the 16 bit field as an unsigned 32 bit value. */
-static nir_ssa_def *
-vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src, unsigned chan)
+static nir_def *
+vc4_nir_unpack_16u(nir_builder *b, nir_def *src, unsigned chan)
 {
         if (chan == 0) {
                 return nir_iand_imm(b, src, 0xffff);
@@ -83,22 +83,22 @@ vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src, unsigned chan)
         }
 }
 
-static nir_ssa_def *
-vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
+static nir_def *
+vc4_nir_unpack_8f(nir_builder *b, nir_def *src, unsigned chan)
 {
         return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
 }
 
-static nir_ssa_def *
+static nir_def *
 vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                               nir_builder *b,
-                              nir_ssa_def **vpm_reads,
+                              nir_def **vpm_reads,
                               uint8_t swiz,
                               const struct util_format_description *desc)
 {
         const struct util_format_channel_description *chan =
                 &desc->channel[swiz];
-        nir_ssa_def *temp;
+        nir_def *temp;
 
         if (swiz > PIPE_SWIZZLE_W) {
                 return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
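
The unpack helpers above differ only in how the 16-bit field is extended: nir_ibitfield_extract sign-extends, while the unsigned variant masks or shifts. An equivalent CPU-side sketch (illustrative helpers, not part of the patch):

   #include <stdint.h>

   /* chan 0 is bits [15:0], chan 1 is bits [31:16]. */
   static int32_t unpack_16i(uint32_t src, unsigned chan)  /* sign-extend */
   {
           return (int32_t)(int16_t)(src >> (chan * 16));
   }

   static uint32_t unpack_16u(uint32_t src, unsigned chan) /* zero-extend */
   {
           return chan == 0 ? (src & 0xffff) : (src >> 16);
   }
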
@@ -115,7 +115,7 @@ vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
         } else if (chan->size == 8 &&
                    (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                     chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
-                nir_ssa_def *vpm = vpm_reads[0];
+                nir_def *vpm = vpm_reads[0];
                 if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                         temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                         if (chan->normalized) {
@@ -140,7 +140,7 @@ vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
         } else if (chan->size == 16 &&
                    (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                     chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
-                nir_ssa_def *vpm = vpm_reads[swiz / 2];
+                nir_def *vpm = vpm_reads[swiz / 2];
 
                 /* Note that UNPACK_16F eats a half float, not ints, so we use
                  * UNPACK_16_I for all of these.
@@ -184,7 +184,7 @@ vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
          * be reordered, the actual reads will be generated at the top of the
          * shader by ntq_setup_inputs().
          */
-        nir_ssa_def *vpm_reads[4];
+        nir_def *vpm_reads[4];
         for (int i = 0; i < align(attr_size, 4) / 4; i++)
                 vpm_reads[i] = nir_load_input(b, 1, 32, nir_imm_int(b, 0),
                                               .base = nir_intrinsic_base(intr),
@@ -194,7 +194,7 @@ vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
         const struct util_format_description *desc =
                 util_format_description(format);
 
-        nir_ssa_def *dests[4];
+        nir_def *dests[4];
         for (int i = 0; i < intr->num_components; i++) {
                 uint8_t swiz = desc->swizzle[i];
                 dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
@@ -239,7 +239,7 @@ vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                                         c->fs_key->point_sprite_mask)) {
                 assert(intr->num_components == 1);
 
-                nir_ssa_def *result = &intr->dest.ssa;
+                nir_def *result = &intr->dest.ssa;
 
                 switch (comp) {
                 case 0:
@@ -263,7 +263,7 @@ vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                         result = nir_fsub_imm(b, 1.0, result);
 
                 if (result != &intr->dest.ssa) {
-                        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
+                        nir_def_rewrite_uses_after(&intr->dest.ssa,
                                                        result,
                                                        result->parent_instr);
                 }
@@ -294,7 +294,7 @@ vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
         b->cursor = nir_before_instr(&intr->instr);
 
         /* Generate scalar loads equivalent to the original vector. */
-        nir_ssa_def *dests[4];
+        nir_def *dests[4];
         for (unsigned i = 0; i < intr->num_components; i++) {
                 nir_intrinsic_instr *intr_comp =
                         nir_intrinsic_instr_create(c->s, intr->intrinsic);
index bf0e6b5..872c2f6 100644
@@ -35,7 +35,7 @@
  * and do the math in the shader.
  */
 
-static nir_ssa_def *
+static nir_def *
 vc4_nir_lower_txf_ms_instr(nir_builder *b, nir_instr *instr, void *data)
 {
         nir_tex_instr *txf_ms = nir_instr_as_tex(instr);
@@ -49,7 +49,7 @@ vc4_nir_lower_txf_ms_instr(nir_builder *b, nir_instr *instr, void *data)
         txf->is_new_style_shadow = txf_ms->is_new_style_shadow;
         txf->dest_type = txf_ms->dest_type;
 
-        nir_ssa_def *coord = NULL, *sample_index = NULL;
+        nir_def *coord = NULL, *sample_index = NULL;
         for (int i = 0; i < txf_ms->num_srcs; i++) {
                 switch (txf_ms->src[i].src_type) {
                 case nir_tex_src_coord:
@@ -65,8 +65,8 @@ vc4_nir_lower_txf_ms_instr(nir_builder *b, nir_instr *instr, void *data)
         assert(coord);
         assert(sample_index);
 
-        nir_ssa_def *x = nir_channel(b, coord, 0);
-        nir_ssa_def *y = nir_channel(b, coord, 1);
+        nir_def *x = nir_channel(b, coord, 0);
+        nir_def *y = nir_channel(b, coord, 1);
 
         uint32_t tile_w = 32;
         uint32_t tile_h = 32;
@@ -78,22 +78,22 @@ vc4_nir_lower_txf_ms_instr(nir_builder *b, nir_instr *instr, void *data)
         uint32_t w = align(c->key->tex[unit].msaa_width, tile_w);
         uint32_t w_tiles = w / tile_w;
 
-        nir_ssa_def *x_tile = nir_ushr_imm(b, x, tile_w_shift);
-        nir_ssa_def *y_tile = nir_ushr_imm(b, y, tile_h_shift);
-        nir_ssa_def *tile_addr = nir_iadd(b,
+        nir_def *x_tile = nir_ushr_imm(b, x, tile_w_shift);
+        nir_def *y_tile = nir_ushr_imm(b, y, tile_h_shift);
+        nir_def *tile_addr = nir_iadd(b,
                                           nir_imul_imm(b, x_tile, tile_size),
                                           nir_imul_imm(b, y_tile, w_tiles *
                                                                   tile_size));
-        nir_ssa_def *x_subspan = nir_iand_imm(b, x, (tile_w - 1) & ~1);
-        nir_ssa_def *y_subspan = nir_iand_imm(b, y, (tile_h - 1) & ~1);
-        nir_ssa_def *subspan_addr = nir_iadd(b,
+        nir_def *x_subspan = nir_iand_imm(b, x, (tile_w - 1) & ~1);
+        nir_def *y_subspan = nir_iand_imm(b, y, (tile_h - 1) & ~1);
+        nir_def *subspan_addr = nir_iadd(b,
                                              nir_imul_imm(b, x_subspan,
                                                           2 * VC4_MAX_SAMPLES * sizeof(uint32_t)),
                                              nir_imul_imm(b, y_subspan,
                                                           tile_w * VC4_MAX_SAMPLES *
                                                           sizeof(uint32_t)));
 
-        nir_ssa_def *pixel_addr = nir_ior(b,
+        nir_def *pixel_addr = nir_ior(b,
                                           nir_iand_imm(b,
                                                        nir_ishl_imm(b, x, 2),
                                                        1 << 2),
@@ -101,9 +101,9 @@ vc4_nir_lower_txf_ms_instr(nir_builder *b, nir_instr *instr, void *data)
                                                        nir_ishl_imm(b, y, 3),
                                                        1 << 3));
 
-        nir_ssa_def *sample_addr = nir_ishl_imm(b, sample_index, 4);
+        nir_def *sample_addr = nir_ishl_imm(b, sample_index, 4);
 
-        nir_ssa_def *addr = nir_iadd(b,
+        nir_def *addr = nir_iadd(b,
                                      nir_ior(b, sample_addr, pixel_addr),
                                      nir_iadd(b, subspan_addr, tile_addr));
 
index dab38c6..dc17368 100644
@@ -140,8 +140,8 @@ vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
         return qir_TEX_RESULT(c);
 }
 
-nir_ssa_def *
-vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
+nir_def *
+vc4_nir_get_swizzled_channel(nir_builder *b, nir_def **srcs, int swiz)
 {
         switch (swiz) {
         default:
@@ -161,7 +161,7 @@ vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
 }
 
 static struct qreg *
-ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
+ntq_init_ssa_def(struct vc4_compile *c, nir_def *def)
 {
         struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                           def->num_components);
@@ -211,7 +211,7 @@ ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
 
                 qregs[chan] = result;
         } else {
-                nir_ssa_def *reg = store->src[1].ssa;
+                nir_def *reg = store->src[1].ssa;
                 ASSERTED nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
                 assert(nir_intrinsic_base(store) == 0);
                 assert(nir_intrinsic_num_array_elems(decl) == 0);
@@ -261,7 +261,7 @@ ntq_get_src(struct vc4_compile *c, nir_src src, int i)
                 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                 assert(i < src.ssa->num_components);
         } else {
-                nir_ssa_def *reg = load->src[0].ssa;
+                nir_def *reg = load->src[0].ssa;
                 ASSERTED nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
                 assert(nir_intrinsic_base(load) == 0);
                 assert(nir_intrinsic_num_array_elems(decl) == 0);
@@ -1643,7 +1643,7 @@ ntq_setup_registers(struct vc4_compile *c, nir_function_impl *impl)
                 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                   array_len * num_components);
 
-                nir_ssa_def *nir_reg = &decl->dest.ssa;
+                nir_def *nir_reg = &decl->dest.ssa;
                 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
 
                 for (int i = 0; i < array_len * num_components; i++)
@@ -1662,7 +1662,7 @@ ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
 }
 
 static void
-ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
+ntq_emit_ssa_undef(struct vc4_compile *c, nir_undef_instr *instr)
 {
         struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
 
index 573d0f0..cb9653a 100644
@@ -389,7 +389,7 @@ struct vc4_compile {
         struct exec_list *cf_node_list;
 
         /**
-         * Mapping from nir_register * or nir_ssa_def * to array of struct
+         * Mapping from nir_register * or nir_def * to array of struct
          * qreg for the values.
          */
         struct hash_table *def_ht;
@@ -574,8 +574,8 @@ bool qir_opt_small_immediates(struct vc4_compile *c);
 bool qir_opt_vpm(struct vc4_compile *c);
 void vc4_nir_lower_blend(nir_shader *s, struct vc4_compile *c);
 void vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c);
-nir_ssa_def *vc4_nir_get_swizzled_channel(struct nir_builder *b,
-                                          nir_ssa_def **srcs, int swiz);
+nir_def *vc4_nir_get_swizzled_channel(struct nir_builder *b,
+                                          nir_def **srcs, int swiz);
 void vc4_nir_lower_txf_ms(nir_shader *s, struct vc4_compile *c);
 void qir_lower_uniforms(struct vc4_compile *c);
 
index e295959..83403ae 100644
@@ -161,7 +161,7 @@ get_nir_alu_type(const struct glsl_type *type)
 }
 
 static nir_alu_type
-infer_nir_alu_type_from_uses_ssa(nir_ssa_def *ssa);
+infer_nir_alu_type_from_uses_ssa(nir_def *ssa);
 
 static nir_alu_type
 infer_nir_alu_type_from_use(nir_src *src)
@@ -243,7 +243,7 @@ infer_nir_alu_type_from_use(nir_src *src)
 }
 
 static nir_alu_type
-infer_nir_alu_type_from_uses_ssa(nir_ssa_def *ssa)
+infer_nir_alu_type_from_uses_ssa(nir_def *ssa)
 {
    nir_alu_type atype = nir_type_invalid;
    /* try to infer a type: if it's wrong then whatever, but at least we tried */
@@ -1369,7 +1369,7 @@ get_vec_from_bit_size(struct ntv_context *ctx, uint32_t bit_size, uint32_t num_c
 }
 
 static SpvId
-get_src_ssa(struct ntv_context *ctx, const nir_ssa_def *ssa, nir_alu_type *atype)
+get_src_ssa(struct ntv_context *ctx, const nir_def *ssa, nir_alu_type *atype)
 {
    assert(ssa->index < ctx->num_defs);
    assert(ctx->defs[ssa->index] != 0);
@@ -1471,7 +1471,7 @@ get_alu_src_raw(struct ntv_context *ctx, nir_alu_instr *alu, unsigned src, nir_a
 }
 
 static void
-store_ssa_def(struct ntv_context *ctx, nir_ssa_def *ssa, SpvId result, nir_alu_type atype)
+store_ssa_def(struct ntv_context *ctx, nir_def *ssa, SpvId result, nir_alu_type atype)
 {
    assert(result != 0);
    assert(ssa->index < ctx->num_defs);
@@ -3262,7 +3262,7 @@ emit_image_deref_store(struct ntv_context *ctx, nir_intrinsic_instr *intr)
 }
 
 static SpvId
-extract_sparse_load(struct ntv_context *ctx, SpvId result, SpvId dest_type, nir_ssa_def *dest_ssa)
+extract_sparse_load(struct ntv_context *ctx, SpvId result, SpvId dest_type, nir_def *dest_ssa)
 {
    /* Result Type must be an OpTypeStruct with two members.
     * The first member’s type must be an integer type scalar.
@@ -3456,7 +3456,7 @@ emit_is_sparse_texels_resident(struct ntv_context *ctx, nir_intrinsic_instr *int
    SpvId type = get_dest_type(ctx, &intr->dest, nir_type_uint);
 
    /* this will always be stored with the ssa index of the parent instr */
-   nir_ssa_def *ssa = intr->src[0].ssa;
+   nir_def *ssa = intr->src[0].ssa;
    assert(ssa->parent_instr->type == nir_instr_type_alu);
    nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
    unsigned index = alu->src[0].src.ssa->index;
@@ -3804,7 +3804,7 @@ emit_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr)
 }
 
 static void
-emit_undef(struct ntv_context *ctx, nir_ssa_undef_instr *undef)
+emit_undef(struct ntv_context *ctx, nir_undef_instr *undef)
 {
    SpvId type = undef->def.bit_size == 1 ? get_bvec_type(ctx, undef->def.num_components) :
                                            get_uvec_type(ctx, undef->def.bit_size,
index acba298..8572cfd 100644
@@ -64,7 +64,7 @@ copy_vars(nir_builder *b, nir_deref_instr *dst, nir_deref_instr *src)
          copy_vars(b, nir_build_deref_array_imm(b, dst, i), nir_build_deref_array_imm(b, src, i));
       }
    } else {
-      nir_ssa_def *load = nir_load_deref(b, src);
+      nir_def *load = nir_load_deref(b, src);
       nir_store_deref(b, dst, load, BITFIELD_MASK(load->num_components));
    }
 }
@@ -129,20 +129,20 @@ lower_64bit_vertex_attribs_instr(nir_builder *b, nir_instr *instr, void *data)
    b->cursor = nir_after_instr(instr);
 
    /* this is the first load instruction for the first half of the dvec3/4 components */
-   nir_ssa_def *load = nir_load_var(b, var);
+   nir_def *load = nir_load_var(b, var);
    /* this is the second load instruction for the second half of the dvec3/4 components */
-   nir_ssa_def *load2 = nir_load_var(b, var2);
+   nir_def *load2 = nir_load_var(b, var2);
 
-   nir_ssa_def *def[4];
+   nir_def *def[4];
    /* create a new dvec3/4 comprised of all the loaded components from both variables */
    def[0] = nir_vector_extract(b, load, nir_imm_int(b, 0));
    def[1] = nir_vector_extract(b, load, nir_imm_int(b, 1));
    def[2] = nir_vector_extract(b, load2, nir_imm_int(b, 0));
    if (total_num_components == 4)
       def[3] = nir_vector_extract(b, load2, nir_imm_int(b, 1));
-   nir_ssa_def *new_vec = nir_vec(b, def, total_num_components);
+   nir_def *new_vec = nir_vec(b, def, total_num_components);
    /* use the assembled dvec3/4 for all other uses of the load */
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, new_vec,
+   nir_def_rewrite_uses_after(&intr->dest.ssa, new_vec,
                                   new_vec->parent_instr);
 
    /* remove the original instr and its deref chain */
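
The dvec3/dvec4 case is a plain reassembly: the attribute arrives as two loads of up to two doubles each, and the pass splices their channels back into one vector. Conceptually (slot layout assumed, helper hypothetical):

   /* out[] gets the first two doubles from the first load and the rest
    * from the second; total_num_components is 3 or 4. */
   static void reassemble_dvec(const double lo[2], const double hi[2],
                               double out[4], int total_num_components)
   {
           out[0] = lo[0];
           out[1] = lo[1];
           out[2] = hi[0];
           if (total_num_components == 4)
                   out[3] = hi[1];
   }
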
@@ -190,11 +190,11 @@ lower_64bit_uint_attribs_instr(nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_after_instr(instr);
 
-   nir_ssa_def *load = nir_load_var(b, var);
-   nir_ssa_def *casted[2];
+   nir_def *load = nir_load_var(b, var);
+   nir_def *casted[2];
    for (unsigned i = 0; i < num_components; i++)
      casted[i] = nir_pack_64_2x32(b, nir_channels(b, load, BITFIELD_RANGE(i * 2, 2)));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, casted, num_components));
+   nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, casted, num_components));
 
    /* remove the original instr and its deref chain */
    nir_instr *parent = intr->src[0].ssa->parent_instr;
@@ -238,13 +238,13 @@ lower_basevertex_instr(nir_builder *b, nir_instr *in, void *data)
    nir_ssa_dest_init(&load->instr, &load->dest, 1, 32);
    nir_builder_instr_insert(b, &load->instr);
 
-   nir_ssa_def *composite = nir_build_alu(b, nir_op_bcsel,
+   nir_def *composite = nir_build_alu(b, nir_op_bcsel,
                                           nir_build_alu(b, nir_op_ieq, &load->dest.ssa, nir_imm_int(b, 1), NULL, NULL),
                                           &instr->dest.ssa,
                                           nir_imm_int(b, 0),
                                           NULL);
 
-   nir_ssa_def_rewrite_uses_after(&instr->dest.ssa, composite,
+   nir_def_rewrite_uses_after(&instr->dest.ssa, composite,
                                   composite->parent_instr);
    return true;
 }
@@ -278,7 +278,7 @@ lower_drawid_instr(nir_builder *b, nir_instr *in, void *data)
    nir_ssa_dest_init(&load->instr, &load->dest, 1, 32);
    nir_builder_instr_insert(b, &load->instr);
 
-   nir_ssa_def_rewrite_uses(&instr->dest.ssa, &load->dest.ssa);
+   nir_def_rewrite_uses(&instr->dest.ssa, &load->dest.ssa);
 
    return true;
 }
@@ -304,7 +304,7 @@ static bool
 lower_gl_point_gs_instr(nir_builder *b, nir_instr *instr, void *data)
 {
    struct lower_gl_point_state *state = data;
-   nir_ssa_def *vp_scale, *pos;
+   nir_def *vp_scale, *pos;
 
    if (instr->type != nir_instr_type_intrinsic)
       return false;
@@ -326,34 +326,34 @@ lower_gl_point_gs_instr(nir_builder *b, nir_instr *instr, void *data)
    b->cursor = nir_before_instr(instr);
 
    // viewport-map endpoints
-   nir_ssa_def *vp_const_pos = nir_imm_int(b, ZINK_GFX_PUSHCONST_VIEWPORT_SCALE);
+   nir_def *vp_const_pos = nir_imm_int(b, ZINK_GFX_PUSHCONST_VIEWPORT_SCALE);
    vp_scale = nir_load_push_constant_zink(b, 2, 32, vp_const_pos);
 
    // Load point info values
-   nir_ssa_def *point_size = nir_load_var(b, state->gl_point_size);
-   nir_ssa_def *point_pos = nir_load_var(b, state->gl_pos_out);
+   nir_def *point_size = nir_load_var(b, state->gl_point_size);
+   nir_def *point_pos = nir_load_var(b, state->gl_pos_out);
 
    // w_delta = gl_point_size / width_viewport_size_scale * gl_Position.w
-   nir_ssa_def *w_delta = nir_fdiv(b, point_size, nir_channel(b, vp_scale, 0));
+   nir_def *w_delta = nir_fdiv(b, point_size, nir_channel(b, vp_scale, 0));
    w_delta = nir_fmul(b, w_delta, nir_channel(b, point_pos, 3));
    // half_w_delta = w_delta / 2
-   nir_ssa_def *half_w_delta = nir_fmul_imm(b, w_delta, 0.5);
+   nir_def *half_w_delta = nir_fmul_imm(b, w_delta, 0.5);
 
    // h_delta = gl_point_size / height_viewport_size_scale * gl_Position.w
-   nir_ssa_def *h_delta = nir_fdiv(b, point_size, nir_channel(b, vp_scale, 1));
+   nir_def *h_delta = nir_fdiv(b, point_size, nir_channel(b, vp_scale, 1));
    h_delta = nir_fmul(b, h_delta, nir_channel(b, point_pos, 3));
    // half_h_delta = h_delta / 2
-   nir_ssa_def *half_h_delta = nir_fmul_imm(b, h_delta, 0.5);
+   nir_def *half_h_delta = nir_fmul_imm(b, h_delta, 0.5);
 
-   nir_ssa_def *point_dir[4][2] = {
+   nir_def *point_dir[4][2] = {
       { nir_imm_float(b, -1), nir_imm_float(b, -1) },
       { nir_imm_float(b, -1), nir_imm_float(b, 1) },
       { nir_imm_float(b, 1), nir_imm_float(b, -1) },
       { nir_imm_float(b, 1), nir_imm_float(b, 1) }
    };
 
-   nir_ssa_def *point_pos_x = nir_channel(b, point_pos, 0);
-   nir_ssa_def *point_pos_y = nir_channel(b, point_pos, 1);
+   nir_def *point_pos_x = nir_channel(b, point_pos, 0);
+   nir_def *point_pos_y = nir_channel(b, point_pos, 1);
 
    for (size_t i = 0; i < 4; i++) {
       pos = nir_vec4(b,
@@ -408,12 +408,12 @@ struct lower_pv_mode_state {
    unsigned prim;
 };
 
-static nir_ssa_def*
+static nir_def*
 lower_pv_mode_gs_ring_index(nir_builder *b,
                             struct lower_pv_mode_state *state,
-                            nir_ssa_def *index)
+                            nir_def *index)
 {
-   nir_ssa_def *ring_offset = nir_load_var(b, state->ring_offset);
+   nir_def *ring_offset = nir_load_var(b, state->ring_offset);
    return nir_imod_imm(b, nir_iadd(b, index, ring_offset),
                           state->ring_size);
 }
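
lower_pv_mode_gs_ring_index is a textbook ring buffer: the pass keeps only the most recent vertices, so a logical index is rotated by ring_offset and wrapped at ring_size. The same indexing in plain C:

   static unsigned ring_index(unsigned index, unsigned ring_offset,
                              unsigned ring_size)
   {
           return (index + ring_offset) % ring_size;
   }
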
@@ -455,8 +455,8 @@ lower_pv_mode_gs_store(nir_builder *b,
       gl_varying_slot location = var->data.location;
       unsigned location_frac = var->data.location_frac;
       assert(state->varyings[location][location_frac]);
-      nir_ssa_def *pos_counter = nir_load_var(b, state->pos_counter);
-      nir_ssa_def *index = lower_pv_mode_gs_ring_index(b, state, pos_counter);
+      nir_def *pos_counter = nir_load_var(b, state->pos_counter);
+      nir_def *index = lower_pv_mode_gs_ring_index(b, state, pos_counter);
       nir_deref_instr *varying_deref = nir_build_deref_var(b, state->varyings[location][location_frac]);
       nir_deref_instr *ring_deref = nir_build_deref_array(b, varying_deref, index);
       // recreate the chain of derefs that leads to the store.
@@ -472,10 +472,10 @@ lower_pv_mode_gs_store(nir_builder *b,
 static void
 lower_pv_mode_emit_rotated_prim(nir_builder *b,
                                 struct lower_pv_mode_state *state,
-                                nir_ssa_def *current_vertex)
+                                nir_def *current_vertex)
 {
-   nir_ssa_def *two = nir_imm_int(b, 2);
-   nir_ssa_def *three = nir_imm_int(b, 3);
+   nir_def *two = nir_imm_int(b, 2);
+   nir_def *three = nir_imm_int(b, 3);
    bool is_triangle = state->primitive_vert_count == 3;
    /* This shader will always see the last three vertices emitted by the user gs.
     * The following table is used to rotate primitives within a strip generated
@@ -493,17 +493,17 @@ lower_pv_mode_emit_rotated_prim(nir_builder *b,
     *
     * odd or even primitive within draw
     */
-   nir_ssa_def *odd_prim = nir_imod(b, nir_load_primitive_id(b), two);
+   nir_def *odd_prim = nir_imod(b, nir_load_primitive_id(b), two);
    for (unsigned i = 0; i < state->primitive_vert_count; i++) {
       /* odd or even triangle within strip emitted by user GS
        * this is handled using the table
        */
-      nir_ssa_def *odd_user_prim = nir_imod(b, current_vertex, two);
+      nir_def *odd_user_prim = nir_imod(b, current_vertex, two);
       unsigned offset_even = vert_maps[is_triangle][0][i];
       unsigned offset_odd = vert_maps[is_triangle][1][i];
-      nir_ssa_def *offset_even_value = nir_imm_int(b, offset_even);
-      nir_ssa_def *offset_odd_value = nir_imm_int(b, offset_odd);
-      nir_ssa_def *rotated_i = nir_bcsel(b, nir_b2b1(b, odd_user_prim),
+      nir_def *offset_even_value = nir_imm_int(b, offset_even);
+      nir_def *offset_odd_value = nir_imm_int(b, offset_odd);
+      nir_def *rotated_i = nir_bcsel(b, nir_b2b1(b, odd_user_prim),
                                             offset_odd_value, offset_even_value);
       /* Here we account for how triangles are provided to the gs from a strip.
        * For even primitives we rotate by 3, meaning we do nothing.
@@ -526,7 +526,7 @@ lower_pv_mode_emit_rotated_prim(nir_builder *b,
          gl_varying_slot location = var->data.location;
          unsigned location_frac = var->data.location_frac;
          if (state->varyings[location][location_frac]) {
-            nir_ssa_def *index = lower_pv_mode_gs_ring_index(b, state, rotated_i);
+            nir_def *index = lower_pv_mode_gs_ring_index(b, state, rotated_i);
             nir_deref_instr *value = nir_build_deref_array(b, nir_build_deref_var(b, state->varyings[location][location_frac]), index);
             copy_vars(b, nir_build_deref_var(b, var), value);
          }
@@ -543,7 +543,7 @@ lower_pv_mode_gs_emit_vertex(nir_builder *b,
    b->cursor = nir_before_instr(&intrin->instr);
 
    // increment pos_counter
-   nir_ssa_def *pos_counter = nir_load_var(b, state->pos_counter);
+   nir_def *pos_counter = nir_load_var(b, state->pos_counter);
    nir_store_var(b, state->pos_counter, nir_iadd_imm(b, pos_counter, 1), 1);
 
    nir_instr_remove(&intrin->instr);
@@ -557,10 +557,10 @@ lower_pv_mode_gs_end_primitive(nir_builder *b,
 {
    b->cursor = nir_before_instr(&intrin->instr);
 
-   nir_ssa_def *pos_counter = nir_load_var(b, state->pos_counter);
+   nir_def *pos_counter = nir_load_var(b, state->pos_counter);
    nir_push_loop(b);
    {
-      nir_ssa_def *out_pos_counter = nir_load_var(b, state->out_pos_counter);
+      nir_def *out_pos_counter = nir_load_var(b, state->out_pos_counter);
       nir_push_if(b, nir_ilt(b, nir_isub(b, pos_counter, out_pos_counter),
                                 nir_imm_int(b, state->primitive_vert_count)));
       nir_jump(b, nir_jump_break);
@@ -686,12 +686,12 @@ struct lower_line_stipple_state {
    bool line_rectangular;
 };
 
-static nir_ssa_def *
-viewport_map(nir_builder *b, nir_ssa_def *vert,
-             nir_ssa_def *scale)
+static nir_def *
+viewport_map(nir_builder *b, nir_def *vert,
+             nir_def *scale)
 {
-   nir_ssa_def *w_recip = nir_frcp(b, nir_channel(b, vert, 3));
-   nir_ssa_def *ndc_point = nir_fmul(b, nir_trim_vector(b, vert, 2),
+   nir_def *w_recip = nir_frcp(b, nir_channel(b, vert, 3));
+   nir_def *ndc_point = nir_fmul(b, nir_trim_vector(b, vert, 2),
                                         w_recip);
    return nir_fmul(b, ndc_point, scale);
 }
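
viewport_map is the perspective divide plus viewport scale: ndc = vert.xy / vert.w, then screen = ndc * scale. For example, vert = (2, -1, 0, 2) with scale = (640, 360) gives ndc = (1, -0.5) and a mapped point of (640, -180). As a CPU-side sketch (types are illustrative):

   struct vec2 { float x, y; };

   static struct vec2 viewport_map(const float vert[4], struct vec2 scale)
   {
           float w_recip = 1.0f / vert[3];               /* nir_frcp */
           return (struct vec2){ vert[0] * w_recip * scale.x,
                                 vert[1] * w_recip * scale.y };
   }
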
@@ -712,19 +712,19 @@ lower_line_stipple_gs_instr(nir_builder *b, nir_instr *instr, void *data)
 
    nir_push_if(b, nir_ine_imm(b, nir_load_var(b, state->pos_counter), 0));
    // viewport-map endpoints
-   nir_ssa_def *vp_scale = nir_load_push_constant_zink(b, 2, 32,
+   nir_def *vp_scale = nir_load_push_constant_zink(b, 2, 32,
                                                        nir_imm_int(b, ZINK_GFX_PUSHCONST_VIEWPORT_SCALE));
-   nir_ssa_def *prev = nir_load_var(b, state->prev_pos);
-   nir_ssa_def *curr = nir_load_var(b, state->pos_out);
+   nir_def *prev = nir_load_var(b, state->prev_pos);
+   nir_def *curr = nir_load_var(b, state->pos_out);
    prev = viewport_map(b, prev, vp_scale);
    curr = viewport_map(b, curr, vp_scale);
 
    // calculate length of line
-   nir_ssa_def *len;
+   nir_def *len;
    if (state->line_rectangular)
       len = nir_fast_distance(b, prev, curr);
    else {
-      nir_ssa_def *diff = nir_fabs(b, nir_fsub(b, prev, curr));
+      nir_def *diff = nir_fabs(b, nir_fsub(b, prev, curr));
       len = nir_fmax(b, nir_channel(b, diff, 0), nir_channel(b, diff, 1));
    }
    // update stipple_counter
@@ -815,38 +815,38 @@ lower_line_stipple_fs(nir_shader *shader)
       sample_mask_out->data.location = FRAG_RESULT_SAMPLE_MASK;
    }
 
-   nir_ssa_def *pattern = nir_load_push_constant_zink(&b, 1, 32,
+   nir_def *pattern = nir_load_push_constant_zink(&b, 1, 32,
                                                       nir_imm_int(&b, ZINK_GFX_PUSHCONST_LINE_STIPPLE_PATTERN));
-   nir_ssa_def *factor = nir_i2f32(&b, nir_ishr_imm(&b, pattern, 16));
+   nir_def *factor = nir_i2f32(&b, nir_ishr_imm(&b, pattern, 16));
    pattern = nir_iand_imm(&b, pattern, 0xffff);
 
-   nir_ssa_def *sample_mask_in = nir_load_sample_mask_in(&b);
+   nir_def *sample_mask_in = nir_load_sample_mask_in(&b);
    nir_variable *v = nir_local_variable_create(entry, glsl_uint_type(), NULL);
    nir_variable *sample_mask = nir_local_variable_create(entry, glsl_uint_type(), NULL);
    nir_store_var(&b, v, sample_mask_in, 1);
    nir_store_var(&b, sample_mask, sample_mask_in, 1);
    nir_push_loop(&b);
    {
-      nir_ssa_def *value = nir_load_var(&b, v);
-      nir_ssa_def *index = nir_ufind_msb(&b, value);
-      nir_ssa_def *index_mask = nir_ishl(&b, nir_imm_int(&b, 1), index);
-      nir_ssa_def *new_value = nir_ixor(&b, value, index_mask);
+      nir_def *value = nir_load_var(&b, v);
+      nir_def *index = nir_ufind_msb(&b, value);
+      nir_def *index_mask = nir_ishl(&b, nir_imm_int(&b, 1), index);
+      nir_def *new_value = nir_ixor(&b, value, index_mask);
       nir_store_var(&b, v, new_value,  1);
       nir_push_if(&b, nir_ieq_imm(&b, value, 0));
       nir_jump(&b, nir_jump_break);
       nir_pop_if(&b, NULL);
 
-      nir_ssa_def *stipple_pos =
+      nir_def *stipple_pos =
          nir_interp_deref_at_sample(&b, 1, 32,
             &nir_build_deref_var(&b, stipple)->dest.ssa, index);
       stipple_pos = nir_fmod(&b, nir_fdiv(&b, stipple_pos, factor),
                                  nir_imm_float(&b, 16.0));
       stipple_pos = nir_f2i32(&b, stipple_pos);
-      nir_ssa_def *bit =
+      nir_def *bit =
          nir_iand_imm(&b, nir_ishr(&b, pattern, stipple_pos), 1);
       nir_push_if(&b, nir_ieq_imm(&b, bit, 0));
       {
-         nir_ssa_def *value = nir_load_var(&b, sample_mask);
+         nir_def *value = nir_load_var(&b, sample_mask);
          value = nir_ixor(&b, value, index_mask);
          nir_store_var(&b, sample_mask, value, 1);
       }
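
Per sample, the stipple test above boils down to one bit lookup: the interpolated stipple coordinate is divided by the repeat factor, wrapped modulo 16, and used to index the 16-bit pattern; samples whose bit is zero are cleared from the mask. A sketch of the scalar test (helper is hypothetical; pattern and factor are unpacked from one push constant as in the code above):

   #include <math.h>
   #include <stdbool.h>
   #include <stdint.h>

   static bool stipple_sample_covered(float stipple_coord,
                                      uint16_t pattern, float factor)
   {
           int pos = (int)fmodf(stipple_coord / factor, 16.0f);
           return (pattern >> pos) & 1;
   }
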
@@ -901,26 +901,26 @@ lower_line_smooth_gs_emit_vertex(nir_builder *b,
    b->cursor = nir_before_instr(&intrin->instr);
 
    nir_push_if(b, nir_ine_imm(b, nir_load_var(b, state->pos_counter), 0));
-   nir_ssa_def *vp_scale = nir_load_push_constant_zink(b, 2, 32,
+   nir_def *vp_scale = nir_load_push_constant_zink(b, 2, 32,
                                                        nir_imm_int(b, ZINK_GFX_PUSHCONST_VIEWPORT_SCALE));
-   nir_ssa_def *prev = nir_load_var(b, state->prev_pos);
-   nir_ssa_def *curr = nir_load_var(b, state->pos_out);
-   nir_ssa_def *prev_vp = viewport_map(b, prev, vp_scale);
-   nir_ssa_def *curr_vp = viewport_map(b, curr, vp_scale);
+   nir_def *prev = nir_load_var(b, state->prev_pos);
+   nir_def *curr = nir_load_var(b, state->pos_out);
+   nir_def *prev_vp = viewport_map(b, prev, vp_scale);
+   nir_def *curr_vp = viewport_map(b, curr, vp_scale);
 
-   nir_ssa_def *width = nir_load_push_constant_zink(b, 1, 32,
+   nir_def *width = nir_load_push_constant_zink(b, 1, 32,
                                                     nir_imm_int(b, ZINK_GFX_PUSHCONST_LINE_WIDTH));
-   nir_ssa_def *half_width = nir_fadd_imm(b, nir_fmul_imm(b, width, 0.5), 0.5);
+   nir_def *half_width = nir_fadd_imm(b, nir_fmul_imm(b, width, 0.5), 0.5);
 
    const unsigned yx[2] = { 1, 0 };
-   nir_ssa_def *vec = nir_fsub(b, curr_vp, prev_vp);
-   nir_ssa_def *len = nir_fast_length(b, vec);
-   nir_ssa_def *dir = nir_normalize(b, vec);
-   nir_ssa_def *half_length = nir_fmul_imm(b, len, 0.5);
+   nir_def *vec = nir_fsub(b, curr_vp, prev_vp);
+   nir_def *len = nir_fast_length(b, vec);
+   nir_def *dir = nir_normalize(b, vec);
+   nir_def *half_length = nir_fmul_imm(b, len, 0.5);
    half_length = nir_fadd_imm(b, half_length, 0.5);
 
-   nir_ssa_def *vp_scale_rcp = nir_frcp(b, vp_scale);
-   nir_ssa_def *tangent =
+   nir_def *vp_scale_rcp = nir_frcp(b, vp_scale);
+   nir_def *tangent =
       nir_fmul(b,
                nir_fmul(b,
                         nir_swizzle(b, dir, yx, 2),
@@ -930,7 +930,7 @@ lower_line_smooth_gs_emit_vertex(nir_builder *b,
    tangent = nir_pad_vector_imm_int(b, tangent, 0, 4);
    dir = nir_fmul_imm(b, nir_fmul(b, dir, vp_scale_rcp), 0.5);
 
-   nir_ssa_def *line_offets[8] = {
+   nir_def *line_offets[8] = {
       nir_fadd(b, tangent, nir_fneg(b, dir)),
       nir_fadd(b, nir_fneg(b, tangent), nir_fneg(b, dir)),
       tangent,
@@ -940,9 +940,9 @@ lower_line_smooth_gs_emit_vertex(nir_builder *b,
       nir_fadd(b, tangent, dir),
       nir_fadd(b, nir_fneg(b, tangent), dir),
    };
-   nir_ssa_def *line_coord =
+   nir_def *line_coord =
       nir_vec4(b, half_width, half_width, half_length, half_length);
-   nir_ssa_def *line_coords[8] = {
+   nir_def *line_coords[8] = {
       nir_fmul(b, line_coord, nir_imm_vec4(b, -1,  1,  -1,  1)),
       nir_fmul(b, line_coord, nir_imm_vec4(b,  1,  1,  -1,  1)),
       nir_fmul(b, line_coord, nir_imm_vec4(b, -1,  1,   0,  1)),
@@ -1136,7 +1136,7 @@ lower_line_smooth_fs(nir_shader *shader, bool lower_stipple)
       // initialize stipple_pattern
       nir_function_impl *entry = nir_shader_get_entrypoint(shader);
       b = nir_builder_at(nir_before_cf_list(&entry->body));
-      nir_ssa_def *pattern = nir_load_push_constant_zink(&b, 1, 32,
+      nir_def *pattern = nir_load_push_constant_zink(&b, 1, 32,
                                                          nir_imm_int(&b, ZINK_GFX_PUSHCONST_LINE_STIPPLE_PATTERN));
       nir_store_var(&b, stipple_pattern, pattern, 1);
    }
@@ -1169,8 +1169,8 @@ lower_64bit_pack_instr(nir_builder *b, nir_instr *instr, void *data)
        alu_instr->op != nir_op_unpack_64_2x32)
       return false;
    b->cursor = nir_before_instr(&alu_instr->instr);
-   nir_ssa_def *src = nir_ssa_for_alu_src(b, alu_instr, 0);
-   nir_ssa_def *dest;
+   nir_def *src = nir_ssa_for_alu_src(b, alu_instr, 0);
+   nir_def *dest;
    switch (alu_instr->op) {
    case nir_op_pack_64_2x32:
       dest = nir_pack_64_2x32_split(b, nir_channel(b, src, 0), nir_channel(b, src, 1));
@@ -1181,7 +1181,7 @@ lower_64bit_pack_instr(nir_builder *b, nir_instr *instr, void *data)
    default:
       unreachable("Impossible opcode");
    }
-   nir_ssa_def_rewrite_uses(&alu_instr->dest.dest.ssa, dest);
+   nir_def_rewrite_uses(&alu_instr->dest.dest.ssa, dest);
    nir_instr_remove(&alu_instr->instr);
    return true;
 }
@@ -1260,11 +1260,11 @@ zink_create_quads_emulation_gs(const nir_shader_compiler_options *options,
 
    int mapping_first[] = {0, 1, 2, 0, 2, 3};
    int mapping_last[] = {0, 1, 3, 1, 2, 3};
-   nir_ssa_def *last_pv_vert_def = nir_load_provoking_last(&b);
+   nir_def *last_pv_vert_def = nir_load_provoking_last(&b);
    last_pv_vert_def = nir_ine_imm(&b, last_pv_vert_def, 0);
    for (unsigned i = 0; i < 6; ++i) {
       /* swap indices 2 and 3 */
-      nir_ssa_def *idx = nir_bcsel(&b, last_pv_vert_def,
+      nir_def *idx = nir_bcsel(&b, last_pv_vert_def,
                                    nir_imm_int(&b, mapping_last[i]),
                                    nir_imm_int(&b, mapping_first[i]));
       /* Copy inputs to outputs. */
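
The two tables encode quad-to-triangle splitting with the provoking vertex kept in place: slots 2 and 3 swap between the first-vertex and last-vertex conventions, as the comment notes. The remap is just:

   #include <stdbool.h>

   static const int mapping_first[6] = { 0, 1, 2, 0, 2, 3 };
   static const int mapping_last[6]  = { 0, 1, 3, 1, 2, 3 };

   /* Which quad vertex (0..3) to emit for triangle slot 0..5. */
   static int quad_vert_for_slot(int slot, bool provoking_last)
   {
           return provoking_last ? mapping_last[slot] : mapping_first[slot];
   }
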
@@ -1307,11 +1307,11 @@ lower_system_values_to_inlined_uniforms_instr(nir_builder *b, nir_instr *instr,
    }
 
    b->cursor = nir_before_instr(&intrin->instr);
-   nir_ssa_def *new_dest_def = nir_load_ubo(b, 1, 32, nir_imm_int(b, 0),
+   nir_def *new_dest_def = nir_load_ubo(b, 1, 32, nir_imm_int(b, 0),
                                             nir_imm_int(b, inlined_uniform_offset),
                                             .align_mul = 4, .align_offset = 0,
                                             .range_base = 0, .range = ~0);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, new_dest_def);
+   nir_def_rewrite_uses(&intrin->dest.ssa, new_dest_def);
    nir_instr_remove(instr);
    return true;
 }
@@ -1521,7 +1521,7 @@ bound_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
       return false;
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    nir_variable *var = NULL;
-   nir_ssa_def *offset = NULL;
+   nir_def *offset = NULL;
    bool is_load = true;
    b->cursor = nir_before_instr(instr);
 
@@ -1557,7 +1557,7 @@ bound_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    unsigned rewrites = 0;
-   nir_ssa_def *result[2];
+   nir_def *result[2];
    for (unsigned i = 0; i < intr->num_components; i++) {
       if (offset_bytes + i >= size) {
          rewrites++;
@@ -1567,8 +1567,8 @@ bound_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
    }
    assert(rewrites == intr->num_components);
    if (is_load) {
-      nir_ssa_def *load = nir_vec(b, result, intr->num_components);
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, load);
+      nir_def *load = nir_vec(b, result, intr->num_components);
+      nir_def_rewrite_uses(&intr->dest.ssa, load);
    }
    nir_instr_remove(instr);
    return true;
@@ -1655,10 +1655,10 @@ lower_fbfetch_instr(nir_builder *b, nir_instr *instr, void *data)
    enum glsl_sampler_dim dim = ms ? GLSL_SAMPLER_DIM_SUBPASS_MS : GLSL_SAMPLER_DIM_SUBPASS;
    fbfetch->type = glsl_image_type(dim, false, GLSL_TYPE_FLOAT);
    nir_shader_add_variable(b->shader, fbfetch);
-   nir_ssa_def *deref = &nir_build_deref_var(b, fbfetch)->dest.ssa;
-   nir_ssa_def *sample = ms ? nir_load_sample_id(b) : nir_ssa_undef(b, 1, 32);
-   nir_ssa_def *load = nir_image_deref_load(b, 4, 32, deref, nir_imm_vec4(b, 0, 0, 0, 1), sample, nir_imm_int(b, 0));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, load);
+   nir_def *deref = &nir_build_deref_var(b, fbfetch)->dest.ssa;
+   nir_def *sample = ms ? nir_load_sample_id(b) : nir_undef(b, 1, 32);
+   nir_def *load = nir_image_deref_load(b, 4, 32, deref, nir_imm_vec4(b, 0, 0, 0, 1), sample, nir_imm_int(b, 0));
+   nir_def_rewrite_uses(&intr->dest.ssa, load);
    return true;
 }
 
@@ -1701,7 +1701,7 @@ lower_txf_lod_robustness_instr(nir_builder *b, nir_instr *in, void *data)
    if (nir_src_is_const(lod_src) && nir_src_as_const_value(lod_src)->u32 == 0)
       return false;
 
-   nir_ssa_def *lod = lod_src.ssa;
+   nir_def *lod = lod_src.ssa;
 
    int offset_idx = nir_tex_instr_src_index(txf, nir_tex_src_texture_offset);
    int handle_idx = nir_tex_instr_src_index(txf, nir_tex_src_texture_handle);
@@ -1731,12 +1731,12 @@ lower_txf_lod_robustness_instr(nir_builder *b, nir_instr *in, void *data)
    unsigned bit_size = nir_alu_type_get_type_size(txf->dest_type);
    oob_values[3] = (txf->dest_type & nir_type_float) ?
                    nir_const_value_for_float(1.0, bit_size) : nir_const_value_for_uint(1, bit_size);
-   nir_ssa_def *oob_val = nir_build_imm(b, nir_tex_instr_dest_size(txf), bit_size, oob_values);
+   nir_def *oob_val = nir_build_imm(b, nir_tex_instr_dest_size(txf), bit_size, oob_values);
 
    nir_pop_if(b, lod_oob_else);
-   nir_ssa_def *robust_txf = nir_if_phi(b, &new_txf->dest.ssa, oob_val);
+   nir_def *robust_txf = nir_if_phi(b, &new_txf->dest.ssa, oob_val);
 
-   nir_ssa_def_rewrite_uses(&txf->dest.ssa, robust_txf);
+   nir_def_rewrite_uses(&txf->dest.ssa, robust_txf);
    nir_instr_remove_v(in);
    return true;
 }
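
Shape-wise, lower_txf_lod_robustness_instr is a guarded fetch: the cloned txf runs under an if on the LOD, the else side produces a border texel ((0, 0, 0, 1) for float destinations, per oob_values above), and a phi merges the two. A conceptual sketch; the exact in-range condition lives outside the hunks shown and is assumed here:

   struct vec4 { float x, y, z, w; };

   static struct vec4 texel_fetch(int lod) /* stand-in for the cloned txf */
   {
           (void)lod;
           return (struct vec4){ 0 };
   }

   static struct vec4 robust_txf(int lod, int levels)
   {
           if (lod >= 0 && lod < levels)
                   return texel_fetch(lod);                /* then-branch */
           return (struct vec4){ 0.0f, 0.0f, 0.0f, 1.0f }; /* oob_val */
   }
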
@@ -2077,7 +2077,7 @@ lower_attrib(nir_builder *b, nir_instr *instr, void *data)
       return false;
    unsigned num_components = glsl_get_vector_elements(split[0]->type);
    b->cursor = nir_after_instr(instr);
-   nir_ssa_def *loads[4];
+   nir_def *loads[4];
    for (unsigned i = 0; i < (state->needs_w ? num_components - 1 : num_components); i++)
       loads[i] = nir_load_deref(b, nir_build_deref_var(b, split[i+1]));
    if (state->needs_w) {
@@ -2085,8 +2085,8 @@ lower_attrib(nir_builder *b, nir_instr *instr, void *data)
       loads[3] = nir_channel(b, loads[0], 3);
       loads[0] = nir_channel(b, loads[0], 0);
    }
-   nir_ssa_def *new_load = nir_vec(b, loads, num_components);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, new_load);
+   nir_def *new_load = nir_vec(b, loads, num_components);
+   nir_def_rewrite_uses(&intr->dest.ssa, new_load);
    nir_instr_remove_v(instr);
    return true;
 }
@@ -2142,7 +2142,7 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
    case nir_intrinsic_ssbo_atomic:
    case nir_intrinsic_ssbo_atomic_swap: {
       /* convert offset to uintN_t[idx] */
-      nir_ssa_def *offset = nir_udiv_imm(b, intr->src[1].ssa, nir_dest_bit_size(intr->dest) / 8);
+      nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, nir_dest_bit_size(intr->dest) / 8);
       nir_instr_rewrite_src_ssa(instr, &intr->src[1], offset);
       return true;
    }
@@ -2155,14 +2155,14 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
                         nir_dest_bit_size(intr->dest) == 64 &&
                         nir_intrinsic_align_offset(intr) % 8 != 0;
       force_2x32 |= nir_dest_bit_size(intr->dest) == 64 && !has_int64;
-      nir_ssa_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 32 : nir_dest_bit_size(intr->dest)) / 8);
+      nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 32 : nir_dest_bit_size(intr->dest)) / 8);
       nir_instr_rewrite_src_ssa(instr, &intr->src[1], offset);
       /* if 64bit isn't supported, 64bit loads definitely aren't supported, so rewrite as 2x32 with cast and pray */
       if (force_2x32) {
          /* this is always scalarized */
          assert(intr->dest.ssa.num_components == 1);
          /* rewrite as 2x32 */
-         nir_ssa_def *load[2];
+         nir_def *load[2];
          for (unsigned i = 0; i < 2; i++) {
             if (intr->intrinsic == nir_intrinsic_load_ssbo)
                load[i] = nir_load_ssbo(b, 1, 32, intr->src[0].ssa, nir_iadd_imm(b, intr->src[1].ssa, i), .align_mul = 4, .align_offset = 0);
@@ -2171,8 +2171,8 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
             nir_intrinsic_set_access(nir_instr_as_intrinsic(load[i]->parent_instr), nir_intrinsic_access(intr));
          }
          /* cast back to 64bit */
-         nir_ssa_def *casted = nir_pack_64_2x32_split(b, load[0], load[1]);
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, casted);
+         nir_def *casted = nir_pack_64_2x32_split(b, load[0], load[1]);
+         nir_def_rewrite_uses(&intr->dest.ssa, casted);
          nir_instr_remove(instr);
       }
       return true;
@@ -2180,19 +2180,19 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
    case nir_intrinsic_load_shared:
       b->cursor = nir_before_instr(instr);
       bool force_2x32 = nir_dest_bit_size(intr->dest) == 64 && !has_int64;
-      nir_ssa_def *offset = nir_udiv_imm(b, intr->src[0].ssa, (force_2x32 ? 32 : nir_dest_bit_size(intr->dest)) / 8);
+      nir_def *offset = nir_udiv_imm(b, intr->src[0].ssa, (force_2x32 ? 32 : nir_dest_bit_size(intr->dest)) / 8);
       nir_instr_rewrite_src_ssa(instr, &intr->src[0], offset);
       /* if 64bit isn't supported, 64bit loads definitely aren't supported, so rewrite as 2x32 with cast and pray */
       if (force_2x32) {
          /* this is always scalarized */
          assert(intr->dest.ssa.num_components == 1);
          /* rewrite as 2x32 */
-         nir_ssa_def *load[2];
+         nir_def *load[2];
          for (unsigned i = 0; i < 2; i++)
             load[i] = nir_load_shared(b, 1, 32, nir_iadd_imm(b, intr->src[0].ssa, i), .align_mul = 4, .align_offset = 0);
          /* cast back to 64bit */
-         nir_ssa_def *casted = nir_pack_64_2x32_split(b, load[0], load[1]);
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, casted);
+         nir_def *casted = nir_pack_64_2x32_split(b, load[0], load[1]);
+         nir_def_rewrite_uses(&intr->dest.ssa, casted);
          nir_instr_remove(instr);
          return true;
       }
@@ -2200,13 +2200,13 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
    case nir_intrinsic_store_ssbo: {
       b->cursor = nir_before_instr(instr);
       bool force_2x32 = nir_src_bit_size(intr->src[0]) == 64 && !has_int64;
-      nir_ssa_def *offset = nir_udiv_imm(b, intr->src[2].ssa, (force_2x32 ? 32 : nir_src_bit_size(intr->src[0])) / 8);
+      nir_def *offset = nir_udiv_imm(b, intr->src[2].ssa, (force_2x32 ? 32 : nir_src_bit_size(intr->src[0])) / 8);
       nir_instr_rewrite_src_ssa(instr, &intr->src[2], offset);
       /* if 64bit isn't supported, 64bit stores definitely aren't supported, so rewrite as 2x32 with cast and pray */
       if (force_2x32) {
          /* this is always scalarized */
          assert(intr->src[0].ssa->num_components == 1);
-         nir_ssa_def *vals[2] = {nir_unpack_64_2x32_split_x(b, intr->src[0].ssa), nir_unpack_64_2x32_split_y(b, intr->src[0].ssa)};
+         nir_def *vals[2] = {nir_unpack_64_2x32_split_x(b, intr->src[0].ssa), nir_unpack_64_2x32_split_y(b, intr->src[0].ssa)};
          for (unsigned i = 0; i < 2; i++)
             nir_store_ssbo(b, vals[i], intr->src[1].ssa, nir_iadd_imm(b, intr->src[2].ssa, i), .align_mul = 4, .align_offset = 0);
          nir_instr_remove(instr);
@@ -2216,13 +2216,13 @@ rewrite_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
    case nir_intrinsic_store_shared: {
       b->cursor = nir_before_instr(instr);
       bool force_2x32 = nir_src_bit_size(intr->src[0]) == 64 && !has_int64;
-      nir_ssa_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 32 : nir_src_bit_size(intr->src[0])) / 8);
+      nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 32 : nir_src_bit_size(intr->src[0])) / 8);
       nir_instr_rewrite_src_ssa(instr, &intr->src[1], offset);
       /* if 64bit isn't supported, 64bit stores definitely aren't supported, so rewrite as 2x32 with cast and pray */
       if (nir_src_bit_size(intr->src[0]) == 64 && !has_int64) {
          /* this is always scalarized */
          assert(intr->src[0].ssa->num_components == 1);
-         nir_ssa_def *vals[2] = {nir_unpack_64_2x32_split_x(b, intr->src[0].ssa), nir_unpack_64_2x32_split_y(b, intr->src[0].ssa)};
+         nir_def *vals[2] = {nir_unpack_64_2x32_split_x(b, intr->src[0].ssa), nir_unpack_64_2x32_split_y(b, intr->src[0].ssa)};
          for (unsigned i = 0; i < 2; i++)
             nir_store_shared(b, vals[i], nir_iadd_imm(b, intr->src[1].ssa, i), .align_mul = 4, .align_offset = 0);
          nir_instr_remove(instr);
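
All of the force_2x32 paths above rely on the same bit-level contract: a 64-bit value is its low 32 bits plus its high 32 bits shifted up, and the byte offset is rescaled to an element index before the access. The pack/unpack halves, CPU-side:

   #include <stdint.h>

   /* nir_unpack_64_2x32_split_x / _y */
   static uint32_t unpack_lo(uint64_t v) { return (uint32_t)v; }
   static uint32_t unpack_hi(uint64_t v) { return (uint32_t)(v >> 32); }

   /* nir_pack_64_2x32_split: x is the low half, y the high half. */
   static uint64_t pack_64(uint32_t x, uint32_t y)
   {
           return (uint64_t)x | ((uint64_t)y << 32);
   }
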
@@ -2307,18 +2307,18 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo)
       op = nir_intrinsic_deref_atomic_swap;
    else
       unreachable("unknown intrinsic");
-   nir_ssa_def *offset = intr->src[1].ssa;
+   nir_def *offset = intr->src[1].ssa;
    nir_src *src = &intr->src[0];
    nir_variable *var = get_bo_var(b->shader, bo, true, src, nir_dest_bit_size(intr->dest));
    nir_deref_instr *deref_var = nir_build_deref_var(b, var);
-   nir_ssa_def *idx = src->ssa;
+   nir_def *idx = src->ssa;
    if (bo->first_ssbo)
       idx = nir_iadd_imm(b, idx, -bo->first_ssbo);
    nir_deref_instr *deref_array = nir_build_deref_array(b, deref_var, idx);
    nir_deref_instr *deref_struct = nir_build_deref_struct(b, deref_array, 0);
 
    /* generate new atomic deref ops for every component */
-   nir_ssa_def *result[4];
+   nir_def *result[4];
    unsigned num_components = nir_dest_num_components(intr->dest);
    for (unsigned i = 0; i < num_components; i++) {
       nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, offset);
@@ -2336,8 +2336,8 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo)
       offset = nir_iadd_imm(b, offset, 1);
    }
 
-   nir_ssa_def *load = nir_vec(b, result, num_components);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, load);
+   nir_def *load = nir_vec(b, result, num_components);
+   nir_def_rewrite_uses(&intr->dest.ssa, load);
    nir_instr_remove(instr);
 }
 
@@ -2349,7 +2349,7 @@ remove_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
       return false;
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    nir_variable *var = NULL;
-   nir_ssa_def *offset = NULL;
+   nir_def *offset = NULL;
    bool is_load = true;
    b->cursor = nir_before_instr(instr);
    nir_src *src;
@@ -2382,7 +2382,7 @@ remove_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
    assert(var);
    assert(offset);
    nir_deref_instr *deref_var = nir_build_deref_var(b, var);
-   nir_ssa_def *idx = !ssbo && var->data.driver_location ? nir_iadd_imm(b, src->ssa, -1) : src->ssa;
+   nir_def *idx = !ssbo && var->data.driver_location ? nir_iadd_imm(b, src->ssa, -1) : src->ssa;
    if (!ssbo && bo->first_ubo && var->data.driver_location)
       idx = nir_iadd_imm(b, idx, -bo->first_ubo);
    else if (ssbo && bo->first_ssbo)
@@ -2391,7 +2391,7 @@ remove_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
    nir_deref_instr *deref_struct = nir_build_deref_struct(b, deref_array, 0);
    assert(intr->num_components <= 2);
    if (is_load) {
-      nir_ssa_def *result[2];
+      nir_def *result[2];
       for (unsigned i = 0; i < intr->num_components; i++) {
          nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, nir_i2iN(b, offset, nir_dest_bit_size(deref_struct->dest)));
          result[i] = nir_load_deref(b, deref_arr);
@@ -2399,8 +2399,8 @@ remove_bo_access_instr(nir_builder *b, nir_instr *instr, void *data)
             nir_intrinsic_set_access(nir_instr_as_intrinsic(result[i]->parent_instr), nir_intrinsic_access(intr));
          offset = nir_iadd_imm(b, offset, 1);
       }
-      nir_ssa_def *load = nir_vec(b, result, intr->num_components);
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, load);
+      nir_def *load = nir_vec(b, result, intr->num_components);
+      nir_def_rewrite_uses(&intr->dest.ssa, load);
    } else {
       nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, nir_i2iN(b, offset, nir_dest_bit_size(deref_struct->dest)));
       nir_build_store_deref(b, &deref_arr->dest.ssa, intr->src[0].ssa, BITFIELD_MASK(intr->num_components), nir_intrinsic_access(intr));
@@ -2441,11 +2441,11 @@ struct clamp_layer_output_state {
 static void
 clamp_layer_output_emit(nir_builder *b, struct clamp_layer_output_state *state)
 {
-   nir_ssa_def *is_layered = nir_load_push_constant_zink(b, 1, 32,
+   nir_def *is_layered = nir_load_push_constant_zink(b, 1, 32,
                                                          nir_imm_int(b, ZINK_GFX_PUSHCONST_FRAMEBUFFER_IS_LAYERED));
    nir_deref_instr *original_deref = nir_build_deref_var(b, state->original);
    nir_deref_instr *clamped_deref = nir_build_deref_var(b, state->clamped);
-   nir_ssa_def *layer = nir_bcsel(b, nir_ieq_imm(b, is_layered, 1),
+   nir_def *layer = nir_bcsel(b, nir_ieq_imm(b, is_layered, 1),
                                   nir_load_deref(b, original_deref),
                                   nir_imm_int(b, 0));
    nir_store_deref(b, clamped_deref, layer, 0);
@@ -2636,7 +2636,7 @@ rewrite_read_as_0(nir_builder *b, nir_instr *instr, void *data)
    if (deref_var != var)
       return false;
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *zero = nir_imm_zero(b, nir_dest_num_components(intr->dest), nir_dest_bit_size(intr->dest));
+   nir_def *zero = nir_imm_zero(b, nir_dest_num_components(intr->dest), nir_dest_bit_size(intr->dest));
    if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
       switch (var->data.location) {
       case VARYING_SLOT_COL0:
@@ -2651,7 +2651,7 @@ rewrite_read_as_0(nir_builder *b, nir_instr *instr, void *data)
          break;
       }
    }
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, zero);
+   nir_def_rewrite_uses(&intr->dest.ssa, zero);
    nir_instr_remove(instr);
    return true;
 }
@@ -2837,7 +2837,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
             b.cursor = nir_before_instr(instr);
             nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
             unsigned num_components = intr->num_components * 2;
-            nir_ssa_def *comp[NIR_MAX_VEC_COMPONENTS];
+            nir_def *comp[NIR_MAX_VEC_COMPONENTS];
             /* this is the stored matrix type from the deref */
             struct hash_entry *he = _mesa_hash_table_search(derefs, deref);
             const struct glsl_type *matrix = he ? he->data : NULL;
@@ -2847,7 +2847,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
             if (intr->intrinsic == nir_intrinsic_store_deref) {
                /* first, unpack the src data to 32bit vec2 components */
                for (unsigned i = 0; i < intr->num_components; i++) {
-                  nir_ssa_def *ssa = nir_unpack_64_2x32(&b, nir_channel(&b, intr->src[1].ssa, i));
+                  nir_def *ssa = nir_unpack_64_2x32(&b, nir_channel(&b, intr->src[1].ssa, i));
                   comp[i * 2] = nir_channel(&b, ssa, 0);
                   comp[i * 2 + 1] = nir_channel(&b, ssa, 1);
                }
@@ -2863,7 +2863,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
                   assert(deref->deref_type == nir_deref_type_array);
                   nir_deref_instr *var_deref = nir_deref_instr_parent(deref);
                   /* let optimization clean up consts later */
-                  nir_ssa_def *index = deref->arr.index.ssa;
+                  nir_def *index = deref->arr.index.ssa;
                   /* this might be an indirect array index:
                      * - iterate over matrix columns
                      * - add if blocks for each column
@@ -2888,7 +2888,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
                         nir_deref_instr *strct = nir_build_deref_struct(&b, var_deref, member);
                         unsigned incr = MIN2(remaining, 4);
                         /* assemble the write component vec */
-                        nir_ssa_def *val = nir_vec(&b, &comp[i], incr);
+                        nir_def *val = nir_vec(&b, &comp[i], incr);
                         /* use the number of components being written as the writemask */
                         if (glsl_get_vector_elements(strct->type) > val->num_components)
                            val = nir_pad_vector(&b, val, glsl_get_vector_elements(strct->type));
@@ -2901,7 +2901,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
                   _mesa_set_add(deletes, &deref->instr);
                } else if (num_components <= 4) {
                   /* simple store case: just write out the components */
-                  nir_ssa_def *dest = nir_vec(&b, comp, num_components);
+                  nir_def *dest = nir_vec(&b, comp, num_components);
                   nir_store_deref(&b, deref, dest, mask);
                } else {
                   /* writing > 4 components: access the struct and write to the appropriate vec4 members */
@@ -2909,7 +2909,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
                      if (!(mask & BITFIELD_MASK(4)))
                         continue;
                      nir_deref_instr *strct = nir_build_deref_struct(&b, deref, i);
-                     nir_ssa_def *dest = nir_vec(&b, &comp[i * 4], MIN2(num_components, 4));
+                     nir_def *dest = nir_vec(&b, &comp[i * 4], MIN2(num_components, 4));
                      if (glsl_get_vector_elements(strct->type) > dest->num_components)
                         dest = nir_pad_vector(&b, dest, glsl_get_vector_elements(strct->type));
                      nir_store_deref(&b, strct, dest, mask & BITFIELD_MASK(4));
@@ -2917,20 +2917,20 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
                   }
                }
             } else {
-               nir_ssa_def *dest = NULL;
+               nir_def *dest = NULL;
                if (matrix) {
                   /* matrix types always come from array (row) derefs */
                   assert(deref->deref_type == nir_deref_type_array);
                   nir_deref_instr *var_deref = nir_deref_instr_parent(deref);
                   /* let optimization clean up consts later */
-                  nir_ssa_def *index = deref->arr.index.ssa;
+                  nir_def *index = deref->arr.index.ssa;
                   /* this might be an indirect array index:
                      * - iterate over matrix columns
                      * - add if blocks for each column
                      * - phi the loads using the array index
                      */
                   unsigned cols = glsl_get_matrix_columns(matrix);
-                  nir_ssa_def *dests[4];
+                  nir_def *dests[4];
                   for (unsigned idx = 0; idx < cols; idx++) {
                      /* don't add an if for the final row: this will be handled in the else */
                      if (idx < cols - 1)
@@ -2949,7 +2949,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
                      for (unsigned i = 0; i < num_components; member++) {
                         assert(member < glsl_get_length(var_deref->type));
                         nir_deref_instr *strct = nir_build_deref_struct(&b, var_deref, member);
-                        nir_ssa_def *load = nir_load_deref(&b, strct);
+                        nir_def *load = nir_load_deref(&b, strct);
                         unsigned incr = MIN2(remaining, 4);
                         /* repack the loads to 64bit */
                         for (unsigned c = 0; c < incr / 2; c++, comp_idx++)
@@ -2969,7 +2969,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
                   _mesa_set_add(deletes, &deref->instr);
                } else if (num_components <= 4) {
                   /* simple load case */
-                  nir_ssa_def *load = nir_load_deref(&b, deref);
+                  nir_def *load = nir_load_deref(&b, deref);
                   /* pack 32bit loads into 64bit: this will automagically get optimized out later */
                   for (unsigned i = 0; i < intr->num_components; i++) {
                      comp[i] = nir_pack_64_2x32(&b, nir_channels(&b, load, BITFIELD_RANGE(i * 2, 2)));
@@ -2979,7 +2979,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
                   /* writing > 4 components: access the struct and load the appropriate vec4 members */
                   for (unsigned i = 0; i < 2; i++, num_components -= 4) {
                      nir_deref_instr *strct = nir_build_deref_struct(&b, deref, i);
-                     nir_ssa_def *load = nir_load_deref(&b, strct);
+                     nir_def *load = nir_load_deref(&b, strct);
                      comp[i * 2] = nir_pack_64_2x32(&b,
                                                     nir_trim_vector(&b, load, 2));
                      if (num_components > 2)
@@ -2987,7 +2987,7 @@ lower_64bit_vars_function(nir_shader *shader, nir_function_impl *impl, nir_varia
                   }
                   dest = nir_vec(&b, comp, intr->num_components);
                }
-               nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, dest, instr);
+               nir_def_rewrite_uses_after(&intr->dest.ssa, dest, instr);
             }
             _mesa_set_add(deletes, instr);
             break;
@@ -3091,8 +3091,8 @@ split_blocks(nir_shader *nir)
                   deref->modes = nir_var_shader_temp;
                   parent->modes = nir_var_shader_temp;
                   b.cursor = nir_before_instr(instr);
-                  nir_ssa_def *dest = &nir_build_deref_var(&b, members[deref->strct.index])->dest.ssa;
-                  nir_ssa_def_rewrite_uses_after(&deref->dest.ssa, dest, &deref->instr);
+                  nir_def *dest = &nir_build_deref_var(&b, members[deref->strct.index])->dest.ssa;
+                  nir_def_rewrite_uses_after(&deref->dest.ssa, dest, &deref->instr);
                   nir_instr_remove(&deref->instr);
                   func_progress = true;
                   break;
@@ -3297,7 +3297,7 @@ flag_shadow_tex(nir_variable *var, struct zink_shader *zs)
    zs->fs.legacy_shadow_mask |= BITFIELD_BIT(sampler_id);
 }
 
-static nir_ssa_def *
+static nir_def *
 rewrite_tex_dest(nir_builder *b, nir_tex_instr *tex, nir_variable *var, struct zink_shader *zs)
 {
    assert(var);
@@ -3311,14 +3311,14 @@ rewrite_tex_dest(nir_builder *b, nir_tex_instr *tex, nir_variable *var, struct z
    bool rewrite_depth = tex->is_shadow && num_components > 1 && tex->op != nir_texop_tg4 && !tex->is_sparse;
    if (bit_size == dest_size && !rewrite_depth)
       return NULL;
-   nir_ssa_def *dest = &tex->dest.ssa;
+   nir_def *dest = &tex->dest.ssa;
    if (rewrite_depth && zs) {
       /* If only .x is used in the NIR, then it's effectively not a legacy depth
        * sample anyway and we don't want to ask for shader recompiles.  This is
        * the typical path, since GL_DEPTH_TEXTURE_MODE defaults to either RED or
        * LUMINANCE, so apps just use the first channel.
        */
-      if (nir_ssa_def_components_read(dest) & ~1) {
+      if (nir_def_components_read(dest) & ~1) {
          if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
             flag_shadow_tex(var, zs);
          else
@@ -3340,7 +3340,7 @@ rewrite_tex_dest(nir_builder *b, nir_tex_instr *tex, nir_variable *var, struct z
       }
       if (rewrite_depth)
          return dest;
-      nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, dest, dest->parent_instr);
+      nir_def_rewrite_uses_after(&tex->dest.ssa, dest, dest->parent_instr);
    } else if (rewrite_depth) {
       return dest;
    }
@@ -3391,7 +3391,7 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data)
    unsigned num_components = nir_dest_num_components(tex->dest);
    if (tex->is_shadow)
       tex->is_new_style_shadow = true;
-   nir_ssa_def *dest = rewrite_tex_dest(b, tex, var, NULL);
+   nir_def *dest = rewrite_tex_dest(b, tex, var, NULL);
    assert(dest || !state->shadow_only);
    if (!dest && !(swizzle_key->mask & BITFIELD_BIT(sampler_id)))
       return false;
@@ -3403,7 +3403,7 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data)
       /* these require manual swizzles */
       if (tex->op == nir_texop_tg4) {
          assert(!tex->is_shadow);
-         nir_ssa_def *swizzle;
+         nir_def *swizzle;
          switch (swizzle_key->swizzle[sampler_id].s[tex->component]) {
          case PIPE_SWIZZLE_0:
             swizzle = nir_imm_zero(b, 4, nir_dest_bit_size(tex->dest));
@@ -3420,10 +3420,10 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data)
             tex->component = 0;
             return true;
          }
-         nir_ssa_def_rewrite_uses_after(dest, swizzle, swizzle->parent_instr);
+         nir_def_rewrite_uses_after(dest, swizzle, swizzle->parent_instr);
          return true;
       }
-      nir_ssa_def *vec[4];
+      nir_def *vec[4];
       for (unsigned i = 0; i < ARRAY_SIZE(vec); i++) {
          switch (swizzle_key->swizzle[sampler_id].s[i]) {
          case PIPE_SWIZZLE_0:
@@ -3440,13 +3440,13 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data)
             break;
          }
       }
-      nir_ssa_def *swizzle = nir_vec(b, vec, num_components);
-      nir_ssa_def_rewrite_uses_after(dest, swizzle, swizzle->parent_instr);
+      nir_def *swizzle = nir_vec(b, vec, num_components);
+      nir_def_rewrite_uses_after(dest, swizzle, swizzle->parent_instr);
    } else {
       assert(tex->is_shadow);
-      nir_ssa_def *vec[4] = {dest, dest, dest, dest};
-      nir_ssa_def *splat = nir_vec(b, vec, num_components);
-      nir_ssa_def_rewrite_uses_after(dest, splat, splat->parent_instr);
+      nir_def *vec[4] = {dest, dest, dest, dest};
+      nir_def *splat = nir_vec(b, vec, num_components);
+      nir_def_rewrite_uses_after(dest, splat, splat->parent_instr);
    }
    return true;
 }
@@ -3480,9 +3480,9 @@ invert_point_coord_instr(nir_builder *b, nir_instr *instr, void *data)
    if (intr->intrinsic != nir_intrinsic_load_point_coord)
       return false;
    b->cursor = nir_after_instr(instr);
-   nir_ssa_def *def = nir_vec2(b, nir_channel(b, &intr->dest.ssa, 0),
+   nir_def *def = nir_vec2(b, nir_channel(b, &intr->dest.ssa, 0),
                                   nir_fsub_imm(b, 1.0, nir_channel(b, &intr->dest.ssa, 1)));
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, def, def->parent_instr);
+   nir_def_rewrite_uses_after(&intr->dest.ssa, def, def->parent_instr);
    return true;
 }
 
@@ -3806,8 +3806,8 @@ lower_baseinstance_instr(nir_builder *b, nir_instr *instr, void *data)
    if (intr->intrinsic != nir_intrinsic_load_instance_id)
       return false;
    b->cursor = nir_after_instr(instr);
-   nir_ssa_def *def = nir_isub(b, &intr->dest.ssa, nir_load_base_instance(b));
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, def, def->parent_instr);
+   nir_def *def = nir_isub(b, &intr->dest.ssa, nir_load_base_instance(b));
+   nir_def_rewrite_uses_after(&intr->dest.ssa, def, def->parent_instr);
    return true;
 }
 
@@ -4054,7 +4054,7 @@ lower_bindless_instr(nir_builder *b, nir_instr *in, void *data)
       unsigned c = nir_tex_instr_src_index(tex, nir_tex_src_coord);
       unsigned coord_components = nir_src_num_components(tex->src[c].src);
       if (coord_components < needed_components) {
-         nir_ssa_def *def = nir_pad_vector(b, tex->src[c].src.ssa, needed_components);
+         nir_def *def = nir_pad_vector(b, tex->src[c].src.ssa, needed_components);
          nir_instr_rewrite_src_ssa(in, &tex->src[c].src, def);
          tex->coord_components = needed_components;
       }
@@ -4134,9 +4134,9 @@ lower_bindless_io_instr(nir_builder *b, nir_instr *in, void *data)
    b->cursor = nir_before_instr(in);
    nir_deref_instr *deref = nir_build_deref_var(b, var);
    if (instr->intrinsic == nir_intrinsic_load_deref) {
-       nir_ssa_def *def = nir_load_deref(b, deref);
+       nir_def *def = nir_load_deref(b, deref);
        nir_instr_rewrite_src_ssa(in, &instr->src[0], def);
-       nir_ssa_def_rewrite_uses(&instr->dest.ssa, def);
+       nir_def_rewrite_uses(&instr->dest.ssa, def);
    } else {
       nir_store_deref(b, deref, instr->src[1].ssa, nir_intrinsic_write_mask(instr));
    }
@@ -4276,8 +4276,8 @@ convert_1d_shadow_tex(nir_builder *b, nir_instr *instr, void *data)
          continue;
       if (tex->src[c].src.ssa->num_components == tex->coord_components)
          continue;
-      nir_ssa_def *def;
-      nir_ssa_def *zero = nir_imm_zero(b, 1, tex->src[c].src.ssa->bit_size);
+      nir_def *def;
+      nir_def *zero = nir_imm_zero(b, 1, tex->src[c].src.ssa->bit_size);
       if (tex->src[c].src.ssa->num_components == 1)
          def = nir_vec2(b, tex->src[c].src.ssa, zero);
       else
@@ -4292,8 +4292,8 @@ convert_1d_shadow_tex(nir_builder *b, nir_instr *instr, void *data)
       assert(num_components < 3);
       /* take either xz or just x since this is promoted to 2D from 1D */
       uint32_t mask = num_components == 2 ? (1|4) : 1;
-      nir_ssa_def *dst = nir_channels(b, &tex->dest.ssa, mask);
-      nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, dst, dst->parent_instr);
+      nir_def *dst = nir_channels(b, &tex->dest.ssa, mask);
+      nir_def_rewrite_uses_after(&tex->dest.ssa, dst, dst->parent_instr);
    }
    return true;
 }
@@ -4372,7 +4372,7 @@ scan_nir(struct zink_screen *screen, nir_shader *shader, struct zink_shader *zs)
 }
 
 static bool
-is_residency_code(nir_ssa_def *src)
+is_residency_code(nir_def *src)
 {
    nir_instr *parent = src->parent_instr;
    while (1) {
@@ -4397,18 +4397,18 @@ lower_sparse_instr(nir_builder *b, nir_instr *in, void *data)
    nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in);
    if (instr->intrinsic == nir_intrinsic_sparse_residency_code_and) {
       b->cursor = nir_before_instr(&instr->instr);
-      nir_ssa_def *src0;
+      nir_def *src0;
       if (is_residency_code(instr->src[0].ssa))
          src0 = nir_is_sparse_texels_resident(b, 1, instr->src[0].ssa);
       else
          src0 = instr->src[0].ssa;
-      nir_ssa_def *src1;
+      nir_def *src1;
       if (is_residency_code(instr->src[1].ssa))
          src1 = nir_is_sparse_texels_resident(b, 1, instr->src[1].ssa);
       else
          src1 = instr->src[1].ssa;
-      nir_ssa_def *def = nir_iand(b, src0, src1);
-      nir_ssa_def_rewrite_uses_after(&instr->dest.ssa, def, in);
+      nir_def *def = nir_iand(b, src0, src1);
+      nir_def_rewrite_uses_after(&instr->dest.ssa, def, in);
       nir_instr_remove(in);
       return true;
    }
@@ -4424,10 +4424,10 @@ lower_sparse_instr(nir_builder *b, nir_instr *in, void *data)
    if (is_residency_code(instr->src[0].ssa)) {
       assert(parent->type == nir_instr_type_alu);
       nir_alu_instr *alu = nir_instr_as_alu(parent);
-      nir_ssa_def_rewrite_uses_after(instr->src[0].ssa, nir_channel(b, alu->src[0].src.ssa, 0), parent);
+      nir_def_rewrite_uses_after(instr->src[0].ssa, nir_channel(b, alu->src[0].src.ssa, 0), parent);
       nir_instr_remove(parent);
    } else {
-      nir_ssa_def *src;
+      nir_def *src;
       if (parent->type == nir_instr_type_intrinsic) {
          nir_intrinsic_instr *intr = nir_instr_as_intrinsic(parent);
          assert(intr->intrinsic == nir_intrinsic_is_sparse_texels_resident);
@@ -4443,7 +4443,7 @@ lower_sparse_instr(nir_builder *b, nir_instr *in, void *data)
          else
             src = nir_u2uN(b, src, instr->dest.ssa.bit_size);
       }
-      nir_ssa_def_rewrite_uses(&instr->dest.ssa, src);
+      nir_def_rewrite_uses(&instr->dest.ssa, src);
       nir_instr_remove(in);
    }
    return true;
@@ -4506,7 +4506,7 @@ split_bitfields_instr(nir_builder *b, nir_instr *in, void *data)
    if (num_components == 1)
       return false;
    b->cursor = nir_before_instr(in);
-   nir_ssa_def *dests[NIR_MAX_VEC_COMPONENTS];
+   nir_def *dests[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < num_components; i++) {
       if (alu->op == nir_op_bitfield_insert)
          dests[i] = nir_bitfield_insert(b,
@@ -4525,8 +4525,8 @@ split_bitfields_instr(nir_builder *b, nir_instr *in, void *data)
                                           nir_channel(b, alu->src[1].src.ssa, alu->src[1].swizzle[i]),
                                           nir_channel(b, alu->src[2].src.ssa, alu->src[2].swizzle[i]));
    }
-   nir_ssa_def *dest = nir_vec(b, dests, num_components);
-   nir_ssa_def_rewrite_uses_after(&alu->dest.dest.ssa, dest, in);
+   nir_def *dest = nir_vec(b, dests, num_components);
+   nir_def_rewrite_uses_after(&alu->dest.dest.ssa, dest, in);
    nir_instr_remove(in);
    return true;
 }
@@ -5242,7 +5242,7 @@ zink_shader_tcs_create(struct zink_screen *screen, nir_shader *tes, unsigned ver
 
    nir_builder b = nir_builder_at(nir_before_block(nir_start_block(impl)));
 
-   nir_ssa_def *invocation_id = nir_load_invocation_id(&b);
+   nir_def *invocation_id = nir_load_invocation_id(&b);
 
    nir_foreach_shader_in_variable(var, tes) {
       if (var->data.location == VARYING_SLOT_TESS_LEVEL_INNER || var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
@@ -5283,9 +5283,9 @@ zink_shader_tcs_create(struct zink_screen *screen, nir_shader *tes, unsigned ver
 
    create_gfx_pushconst(nir);
 
-   nir_ssa_def *load_inner = nir_load_push_constant_zink(&b, 2, 32,
+   nir_def *load_inner = nir_load_push_constant_zink(&b, 2, 32,
                                                          nir_imm_int(&b, ZINK_GFX_PUSHCONST_DEFAULT_INNER_LEVEL));
-   nir_ssa_def *load_outer = nir_load_push_constant_zink(&b, 4, 32,
+   nir_def *load_outer = nir_load_push_constant_zink(&b, 4, 32,
                                                          nir_imm_int(&b, ZINK_GFX_PUSHCONST_DEFAULT_OUTER_LEVEL));
 
    for (unsigned i = 0; i < 2; i++) {
index f31cc16..9e702d5 100644
@@ -76,27 +76,27 @@ lower_cubemap_to_array_filter(const nir_instr *instr, const void *mask)
 }
 
 typedef struct {
-   nir_ssa_def *rx;
-   nir_ssa_def *ry;
-   nir_ssa_def *rz;
-   nir_ssa_def *arx;
-   nir_ssa_def *ary;
-   nir_ssa_def *arz;
-   nir_ssa_def *array;
+   nir_def *rx;
+   nir_def *ry;
+   nir_def *rz;
+   nir_def *arx;
+   nir_def *ary;
+   nir_def *arz;
+   nir_def *array;
 } coord_t;
 
 
 /* This is taken from sp_tex_sample:convert_cube */
-static nir_ssa_def *
+static nir_def *
 evaluate_face_x(nir_builder *b, coord_t *coord)
 {
-   nir_ssa_def *sign = nir_fsign(b, coord->rx);
-   nir_ssa_def *positive = nir_fge_imm(b, coord->rx, 0.0);
-   nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arx);
+   nir_def *sign = nir_fsign(b, coord->rx);
+   nir_def *positive = nir_fge_imm(b, coord->rx, 0.0);
+   nir_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arx);
 
-   nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
-   nir_ssa_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
-   nir_ssa_def *face = nir_bcsel(b, positive, nir_imm_float(b, 0.0), nir_imm_float(b, 1.0));
+   nir_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
+   nir_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
+   nir_def *face = nir_bcsel(b, positive, nir_imm_float(b, 0.0), nir_imm_float(b, 1.0));
 
    if (coord->array)
       face = nir_fadd(b, face, coord->array);
@@ -104,16 +104,16 @@ evaluate_face_x(nir_builder *b, coord_t *coord)
    return nir_vec3(b, x,y, face);
 }
 
-static nir_ssa_def *
+static nir_def *
 evaluate_face_y(nir_builder *b, coord_t *coord)
 {
-   nir_ssa_def *sign = nir_fsign(b, coord->ry);
-   nir_ssa_def *positive = nir_fge_imm(b, coord->ry, 0.0);
-   nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, 0.5), coord->ary);
+   nir_def *sign = nir_fsign(b, coord->ry);
+   nir_def *positive = nir_fge_imm(b, coord->ry, 0.0);
+   nir_def *ima = nir_fdiv(b, nir_imm_float(b, 0.5), coord->ary);
 
-   nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, ima, coord->rx), 0.5);
-   nir_ssa_def *y = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
-   nir_ssa_def *face = nir_bcsel(b, positive, nir_imm_float(b, 2.0), nir_imm_float(b, 3.0));
+   nir_def *x = nir_fadd_imm(b, nir_fmul(b, ima, coord->rx), 0.5);
+   nir_def *y = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
+   nir_def *face = nir_bcsel(b, positive, nir_imm_float(b, 2.0), nir_imm_float(b, 3.0));
 
    if (coord->array)
       face = nir_fadd(b, face, coord->array);
@@ -121,16 +121,16 @@ evaluate_face_y(nir_builder *b, coord_t *coord)
    return nir_vec3(b, x,y, face);
 }
 
-static nir_ssa_def *
+static nir_def *
 evaluate_face_z(nir_builder *b, coord_t *coord)
 {
-   nir_ssa_def *sign = nir_fsign(b, coord->rz);
-   nir_ssa_def *positive = nir_fge_imm(b, coord->rz, 0.0);
-   nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arz);
+   nir_def *sign = nir_fsign(b, coord->rz);
+   nir_def *positive = nir_fge_imm(b, coord->rz, 0.0);
+   nir_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arz);
 
-   nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), nir_fneg(b, coord->rx)), 0.5);
-   nir_ssa_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
-   nir_ssa_def *face = nir_bcsel(b, positive, nir_imm_float(b, 4.0), nir_imm_float(b, 5.0));
+   nir_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), nir_fneg(b, coord->rx)), 0.5);
+   nir_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
+   nir_def *face = nir_bcsel(b, positive, nir_imm_float(b, 4.0), nir_imm_float(b, 5.0));
 
    if (coord->array)
       face = nir_fadd(b, face, coord->array);
@@ -138,8 +138,8 @@ evaluate_face_z(nir_builder *b, coord_t *coord)
    return nir_vec3(b, x,y, face);
 }
 
-static nir_ssa_def *
-create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coord, nir_texop op)
+static nir_def *
+create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_def *coord, nir_texop op)
 {
    nir_tex_instr *array_tex;
 
@@ -168,7 +168,7 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *
 
       array_tex->src[s].src_type = tex->src[i].src_type;
       if (psrc->ssa->num_components != nir_tex_instr_src_size(array_tex, s)) {
-         nir_ssa_def *c = nir_trim_vector(b, psrc->ssa,
+         nir_def *c = nir_trim_vector(b, psrc->ssa,
                                           nir_tex_instr_src_size(array_tex, s));
          array_tex->src[s].src = nir_src_for_ssa(c);
       } else
@@ -183,8 +183,8 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *
    return &array_tex->dest.ssa;
 }
 
-static nir_ssa_def *
-handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *face, nir_ssa_def *array_slice_cube_base, nir_ssa_def *tex_size)
+static nir_def *
+handle_cube_edge(nir_builder *b, nir_def *x, nir_def *y, nir_def *face, nir_def *array_slice_cube_base, nir_def *tex_size)
 {
    enum cube_remap
    {
@@ -252,22 +252,22 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
       cube_remap_pos_y
    };
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
    
    /* Doesn't matter since the texture is square */
    tex_size = nir_channel(b, tex_size, 0);
 
-   nir_ssa_def *x_on = nir_iand(b, nir_ige(b, x, zero), nir_ige(b, tex_size, x));
-   nir_ssa_def *y_on = nir_iand(b, nir_ige(b, y, zero), nir_ige(b, tex_size, y));
-   nir_ssa_def *one_on = nir_ixor(b, x_on, y_on);
+   nir_def *x_on = nir_iand(b, nir_ige(b, x, zero), nir_ige(b, tex_size, x));
+   nir_def *y_on = nir_iand(b, nir_ige(b, y, zero), nir_ige(b, tex_size, y));
+   nir_def *one_on = nir_ixor(b, x_on, y_on);
 
    /* If the sample did not fall off the face in either dimension, then set output = input */
-   nir_ssa_def *x_result = x;
-   nir_ssa_def *y_result = y;
-   nir_ssa_def *face_result = face;
+   nir_def *x_result = x;
+   nir_def *y_result = y;
+   nir_def *face_result = face;
 
    /* otherwise, if the sample fell off the face in either the X or the Y direction, remap to the new face */
-   nir_ssa_def *remap_predicates[4] =
+   nir_def *remap_predicates[4] =
    {
       nir_iand(b, one_on, nir_ilt(b, x, zero)),
       nir_iand(b, one_on, nir_ilt(b, tex_size, x)),
@@ -275,7 +275,7 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
       nir_iand(b, one_on, nir_ilt(b, tex_size, y)),
    };
 
-   nir_ssa_def *remap_array[cube_remap_size];
+   nir_def *remap_array[cube_remap_size];
 
    remap_array[cube_remap_zero] = zero;
    remap_array[cube_remap_x] = x;
@@ -290,7 +290,7 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
 
       /* For each possible original face */
       for (unsigned j = 0; j < 6; j++) {
-         nir_ssa_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq_imm(b, face, j));
+         nir_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq_imm(b, face, j));
 
          x_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_x], x_result);
          y_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_y], y_result);
@@ -301,49 +301,49 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
    return nir_vec3(b, x_result, y_result, nir_iadd(b, face_result, array_slice_cube_base));
 }
 
-static nir_ssa_def *
-handle_cube_gather(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coord)
+static nir_def *
+handle_cube_gather(nir_builder *b, nir_tex_instr *tex, nir_def *coord)
 {
    tex->is_array = true;
-   nir_ssa_def *tex_size = nir_get_texture_size(b, tex);
+   nir_def *tex_size = nir_get_texture_size(b, tex);
 
    /* nir_get_texture_size puts the cursor before the tex op */
    b->cursor = nir_after_instr(coord->parent_instr);
 
-   nir_ssa_def *const_05 = nir_imm_float(b, 0.5f);
-   nir_ssa_def *texel_coords = nir_fmul(b, nir_trim_vector(b, coord, 2),
+   nir_def *const_05 = nir_imm_float(b, 0.5f);
+   nir_def *texel_coords = nir_fmul(b, nir_trim_vector(b, coord, 2),
                                         nir_i2f32(b, nir_trim_vector(b, tex_size, 2)));
 
-   nir_ssa_def *x_orig = nir_channel(b, texel_coords, 0);
-   nir_ssa_def *y_orig = nir_channel(b, texel_coords, 1);
+   nir_def *x_orig = nir_channel(b, texel_coords, 0);
+   nir_def *y_orig = nir_channel(b, texel_coords, 1);
 
-   nir_ssa_def *x_pos = nir_f2i32(b, nir_fadd(b, x_orig, const_05));
-   nir_ssa_def *x_neg = nir_f2i32(b, nir_fsub(b, x_orig, const_05));
-   nir_ssa_def *y_pos = nir_f2i32(b, nir_fadd(b, y_orig, const_05));
-   nir_ssa_def *y_neg = nir_f2i32(b, nir_fsub(b, y_orig, const_05));
-   nir_ssa_def *coords[4][2] = {
+   nir_def *x_pos = nir_f2i32(b, nir_fadd(b, x_orig, const_05));
+   nir_def *x_neg = nir_f2i32(b, nir_fsub(b, x_orig, const_05));
+   nir_def *y_pos = nir_f2i32(b, nir_fadd(b, y_orig, const_05));
+   nir_def *y_neg = nir_f2i32(b, nir_fsub(b, y_orig, const_05));
+   nir_def *coords[4][2] = {
       { x_neg, y_pos },
       { x_pos, y_pos },
       { x_pos, y_neg },
       { x_neg, y_neg },
    };
 
-   nir_ssa_def *array_slice_2d = nir_f2i32(b, nir_channel(b, coord, 2));
-   nir_ssa_def *face = nir_imod_imm(b, array_slice_2d, 6);
-   nir_ssa_def *array_slice_cube_base = nir_isub(b, array_slice_2d, face);
+   nir_def *array_slice_2d = nir_f2i32(b, nir_channel(b, coord, 2));
+   nir_def *face = nir_imod_imm(b, array_slice_2d, 6);
+   nir_def *array_slice_cube_base = nir_isub(b, array_slice_2d, face);
 
-   nir_ssa_def *channels[4];
+   nir_def *channels[4];
    for (unsigned i = 0; i < 4; ++i) {
-      nir_ssa_def *final_coord = handle_cube_edge(b, coords[i][0], coords[i][1], face, array_slice_cube_base, tex_size);
-      nir_ssa_def *sampled_val = create_array_tex_from_cube_tex(b, tex, final_coord, nir_texop_txf);
+      nir_def *final_coord = handle_cube_edge(b, coords[i][0], coords[i][1], face, array_slice_cube_base, tex_size);
+      nir_def *sampled_val = create_array_tex_from_cube_tex(b, tex, final_coord, nir_texop_txf);
       channels[i] = nir_channel(b, sampled_val, tex->component);
    }
 
    return nir_vec(b, channels, 4);
 }
 
-static nir_ssa_def *
-lower_cube_coords(nir_builder *b, nir_ssa_def *coord, bool is_array)
+static nir_def *
+lower_cube_coords(nir_builder *b, nir_def *coord, bool is_array)
 {
    coord_t coords;
    coords.rx = nir_channel(b, coord, 0);
@@ -356,30 +356,30 @@ lower_cube_coords(nir_builder *b, nir_ssa_def *coord, bool is_array)
    if (is_array)
       coords.array = nir_fmul_imm(b, nir_channel(b, coord, 3), 6.0f);
 
-   nir_ssa_def *use_face_x = nir_iand(b,
+   nir_def *use_face_x = nir_iand(b,
                                       nir_fge(b, coords.arx, coords.ary),
                                       nir_fge(b, coords.arx, coords.arz));
 
    nir_if *use_face_x_if = nir_push_if(b, use_face_x);
-   nir_ssa_def *face_x_coord = evaluate_face_x(b, &coords);
+   nir_def *face_x_coord = evaluate_face_x(b, &coords);
    nir_if *use_face_x_else = nir_push_else(b, use_face_x_if);
 
-   nir_ssa_def *use_face_y = nir_iand(b,
+   nir_def *use_face_y = nir_iand(b,
                                       nir_fge(b, coords.ary, coords.arx),
                                       nir_fge(b, coords.ary, coords.arz));
 
    nir_if *use_face_y_if = nir_push_if(b, use_face_y);
-   nir_ssa_def *face_y_coord = evaluate_face_y(b, &coords);
+   nir_def *face_y_coord = evaluate_face_y(b, &coords);
    nir_if *use_face_y_else = nir_push_else(b, use_face_y_if);
 
-   nir_ssa_def *face_z_coord = evaluate_face_z(b, &coords);
+   nir_def *face_z_coord = evaluate_face_z(b, &coords);
 
    nir_pop_if(b, use_face_y_else);
-   nir_ssa_def *face_y_or_z_coord = nir_if_phi(b, face_y_coord, face_z_coord);
+   nir_def *face_y_or_z_coord = nir_if_phi(b, face_y_coord, face_z_coord);
    nir_pop_if(b, use_face_x_else);
 
    // This contains in xy the normalized sample coordinates, and in z the face index
-   nir_ssa_def *coord_and_face = nir_if_phi(b, face_x_coord, face_y_or_z_coord);
+   nir_def *coord_and_face = nir_if_phi(b, face_x_coord, face_y_or_z_coord);
 
    return coord_and_face;
 }
@@ -438,7 +438,7 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
       txl->src[s].src_type = tex->src[i].src_type;
       s++;
    }
-   nir_ssa_def *lod = nir_get_texture_lod(b, tex);
+   nir_def *lod = nir_get_texture_lod(b, tex);
 
    if (bias_idx >= 0)
       lod = nir_fadd(b, lod, nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
@@ -450,11 +450,11 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
                      nir_dest_num_components(tex->dest),
                      nir_dest_bit_size(tex->dest));
    nir_builder_instr_insert(b, &txl->instr);
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
+   nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
    return txl;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_cube_sample(nir_builder *b, nir_tex_instr *tex)
 {
    if (!tex->is_shadow && (tex->op == nir_texop_txb || tex->op == nir_texop_tex)) {
@@ -465,8 +465,8 @@ lower_cube_sample(nir_builder *b, nir_tex_instr *tex)
    assert(coord_index >= 0);
 
    /* Evaluate the face and the xy coordinates for a 2D tex op */
-   nir_ssa_def *coord = tex->src[coord_index].src.ssa;
-   nir_ssa_def *coord_and_face = lower_cube_coords(b, coord, tex->is_array);
+   nir_def *coord = tex->src[coord_index].src.ssa;
+   nir_def *coord_and_face = lower_cube_coords(b, coord, tex->is_array);
 
    rewrite_cube_var_type(b, tex);
 
@@ -476,7 +476,7 @@ lower_cube_sample(nir_builder *b, nir_tex_instr *tex)
       return create_array_tex_from_cube_tex(b, tex, coord_and_face, tex->op);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_cube_txs(nir_builder *b, nir_tex_instr *tex)
 {
    b->cursor = nir_after_instr(&tex->instr);
@@ -486,15 +486,15 @@ lower_cube_txs(nir_builder *b, nir_tex_instr *tex)
    /* force max components to unbreak textureSize().xy */
    tex->dest.ssa.num_components = 3;
    tex->is_array = true;
-   nir_ssa_def *array_dim = nir_channel(b, &tex->dest.ssa, 2);
-   nir_ssa_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
-   nir_ssa_def *size = nir_vec3(b, nir_channel(b, &tex->dest.ssa, 0),
+   nir_def *array_dim = nir_channel(b, &tex->dest.ssa, 2);
+   nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
+   nir_def *size = nir_vec3(b, nir_channel(b, &tex->dest.ssa, 0),
                                    nir_channel(b, &tex->dest.ssa, 1),
                                    cube_array_dim);
    return nir_trim_vector(b, size, num_components);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_cubemap_to_array_tex(nir_builder *b, nir_tex_instr *tex)
 {
    switch (tex->op) {
@@ -512,7 +512,7 @@ lower_cubemap_to_array_tex(nir_builder *b, nir_tex_instr *tex)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_cubemap_to_array_impl(nir_builder *b, nir_instr *instr,
                                UNUSED void *_options)
 {
index f3bcc98..d3d5253 100644
@@ -115,7 +115,7 @@ clover_lower_nir_filter(const nir_instr *instr, const void *)
    return instr->type == nir_instr_type_intrinsic;
 }
 
-static nir_ssa_def *
+static nir_def *
 clover_lower_nir_instr(nir_builder *b, nir_instr *instr, void *_state)
 {
    clover_lower_nir_state *state = reinterpret_cast<clover_lower_nir_state*>(_state);
@@ -137,7 +137,7 @@ clover_lower_nir_instr(nir_builder *b, nir_instr *instr, void *_state)
       return nir_load_var(b, state->printf_buffer);
    }
    case nir_intrinsic_load_base_global_invocation_id: {
-      nir_ssa_def *loads[3];
+      nir_def *loads[3];
 
       /* create variables if we didn't do so already */
       if (!state->offset_vars[0]) {
index e7cbc97..44d57ad 100644
@@ -203,8 +203,8 @@ lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint
                   for (unsigned i = 0; i < num_uniforms; i++) {
                      if (offset == uniform_dw_offsets[i]) {
                         b.cursor = nir_before_instr(&intr->instr);
-                        nir_ssa_def *def = nir_imm_int(&b, uniform_values[i]);
-                        nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
+                        nir_def *def = nir_imm_int(&b, uniform_values[i]);
+                        nir_def_rewrite_uses(&intr->dest.ssa, def);
                         nir_instr_remove(&intr->instr);
                         break;
                      }
@@ -214,7 +214,7 @@ lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint
                    * found component load with constant load.
                    */
                   uint32_t max_offset = offset + num_components;
-                  nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS] = {0};
+                  nir_def *components[NIR_MAX_VEC_COMPONENTS] = {0};
                   bool found = false;
 
                   b.cursor = nir_before_instr(&intr->instr);
@@ -248,7 +248,7 @@ lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint
                   }
 
                   /* Replace the original uniform load. */
-                  nir_ssa_def_rewrite_uses(&intr->dest.ssa,
+                  nir_def_rewrite_uses(&intr->dest.ssa,
                                            nir_vec(&b, components, num_components));
                   nir_instr_remove(&intr->instr);
                }
index 3e52871..b46f7ed 100644
@@ -25,7 +25,7 @@
 #include "nir_builder.h"
 #include "lvp_lower_vulkan_resource.h"
 
-static nir_ssa_def *
+static nir_def *
 load_frag_coord(nir_builder *b)
 {
    nir_variable *pos =
@@ -61,14 +61,14 @@ try_lower_input_load(nir_intrinsic_instr *load, bool use_fragcoord_sysval)
 
    nir_builder b = nir_builder_at(nir_before_instr(&load->instr));
 
-   nir_ssa_def *frag_coord = use_fragcoord_sysval ? nir_load_frag_coord(&b)
+   nir_def *frag_coord = use_fragcoord_sysval ? nir_load_frag_coord(&b)
                                                   : load_frag_coord(&b);
    frag_coord = nir_f2i32(&b, frag_coord);
-   nir_ssa_def *offset = nir_ssa_for_src(&b, load->src[1], 2);
-   nir_ssa_def *pos = nir_iadd(&b, frag_coord, offset);
+   nir_def *offset = nir_ssa_for_src(&b, load->src[1], 2);
+   nir_def *pos = nir_iadd(&b, frag_coord, offset);
 
-   nir_ssa_def *layer = nir_load_view_index(&b);
-   nir_ssa_def *coord =
+   nir_def *layer = nir_load_view_index(&b);
+   nir_def *coord =
       nir_vec4(&b, nir_channel(&b, pos, 0), nir_channel(&b, pos, 1), layer, nir_imm_int(&b, 0));
 
    nir_instr_rewrite_src(&load->instr, &load->src[1], nir_src_for_ssa(coord));
index eeaaaad..23b1c1b 100644
@@ -54,7 +54,7 @@ lower_vulkan_resource_index(const nir_instr *instr, const void *data_cb)
    return false;
 }
 
-static nir_ssa_def *lower_vri_intrin_vri(struct nir_builder *b,
+static nir_def *lower_vri_intrin_vri(struct nir_builder *b,
                                            nir_instr *instr, void *data_cb)
 {
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
@@ -68,28 +68,28 @@ static nir_ssa_def *lower_vri_intrin_vri(struct nir_builder *b,
                    nir_imm_int(b, 0));
 }
 
-static nir_ssa_def *lower_vri_intrin_vrri(struct nir_builder *b,
+static nir_def *lower_vri_intrin_vrri(struct nir_builder *b,
                                           nir_instr *instr, void *data_cb)
 {
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-   nir_ssa_def *old_index = nir_ssa_for_src(b, intrin->src[0], 3);
-   nir_ssa_def *delta = nir_ssa_for_src(b, intrin->src[1], 1);
+   nir_def *old_index = nir_ssa_for_src(b, intrin->src[0], 3);
+   nir_def *delta = nir_ssa_for_src(b, intrin->src[1], 1);
    return nir_vec3(b, nir_channel(b, old_index, 0),
                    nir_iadd(b, nir_channel(b, old_index, 1), delta),
                    nir_channel(b, old_index, 2));
 }
 
-static nir_ssa_def *lower_vri_intrin_lvd(struct nir_builder *b,
+static nir_def *lower_vri_intrin_lvd(struct nir_builder *b,
                                          nir_instr *instr, void *data_cb)
 {
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
    return nir_ssa_for_src(b, intrin->src[0], 3);
 }
 
-static nir_ssa_def *
+static nir_def *
 vulkan_resource_from_deref(nir_builder *b, nir_deref_instr *deref, const struct lvp_pipeline_layout *layout)
 {
-   nir_ssa_def *index = nir_imm_int(b, 0);
+   nir_def *index = nir_imm_int(b, 0);
 
    while (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
@@ -129,7 +129,7 @@ static void lower_vri_instr_tex(struct nir_builder *b,
          continue;
       }
 
-      nir_ssa_def *resource = vulkan_resource_from_deref(b, deref, layout);
+      nir_def *resource = vulkan_resource_from_deref(b, deref, layout);
       nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[i].src, resource);
    }
 }
@@ -143,7 +143,7 @@ lower_image_intrinsic(nir_builder *b,
 
    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
 
-   nir_ssa_def *resource = vulkan_resource_from_deref(b, deref, layout);
+   nir_def *resource = vulkan_resource_from_deref(b, deref, layout);
    nir_rewrite_image_intrinsic(intrin, resource, true);
 }
 
@@ -173,13 +173,13 @@ lower_load_ubo(nir_builder *b, nir_instr *instr, void *data_cb)
 
    nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(nir_imm_int(b, binding.desc_set + 1)));
 
-   nir_ssa_def *offset = nir_iadd_imm(b, intrin->src[1].ssa, bind_layout->uniform_block_offset);
+   nir_def *offset = nir_iadd_imm(b, intrin->src[1].ssa, bind_layout->uniform_block_offset);
    nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(offset));
 
    return true;
 }
 
-static nir_ssa_def *lower_vri_instr(struct nir_builder *b,
+static nir_def *lower_vri_instr(struct nir_builder *b,
                                     nir_instr *instr, void *data_cb)
 {
    if (instr->type == nir_instr_type_intrinsic) {
@@ -197,7 +197,7 @@ static nir_ssa_def *lower_vri_instr(struct nir_builder *b,
       case nir_intrinsic_get_ssbo_size: {
          /* Ignore the offset component. */
          b->cursor = nir_before_instr(instr);
-         nir_ssa_def *resource = nir_ssa_for_src(b, intrin->src[0], 2);
+         nir_def *resource = nir_ssa_for_src(b, intrin->src[0], 2);
          nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                nir_src_for_ssa(resource));
          return NULL;
index bd9857e..3e2f966 100644
@@ -179,7 +179,7 @@ find_tex(const nir_instr *instr, const void *data_cb)
    return false;
 }
 
-static nir_ssa_def *
+static nir_def *
 fixup_tex_instr(struct nir_builder *b, nir_instr *instr, void *data_cb)
 {
    nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
index 40c9c4b..10ca33c 100644
@@ -11,7 +11,7 @@ rusticl_lower_intrinsics_filter(const nir_instr* instr, const void* state)
     return instr->type == nir_instr_type_intrinsic;
 }
 
-static nir_ssa_def*
+static nir_def*
 rusticl_lower_intrinsics_instr(
     nir_builder *b,
     nir_instr *instr,
@@ -25,7 +25,7 @@ rusticl_lower_intrinsics_instr(
     case nir_intrinsic_image_deref_order: {
         int32_t offset;
         nir_deref_instr *deref;
-        nir_ssa_def *val;
+        nir_def *val;
         nir_variable *var;
 
         if (intrins->intrinsic == nir_intrinsic_image_deref_format) {
@@ -84,18 +84,18 @@ rusticl_lower_intrinsics(nir_shader *nir, struct rusticl_lower_state* state)
     );
 }
 
-static nir_ssa_def*
+static nir_def*
 rusticl_lower_input_instr(struct nir_builder *b, nir_instr *instr, void *_)
 {
    nir_intrinsic_instr *intrins = nir_instr_as_intrinsic(instr);
    if (intrins->intrinsic != nir_intrinsic_load_kernel_input)
       return NULL;
 
-   nir_ssa_def *ubo_idx = nir_imm_int(b, 0);
-   nir_ssa_def *uniform_offset = nir_ssa_for_src(b, intrins->src[0], 1);
+   nir_def *ubo_idx = nir_imm_int(b, 0);
+   nir_def *uniform_offset = nir_ssa_for_src(b, intrins->src[0], 1);
 
    assert(intrins->dest.ssa.bit_size >= 8);
-   nir_ssa_def *load_result =
+   nir_def *load_result =
       nir_load_ubo(b, intrins->num_components, intrins->dest.ssa.bit_size,
                    ubo_idx, nir_iadd_imm(b, uniform_offset, nir_intrinsic_base(intrins)));
 
index b633444..c9014b1 100644
@@ -45,11 +45,11 @@ static void lower_vulkan_resource_index(nir_builder *b,
    unsigned binding = nir_intrinsic_binding(intr);
    unsigned desc_type = nir_intrinsic_desc_type(intr);
 
-   nir_ssa_def *def = nir_vec3(b,
+   nir_def *def = nir_vec3(b,
                                nir_imm_int(b, desc_set),
                                nir_imm_int(b, binding),
                                nir_imm_int(b, desc_type));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
+   nir_def_rewrite_uses(&intr->dest.ssa, def);
    nir_instr_remove(&intr->instr);
 }
 
@@ -61,7 +61,7 @@ static void lower_load_global_constant_to_scalar(nir_builder *b,
 
    assert(intr->num_components > 1);
 
-   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
+   nir_def *loads[NIR_MAX_VEC_COMPONENTS];
 
    for (uint8_t i = 0; i < intr->num_components; i++) {
       nir_intrinsic_instr *chan_intr =
@@ -86,7 +86,7 @@ static void lower_load_global_constant_to_scalar(nir_builder *b,
       loads[i] = &chan_intr->dest.ssa;
    }
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa,
+   nir_def_rewrite_uses(&intr->dest.ssa,
                             nir_vec(b, loads, intr->num_components));
    nir_instr_remove(&intr->instr);
 }
index 8ffc0bf..4120bf4 100644
@@ -41,7 +41,7 @@ static void insert_pfo(nir_builder *b,
    /* TODO: Verify type is vec4. */
 
    /* Pack the output color components into U8888 format. */
-   nir_ssa_def *new_output_src_ssa = nir_pack_unorm_4x8(b, output_src->ssa);
+   nir_def *new_output_src_ssa = nir_pack_unorm_4x8(b, output_src->ssa);
    nir_src new_output_src = nir_src_for_ssa(new_output_src_ssa);
 
    /* Update the store_output intrinsic. */
index a4d3bf1..3908e87 100644
@@ -723,7 +723,7 @@ static inline void rogue_feedback_used_regs(rogue_build_ctx *ctx,
       rogue_count_used_regs(shader, ROGUE_REG_CLASS_INTERNAL);
 }
 
-static bool ssa_def_cb(nir_ssa_def *ssa, void *state)
+static bool ssa_def_cb(nir_def *ssa, void *state)
 {
    rogue_shader *shader = (rogue_shader *)state;
 
index 6ceb413..657b34e 100644
@@ -356,7 +356,7 @@ lower_base_workgroup_id(nir_builder *b, nir_instr *instr, UNUSED void *data)
       return false;
 
    b->cursor = nir_instr_remove(&intrin->instr);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_imm_zero(b, 3, 32));
+   nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_zero(b, 3, 32));
    return true;
 }
 
index 46d20c1..e5ee799 100644
@@ -63,12 +63,12 @@ brw_blorp_blit_vars_init(nir_builder *b, struct brw_blorp_blit_vars *v,
 #undef LOAD_INPUT
 }
 
-static nir_ssa_def *
+static nir_def *
 blorp_blit_get_frag_coords(nir_builder *b,
                            const struct brw_blorp_blit_prog_key *key,
                            struct brw_blorp_blit_vars *v)
 {
-   nir_ssa_def *coord = nir_f2i32(b, nir_load_frag_coord(b));
+   nir_def *coord = nir_f2i32(b, nir_load_frag_coord(b));
 
    /* Account for destination surface intratile offset
     *
@@ -90,12 +90,12 @@ blorp_blit_get_frag_coords(nir_builder *b,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 blorp_blit_get_cs_dst_coords(nir_builder *b,
                              const struct brw_blorp_blit_prog_key *key,
                              struct brw_blorp_blit_vars *v)
 {
-   nir_ssa_def *coord = nir_load_global_invocation_id(b, 32);
+   nir_def *coord = nir_load_global_invocation_id(b, 32);
 
    /* Account for destination surface intratile offset
     *
@@ -116,15 +116,15 @@ blorp_blit_get_cs_dst_coords(nir_builder *b,
  * Emit code to translate from destination (X, Y) coordinates to source (X, Y)
  * coordinates.
  */
-static nir_ssa_def *
-blorp_blit_apply_transform(nir_builder *b, nir_ssa_def *src_pos,
+static nir_def *
+blorp_blit_apply_transform(nir_builder *b, nir_def *src_pos,
                            struct brw_blorp_blit_vars *v)
 {
-   nir_ssa_def *coord_transform = nir_load_var(b, v->v_coord_transform);
+   nir_def *coord_transform = nir_load_var(b, v->v_coord_transform);
 
-   nir_ssa_def *offset = nir_vec2(b, nir_channel(b, coord_transform, 1),
+   nir_def *offset = nir_vec2(b, nir_channel(b, coord_transform, 1),
                                      nir_channel(b, coord_transform, 3));
-   nir_ssa_def *mul = nir_vec2(b, nir_channel(b, coord_transform, 0),
+   nir_def *mul = nir_vec2(b, nir_channel(b, coord_transform, 0),
                                   nir_channel(b, coord_transform, 2));
 
    return nir_fadd(b, nir_fmul(b, src_pos, mul), offset);
@@ -132,7 +132,7 @@ blorp_blit_apply_transform(nir_builder *b, nir_ssa_def *src_pos,
 
 static nir_tex_instr *
 blorp_create_nir_tex_instr(nir_builder *b, struct brw_blorp_blit_vars *v,
-                           nir_texop op, nir_ssa_def *pos, unsigned num_srcs,
+                           nir_texop op, nir_def *pos, unsigned num_srcs,
                            nir_alu_type dst_type)
 {
    nir_tex_instr *tex = nir_tex_instr_create(b->shader, num_srcs);
@@ -168,9 +168,9 @@ blorp_create_nir_tex_instr(nir_builder *b, struct brw_blorp_blit_vars *v,
    return tex;
 }
 
-static nir_ssa_def *
+static nir_def *
 blorp_nir_tex(nir_builder *b, struct brw_blorp_blit_vars *v,
-              const struct brw_blorp_blit_prog_key *key, nir_ssa_def *pos)
+              const struct brw_blorp_blit_prog_key *key, nir_def *pos)
 {
    if (key->need_src_offset)
       pos = nir_fadd(b, pos, nir_i2f32(b, nir_load_var(b, v->v_src_offset)));
@@ -192,9 +192,9 @@ blorp_nir_tex(nir_builder *b, struct brw_blorp_blit_vars *v,
    return &tex->dest.ssa;
 }
 
-static nir_ssa_def *
+static nir_def *
 blorp_nir_txf(nir_builder *b, struct brw_blorp_blit_vars *v,
-              nir_ssa_def *pos, nir_alu_type dst_type)
+              nir_def *pos, nir_alu_type dst_type)
 {
    nir_tex_instr *tex =
       blorp_create_nir_tex_instr(b, v, nir_texop_txf, pos, 2, dst_type);
@@ -207,9 +207,9 @@ blorp_nir_txf(nir_builder *b, struct brw_blorp_blit_vars *v,
    return &tex->dest.ssa;
 }
 
-static nir_ssa_def *
+static nir_def *
 blorp_nir_txf_ms(nir_builder *b, struct brw_blorp_blit_vars *v,
-                 nir_ssa_def *pos, nir_ssa_def *mcs, nir_alu_type dst_type)
+                 nir_def *pos, nir_def *mcs, nir_alu_type dst_type)
 {
    nir_tex_instr *tex =
       blorp_create_nir_tex_instr(b, v, nir_texop_txf_ms, pos, 3, dst_type);
@@ -234,9 +234,9 @@ blorp_nir_txf_ms(nir_builder *b, struct brw_blorp_blit_vars *v,
    return &tex->dest.ssa;
 }
 
-static nir_ssa_def *
+static nir_def *
 blorp_blit_txf_ms_mcs(nir_builder *b, struct brw_blorp_blit_vars *v,
-                      nir_ssa_def *pos)
+                      nir_def *pos)
 {
    nir_tex_instr *tex =
       blorp_create_nir_tex_instr(b, v, nir_texop_txf_ms_mcs_intel,
@@ -258,12 +258,12 @@ blorp_blit_txf_ms_mcs(nir_builder *b, struct brw_blorp_blit_vars *v,
  *
  * (See brw_blorp_build_nir_shader).
  */
-static inline nir_ssa_def *
-blorp_nir_retile_y_to_w(nir_builder *b, nir_ssa_def *pos)
+static inline nir_def *
+blorp_nir_retile_y_to_w(nir_builder *b, nir_def *pos)
 {
    assert(pos->num_components == 2);
-   nir_ssa_def *x_Y = nir_channel(b, pos, 0);
-   nir_ssa_def *y_Y = nir_channel(b, pos, 1);
+   nir_def *x_Y = nir_channel(b, pos, 0);
+   nir_def *y_Y = nir_channel(b, pos, 1);
 
    /* Given X and Y coordinates that describe an address using Y tiling,
     * translate to the X and Y coordinates that describe the same address
@@ -292,12 +292,12 @@ blorp_nir_retile_y_to_w(nir_builder *b, nir_ssa_def *pos)
     *   X' = (X & ~0b1011) >> 1 | (Y & 0b1) << 2 | X & 0b1         (4)
     *   Y' = (Y & ~0b1) << 1 | (X & 0b1000) >> 2 | (X & 0b10) >> 1
     */
-   nir_ssa_def *x_W = nir_imm_int(b, 0);
+   nir_def *x_W = nir_imm_int(b, 0);
    x_W = nir_mask_shift_or(b, x_W, x_Y, 0xfffffff4, -1);
    x_W = nir_mask_shift_or(b, x_W, y_Y, 0x1, 2);
    x_W = nir_mask_shift_or(b, x_W, x_Y, 0x1, 0);
 
-   nir_ssa_def *y_W = nir_imm_int(b, 0);
+   nir_def *y_W = nir_imm_int(b, 0);
    y_W = nir_mask_shift_or(b, y_W, y_Y, 0xfffffffe, 1);
    y_W = nir_mask_shift_or(b, y_W, x_Y, 0x8, -2);
    y_W = nir_mask_shift_or(b, y_W, x_Y, 0x2, -1);
@@ -314,12 +314,12 @@ blorp_nir_retile_y_to_w(nir_builder *b, nir_ssa_def *pos)
  *
  * (See brw_blorp_build_nir_shader).
  */
-static inline nir_ssa_def *
-blorp_nir_retile_w_to_y(nir_builder *b, nir_ssa_def *pos)
+static inline nir_def *
+blorp_nir_retile_w_to_y(nir_builder *b, nir_def *pos)
 {
    assert(pos->num_components == 2);
-   nir_ssa_def *x_W = nir_channel(b, pos, 0);
-   nir_ssa_def *y_W = nir_channel(b, pos, 1);
+   nir_def *x_W = nir_channel(b, pos, 0);
+   nir_def *y_W = nir_channel(b, pos, 1);
 
    /* Applying the same logic as above, but in reverse, we obtain the
     * formulas:
@@ -327,13 +327,13 @@ blorp_nir_retile_w_to_y(nir_builder *b, nir_ssa_def *pos)
     * X' = (X & ~0b101) << 1 | (Y & 0b10) << 2 | (Y & 0b1) << 1 | X & 0b1
     * Y' = (Y & ~0b11) >> 1 | (X & 0b100) >> 2
     */
-   nir_ssa_def *x_Y = nir_imm_int(b, 0);
+   nir_def *x_Y = nir_imm_int(b, 0);
    x_Y = nir_mask_shift_or(b, x_Y, x_W, 0xfffffffa, 1);
    x_Y = nir_mask_shift_or(b, x_Y, y_W, 0x2, 2);
    x_Y = nir_mask_shift_or(b, x_Y, y_W, 0x1, 1);
    x_Y = nir_mask_shift_or(b, x_Y, x_W, 0x1, 0);
 
-   nir_ssa_def *y_Y = nir_imm_int(b, 0);
+   nir_def *y_Y = nir_imm_int(b, 0);
    y_Y = nir_mask_shift_or(b, y_Y, y_W, 0xfffffffc, -1);
    y_Y = nir_mask_shift_or(b, y_Y, x_W, 0x4, -2);
 
@@ -350,8 +350,8 @@ blorp_nir_retile_w_to_y(nir_builder *b, nir_ssa_def *pos)
  *
  * (See brw_blorp_blit_program).
  */
-static inline nir_ssa_def *
-blorp_nir_encode_msaa(nir_builder *b, nir_ssa_def *pos,
+static inline nir_def *
+blorp_nir_encode_msaa(nir_builder *b, nir_def *pos,
                       unsigned num_samples, enum isl_msaa_layout layout)
 {
    assert(pos->num_components == 2 || pos->num_components == 3);
@@ -364,13 +364,13 @@ blorp_nir_encode_msaa(nir_builder *b, nir_ssa_def *pos,
       /* No translation needed */
       return pos;
    case ISL_MSAA_LAYOUT_INTERLEAVED: {
-      nir_ssa_def *x_in = nir_channel(b, pos, 0);
-      nir_ssa_def *y_in = nir_channel(b, pos, 1);
-      nir_ssa_def *s_in = pos->num_components == 2 ? nir_imm_int(b, 0) :
+      nir_def *x_in = nir_channel(b, pos, 0);
+      nir_def *y_in = nir_channel(b, pos, 1);
+      nir_def *s_in = pos->num_components == 2 ? nir_imm_int(b, 0) :
                                                      nir_channel(b, pos, 2);
 
-      nir_ssa_def *x_out = nir_imm_int(b, 0);
-      nir_ssa_def *y_out = nir_imm_int(b, 0);
+      nir_def *x_out = nir_imm_int(b, 0);
+      nir_def *y_out = nir_imm_int(b, 0);
       switch (num_samples) {
       case 2:
       case 4:
@@ -448,8 +448,8 @@ blorp_nir_encode_msaa(nir_builder *b, nir_ssa_def *pos,
  *
  * (See brw_blorp_blit_program).
  */
-static inline nir_ssa_def *
-blorp_nir_decode_msaa(nir_builder *b, nir_ssa_def *pos,
+static inline nir_def *
+blorp_nir_decode_msaa(nir_builder *b, nir_def *pos,
                       unsigned num_samples, enum isl_msaa_layout layout)
 {
    assert(pos->num_components == 2 || pos->num_components == 3);
@@ -465,12 +465,12 @@ blorp_nir_decode_msaa(nir_builder *b, nir_ssa_def *pos,
    case ISL_MSAA_LAYOUT_INTERLEAVED: {
       assert(pos->num_components == 2);
 
-      nir_ssa_def *x_in = nir_channel(b, pos, 0);
-      nir_ssa_def *y_in = nir_channel(b, pos, 1);
+      nir_def *x_in = nir_channel(b, pos, 0);
+      nir_def *y_in = nir_channel(b, pos, 1);
 
-      nir_ssa_def *x_out = nir_imm_int(b, 0);
-      nir_ssa_def *y_out = nir_imm_int(b, 0);
-      nir_ssa_def *s_out = nir_imm_int(b, 0);
+      nir_def *x_out = nir_imm_int(b, 0);
+      nir_def *y_out = nir_imm_int(b, 0);
+      nir_def *s_out = nir_imm_int(b, 0);
       switch (num_samples) {
       case 2:
       case 4:
@@ -556,9 +556,9 @@ static inline int count_trailing_one_bits(unsigned value)
 #endif
 }
 
-static nir_ssa_def *
+static nir_def *
 blorp_nir_combine_samples(nir_builder *b, struct brw_blorp_blit_vars *v,
-                          nir_ssa_def *pos, unsigned tex_samples,
+                          nir_def *pos, unsigned tex_samples,
                           enum isl_aux_usage tex_aux_usage,
                           nir_alu_type dst_type,
                           enum blorp_filter filter)
@@ -566,7 +566,7 @@ blorp_nir_combine_samples(nir_builder *b, struct brw_blorp_blit_vars *v,
    nir_variable *color =
       nir_local_variable_create(b->impl, glsl_vec4_type(), "color");
 
-   nir_ssa_def *mcs = NULL;
+   nir_def *mcs = NULL;
    if (isl_aux_usage_has_mcs(tex_aux_usage))
       mcs = blorp_blit_txf_ms_mcs(b, v, pos);
 
@@ -632,7 +632,7 @@ blorp_nir_combine_samples(nir_builder *b, struct brw_blorp_blit_vars *v,
     * For integer formats, we replace the add operations with average
     * operations and skip the final division.
     */
-   nir_ssa_def *texture_data[5];
+   nir_def *texture_data[5];
    texture_data[0] = NULL; /* Avoid maybe-uninitialized warning with GCC 10 */
    unsigned stack_depth = 0;
    for (unsigned i = 0; i < tex_samples; ++i) {
@@ -641,7 +641,7 @@ blorp_nir_combine_samples(nir_builder *b, struct brw_blorp_blit_vars *v,
       /* Push sample i onto the stack */
       assert(stack_depth < ARRAY_SIZE(texture_data));
 
-      nir_ssa_def *ms_pos = nir_vec3(b, nir_channel(b, pos, 0),
+      nir_def *ms_pos = nir_vec3(b, nir_channel(b, pos, 0),
                                         nir_channel(b, pos, 1),
                                         nir_imm_int(b, i));
       texture_data[stack_depth++] = blorp_nir_txf_ms(b, v, ms_pos, mcs, dst_type);
@@ -666,12 +666,12 @@ blorp_nir_combine_samples(nir_builder *b, struct brw_blorp_blit_vars *v,
           * clear color and we can skip the remaining fetches just like we do
           * when MCS == 0.
           */
-         nir_ssa_def *mcs_zero = nir_ieq_imm(b, nir_channel(b, mcs, 0), 0);
+         nir_def *mcs_zero = nir_ieq_imm(b, nir_channel(b, mcs, 0), 0);
          if (tex_samples == 16) {
             mcs_zero = nir_iand(b, mcs_zero,
                nir_ieq_imm(b, nir_channel(b, mcs, 1), 0));
          }
-         nir_ssa_def *mcs_clear =
+         nir_def *mcs_clear =
             blorp_nir_mcs_is_clear_color(b, mcs, tex_samples);
 
          nir_push_if(b, nir_ior(b, mcs_zero, mcs_clear));
@@ -710,15 +710,15 @@ blorp_nir_combine_samples(nir_builder *b, struct brw_blorp_blit_vars *v,
    return nir_load_var(b, color);
 }
 
-static nir_ssa_def *
-blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos,
+static nir_def *
+blorp_nir_manual_blend_bilinear(nir_builder *b, nir_def *pos,
                                 unsigned tex_samples,
                                 const struct brw_blorp_blit_prog_key *key,
                                 struct brw_blorp_blit_vars *v)
 {
-   nir_ssa_def *pos_xy = nir_trim_vector(b, pos, 2);
-   nir_ssa_def *rect_grid = nir_load_var(b, v->v_rect_grid);
-   nir_ssa_def *scale = nir_imm_vec2(b, key->x_scale, key->y_scale);
+   nir_def *pos_xy = nir_trim_vector(b, pos, 2);
+   nir_def *rect_grid = nir_load_var(b, v->v_rect_grid);
+   nir_def *scale = nir_imm_vec2(b, key->x_scale, key->y_scale);
 
    /* Translate coordinates to lay out the samples in a rectangular grid
     * roughly corresponding to sample locations.
@@ -737,25 +737,25 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos,
    /* Store the fractional parts to be used as bilinear interpolation
     * coefficients.
     */
-   nir_ssa_def *frac_xy = nir_ffract(b, pos_xy);
+   nir_def *frac_xy = nir_ffract(b, pos_xy);
    /* Round the float coordinates down to nearest integer */
    pos_xy = nir_fdiv(b, nir_ftrunc(b, pos_xy), scale);
 
-   nir_ssa_def *tex_data[4];
+   nir_def *tex_data[4];
    for (unsigned i = 0; i < 4; ++i) {
       float sample_off_x = (float)(i & 0x1) / key->x_scale;
       float sample_off_y = (float)((i >> 1) & 0x1) / key->y_scale;
-      nir_ssa_def *sample_off = nir_imm_vec2(b, sample_off_x, sample_off_y);
+      nir_def *sample_off = nir_imm_vec2(b, sample_off_x, sample_off_y);
 
-      nir_ssa_def *sample_coords = nir_fadd(b, pos_xy, sample_off);
-      nir_ssa_def *sample_coords_int = nir_f2i32(b, sample_coords);
+      nir_def *sample_coords = nir_fadd(b, pos_xy, sample_off);
+      nir_def *sample_coords_int = nir_f2i32(b, sample_coords);
 
       /* The MCS value we fetch has to match up with the pixel that we're
        * sampling from. Since we sample from different pixels in each
        * iteration of this "for" loop, the call to mcs_fetch() should be
        * here inside the loop after computing the pixel coordinates.
        */
-      nir_ssa_def *mcs = NULL;
+      nir_def *mcs = NULL;
       if (isl_aux_usage_has_mcs(key->tex_aux_usage))
          mcs = blorp_blit_txf_ms_mcs(b, v, sample_coords_int);
 
@@ -809,8 +809,8 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos,
        * This is equivalent to
        * S' = (0xe58b602cd31479af >> (S * 4)) & 0xf
        */
-      nir_ssa_def *frac = nir_ffract(b, sample_coords);
-      nir_ssa_def *sample =
+      nir_def *frac = nir_ffract(b, sample_coords);
+      nir_def *sample =
          nir_fdot2(b, frac, nir_imm_vec2(b, key->x_scale,
                                             key->x_scale * key->y_scale));
       sample = nir_f2i32(b, sample);
@@ -822,11 +822,11 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos,
                                            nir_ishl_imm(b, sample, 2)),
                                0xf);
       } else if (tex_samples == 16) {
-         nir_ssa_def *sample_low =
+         nir_def *sample_low =
             nir_iand_imm(b, nir_ishr(b, nir_imm_int(b, 0xd31479af),
                                      nir_ishl_imm(b, sample, 2)),
                          0xf);
-         nir_ssa_def *sample_high =
+         nir_def *sample_high =
             nir_iand_imm(b, nir_ishr(b, nir_imm_int(b, 0xe58b602c),
                                      nir_ishl_imm(b, nir_iadd_imm(b, sample, -8),
                                                   2)),
@@ -835,14 +835,14 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos,
          sample = nir_bcsel(b, nir_ilt_imm(b, sample, 8),
                             sample_low, sample_high);
       }
-      nir_ssa_def *pos_ms = nir_vec3(b, nir_channel(b, sample_coords_int, 0),
+      nir_def *pos_ms = nir_vec3(b, nir_channel(b, sample_coords_int, 0),
                                         nir_channel(b, sample_coords_int, 1),
                                         sample);
       tex_data[i] = blorp_nir_txf_ms(b, v, pos_ms, mcs, key->texture_data_type);
    }
 
-   nir_ssa_def *frac_x = nir_channel(b, frac_xy, 0);
-   nir_ssa_def *frac_y = nir_channel(b, frac_xy, 1);
+   nir_def *frac_x = nir_channel(b, frac_xy, 0);
+   nir_def *frac_y = nir_channel(b, frac_xy, 1);
    return nir_flrp(b, nir_flrp(b, tex_data[0], tex_data[1], frac_x),
                       nir_flrp(b, tex_data[2], tex_data[3], frac_x),
                       frac_y);
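The comment above gives the 16-sample remap as S' = (0xe58b602cd31479af >>
(S * 4)) & 0xf; the NIR code evaluates it in two 32-bit halves (0xd31479af
for samples 0-7, 0xe58b602c for 8-15) since the ALU ops here are 32-bit.
A host-side sketch checking the two forms agree:

#include <assert.h>
#include <stdint.h>

static uint32_t remap_sample_16x(uint32_t s)
{
   return s < 8 ? (0xd31479afu >> (s * 4)) & 0xf        /* samples 0..7  */
                : (0xe58b602cu >> ((s - 8) * 4)) & 0xf; /* samples 8..15 */
}

int main(void)
{
   for (uint32_t s = 0; s < 16; s++)
      assert(remap_sample_16x(s) ==
             ((0xe58b602cd31479afull >> (s * 4)) & 0xf));
   return 0;
}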
@@ -857,8 +857,8 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos,
  * to R16G16_UINT.  This function generates code to shuffle bits around to get
  * us from one to the other.
  */
-static nir_ssa_def *
-bit_cast_color(struct nir_builder *b, nir_ssa_def *color,
+static nir_def *
+bit_cast_color(struct nir_builder *b, nir_def *color,
                const struct brw_blorp_blit_prog_key *key)
 {
    if (key->src_format == key->dst_format)
@@ -878,7 +878,7 @@ bit_cast_color(struct nir_builder *b, nir_ssa_def *color,
       assert(dst_fmtl->channels.r.type == ISL_UINT ||
              dst_fmtl->channels.r.type == ISL_UNORM);
 
-      nir_ssa_def *packed = nir_imm_int(b, 0);
+      nir_def *packed = nir_imm_int(b, 0);
       for (unsigned c = 0; c < 4; c++) {
          if (src_fmtl->channels_array[c].bits == 0)
             continue;
@@ -886,14 +886,14 @@ bit_cast_color(struct nir_builder *b, nir_ssa_def *color,
          const unsigned chan_start_bit = src_fmtl->channels_array[c].start_bit;
          const unsigned chan_bits = src_fmtl->channels_array[c].bits;
 
-         nir_ssa_def *chan =  nir_channel(b, color, c);
+         nir_def *chan =  nir_channel(b, color, c);
          if (src_fmtl->channels_array[c].type == ISL_UNORM)
             chan = nir_format_float_to_unorm(b, chan, &chan_bits);
 
          packed = nir_ior(b, packed, nir_shift_imm(b, chan, chan_start_bit));
       }
 
-      nir_ssa_def *chans[4] = { };
+      nir_def *chans[4] = { };
       for (unsigned c = 0; c < 4; c++) {
          if (dst_fmtl->channels_array[c].bits == 0) {
             chans[c] = nir_imm_int(b, 0);
@@ -939,15 +939,15 @@ bit_cast_color(struct nir_builder *b, nir_ssa_def *color,
    }
 
    /* Blorp likes to assume that colors are vec4s */
-   nir_ssa_def *u = nir_ssa_undef(b, 1, 32);
-   nir_ssa_def *chans[4] = { u, u, u, u };
+   nir_def *u = nir_undef(b, 1, 32);
+   nir_def *chans[4] = { u, u, u, u };
    for (unsigned i = 0; i < color->num_components; i++)
       chans[i] = nir_channel(b, color, i);
    return nir_vec4(b, chans[0], chans[1], chans[2], chans[3]);
 }
 
-static nir_ssa_def *
-select_color_channel(struct nir_builder *b, nir_ssa_def *color,
+static nir_def *
+select_color_channel(struct nir_builder *b, nir_def *color,
                      nir_alu_type data_type,
                      enum isl_channel_select chan)
 {
@@ -969,8 +969,8 @@ select_color_channel(struct nir_builder *b, nir_ssa_def *color,
    }
 }
 
-static nir_ssa_def *
-swizzle_color(struct nir_builder *b, nir_ssa_def *color,
+static nir_def *
+swizzle_color(struct nir_builder *b, nir_def *color,
               struct isl_swizzle swizzle, nir_alu_type data_type)
 {
    return nir_vec4(b,
@@ -980,14 +980,14 @@ swizzle_color(struct nir_builder *b, nir_ssa_def *color,
                    select_color_channel(b, color, data_type, swizzle.a));
 }
 
-static nir_ssa_def *
-convert_color(struct nir_builder *b, nir_ssa_def *color,
+static nir_def *
+convert_color(struct nir_builder *b, nir_def *color,
               const struct brw_blorp_blit_prog_key *key)
 {
    /* All of our color conversions end up generating a single-channel color
     * value that we need to write out.
     */
-   nir_ssa_def *value;
+   nir_def *value;
 
    if (key->dst_format == ISL_FORMAT_R24_UNORM_X8_TYPELESS) {
       /* The destination image is bound as R32_UINT but the data needs to be
@@ -1008,12 +1008,12 @@ convert_color(struct nir_builder *b, nir_ssa_def *color,
       unreachable("Unsupported format conversion");
    }
 
-   nir_ssa_def *out_comps[4];
+   nir_def *out_comps[4];
    for (unsigned i = 0; i < 4; i++) {
       if (i < value->num_components)
          out_comps[i] = nir_channel(b, value, i);
       else
-         out_comps[i] = nir_ssa_undef(b, 1, 32);
+         out_comps[i] = nir_undef(b, 1, 32);
    }
    return nir_vec(b, out_comps, 4);
 }
@@ -1163,7 +1163,7 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp,
                            const struct brw_blorp_blit_prog_key *key)
 {
    const struct intel_device_info *devinfo = blorp->isl_dev->info;
-   nir_ssa_def *src_pos, *dst_pos, *color;
+   nir_def *src_pos, *dst_pos, *color;
 
    /* Sanity checks */
    if (key->dst_tiled_w && key->rt_samples > 1) {
@@ -1235,7 +1235,7 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp,
                                       key->dst_layout);
    }
 
-   nir_ssa_def *comp = NULL;
+   nir_def *comp = NULL;
    if (key->dst_rgb) {
       /* The destination image is bound as a red texture three times as wide
        * as the actual image.  Our shader is effectively running one color
@@ -1243,7 +1243,7 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp,
        * the destination position.
        */
       assert(dst_pos->num_components == 2);
-      nir_ssa_def *dst_x = nir_channel(&b, dst_pos, 0);
+      nir_def *dst_x = nir_channel(&b, dst_pos, 0);
       comp = nir_umod_imm(&b, dst_x, 3);
       dst_pos = nir_vec2(&b, nir_idiv(&b, dst_x, nir_imm_int(&b, 3)),
                              nir_channel(&b, dst_pos, 1));
@@ -1259,8 +1259,8 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp,
     */
    nir_if *bounds_if = NULL;
    if (key->use_kill) {
-      nir_ssa_def *bounds_rect = nir_load_var(&b, v.v_bounds_rect);
-      nir_ssa_def *in_bounds = blorp_check_in_bounds(&b, bounds_rect,
+      nir_def *bounds_rect = nir_load_var(&b, v.v_bounds_rect);
+      nir_def *in_bounds = blorp_check_in_bounds(&b, bounds_rect,
                                                      dst_pos);
       if (!compute)
          nir_discard_if(&b, nir_inot(&b, in_bounds));
@@ -1338,7 +1338,7 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp,
       if (key->src_samples == 1) {
          color = blorp_nir_txf(&b, &v, src_pos, key->texture_data_type);
       } else {
-         nir_ssa_def *mcs = NULL;
+         nir_def *mcs = NULL;
          if (isl_aux_usage_has_mcs(key->tex_aux_usage))
             mcs = blorp_blit_txf_ms_mcs(&b, &v, src_pos);
 
@@ -1448,19 +1448,19 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp,
        */
       assert(dst_pos->num_components == 2);
 
-      nir_ssa_def *color_component =
+      nir_def *color_component =
          nir_bcsel(&b, nir_ieq_imm(&b, comp, 0),
                        nir_channel(&b, color, 0),
                        nir_bcsel(&b, nir_ieq_imm(&b, comp, 1),
                                      nir_channel(&b, color, 1),
                                      nir_channel(&b, color, 2)));
 
-      nir_ssa_def *u = nir_ssa_undef(&b, 1, 32);
+      nir_def *u = nir_undef(&b, 1, 32);
       color = nir_vec4(&b, color_component, u, u, u);
    }
 
    if (compute) {
-      nir_ssa_def *store_pos = nir_load_global_invocation_id(&b, 32);
+      nir_def *store_pos = nir_load_global_invocation_id(&b, 32);
       nir_image_store(&b, nir_imm_int(&b, 0),
                       nir_pad_vector_imm_int(&b, store_pos, 0, 4),
                       nir_imm_int(&b, 0),
index fd6e387..1c85619 100644
@@ -76,11 +76,11 @@ blorp_params_get_clear_kernel_fs(struct blorp_batch *batch,
 
    nir_variable *v_color =
       BLORP_CREATE_NIR_INPUT(b.shader, clear_color, glsl_vec4_type());
-   nir_ssa_def *color = nir_load_var(&b, v_color);
+   nir_def *color = nir_load_var(&b, v_color);
 
    if (clear_rgb_as_red) {
-      nir_ssa_def *pos = nir_f2i32(&b, nir_load_frag_coord(&b));
-      nir_ssa_def *comp = nir_umod_imm(&b, nir_channel(&b, pos, 0), 3);
+      nir_def *pos = nir_f2i32(&b, nir_load_frag_coord(&b));
+      nir_def *comp = nir_umod_imm(&b, nir_channel(&b, pos, 0), 3);
       color = nir_pad_vec4(&b, nir_vector_extract(&b, color, comp));
    }
 
@@ -137,19 +137,19 @@ blorp_params_get_clear_kernel_cs(struct blorp_batch *batch,
    blorp_nir_init_shader(&b, mem_ctx, MESA_SHADER_COMPUTE, "BLORP-gpgpu-clear");
    blorp_set_cs_dims(b.shader, blorp_key.local_y);
 
-   nir_ssa_def *dst_pos = nir_load_global_invocation_id(&b, 32);
+   nir_def *dst_pos = nir_load_global_invocation_id(&b, 32);
 
    nir_variable *v_color =
       BLORP_CREATE_NIR_INPUT(b.shader, clear_color, glsl_vec4_type());
-   nir_ssa_def *color = nir_load_var(&b, v_color);
+   nir_def *color = nir_load_var(&b, v_color);
 
    nir_variable *v_bounds_rect =
       BLORP_CREATE_NIR_INPUT(b.shader, bounds_rect, glsl_vec4_type());
-   nir_ssa_def *bounds_rect = nir_load_var(&b, v_bounds_rect);
-   nir_ssa_def *in_bounds = blorp_check_in_bounds(&b, bounds_rect, dst_pos);
+   nir_def *bounds_rect = nir_load_var(&b, v_bounds_rect);
+   nir_def *in_bounds = blorp_check_in_bounds(&b, bounds_rect, dst_pos);
 
    if (clear_rgb_as_red) {
-      nir_ssa_def *comp = nir_umod_imm(&b, nir_channel(&b, dst_pos, 0), 3);
+      nir_def *comp = nir_umod_imm(&b, nir_channel(&b, dst_pos, 0), 3);
       color = nir_pad_vec4(&b, nir_vector_extract(&b, color, comp));
    }
 
@@ -248,9 +248,9 @@ blorp_params_get_layer_offset_vs(struct blorp_batch *batch,
    v_layer->data.location = VARYING_SLOT_LAYER;
 
    /* Compute the layer id */
-   nir_ssa_def *header = nir_load_var(&b, a_header);
-   nir_ssa_def *base_layer = nir_channel(&b, header, 0);
-   nir_ssa_def *instance = nir_channel(&b, header, 1);
+   nir_def *header = nir_load_var(&b, a_header);
+   nir_def *base_layer = nir_channel(&b, header, 0);
+   nir_def *instance = nir_channel(&b, header, 1);
    nir_store_var(&b, v_layer, nir_iadd(&b, instance, base_layer), 0x1);
 
    /* Then we copy the vertex from the next slot to VARYING_SLOT_POS */
@@ -1321,8 +1321,8 @@ blorp_ccs_resolve(struct blorp_batch *batch,
    }
 }
 
-static nir_ssa_def *
-blorp_nir_bit(nir_builder *b, nir_ssa_def *src, unsigned bit)
+static nir_def *
+blorp_nir_bit(nir_builder *b, nir_def *src, unsigned bit)
 {
    return nir_iand_imm(b, nir_ushr_imm(b, src, bit), 1);
 }
@@ -1368,16 +1368,16 @@ blorp_params_get_mcs_partial_resolve_kernel(struct blorp_batch *batch,
    frag_color->data.location = FRAG_RESULT_COLOR;
 
    /* Do an MCS fetch and check if it is equal to the magic clear value */
-   nir_ssa_def *mcs =
+   nir_def *mcs =
       blorp_nir_txf_ms_mcs(&b, nir_f2i32(&b, nir_load_frag_coord(&b)),
                                nir_load_layer_id(&b));
-   nir_ssa_def *is_clear =
+   nir_def *is_clear =
       blorp_nir_mcs_is_clear_color(&b, mcs, blorp_key.num_samples);
 
    /* If we aren't the clear value, discard. */
    nir_discard_if(&b, nir_inot(&b, is_clear));
 
-   nir_ssa_def *clear_color = nir_load_var(&b, v_color);
+   nir_def *clear_color = nir_load_var(&b, v_color);
    if (blorp_key.indirect_clear_color && blorp->isl_dev->info->ver <= 8) {
       /* Gfx7-8 clear colors are stored as single 0/1 bits */
       clear_color = nir_vec4(&b, blorp_nir_bit(&b, clear_color, 31),
index 545e9b5..d9c9b5a 100644
@@ -35,15 +35,15 @@ blorp_nir_init_shader(nir_builder *b,
       b->shader->info.fs.origin_upper_left = true;
 }
 
-static inline nir_ssa_def *
-blorp_nir_txf_ms_mcs(nir_builder *b, nir_ssa_def *xy_pos, nir_ssa_def *layer)
+static inline nir_def *
+blorp_nir_txf_ms_mcs(nir_builder *b, nir_def *xy_pos, nir_def *layer)
 {
    nir_tex_instr *tex = nir_tex_instr_create(b->shader, 1);
    tex->op = nir_texop_txf_ms_mcs_intel;
    tex->sampler_dim = GLSL_SAMPLER_DIM_MS;
    tex->dest_type = nir_type_int32;
 
-   nir_ssa_def *coord;
+   nir_def *coord;
    if (layer) {
       tex->is_array = true;
       tex->coord_components = 3;
@@ -67,9 +67,9 @@ blorp_nir_txf_ms_mcs(nir_builder *b, nir_ssa_def *xy_pos, nir_ssa_def *layer)
    return &tex->dest.ssa;
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 blorp_nir_mcs_is_clear_color(nir_builder *b,
-                             nir_ssa_def *mcs,
+                             nir_def *mcs,
                              uint32_t samples)
 {
    switch (samples) {
@@ -97,22 +97,22 @@ blorp_nir_mcs_is_clear_color(nir_builder *b,
    }
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 blorp_check_in_bounds(nir_builder *b,
-                      nir_ssa_def *bounds_rect,
-                      nir_ssa_def *pos)
+                      nir_def *bounds_rect,
+                      nir_def *pos)
 {
-   nir_ssa_def *x0 = nir_channel(b, bounds_rect, 0);
-   nir_ssa_def *x1 = nir_channel(b, bounds_rect, 1);
-   nir_ssa_def *y0 = nir_channel(b, bounds_rect, 2);
-   nir_ssa_def *y1 = nir_channel(b, bounds_rect, 3);
+   nir_def *x0 = nir_channel(b, bounds_rect, 0);
+   nir_def *x1 = nir_channel(b, bounds_rect, 1);
+   nir_def *y0 = nir_channel(b, bounds_rect, 2);
+   nir_def *y1 = nir_channel(b, bounds_rect, 3);
 
-   nir_ssa_def *c0 = nir_uge(b, nir_channel(b, pos, 0), x0);
-   nir_ssa_def *c1 = nir_ult(b, nir_channel(b, pos, 0), x1);
-   nir_ssa_def *c2 = nir_uge(b, nir_channel(b, pos, 1), y0);
-   nir_ssa_def *c3 = nir_ult(b, nir_channel(b, pos, 1), y1);
+   nir_def *c0 = nir_uge(b, nir_channel(b, pos, 0), x0);
+   nir_def *c1 = nir_ult(b, nir_channel(b, pos, 0), x1);
+   nir_def *c2 = nir_uge(b, nir_channel(b, pos, 1), y0);
+   nir_def *c3 = nir_ult(b, nir_channel(b, pos, 1), y1);
 
-   nir_ssa_def *in_bounds =
+   nir_def *in_bounds =
       nir_iand(b, nir_iand(b, c0, c1), nir_iand(b, c2, c3));
 
    return in_bounds;
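A scalar sketch of the test above: bounds_rect packs x0, x1, y0, y1 in
that channel order and the rectangle is half-open.  The compares are
unsigned, so a negative coordinate wraps and correctly fails the test:

#include <stdbool.h>
#include <stdint.h>

static bool check_in_bounds(const uint32_t bounds_rect[4],
                            uint32_t x, uint32_t y)
{
   return x >= bounds_rect[0] && x < bounds_rect[1] &&   /* x0 <= x < x1 */
          y >= bounds_rect[2] && y < bounds_rect[3];     /* y0 <= y < y1 */
}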
index 188b60c..3794741 100644
@@ -7354,7 +7354,7 @@ fs_visitor::run_mesh(bool allow_spilling)
 }
 
 static bool
-is_used_in_not_interp_frag_coord(nir_ssa_def *def)
+is_used_in_not_interp_frag_coord(nir_def *def)
 {
    nir_foreach_use_including_if(src, def) {
       if (src->is_if)
@@ -8027,7 +8027,7 @@ filter_simd(const nir_instr *instr, const void * /* options */)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_simd(nir_builder *b, nir_instr *instr, void *options)
 {
    uintptr_t simd_width = (uintptr_t)options;
index 0098c73..11f719b 100644
@@ -389,7 +389,7 @@ public:
    nir_component_mask_t get_nir_write_mask(const nir_alu_dest &dest);
    fs_reg get_resource_nir_src(const nir_src &src);
    fs_reg try_rebuild_resource(const brw::fs_builder &bld,
-                               nir_ssa_def *resource_def);
+                               nir_def *resource_def);
    fs_reg get_indirect_offset(nir_intrinsic_instr *instr);
    fs_reg get_tcs_single_patch_icp_handle(const brw::fs_builder &bld,
                                           nir_intrinsic_instr *instr);
index d71e62c..e2ae3ae 100644
@@ -980,7 +980,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
       nir_intrinsic_instr *store_reg =
          nir_store_reg_for_def(&instr->dest.dest.ssa);
       if (store_reg != NULL) {
-         nir_ssa_def *dest_reg = store_reg->src[1].ssa;
+         nir_def *dest_reg = store_reg->src[1].ssa;
          for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
             nir_intrinsic_instr *load_reg =
                nir_load_reg_for_def(instr->src[i].src.ssa);
@@ -3992,7 +3992,7 @@ brw_cond_mod_for_nir_reduction_op(nir_op op)
 
 struct rebuild_resource {
    unsigned idx;
-   std::vector<nir_ssa_def *> array;
+   std::vector<nir_def *> array;
 };
 
 static bool
@@ -4000,7 +4000,7 @@ add_rebuild_src(nir_src *src, void *state)
 {
    struct rebuild_resource *res = (struct rebuild_resource *) state;
 
-   for (nir_ssa_def *def : res->array) {
+   for (nir_def *def : res->array) {
       if (def == src->ssa)
          return true;
    }
@@ -4011,7 +4011,7 @@ add_rebuild_src(nir_src *src, void *state)
 }
 
 fs_reg
-fs_visitor::try_rebuild_resource(const brw::fs_builder &bld, nir_ssa_def *resource_def)
+fs_visitor::try_rebuild_resource(const brw::fs_builder &bld, nir_def *resource_def)
 {
   /* Create a builder at the location of the resource_intel intrinsic */
    fs_builder ubld1 = bld.exec_all().group(1, 0);
@@ -4025,7 +4025,7 @@ fs_visitor::try_rebuild_resource(const brw::fs_builder &bld, nir_ssa_def *resour
    resources.array.push_back(resource_def);
 
    if (resources.array.size() == 1) {
-      nir_ssa_def *def = resources.array[0];
+      nir_def *def = resources.array[0];
 
       if (def->parent_instr->type == nir_instr_type_load_const) {
          nir_load_const_instr *load_const =
@@ -4045,7 +4045,7 @@ fs_visitor::try_rebuild_resource(const brw::fs_builder &bld, nir_ssa_def *resour
    }
 
    for (unsigned i = 0; i < resources.array.size(); i++) {
-      nir_ssa_def *def = resources.array[i];
+      nir_def *def = resources.array[i];
 
       nir_instr *instr = def->parent_instr;
       switch (instr->type) {
@@ -6685,7 +6685,7 @@ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
    const unsigned dest_size = nir_tex_instr_dest_size(instr);
    if (devinfo->ver >= 9 &&
        instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
-      unsigned write_mask = nir_ssa_def_components_read(&instr->dest.ssa);
+      unsigned write_mask = nir_def_components_read(&instr->dest.ssa);
       assert(write_mask != 0); /* dead code should have been eliminated */
       if (instr->is_sparse) {
          inst->size_written = (util_last_bit(write_mask) - 1) *
index f6930b3..65444ea 100644
@@ -82,7 +82,7 @@ implement_atomic_builtin(nir_function *func, nir_atomic_op atomic_op,
    nir_intrinsic_set_atomic_op(atomic, atomic_op);
 
    for (unsigned i = 0; i < nir_intrinsic_infos[op].num_srcs; i++) {
-      nir_ssa_def *src = nir_load_param(&b, p++);
+      nir_def *src = nir_load_param(&b, p++);
       if (i == 0) {
          /* The first source is our deref */
          assert(nir_intrinsic_infos[op].src_components[i] == -1);
@@ -104,7 +104,7 @@ implement_sub_group_ballot_builtin(nir_function *func)
    nir_deref_instr *ret =
       nir_build_deref_cast(&b, nir_load_param(&b, 0),
                            nir_var_function_temp, glsl_uint_type(), 0);
-   nir_ssa_def *cond = nir_load_param(&b, 1);
+   nir_def *cond = nir_load_param(&b, 1);
 
    nir_intrinsic_instr *ballot =
       nir_intrinsic_instr_create(b.shader, nir_intrinsic_ballot);
@@ -187,17 +187,17 @@ lower_kernel_intrinsics(nir_shader *nir)
                               intrin->dest.ssa.bit_size);
             nir_builder_instr_insert(&b, &load->instr);
 
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, &load->dest.ssa);
+            nir_def_rewrite_uses(&intrin->dest.ssa, &load->dest.ssa);
             progress = true;
             break;
          }
 
          case nir_intrinsic_load_constant_base_ptr: {
             b.cursor = nir_instr_remove(&intrin->instr);
-            nir_ssa_def *const_data_base_addr = nir_pack_64_2x32_split(&b,
+            nir_def *const_data_base_addr = nir_pack_64_2x32_split(&b,
                nir_load_reloc_const_intel(&b, BRW_SHADER_RELOC_CONST_DATA_ADDR_LOW),
                nir_load_reloc_const_intel(&b, BRW_SHADER_RELOC_CONST_DATA_ADDR_HIGH));
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, const_data_base_addr);
+            nir_def_rewrite_uses(&intrin->dest.ssa, const_data_base_addr);
             progress = true;
             break;
          }
@@ -216,10 +216,10 @@ lower_kernel_intrinsics(nir_shader *nir)
             nir_builder_instr_insert(&b, &load->instr);
 
             /* We may need to do a bit-size cast here */
-            nir_ssa_def *num_work_groups =
+            nir_def *num_work_groups =
                nir_u2uN(&b, &load->dest.ssa, intrin->dest.ssa.bit_size);
 
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, num_work_groups);
+            nir_def_rewrite_uses(&intrin->dest.ssa, num_work_groups);
             progress = true;
             break;
          }
index 76808d3..de6a60c 100644
@@ -44,7 +44,7 @@ brw_nir_lower_load_uniforms_filter(const nir_instr *instr,
    return intrin->intrinsic == nir_intrinsic_load_uniform;
 }
 
-static nir_ssa_def *
+static nir_def *
 brw_nir_lower_load_uniforms_impl(nir_builder *b, nir_instr *instr,
                                  UNUSED void *data)
 {
@@ -107,12 +107,12 @@ brw_nir_lower_launch_mesh_workgroups_instr(nir_builder *b, nir_instr *instr, voi
 
    b->cursor = nir_before_instr(&intrin->instr);
 
-   nir_ssa_def *local_invocation_index = nir_load_local_invocation_index(b);
+   nir_def *local_invocation_index = nir_load_local_invocation_index(b);
 
    /* Make sure that the mesh workgroup size is taken from the first invocation
     * (nir_intrinsic_launch_mesh_workgroups requirement)
     */
-   nir_ssa_def *cmp = nir_ieq_imm(b, local_invocation_index, 0);
+   nir_def *cmp = nir_ieq_imm(b, local_invocation_index, 0);
    nir_if *if_stmt = nir_push_if(b, cmp);
    {
       /* TUE header contains 4 words:
@@ -122,11 +122,11 @@ brw_nir_lower_launch_mesh_workgroups_instr(nir_builder *b, nir_instr *instr, voi
        * - Words 1-3 used for "Dispatch Dimensions" feature, to allow mapping a
        *   3D dispatch into the 1D dispatch supported by HW.
        */
-      nir_ssa_def *x = nir_channel(b, intrin->src[0].ssa, 0);
-      nir_ssa_def *y = nir_channel(b, intrin->src[0].ssa, 1);
-      nir_ssa_def *z = nir_channel(b, intrin->src[0].ssa, 2);
-      nir_ssa_def *task_count = nir_imul(b, x, nir_imul(b, y, z));
-      nir_ssa_def *tue_header = nir_vec4(b, task_count, x, y, z);
+      nir_def *x = nir_channel(b, intrin->src[0].ssa, 0);
+      nir_def *y = nir_channel(b, intrin->src[0].ssa, 1);
+      nir_def *z = nir_channel(b, intrin->src[0].ssa, 2);
+      nir_def *task_count = nir_imul(b, x, nir_imul(b, y, z));
+      nir_def *tue_header = nir_vec4(b, task_count, x, y, z);
       nir_store_task_payload(b, tue_header, nir_imm_int(b, 0));
    }
    nir_pop_if(b, if_stmt);
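A hypothetical C view of the 4-dword TUE header built above (struct and
field names are illustrative; the layout follows the nir_vec4 and the
comment):

#include <stdint.h>

struct tue_header {
   uint32_t task_count; /* word 0: x * y * z                  */
   uint32_t x, y, z;    /* words 1-3: the 3D dispatch extents */
};

static struct tue_header make_tue_header(uint32_t x, uint32_t y, uint32_t z)
{
   return (struct tue_header){ .task_count = x * y * z,
                               .x = x, .y = y, .z = z };
}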
@@ -202,7 +202,7 @@ brw_nir_adjust_task_payload_offsets_instr(struct nir_builder *b,
        * TODO(mesh): Figure out how to handle 8-bit, 16-bit.
        */
 
-      nir_ssa_def *offset = nir_ishr_imm(b, offset_src->ssa, 2);
+      nir_def *offset = nir_ishr_imm(b, offset_src->ssa, 2);
       nir_instr_rewrite_src(&intrin->instr, offset_src, nir_src_for_ssa(offset));
 
       unsigned base = nir_intrinsic_base(intrin);
@@ -1105,8 +1105,8 @@ brw_nir_initialize_mue(nir_shader *nir,
    nir_function_impl *entrypoint = nir_shader_get_entrypoint(nir);
    b = nir_builder_at(nir_before_block(nir_start_block(entrypoint)));
 
-   nir_ssa_def *dw_off = nir_imm_int(&b, 0);
-   nir_ssa_def *zerovec = nir_imm_vec4(&b, 0, 0, 0, 0);
+   nir_def *dw_off = nir_imm_int(&b, 0);
+   nir_def *zerovec = nir_imm_vec4(&b, 0, 0, 0, 0);
 
    /* TODO(mesh): can we write in bigger batches, generating fewer SENDs? */
 
@@ -1124,13 +1124,13 @@ brw_nir_initialize_mue(nir_shader *nir,
     * Reserved, RTAIndex, ViewportIndex, CullPrimitiveMask.
     */
 
-   nir_ssa_def *local_invocation_index = nir_load_local_invocation_index(&b);
+   nir_def *local_invocation_index = nir_load_local_invocation_index(&b);
 
    /* Zero primitive headers distanced by workgroup_size, starting from
     * invocation index.
     */
    for (unsigned prim_in_inv = 0; prim_in_inv < prims_per_inv; ++prim_in_inv) {
-      nir_ssa_def *prim = nir_iadd_imm(&b, local_invocation_index,
+      nir_def *prim = nir_iadd_imm(&b, local_invocation_index,
                                            prim_in_inv * workgroup_size);
 
       nir_store_per_primitive_output(&b, zerovec, prim, dw_off,
@@ -1147,10 +1147,10 @@ brw_nir_initialize_mue(nir_shader *nir,
       /* Zero "remaining" primitive headers starting from the last one covered
        * by the loop above + workgroup_size.
        */
-      nir_ssa_def *cmp = nir_ilt_imm(&b, local_invocation_index, remaining);
+      nir_def *cmp = nir_ilt_imm(&b, local_invocation_index, remaining);
       nir_if *if_stmt = nir_push_if(&b, cmp);
       {
-         nir_ssa_def *prim = nir_iadd_imm(&b, local_invocation_index,
+         nir_def *prim = nir_iadd_imm(&b, local_invocation_index,
                                                prims_per_inv * workgroup_size);
 
          nir_store_per_primitive_output(&b, zerovec, prim, dw_off,
@@ -1186,7 +1186,7 @@ brw_nir_adjust_offset(nir_builder *b, nir_intrinsic_instr *intrin, uint32_t pitc
    nir_src *offset_src = nir_get_io_offset_src(intrin);
 
    b->cursor = nir_before_instr(&intrin->instr);
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_iadd(b,
                offset_src->ssa,
                nir_imul_imm(b, index_src->ssa, pitch));
@@ -1370,10 +1370,10 @@ brw_pack_primitive_indices_instr(nir_builder *b, nir_instr *instr, void *data)
          nir_build_deref_array(b, new_var_deref, array_deref->arr.index.ssa);
 
    nir_src *data_src = &intrin->src[1];
-   nir_ssa_def *data_def =
+   nir_def *data_def =
          nir_ssa_for_src(b, *data_src, vertices_per_primitive);
 
-   nir_ssa_def *new_data =
+   nir_def *new_data =
          nir_ior(b, nir_ishl_imm(b, nir_channel(b, data_def, 0), 0),
                     nir_ishl_imm(b, nir_channel(b, data_def, 1), 8));
 
index 4f90010..8d3dd09 100644
@@ -38,7 +38,7 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
    bool out_of_bounds = false;
    bool write = !nir_intrinsic_infos[intr->intrinsic].has_dest;
    unsigned mask = write ? nir_intrinsic_write_mask(intr) : 0;
-   nir_ssa_def *src = NULL, *dest = NULL;
+   nir_def *src = NULL, *dest = NULL;
 
    if (write) {
       assert(intr->num_components == intr->src[0].ssa->num_components);
@@ -60,9 +60,9 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
 
             intr->num_components = 4;
 
-            nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
-            nir_ssa_def *x = nir_channel(b, intr->src[0].ssa, 0);
-            nir_ssa_def *y = nir_channel(b, intr->src[0].ssa, 1);
+            nir_def *undef = nir_undef(b, 1, 32);
+            nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
+            nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
             src = nir_vec4(b, undef, undef, y, x);
             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2;
          } else if (intr->dest.ssa.num_components > 1) {
@@ -128,9 +128,9 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
          if (write) {
             assert(intr->src[0].ssa->num_components == 4);
 
-            nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
-            nir_ssa_def *x = nir_channel(b, intr->src[0].ssa, 0);
-            nir_ssa_def *y = nir_channel(b, intr->src[0].ssa, 1);
+            nir_def *undef = nir_undef(b, 1, 32);
+            nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
+            nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
             src = nir_vec4(b, undef, undef, x, y);
             mask = !!(mask & WRITEMASK_X) << 2 | !!(mask & WRITEMASK_Y) << 3;
          } else {
@@ -147,7 +147,7 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
 
    if (out_of_bounds) {
       if (!write)
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_ssa_undef(b, 1, 32));
+         nir_def_rewrite_uses(&intr->dest.ssa, nir_undef(b, 1, 32));
       nir_instr_remove(&intr->instr);
    } else if (write) {
       nir_intrinsic_set_write_mask(intr, mask);
@@ -157,7 +157,7 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                                nir_src_for_ssa(src));
       }
    } else if (dest) {
-      nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, dest,
+      nir_def_rewrite_uses_after(&intr->dest.ssa, dest,
                                      dest->parent_instr);
    }
 
@@ -214,7 +214,7 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                b->cursor = nir_before_instr(&intrin->instr);
 
                /* Multiply by the number of per-vertex slots. */
-               nir_ssa_def *vertex_offset =
+               nir_def *vertex_offset =
                   nir_imul(b,
                            nir_ssa_for_src(b, *vertex, 1),
                            nir_imm_int(b,
@@ -222,7 +222,7 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
 
                /* Add it to the existing offset */
                nir_src *offset = nir_get_io_offset_src(intrin);
-               nir_ssa_def *total_offset =
+               nir_def *total_offset =
                   nir_iadd(b, vertex_offset,
                            nir_ssa_for_src(b, *offset, 1));
 
@@ -331,7 +331,7 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
                nir_ssa_dest_init(&load->instr, &load->dest, 1, 32);
                nir_builder_instr_insert(&b, &load->instr);
 
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+               nir_def_rewrite_uses(&intrin->dest.ssa,
                                         &load->dest.ssa);
                nir_instr_remove(&intrin->instr);
                break;
@@ -453,10 +453,10 @@ lower_barycentric_per_sample(nir_builder *b,
       return false;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *centroid =
+   nir_def *centroid =
       nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
                            nir_intrinsic_interp_mode(intrin));
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, centroid);
+   nir_def_rewrite_uses(&intrin->dest.ssa, centroid);
    nir_instr_remove(instr);
    return true;
 }
@@ -490,7 +490,7 @@ lower_barycentric_at_offset(nir_builder *b, nir_instr *instr, void *data)
    b->cursor = nir_before_instr(instr);
 
    assert(intrin->src[0].ssa);
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_imin(b, nir_imm_int(b, 7),
                nir_f2i32(b, nir_fmul_imm(b, intrin->src[0].ssa, 16)));
 
@@ -1075,9 +1075,9 @@ brw_nir_zero_inputs_instr(struct nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *zero = nir_imm_zero(b, 1, 32);
+   nir_def *zero = nir_imm_zero(b, 1, 32);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, zero);
+   nir_def_rewrite_uses(&intrin->dest.ssa, zero);
 
    nir_instr_remove(instr);
 
@@ -2047,16 +2047,16 @@ brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compile
    return nir;
 }
 
-nir_ssa_def *
+nir_def *
 brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
-      nir_ssa_def *base_addr, unsigned off)
+      nir_def *base_addr, unsigned off)
 {
    assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
 
    unsigned bit_size = load_uniform->dest.ssa.bit_size;
    assert(bit_size >= 8 && bit_size % 8 == 0);
    unsigned byte_size = bit_size / 8;
-   nir_ssa_def *sysval;
+   nir_def *sysval;
 
    if (nir_src_is_const(load_uniform->src[0])) {
       uint64_t offset = off +
@@ -2070,9 +2070,9 @@ brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
       uint64_t aligned_offset = offset - suboffset;
 
       /* Load two just in case we go over a 64B boundary */
-      nir_ssa_def *data[2];
+      nir_def *data[2];
       for (unsigned i = 0; i < 2; i++) {
-         nir_ssa_def *addr = nir_iadd_imm(b, base_addr, aligned_offset + i * 64);
+         nir_def *addr = nir_iadd_imm(b, base_addr, aligned_offset + i * 64);
          data[i] = nir_load_global_const_block_intel(b, 16, addr,
                                                      nir_imm_true(b));
       }
@@ -2080,10 +2080,10 @@ brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
       sysval = nir_extract_bits(b, data, 2, suboffset * 8,
                                 load_uniform->num_components, bit_size);
    } else {
-      nir_ssa_def *offset32 =
+      nir_def *offset32 =
          nir_iadd_imm(b, load_uniform->src[0].ssa,
                          off + nir_intrinsic_base(load_uniform));
-      nir_ssa_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset32));
+      nir_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset32));
       sysval = nir_load_global_constant(b, addr, byte_size,
                                         load_uniform->num_components, bit_size);
    }
index 1f85b69..8d0b786 100644
@@ -280,9 +280,9 @@ bool brw_nir_pulls_at_sample(nir_shader *shader);
 #define BRW_NIR_FRAG_OUTPUT_LOCATION_MASK INTEL_MASK(31, 1)
 
 bool brw_nir_move_interpolation_to_top(nir_shader *nir);
-nir_ssa_def *brw_nir_load_global_const(nir_builder *b,
+nir_def *brw_nir_load_global_const(nir_builder *b,
                                        nir_intrinsic_instr *load_uniform,
-                                       nir_ssa_def *base_addr,
+                                       nir_def *base_addr,
                                        unsigned off);
 
 const struct glsl_type *brw_nir_get_var_type(const struct nir_shader *nir,
index 54eed63..c150d6e 100644
@@ -48,15 +48,15 @@ apply_attr_wa_instr(nir_builder *b, nir_instr *instr, void *cb_data)
 
    b->cursor = nir_after_instr(instr);
 
-   nir_ssa_def *val = &intrin->dest.ssa;
+   nir_def *val = &intrin->dest.ssa;
 
    /* Do GL_FIXED rescaling for GLES2.0.  Our GL_FIXED attributes
     * come in as floating point conversions of the integer values.
     */
    if (wa_flags & BRW_ATTRIB_WA_COMPONENT_MASK) {
-      nir_ssa_def *scaled =
+      nir_def *scaled =
          nir_fmul_imm(b, val, 1.0f / 65536.0f);
-      nir_ssa_def *comps[4];
+      nir_def *comps[4];
       for (int i = 0; i < val->num_components; i++) {
          bool rescale = i < (wa_flags & BRW_ATTRIB_WA_COMPONENT_MASK);
          comps[i] = nir_channel(b, rescale ? scaled : val, i);
@@ -67,7 +67,7 @@ apply_attr_wa_instr(nir_builder *b, nir_instr *instr, void *cb_data)
    /* Do sign recovery for 2101010 formats if required. */
    if (wa_flags & BRW_ATTRIB_WA_SIGN) {
       /* sign recovery shift: <22, 22, 22, 30> */
-      nir_ssa_def *shift = nir_imm_ivec4(b, 22, 22, 22, 30);
+      nir_def *shift = nir_imm_ivec4(b, 22, 22, 22, 30);
       val = nir_ishr(b, nir_ishl(b, val, shift), shift);
    }
 
@@ -90,7 +90,7 @@ apply_attr_wa_instr(nir_builder *b, nir_instr *instr, void *cb_data)
           * promote to the new higher version, and this is what Haswell+
           * hardware does anyway, we just always use this formula.
           */
-         nir_ssa_def *es3_normalize_factor =
+         nir_def *es3_normalize_factor =
             nir_imm_vec4(b, 1.0f / ((1 << 9) - 1), 1.0f / ((1 << 9) - 1),
                             1.0f / ((1 << 9) - 1), 1.0f / ((1 << 1) - 1));
          val = nir_fmax(b,
@@ -102,7 +102,7 @@ apply_attr_wa_instr(nir_builder *b, nir_instr *instr, void *cb_data)
           * 2.1 unsigned normalization
           * f = c/(2^n-1)
           */
-         nir_ssa_def *normalize_factor =
+         nir_def *normalize_factor =
             nir_imm_vec4(b, 1.0f / ((1 << 10) - 1), 1.0f / ((1 << 10) - 1),
                             1.0f / ((1 << 10) - 1), 1.0f / ((1 << 2)  - 1));
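The unsigned factors above encode f = c / (2^n - 1) from the comment, with
n = 10 for RGB and n = 2 for alpha in the 2101010 layout; the ES3 signed
variant divides by 2^(n-1) - 1 and adds the fmax clamp to -1.0.  As a
scalar sketch:

static float unorm_to_float(unsigned c, unsigned n)
{
   return (float)c / (float)((1u << n) - 1);
}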
 
@@ -115,7 +115,7 @@ apply_attr_wa_instr(nir_builder *b, nir_instr *instr, void *cb_data)
                                             : nir_u2f32(b, val);
    }
 
-   nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa, val,
+   nir_def_rewrite_uses_after(&intrin->dest.ssa, val,
                                   val->parent_instr);
 
    return true;
index a4bac23..798f336 100644
@@ -41,7 +41,7 @@ brw_nir_clamp_image_1d_2d_array_sizes_instr(nir_builder *b,
                                             nir_instr *instr,
                                             UNUSED void *cb_data)
 {
-   nir_ssa_def *image_size = NULL;
+   nir_def *image_size = NULL;
 
    switch (instr->type) {
    case nir_instr_type_intrinsic: {
@@ -95,17 +95,17 @@ brw_nir_clamp_image_1d_2d_array_sizes_instr(nir_builder *b,
 
    b->cursor = nir_after_instr(instr);
 
-   nir_ssa_def *components[4];
+   nir_def *components[4];
    /* OR all the sizes for all components but the last. */
-   nir_ssa_def *or_components = nir_imm_int(b, 0);
+   nir_def *or_components = nir_imm_int(b, 0);
    for (int i = 0; i < image_size->num_components; i++) {
       if (i == (image_size->num_components - 1)) {
-         nir_ssa_def *null_or_size[2] = {
+         nir_def *null_or_size[2] = {
             nir_imm_int(b, 0),
             nir_imax(b, nir_channel(b, image_size, i),
                          nir_imm_int(b, 1)),
          };
-         nir_ssa_def *vec2_null_or_size = nir_vec(b, null_or_size, 2);
+         nir_def *vec2_null_or_size = nir_vec(b, null_or_size, 2);
 
          /* Using the ORed sizes select either the element 0 or 1
           * from this vec2. For NULL textures which have a size of
@@ -121,12 +121,12 @@ brw_nir_clamp_image_1d_2d_array_sizes_instr(nir_builder *b,
          or_components = nir_ior(b, components[i], or_components);
       }
    }
-   nir_ssa_def *image_size_replacement =
+   nir_def *image_size_replacement =
       nir_vec(b, components, image_size->num_components);
 
    b->cursor = nir_after_instr(instr);
 
-   nir_ssa_def_rewrite_uses_after(image_size,
+   nir_def_rewrite_uses_after(image_size,
                                   image_size_replacement,
                                   image_size_replacement->parent_instr);
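The selection the truncated comment describes reduces to a simple scalar
rule; a sketch under the assumption (per the comment) that NULL surfaces
report 0 in every component, so the other dimensions OR to 0:

static int clamp_array_size(const int *size, int num_components)
{
   int or_components = 0;
   for (int i = 0; i < num_components - 1; i++)
      or_components |= size[i];

   int last = size[num_components - 1];
   /* Keep 0 for NULL surfaces, otherwise clamp the array length to >= 1. */
   return or_components == 0 ? 0 : (last > 1 ? last : 1);
}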
 
index afabe3f..fc0d98a 100644
@@ -102,7 +102,7 @@ lower_patch_vertices_instr(nir_builder *b, nir_instr *instr, void *cb_data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_imm_int(b, *input_vertices));
+   nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_int(b, *input_vertices));
 
    return true;
 }
index 41ef112..f900a5b 100644
  *  0.9375 1111111111111110
  *  1.0000 1111111111111111
  */
-static nir_ssa_def *
-build_dither_mask(nir_builder *b, nir_ssa_def *color)
+static nir_def *
+build_dither_mask(nir_builder *b, nir_def *color)
 {
    assert(color->num_components == 4);
-   nir_ssa_def *alpha = nir_channel(b, color, 3);
+   nir_def *alpha = nir_channel(b, color, 3);
 
-   nir_ssa_def *m =
+   nir_def *m =
       nir_f2i32(b, nir_fmul_imm(b, nir_fsat(b, alpha), 16.0));
 
-   nir_ssa_def *part_a =
+   nir_def *part_a =
       nir_iand_imm(b, nir_ushr(b, nir_imm_int(b, 0xfea80),
                                   nir_iand_imm(b, m, ~3)),
                       0xf);
 
-   nir_ssa_def *part_b = nir_iand_imm(b, m, 2);
-   nir_ssa_def *part_c = nir_iand_imm(b, m, 1);
+   nir_def *part_b = nir_iand_imm(b, m, 2);
+   nir_def *part_c = nir_iand_imm(b, m, 1);
 
    return nir_ior(b, nir_imul_imm(b, part_a, 0x1111),
                      nir_ior(b, nir_imul_imm(b, part_b, 0x0808),
@@ -138,7 +138,7 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader,
 
    /* It's possible that shader_info may be out-of-date and the writes to
     * either gl_SampleMask or the first color value may have been removed.
-    * This can happen if, for instance a nir_ssa_undef is written to the
+    * This can happen if, for instance, a nir_undef is written to the
     * color value.  In that case, just bail and don't do anything rather
     * than crashing.
     */
@@ -149,11 +149,11 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader,
     * assuming an alpha of 1.0 and letting the sample mask pass through
     * unaltered seems like the kindest thing to do to apps.
     */
-   nir_ssa_def *color0 = color0_write->src[0].ssa;
+   nir_def *color0 = color0_write->src[0].ssa;
    if (color0->num_components < 4)
       goto skip;
 
-   nir_ssa_def *sample_mask = sample_mask_write->src[0].ssa;
+   nir_def *sample_mask = sample_mask_write->src[0].ssa;
 
    if (sample_mask_write_first) {
       /* If the sample mask write comes before the write to color0, we need
@@ -168,13 +168,13 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader,
    nir_builder b = nir_builder_at(nir_before_instr(&sample_mask_write->instr));
 
    /* Combine dither_mask and the gl_SampleMask value */
-   nir_ssa_def *dither_mask = build_dither_mask(&b, color0);
+   nir_def *dither_mask = build_dither_mask(&b, color0);
    dither_mask = nir_iand(&b, sample_mask, dither_mask);
 
    if (key->alpha_to_coverage == BRW_SOMETIMES) {
-      nir_ssa_def *push_flags =
+      nir_def *push_flags =
          nir_load_uniform(&b, 1, 32, nir_imm_int(&b, prog_data->msaa_flags_param * 4));
-      nir_ssa_def *alpha_to_coverage =
+      nir_def *alpha_to_coverage =
          nir_i2b(&b,
                  nir_iand_imm(&b, push_flags,
                               BRW_WM_MSAA_FLAG_ALPHA_TO_COVERAGE));
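A host-side sketch of build_dither_mask.  The final multiplier is cut off
by the hunk above; 0x0100 is an assumption, but it is the value that
reproduces the two table rows at the top of this file:

#include <assert.h>
#include <stdint.h>

static uint32_t dither_mask(float alpha)
{
   if (alpha < 0.0f) alpha = 0.0f;   /* fsat */
   if (alpha > 1.0f) alpha = 1.0f;
   uint32_t m = (uint32_t)(alpha * 16.0f);

   uint32_t part_a = (0xfea80u >> (m & ~3u)) & 0xf;
   uint32_t part_b = m & 2;
   uint32_t part_c = m & 1;

   return part_a * 0x1111 | part_b * 0x0808 | part_c * 0x0100;
}

int main(void)
{
   assert(dither_mask(0.9375f) == 0xfffe); /* 1111111111111110 */
   assert(dither_mask(1.0f)    == 0xffff); /* 1111111111111111 */
   return 0;
}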
index 3650043..9133410 100644
@@ -43,10 +43,10 @@ split_conversion(nir_builder *b, nir_alu_instr *alu, nir_alu_type src_type,
                  nir_rounding_mode rnd)
 {
    b->cursor = nir_before_instr(&alu->instr);
-   nir_ssa_def *src = nir_ssa_for_alu_src(b, alu, 0);
-   nir_ssa_def *tmp = nir_type_convert(b, src, src_type, tmp_type, nir_rounding_mode_undef);
-   nir_ssa_def *res = nir_type_convert(b, tmp, tmp_type, dst_type, rnd);
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, res);
+   nir_def *src = nir_ssa_for_alu_src(b, alu, 0);
+   nir_def *tmp = nir_type_convert(b, src, src_type, tmp_type, nir_rounding_mode_undef);
+   nir_def *res = nir_type_convert(b, tmp, tmp_type, dst_type, rnd);
+   nir_def_rewrite_uses(&alu->dest.dest.ssa, res);
    nir_instr_remove(&alu->instr);
 }
 
index 8c5603e..1eb447e 100644
@@ -34,27 +34,27 @@ struct lower_intrinsics_state {
 static void
 compute_local_index_id(nir_builder *b,
                        nir_shader *nir,
-                       nir_ssa_def **local_index,
-                       nir_ssa_def **local_id)
+                       nir_def **local_index,
+                       nir_def **local_id)
 {
-   nir_ssa_def *subgroup_id = nir_load_subgroup_id(b);
+   nir_def *subgroup_id = nir_load_subgroup_id(b);
 
-   nir_ssa_def *thread_local_id =
+   nir_def *thread_local_id =
       nir_imul(b, subgroup_id, nir_load_simd_width_intel(b));
-   nir_ssa_def *channel = nir_load_subgroup_invocation(b);
-   nir_ssa_def *linear = nir_iadd(b, channel, thread_local_id);
+   nir_def *channel = nir_load_subgroup_invocation(b);
+   nir_def *linear = nir_iadd(b, channel, thread_local_id);
 
-   nir_ssa_def *size_x;
-   nir_ssa_def *size_y;
+   nir_def *size_x;
+   nir_def *size_y;
    if (nir->info.workgroup_size_variable) {
-      nir_ssa_def *size_xyz = nir_load_workgroup_size(b);
+      nir_def *size_xyz = nir_load_workgroup_size(b);
       size_x = nir_channel(b, size_xyz, 0);
       size_y = nir_channel(b, size_xyz, 1);
    } else {
       size_x = nir_imm_int(b, nir->info.workgroup_size[0]);
       size_y = nir_imm_int(b, nir->info.workgroup_size[1]);
    }
-   nir_ssa_def *size_xy = nir_imul(b, size_x, size_y);
+   nir_def *size_xy = nir_imul(b, size_x, size_y);
 
    /* The local invocation index and ID must respect the following
     *
@@ -73,7 +73,7 @@ compute_local_index_id(nir_builder *b,
     * large so it can safely be omitted.
     */
 
-   nir_ssa_def *id_x, *id_y, *id_z;
+   nir_def *id_x, *id_y, *id_z;
    switch (nir->info.cs.derivative_group) {
    case DERIVATIVE_GROUP_NONE:
       if (nir->info.num_images == 0 &&
@@ -96,7 +96,7 @@ compute_local_index_id(nir_builder *b,
           * (1,2) (1,3) (2,0) ... (size_x-1,3) (0,4) (0,5) (0,6) (0,7) (1,4) ...
           */
          const unsigned height = 4;
-         nir_ssa_def *block = nir_udiv_imm(b, linear, height);
+         nir_def *block = nir_udiv_imm(b, linear, height);
          id_x = nir_umod(b, block, size_x);
          id_y = nir_umod(b,
                          nir_iadd(b,
@@ -139,19 +139,19 @@ compute_local_index_id(nir_builder *b,
       * invocation index.  Skipping Z simplifies index calculation.
        */
 
-      nir_ssa_def *one = nir_imm_int(b, 1);
-      nir_ssa_def *double_size_x = nir_ishl(b, size_x, one);
+      nir_def *one = nir_imm_int(b, 1);
+      nir_def *double_size_x = nir_ishl(b, size_x, one);
 
       /* ID within a pair of rows, where each group of 4 is 2x2 quad. */
-      nir_ssa_def *row_pair_id = nir_umod(b, linear, double_size_x);
-      nir_ssa_def *y_row_pairs = nir_udiv(b, linear, double_size_x);
+      nir_def *row_pair_id = nir_umod(b, linear, double_size_x);
+      nir_def *y_row_pairs = nir_udiv(b, linear, double_size_x);
 
-      nir_ssa_def *x =
+      nir_def *x =
          nir_ior(b,
                  nir_iand(b, row_pair_id, one),
                  nir_iand(b, nir_ishr(b, row_pair_id, one),
                           nir_imm_int(b, 0xfffffffe)));
-      nir_ssa_def *y =
+      nir_def *y =
          nir_ior(b,
                  nir_ishl(b, y_row_pairs, one),
                  nir_iand(b, nir_ishr(b, row_pair_id, one), one));
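A scalar model of the quad swizzle above, mapping a linear index to x/y so
that consecutive groups of four invocations form 2x2 quads:

#include <assert.h>
#include <stdint.h>

static void quad_xy(uint32_t linear, uint32_t size_x,
                    uint32_t *x, uint32_t *y)
{
   uint32_t double_size_x = size_x << 1;
   uint32_t row_pair_id = linear % double_size_x;
   uint32_t y_row_pairs = linear / double_size_x;

   *x = (row_pair_id & 1) | ((row_pair_id >> 1) & 0xfffffffe);
   *y = (y_row_pairs << 1) | ((row_pair_id >> 1) & 1);
}

int main(void)
{
   /* Invocations 0..3 form the quad (0,0) (1,0) (0,1) (1,1); invocation 4
    * starts the next quad at (2,0).
    */
   uint32_t x, y;
   quad_xy(0, 8, &x, &y); assert(x == 0 && y == 0);
   quad_xy(1, 8, &x, &y); assert(x == 1 && y == 0);
   quad_xy(2, 8, &x, &y); assert(x == 0 && y == 1);
   quad_xy(3, 8, &x, &y); assert(x == 1 && y == 1);
   quad_xy(4, 8, &x, &y); assert(x == 2 && y == 0);
   return 0;
}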
@@ -176,8 +176,8 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
    nir_shader *nir = state->nir;
 
    /* Reuse calculated values inside the block. */
-   nir_ssa_def *local_index = NULL;
-   nir_ssa_def *local_id = NULL;
+   nir_def *local_index = NULL;
+   nir_def *local_id = NULL;
 
    nir_foreach_instr_safe(instr, block) {
       if (instr->type != nir_instr_type_intrinsic)
@@ -187,7 +187,7 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
 
       b->cursor = nir_after_instr(&intrinsic->instr);
 
-      nir_ssa_def *sysval;
+      nir_def *sysval;
       switch (intrinsic->intrinsic) {
       case nir_intrinsic_load_workgroup_size:
       case nir_intrinsic_load_workgroup_id:
@@ -196,7 +196,7 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
          if (intrinsic->dest.ssa.bit_size == 64) {
             intrinsic->dest.ssa.bit_size = 32;
             sysval = nir_u2u64(b, &intrinsic->dest.ssa);
-            nir_ssa_def_rewrite_uses_after(&intrinsic->dest.ssa,
+            nir_def_rewrite_uses_after(&intrinsic->dest.ssa,
                                            sysval,
                                            sysval->parent_instr);
          }
@@ -207,7 +207,7 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
          if (!local_index && !nir->info.workgroup_size_variable) {
             const uint16_t *ws = nir->info.workgroup_size;
             if (ws[0] * ws[1] * ws[2] == 1) {
-               nir_ssa_def *zero = nir_imm_int(b, 0);
+               nir_def *zero = nir_imm_int(b, 0);
                local_index = zero;
                local_id = nir_replicate(b, zero, 3);
             }
@@ -237,12 +237,12 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
       }
 
       case nir_intrinsic_load_num_subgroups: {
-         nir_ssa_def *size;
+         nir_def *size;
          if (state->nir->info.workgroup_size_variable) {
-            nir_ssa_def *size_xyz = nir_load_workgroup_size(b);
-            nir_ssa_def *size_x = nir_channel(b, size_xyz, 0);
-            nir_ssa_def *size_y = nir_channel(b, size_xyz, 1);
-            nir_ssa_def *size_z = nir_channel(b, size_xyz, 2);
+            nir_def *size_xyz = nir_load_workgroup_size(b);
+            nir_def *size_x = nir_channel(b, size_xyz, 0);
+            nir_def *size_y = nir_channel(b, size_xyz, 1);
+            nir_def *size_z = nir_channel(b, size_xyz, 2);
             size = nir_imul(b, nir_imul(b, size_x, size_y), size_z);
          } else {
             size = nir_imm_int(b, nir->info.workgroup_size[0] *
@@ -251,7 +251,7 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
          }
 
          /* Calculate the equivalent of DIV_ROUND_UP. */
-         nir_ssa_def *simd_width = nir_load_simd_width_intel(b);
+         nir_def *simd_width = nir_load_simd_width_intel(b);
          sysval =
             nir_udiv(b, nir_iadd_imm(b, nir_iadd(b, size, simd_width), -1),
                         simd_width);
@@ -265,7 +265,7 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
       if (intrinsic->dest.ssa.bit_size == 64)
          sysval = nir_u2u64(b, sysval);
 
-      nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, sysval);
+      nir_def_rewrite_uses(&intrinsic->dest.ssa, sysval);
       nir_instr_remove(&intrinsic->instr);
 
       state->progress = true;
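The num_subgroups computation above is the usual round-up division,
(n + d - 1) / d, with n the workgroup size and d the SIMD width; e.g. 65
invocations at SIMD16 give (65 + 15) / 16 = 5 subgroups:

static unsigned div_round_up(unsigned n, unsigned d)
{
   return (n + d - 1) / d;
}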
index f636cfe..4bbcf23 100644
@@ -58,9 +58,9 @@ lower_any_hit_for_intersection(nir_shader *any_hit)
    nir_builder build = nir_builder_at(nir_before_cf_list(&impl->body));
    nir_builder *b = &build;
 
-   nir_ssa_def *commit_ptr = nir_load_param(b, 0);
-   nir_ssa_def *hit_t = nir_load_param(b, 1);
-   nir_ssa_def *hit_kind = nir_load_param(b, 2);
+   nir_def *commit_ptr = nir_load_param(b, 0);
+   nir_def *hit_t = nir_load_param(b, 1);
+   nir_def *hit_kind = nir_load_param(b, 2);
 
    nir_deref_instr *commit =
       nir_build_deref_cast(b, commit_ptr, nir_var_function_temp,
@@ -92,13 +92,13 @@ lower_any_hit_for_intersection(nir_shader *any_hit)
                break;
 
             case nir_intrinsic_load_ray_t_max:
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+               nir_def_rewrite_uses(&intrin->dest.ssa,
                                         hit_t);
                nir_instr_remove(&intrin->instr);
                break;
 
             case nir_intrinsic_load_ray_hit_kind:
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+               nir_def_rewrite_uses(&intrin->dest.ssa,
                                         hit_kind);
                nir_instr_remove(&intrin->instr);
                break;
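Per the three nir_load_param calls and the rewrites above, the lowered
any-hit body ends up with an ABI along these lines (a sketch; the function
name is illustrative):

#include <stdbool.h>
#include <stdint.h>

void lowered_any_hit(bool *commit,       /* param 0: commit pointer             */
                     float hit_t,        /* param 1: replaces load_ray_t_max    */
                     uint32_t hit_kind); /* param 2: replaces load_ray_hit_kind */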
@@ -157,7 +157,7 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
    nir_builder build = nir_builder_at(nir_before_cf_list(&impl->body));
    nir_builder *b = &build;
 
-   nir_ssa_def *t_addr = brw_nir_rt_mem_hit_addr(b, false /* committed */);
+   nir_def *t_addr = brw_nir_rt_mem_hit_addr(b, false /* committed */);
    nir_variable *commit =
       nir_local_variable_create(impl, glsl_bool_type(), "ray_commit");
    nir_store_var(b, commit, nir_imm_false(b), 0x1);
@@ -169,8 +169,8 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
       nir_push_if(b, nir_load_var(b, commit));
       {
          /* Set the "valid" bit in mem_hit */
-         nir_ssa_def *ray_addr = brw_nir_rt_mem_hit_addr(b, false /* committed */);
-         nir_ssa_def *flags_dw_addr = nir_iadd_imm(b, ray_addr, 12);
+         nir_def *ray_addr = brw_nir_rt_mem_hit_addr(b, false /* committed */);
+         nir_def *flags_dw_addr = nir_iadd_imm(b, ray_addr, 12);
          nir_store_global(b, flags_dw_addr, 4,
             nir_ior(b, nir_load_global(b, flags_dw_addr, 4, 1, 32),
                        nir_imm_int(b, 1 << 16)), 0x1 /* write_mask */);
@@ -193,10 +193,10 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
             switch (intrin->intrinsic) {
             case nir_intrinsic_report_ray_intersection: {
                b->cursor = nir_instr_remove(&intrin->instr);
-               nir_ssa_def *hit_t = nir_ssa_for_src(b, intrin->src[0], 1);
-               nir_ssa_def *hit_kind = nir_ssa_for_src(b, intrin->src[1], 1);
-               nir_ssa_def *min_t = nir_load_ray_t_min(b);
-               nir_ssa_def *max_t = nir_load_global(b, t_addr, 4, 1, 32);
+               nir_def *hit_t = nir_ssa_for_src(b, intrin->src[0], 1);
+               nir_def *hit_kind = nir_ssa_for_src(b, intrin->src[1], 1);
+               nir_def *min_t = nir_load_ray_t_min(b);
+               nir_def *max_t = nir_load_global(b, t_addr, 4, 1, 32);
 
                /* bool commit_tmp = false; */
                nir_variable *commit_tmp =
@@ -213,7 +213,7 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
                   if (any_hit_impl != NULL) {
                      nir_push_if(b, nir_inot(b, nir_load_leaf_opaque_intel(b)));
                      {
-                        nir_ssa_def *params[] = {
+                        nir_def *params[] = {
                            &nir_build_deref_var(b, commit_tmp)->dest.ssa,
                            hit_t,
                            hit_kind,
@@ -235,8 +235,8 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
                }
                nir_pop_if(b, NULL);
 
-               nir_ssa_def *accepted = nir_load_var(b, commit_tmp);
-               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+               nir_def *accepted = nir_load_var(b, commit_tmp);
+               nir_def_rewrite_uses(&intrin->dest.ssa,
                                         accepted);
                break;
             }
index 825a2f3..c9ab79f 100644
@@ -49,7 +49,7 @@ add_src_instr(nir_src *src, void *state)
 
 static nir_intrinsic_instr *
 find_resource_intel(struct util_dynarray *inst_array,
-                    nir_ssa_def *def)
+                    nir_def *def)
 {
    /* If resource_intel is already directly in front of the instruction, there
     * is nothing to do.
@@ -131,8 +131,8 @@ brw_nir_lower_non_uniform_intrinsic(nir_builder *b,
    nir_intrinsic_instr *new_resource_intel =
       nir_instr_as_intrinsic(new_instr);
 
-   nir_src_rewrite_ssa(&new_resource_intel->src[1], intrin->src[source].ssa);
-   nir_src_rewrite_ssa(&intrin->src[source], &new_resource_intel->dest.ssa);
+   nir_src_rewrite(&new_resource_intel->src[1], intrin->src[source].ssa);
+   nir_src_rewrite(&intrin->src[source], &new_resource_intel->dest.ssa);
 
    return true;
 }
@@ -165,8 +165,8 @@ brw_nir_lower_non_uniform_tex(nir_builder *b,
       nir_intrinsic_instr *new_resource_intel =
          nir_instr_as_intrinsic(new_instr);
 
-      nir_src_rewrite_ssa(&new_resource_intel->src[1], tex->src[s].src.ssa);
-      nir_src_rewrite_ssa(&tex->src[s].src, &new_resource_intel->dest.ssa);
+      nir_src_rewrite(&new_resource_intel->src[1], tex->src[s].src.ssa);
+      nir_src_rewrite(&tex->src[s].src, &new_resource_intel->dest.ssa);
 
       progress = true;
    }
@@ -296,7 +296,7 @@ brw_nir_cleanup_resource_intel_instr(nir_builder *b,
          continue;
 
       progress = true;
-      nir_src_rewrite_ssa(src, intrin->src[1].ssa);
+      nir_src_rewrite(src, intrin->src[1].ssa);
    }
 
    return progress;
index 63f0990..5e11e5c 100644
@@ -37,7 +37,7 @@ struct lowering_state {
    uint32_t n_queries;
 
    struct brw_nir_rt_globals_defs globals;
-   nir_ssa_def *rq_globals;
+   nir_def *rq_globals;
 };
 
 struct brw_ray_query {
@@ -98,7 +98,7 @@ create_internal_var(struct brw_ray_query *rq, struct lowering_state *state)
 
 
 
-static nir_ssa_def *
+static nir_def *
 get_ray_query_shadow_addr(nir_builder *b,
                           nir_deref_instr *deref,
                           struct lowering_state *state,
@@ -117,7 +117,7 @@ get_ray_query_shadow_addr(nir_builder *b,
    /* Base address in the shadow memory of the variable associated with this
     * ray query variable.
     */
-   nir_ssa_def *base_addr =
+   nir_def *base_addr =
       nir_iadd_imm(b, state->globals.resume_sbt_addr,
                    brw_rt_ray_queries_shadow_stack_size(state->devinfo) * rq->id);
 
@@ -131,7 +131,7 @@ get_ray_query_shadow_addr(nir_builder *b,
    nir_deref_instr **p = &path.path[1];
    for (; *p; p++) {
       if ((*p)->deref_type == nir_deref_type_array) {
-         nir_ssa_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
+         nir_def *index = nir_ssa_for_src(b, (*p)->arr.index, 1);
 
          /**/
          *out_state_deref = nir_build_deref_array(b, *out_state_deref, index);
@@ -140,7 +140,7 @@ get_ray_query_shadow_addr(nir_builder *b,
          uint64_t size = MAX2(1, glsl_get_aoa_size((*p)->type)) *
             brw_rt_ray_queries_shadow_stack_size(state->devinfo);
 
-         nir_ssa_def *mul = nir_amul_imm(b, nir_i2i64(b, index), size);
+         nir_def *mul = nir_amul_imm(b, nir_i2i64(b, index), size);
 
          base_addr = nir_iadd(b, base_addr, mul);
       } else {
@@ -151,7 +151,7 @@ get_ray_query_shadow_addr(nir_builder *b,
    nir_deref_path_finish(&path);
 
    /* Add the lane offset to the shadow memory address */
-   nir_ssa_def *lane_offset =
+   nir_def *lane_offset =
       nir_imul_imm(
          b,
          nir_iadd(
@@ -169,14 +169,14 @@ get_ray_query_shadow_addr(nir_builder *b,
 static void
 update_trace_ctrl_level(nir_builder *b,
                         nir_deref_instr *state_deref,
-                        nir_ssa_def **out_old_ctrl,
-                        nir_ssa_def **out_old_level,
-                        nir_ssa_def *new_ctrl,
-                        nir_ssa_def *new_level)
+                        nir_def **out_old_ctrl,
+                        nir_def **out_old_level,
+                        nir_def *new_ctrl,
+                        nir_def *new_level)
 {
-   nir_ssa_def *old_value = nir_load_deref(b, state_deref);
-   nir_ssa_def *old_ctrl = nir_ishr_imm(b, old_value, 2);
-   nir_ssa_def *old_level = nir_iand_imm(b, old_value, 0x3);
+   nir_def *old_value = nir_load_deref(b, state_deref);
+   nir_def *old_ctrl = nir_ishr_imm(b, old_value, 2);
+   nir_def *old_level = nir_iand_imm(b, old_value, 0x3);
 
    if (out_old_ctrl)
       *out_old_ctrl = old_ctrl;
@@ -194,16 +194,16 @@ update_trace_ctrl_level(nir_builder *b,
       if (!new_level)
          new_level = old_level;
 
-      nir_ssa_def *new_value = nir_ior(b, nir_ishl_imm(b, new_ctrl, 2), new_level);
+      nir_def *new_value = nir_ior(b, nir_ishl_imm(b, new_ctrl, 2), new_level);
       nir_store_deref(b, state_deref, new_value, 0x1);
    }
 }
 
 static void
 fill_query(nir_builder *b,
-           nir_ssa_def *hw_stack_addr,
-           nir_ssa_def *shadow_stack_addr,
-           nir_ssa_def *ctrl)
+           nir_def *hw_stack_addr,
+           nir_def *shadow_stack_addr,
+           nir_def *ctrl)
 {
    brw_nir_memcpy_global(b, hw_stack_addr, 64, shadow_stack_addr, 64,
                          BRW_RT_SIZEOF_RAY_QUERY);
@@ -211,8 +211,8 @@ fill_query(nir_builder *b,
 
 static void
 spill_query(nir_builder *b,
-            nir_ssa_def *hw_stack_addr,
-            nir_ssa_def *shadow_stack_addr)
+            nir_def *hw_stack_addr,
+            nir_def *shadow_stack_addr)
 {
    brw_nir_memcpy_global(b, shadow_stack_addr, 64, hw_stack_addr, 64,
                          BRW_RT_SIZEOF_RAY_QUERY);
@@ -229,16 +229,16 @@ lower_ray_query_intrinsic(nir_builder *b,
    b->cursor = nir_instr_remove(&intrin->instr);
 
    nir_deref_instr *ctrl_level_deref;
-   nir_ssa_def *shadow_stack_addr =
+   nir_def *shadow_stack_addr =
       get_ray_query_shadow_addr(b, deref, state, &ctrl_level_deref);
-   nir_ssa_def *hw_stack_addr =
+   nir_def *hw_stack_addr =
       brw_nir_rt_sync_stack_addr(b, state->globals.base_mem_addr, state->devinfo);
-   nir_ssa_def *stack_addr = shadow_stack_addr ? shadow_stack_addr : hw_stack_addr;
+   nir_def *stack_addr = shadow_stack_addr ? shadow_stack_addr : hw_stack_addr;
 
    switch (intrin->intrinsic) {
    case nir_intrinsic_rq_initialize: {
-      nir_ssa_def *as_addr = intrin->src[1].ssa;
-      nir_ssa_def *ray_flags = intrin->src[2].ssa;
+      nir_def *as_addr = intrin->src[1].ssa;
+      nir_def *ray_flags = intrin->src[2].ssa;
       /* From the SPIR-V spec:
        *
        *    "Only the 8 least-significant bits of Cull Mask are used by
@@ -247,13 +247,13 @@ lower_ray_query_intrinsic(nir_builder *b,
        *    Only the 16 least-significant bits of Miss Index are used by
        *    this instruction - other bits are ignored."
        */
-      nir_ssa_def *cull_mask = nir_iand_imm(b, intrin->src[3].ssa, 0xff);
-      nir_ssa_def *ray_orig = intrin->src[4].ssa;
-      nir_ssa_def *ray_t_min = intrin->src[5].ssa;
-      nir_ssa_def *ray_dir = intrin->src[6].ssa;
-      nir_ssa_def *ray_t_max = intrin->src[7].ssa;
+      nir_def *cull_mask = nir_iand_imm(b, intrin->src[3].ssa, 0xff);
+      nir_def *ray_orig = intrin->src[4].ssa;
+      nir_def *ray_t_min = intrin->src[5].ssa;
+      nir_def *ray_dir = intrin->src[6].ssa;
+      nir_def *ray_t_max = intrin->src[7].ssa;
 
-      nir_ssa_def *root_node_ptr =
+      nir_def *root_node_ptr =
          brw_nir_rt_acceleration_structure_to_root_node(b, as_addr);
 
       struct brw_nir_rt_mem_ray_defs ray_defs = {
@@ -266,7 +266,7 @@ lower_ray_query_intrinsic(nir_builder *b,
          .t_far = ray_t_max,
       };
 
-      nir_ssa_def *ray_addr =
+      nir_def *ray_addr =
          brw_nir_rt_mem_ray_addr(b, stack_addr, BRW_RT_BVH_LEVEL_WORLD);
 
       brw_nir_rt_query_mark_init(b, stack_addr);
@@ -280,13 +280,13 @@ lower_ray_query_intrinsic(nir_builder *b,
    }
 
    case nir_intrinsic_rq_proceed: {
-      nir_ssa_def *not_done =
+      nir_def *not_done =
          nir_inot(b, brw_nir_rt_query_done(b, stack_addr));
-      nir_ssa_def *not_done_then, *not_done_else;
+      nir_def *not_done_then, *not_done_else;
 
       nir_push_if(b, not_done);
       {
-         nir_ssa_def *ctrl, *level;
+         nir_def *ctrl, *level;
          update_trace_ctrl_level(b, ctrl_level_deref,
                                  &ctrl, &level,
                                  NULL,
@@ -324,7 +324,7 @@ lower_ray_query_intrinsic(nir_builder *b,
       }
       nir_pop_if(b, NULL);
       not_done = nir_if_phi(b, not_done_then, not_done_else);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, not_done);
+      nir_def_rewrite_uses(&intrin->dest.ssa, not_done);
       break;
    }
 
@@ -366,7 +366,7 @@ lower_ray_query_intrinsic(nir_builder *b,
                                         BRW_RT_BVH_LEVEL_OBJECT);
       brw_nir_rt_load_mem_hit_from_addr(b, &hit_in, stack_addr, committed);
 
-      nir_ssa_def *sysval = NULL;
+      nir_def *sysval = NULL;
       switch (nir_intrinsic_ray_query_value(intrin)) {
       case nir_ray_query_value_intersection_type:
          if (committed) {
@@ -417,7 +417,7 @@ lower_ray_query_intrinsic(nir_builder *b,
       }
 
       case nir_ray_query_value_intersection_geometry_index: {
-         nir_ssa_def *geometry_index_dw =
+         nir_def *geometry_index_dw =
             nir_load_global(b, nir_iadd_imm(b, hit_in.prim_leaf_ptr, 4), 4,
                             1, 32);
          sysval = nir_iand_imm(b, geometry_index_dw, BITFIELD_MASK(29));
@@ -490,7 +490,7 @@ lower_ray_query_intrinsic(nir_builder *b,
       }
 
       assert(sysval);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, sysval);
+      nir_def_rewrite_uses(&intrin->dest.ssa, sysval);
       break;
    }
 
index e89bcde..39ffde7 100644
@@ -24,7 +24,7 @@
 #include "brw_nir_rt.h"
 #include "brw_nir_rt_builder.h"
 
-static nir_ssa_def *
+static nir_def *
 build_leaf_is_procedural(nir_builder *b, struct brw_nir_rt_mem_hit_defs *hit)
 {
    switch (b->shader->info.stage) {
@@ -56,8 +56,8 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
    struct brw_nir_rt_globals_defs globals;
    brw_nir_rt_load_globals(b, &globals);
 
-   nir_ssa_def *hotzone_addr = brw_nir_rt_sw_hotzone_addr(b, devinfo);
-   nir_ssa_def *hotzone = nir_load_global(b, hotzone_addr, 16, 4, 32);
+   nir_def *hotzone_addr = brw_nir_rt_sw_hotzone_addr(b, devinfo);
+   nir_def *hotzone = nir_load_global(b, hotzone_addr, 16, 4, 32);
 
    gl_shader_stage stage = b->shader->info.stage;
    struct brw_nir_rt_mem_ray_defs world_ray_in = {};
@@ -82,9 +82,9 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
       break;
    }
 
-   nir_ssa_def *thread_stack_base_addr = brw_nir_rt_sw_stack_addr(b, devinfo);
-   nir_ssa_def *stack_base_offset = nir_channel(b, hotzone, 0);
-   nir_ssa_def *stack_base_addr =
+   nir_def *thread_stack_base_addr = brw_nir_rt_sw_stack_addr(b, devinfo);
+   nir_def *stack_base_offset = nir_channel(b, hotzone, 0);
+   nir_def *stack_base_addr =
       nir_iadd(b, thread_stack_base_addr, nir_u2u64(b, stack_base_offset));
    ASSERTED bool seen_scratch_base_ptr_load = false;
    ASSERTED bool found_resume = false;
@@ -98,7 +98,7 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
 
          b->cursor = nir_after_instr(&intrin->instr);
 
-         nir_ssa_def *sysval = NULL;
+         nir_def *sysval = NULL;
          switch (intrin->intrinsic) {
          case nir_intrinsic_load_scratch_base_ptr:
             assert(nir_intrinsic_base(intrin) == 1);
@@ -109,7 +109,7 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
          case nir_intrinsic_btd_stack_push_intel: {
             int32_t stack_size = nir_intrinsic_stack_size(intrin);
             if (stack_size > 0) {
-               nir_ssa_def *child_stack_offset =
+               nir_def *child_stack_offset =
                   nir_iadd_imm(b, stack_base_offset, stack_size);
                nir_store_global(b, hotzone_addr, 16, child_stack_offset, 0x1);
             }
@@ -210,7 +210,7 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
          }
 
          case nir_intrinsic_load_ray_hit_kind: {
-            nir_ssa_def *tri_hit_kind =
+            nir_def *tri_hit_kind =
                nir_bcsel(b, hit_in.front_face,
                             nir_imm_int(b, BRW_RT_HIT_KIND_FRONT_FACE),
                             nir_imm_int(b, BRW_RT_HIT_KIND_BACK_FACE));
@@ -236,7 +236,7 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
             break;
 
          case nir_intrinsic_load_ray_geometry_index: {
-            nir_ssa_def *geometry_index_dw =
+            nir_def *geometry_index_dw =
                nir_load_global(b, nir_iadd_imm(b, hit_in.prim_leaf_ptr, 4), 4,
                                1, 32);
             sysval = nir_iand_imm(b, geometry_index_dw, BITFIELD_MASK(29));
@@ -325,7 +325,7 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
                 */
                sysval = hit_in.front_face;
             } else {
-               nir_ssa_def *flags_dw =
+               nir_def *flags_dw =
                   nir_load_global(b, nir_iadd_imm(b, hit_in.prim_leaf_ptr, 4), 4,
                                   1, 32);
                sysval = nir_i2b(b, nir_iand_imm(b, flags_dw, 1u << 30));
@@ -340,7 +340,7 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
          progress = true;
 
          if (sysval) {
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+            nir_def_rewrite_uses(&intrin->dest.ssa,
                                      sysval);
             nir_instr_remove(&intrin->instr);
          }
index a60ab0f..2978bd0 100644
@@ -130,13 +130,13 @@ store_resume_addr(nir_builder *b, nir_intrinsic_instr *call)
    /* First thing on the called shader's stack is the resume address
     * followed by a pointer to the payload.
     */
-   nir_ssa_def *resume_record_addr =
+   nir_def *resume_record_addr =
       nir_iadd_imm(b, nir_load_btd_resume_sbt_addr_intel(b),
                    call_idx * BRW_BTD_RESUME_SBT_STRIDE);
    /* By the time we get here, any remaining shader/function memory
     * pointers have been lowered to SSA values.
     */
-   nir_ssa_def *payload_addr =
+   nir_def *payload_addr =
       nir_get_shader_call_payload_src(call)->ssa;
    brw_nir_rt_store_scratch(b, offset, BRW_BTD_STACK_ALIGN,
                             nir_vec2(b, resume_record_addr, payload_addr),
@@ -164,8 +164,8 @@ lower_shader_trace_ray_instr(struct nir_builder *b, nir_instr *instr, void *data
 
    store_resume_addr(b, call);
 
-   nir_ssa_def *as_addr = call->src[0].ssa;
-   nir_ssa_def *ray_flags = call->src[1].ssa;
+   nir_def *as_addr = call->src[0].ssa;
+   nir_def *ray_flags = call->src[1].ssa;
    /* From the SPIR-V spec:
     *
     *    "Only the 8 least-significant bits of Cull Mask are used by this
@@ -177,16 +177,16 @@ lower_shader_trace_ray_instr(struct nir_builder *b, nir_instr *instr, void *data
     *    Only the 16 least-significant bits of Miss Index are used by this
     *    instruction - other bits are ignored."
     */
-   nir_ssa_def *cull_mask = nir_iand_imm(b, call->src[2].ssa, 0xff);
-   nir_ssa_def *sbt_offset = nir_iand_imm(b, call->src[3].ssa, 0xf);
-   nir_ssa_def *sbt_stride = nir_iand_imm(b, call->src[4].ssa, 0xf);
-   nir_ssa_def *miss_index = nir_iand_imm(b, call->src[5].ssa, 0xffff);
-   nir_ssa_def *ray_orig = call->src[6].ssa;
-   nir_ssa_def *ray_t_min = call->src[7].ssa;
-   nir_ssa_def *ray_dir = call->src[8].ssa;
-   nir_ssa_def *ray_t_max = call->src[9].ssa;
-
-   nir_ssa_def *root_node_ptr =
+   nir_def *cull_mask = nir_iand_imm(b, call->src[2].ssa, 0xff);
+   nir_def *sbt_offset = nir_iand_imm(b, call->src[3].ssa, 0xf);
+   nir_def *sbt_stride = nir_iand_imm(b, call->src[4].ssa, 0xf);
+   nir_def *miss_index = nir_iand_imm(b, call->src[5].ssa, 0xffff);
+   nir_def *ray_orig = call->src[6].ssa;
+   nir_def *ray_t_min = call->src[7].ssa;
+   nir_def *ray_dir = call->src[8].ssa;
+   nir_def *ray_t_max = call->src[9].ssa;
+
+   nir_def *root_node_ptr =
       brw_nir_rt_acceleration_structure_to_root_node(b, as_addr);
 
    /* The hardware packet requires an address to the first element of the
@@ -201,20 +201,20 @@ lower_shader_trace_ray_instr(struct nir_builder *b, nir_instr *instr, void *data
     * calls the SPIR-V stride value the "shader index multiplier" which is
     * a much more sane name.
     */
-   nir_ssa_def *hit_sbt_stride_B =
+   nir_def *hit_sbt_stride_B =
       nir_load_ray_hit_sbt_stride_intel(b);
-   nir_ssa_def *hit_sbt_offset_B =
+   nir_def *hit_sbt_offset_B =
       nir_umul_32x16(b, sbt_offset, nir_u2u32(b, hit_sbt_stride_B));
-   nir_ssa_def *hit_sbt_addr =
+   nir_def *hit_sbt_addr =
       nir_iadd(b, nir_load_ray_hit_sbt_addr_intel(b),
                   nir_u2u64(b, hit_sbt_offset_B));
 
    /* The hardware packet takes an address to the miss BSR. */
-   nir_ssa_def *miss_sbt_stride_B =
+   nir_def *miss_sbt_stride_B =
       nir_load_ray_miss_sbt_stride_intel(b);
-   nir_ssa_def *miss_sbt_offset_B =
+   nir_def *miss_sbt_offset_B =
       nir_umul_32x16(b, miss_index, nir_u2u32(b, miss_sbt_stride_B));
-   nir_ssa_def *miss_sbt_addr =
+   nir_def *miss_sbt_addr =
       nir_iadd(b, nir_load_ray_miss_sbt_addr_intel(b),
                   nir_u2u64(b, miss_sbt_offset_B));
 
@@ -268,10 +268,10 @@ lower_shader_call_instr(struct nir_builder *b, nir_instr *instr, void *data)
 
    store_resume_addr(b, call);
 
-   nir_ssa_def *sbt_offset32 =
+   nir_def *sbt_offset32 =
       nir_imul(b, call->src[0].ssa,
                nir_u2u32(b, nir_load_callable_sbt_stride_intel(b)));
-   nir_ssa_def *sbt_addr =
+   nir_def *sbt_addr =
       nir_iadd(b, nir_load_callable_sbt_addr_intel(b),
                nir_u2u64(b, sbt_offset32));
    brw_nir_btd_spawn(b, sbt_addr);
@@ -346,14 +346,14 @@ brw_nir_create_trivial_return_shader(const struct brw_compiler *compiler,
 
       b->cursor = nir_before_block(nir_start_block(impl));
 
-      nir_ssa_def *shader_type = nir_load_btd_shader_type_intel(b);
+      nir_def *shader_type = nir_load_btd_shader_type_intel(b);
 
-      nir_ssa_def *is_intersection_shader =
+      nir_def *is_intersection_shader =
          nir_ieq_imm(b, shader_type, GEN_RT_BTD_SHADER_TYPE_INTERSECTION);
-      nir_ssa_def *is_anyhit_shader =
+      nir_def *is_anyhit_shader =
          nir_ieq_imm(b, shader_type, GEN_RT_BTD_SHADER_TYPE_ANY_HIT);
 
-      nir_ssa_def *needs_commit_or_continue =
+      nir_def *needs_commit_or_continue =
          nir_ior(b, is_intersection_shader, is_anyhit_shader);
 
       nir_push_if(b, needs_commit_or_continue);
@@ -361,11 +361,11 @@ brw_nir_create_trivial_return_shader(const struct brw_compiler *compiler,
          struct brw_nir_rt_mem_hit_defs hit_in = {};
          brw_nir_rt_load_mem_hit(b, &hit_in, false /* committed */);
 
-         nir_ssa_def *ray_op =
+         nir_def *ray_op =
             nir_bcsel(b, is_intersection_shader,
                       nir_imm_int(b, GEN_RT_TRACE_RAY_CONTINUE),
                       nir_imm_int(b, GEN_RT_TRACE_RAY_COMMIT));
-         nir_ssa_def *ray_level = hit_in.bvh_level;
+         nir_def *ray_level = hit_in.bvh_level;
 
          nir_trace_ray_intel(b,
                              nir_load_btd_global_arg_addr_intel(b),
index 0a15a0e..f5dbbd7 100644
@@ -72,32 +72,32 @@ lower_shading_rate_output_instr(nir_builder *b, nir_instr *instr,
    b->cursor = is_store ? nir_before_instr(instr) : nir_after_instr(instr);
 
    if (is_store) {
-      nir_ssa_def *bit_field = intrin->src[0].ssa;
-      nir_ssa_def *fp16_x =
+      nir_def *bit_field = intrin->src[0].ssa;
+      nir_def *fp16_x =
          nir_i2f16(b,
                    nir_ishl(b, nir_imm_int(b, 1),
                             nir_ishr_imm(b, bit_field, 2)));
-      nir_ssa_def *fp16_y =
+      nir_def *fp16_y =
          nir_i2f16(b,
                    nir_ishl(b, nir_imm_int(b, 1),
                             nir_iand_imm(b, bit_field, 0x3)));
-      nir_ssa_def *packed_fp16_xy = nir_pack_32_2x16_split(b, fp16_x, fp16_y);
+      nir_def *packed_fp16_xy = nir_pack_32_2x16_split(b, fp16_x, fp16_y);
 
       nir_instr_rewrite_src(instr, &intrin->src[0],
                             nir_src_for_ssa(packed_fp16_xy));
    } else {
-      nir_ssa_def *packed_fp16_xy = &intrin->dest.ssa;
+      nir_def *packed_fp16_xy = &intrin->dest.ssa;
 
-      nir_ssa_def *u32_x =
+      nir_def *u32_x =
          nir_i2i32(b, nir_unpack_32_2x16_split_x(b, packed_fp16_xy));
-      nir_ssa_def *u32_y =
+      nir_def *u32_y =
          nir_i2i32(b, nir_unpack_32_2x16_split_y(b, packed_fp16_xy));
 
-      nir_ssa_def *bit_field =
+      nir_def *bit_field =
          nir_ior(b, nir_ishl_imm(b, nir_ushr_imm(b, u32_x, 1), 2),
                     nir_ushr_imm(b, u32_y, 1));
 
-      nir_ssa_def_rewrite_uses_after(packed_fp16_xy, bit_field,
+      nir_def_rewrite_uses_after(packed_fp16_xy, bit_field,
                                      bit_field->parent_instr);
    }
 
index 9b74b51..ad3b864 100644
@@ -55,7 +55,7 @@ lower_is_sparse_texels_resident(nir_builder *b, nir_intrinsic_instr *intrin)
 {
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def_rewrite_uses(
+   nir_def_rewrite_uses(
       &intrin->dest.ssa,
       nir_i2b(b, nir_iand(b, intrin->src[0].ssa,
                               nir_ishl(b, nir_imm_int(b, 1),
@@ -67,7 +67,7 @@ lower_sparse_residency_code_and(nir_builder *b, nir_intrinsic_instr *intrin)
 {
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def_rewrite_uses(
+   nir_def_rewrite_uses(
       &intrin->dest.ssa,
       nir_iand(b, intrin->src[0].ssa, intrin->src[1].ssa));
 }
@@ -77,7 +77,7 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
 {
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def *img_load;
+   nir_def *img_load;
    nir_intrinsic_instr *new_intrin;
    if (intrin->intrinsic == nir_intrinsic_image_sparse_load) {
       img_load = nir_image_load(b,
@@ -106,7 +106,7 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
    nir_intrinsic_set_access(new_intrin, nir_intrinsic_access(intrin));
    nir_intrinsic_set_dest_type(new_intrin, nir_intrinsic_dest_type(intrin));
 
-   nir_ssa_def *dests[NIR_MAX_VEC_COMPONENTS];
+   nir_def *dests[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < intrin->num_components - 1; i++) {
       dests[i] = nir_channel(b, img_load, i);
    }
@@ -130,16 +130,16 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
    tex->src[0].src = nir_src_for_ssa(intrin->src[0].ssa);
 
    tex->coord_components = nir_image_intrinsic_coord_components(intrin);
-   nir_ssa_def *coord;
+   nir_def *coord;
    if (nir_intrinsic_image_dim(intrin) == GLSL_SAMPLER_DIM_CUBE &&
        nir_intrinsic_image_array(intrin)) {
       tex->coord_components++;
 
-      nir_ssa_def *img_layer = nir_channel(b, intrin->src[1].ssa, 2);
-      nir_ssa_def *tex_slice = nir_idiv(b, img_layer, nir_imm_int(b, 6));
-      nir_ssa_def *tex_face =
+      nir_def *img_layer = nir_channel(b, intrin->src[1].ssa, 2);
+      nir_def *tex_slice = nir_idiv(b, img_layer, nir_imm_int(b, 6));
+      nir_def *tex_face =
          nir_iadd(b, img_layer, nir_ineg(b, nir_imul_imm(b, tex_slice, 6)));
-      nir_ssa_def *comps[4] = {
+      nir_def *comps[4] = {
          nir_channel(b, intrin->src[1].ssa, 0),
          nir_channel(b, intrin->src[1].ssa, 1),
          tex_face,
@@ -163,7 +163,7 @@ lower_sparse_image_load(nir_builder *b, nir_intrinsic_instr *intrin)
 
    dests[intrin->num_components - 1] = nir_channel(b, &tex->dest.ssa, 4);
 
-   nir_ssa_def_rewrite_uses(
+   nir_def_rewrite_uses(
       &intrin->dest.ssa,
       nir_vec(b, dests, intrin->num_components));
 }
@@ -186,15 +186,15 @@ lower_tex_compare(nir_builder *b, nir_tex_instr *tex, int compare_idx)
    tex->is_sparse = false;
    tex->dest.ssa.num_components = tex->dest.ssa.num_components - 1;
 
-   nir_ssa_def *new_comps[NIR_MAX_VEC_COMPONENTS];
+   nir_def *new_comps[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < tex->dest.ssa.num_components; i++)
       new_comps[i] = nir_channel(b, &tex->dest.ssa, i);
    new_comps[tex->dest.ssa.num_components] =
       nir_channel(b, &sparse_tex->dest.ssa, tex->dest.ssa.num_components);
 
-   nir_ssa_def *new_vec = nir_vec(b, new_comps, sparse_tex->dest.ssa.num_components);
+   nir_def *new_vec = nir_vec(b, new_comps, sparse_tex->dest.ssa.num_components);
 
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, new_vec, new_vec->parent_instr);
+   nir_def_rewrite_uses_after(&tex->dest.ssa, new_vec, new_vec->parent_instr);
 }
 
 static bool
index 695240e..9b66ce6 100644
@@ -27,7 +27,7 @@
 #include "compiler/nir/nir_builder.h"
 #include "compiler/nir/nir_format_convert.h"
 
-static nir_ssa_def *
+static nir_def *
 _load_image_param(nir_builder *b, nir_deref_instr *deref, unsigned offset)
 {
    nir_intrinsic_instr *load =
@@ -60,15 +60,15 @@ _load_image_param(nir_builder *b, nir_deref_instr *deref, unsigned offset)
 #define load_image_param(b, d, o) \
    _load_image_param(b, d, BRW_IMAGE_PARAM_##o##_OFFSET)
 
-static nir_ssa_def *
+static nir_def *
 image_coord_is_in_bounds(nir_builder *b, nir_deref_instr *deref,
-                         nir_ssa_def *coord)
+                         nir_def *coord)
 {
-   nir_ssa_def *size = load_image_param(b, deref, SIZE);
-   nir_ssa_def *cmp = nir_ilt(b, coord, size);
+   nir_def *size = load_image_param(b, deref, SIZE);
+   nir_def *cmp = nir_ilt(b, coord, size);
 
    unsigned coord_comps = glsl_get_sampler_coordinate_components(deref->type);
-   nir_ssa_def *in_bounds = nir_imm_true(b);
+   nir_def *in_bounds = nir_imm_true(b);
    for (unsigned i = 0; i < coord_comps; i++)
       in_bounds = nir_iand(b, in_bounds, nir_channel(b, cmp, i));
 
@@ -87,9 +87,9 @@ image_coord_is_in_bounds(nir_builder *b, nir_deref_instr *deref,
  * "Address Tiling Function" of the IVB PRM for an in-depth explanation of
  * the hardware tiling format.
  */
-static nir_ssa_def *
+static nir_def *
 image_address(nir_builder *b, const struct intel_device_info *devinfo,
-              nir_deref_instr *deref, nir_ssa_def *coord)
+              nir_deref_instr *deref, nir_def *coord)
 {
    if (glsl_get_sampler_dim(deref->type) == GLSL_SAMPLER_DIM_1D &&
        glsl_sampler_type_is_array(deref->type)) {
@@ -102,9 +102,9 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo,
       coord = nir_trim_vector(b, coord, dims);
    }
 
-   nir_ssa_def *offset = load_image_param(b, deref, OFFSET);
-   nir_ssa_def *tiling = load_image_param(b, deref, TILING);
-   nir_ssa_def *stride = load_image_param(b, deref, STRIDE);
+   nir_def *offset = load_image_param(b, deref, OFFSET);
+   nir_def *tiling = load_image_param(b, deref, TILING);
+   nir_def *stride = load_image_param(b, deref, STRIDE);
 
    /* Shift the coordinates by the fixed surface offset.  It may be non-zero
     * if the image is a single slice of a higher-dimensional surface, or if a
@@ -114,7 +114,7 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo,
     * the surface base address wouldn't give a well-formed tiled surface in
     * the general case.
     */
-   nir_ssa_def *xypos = (coord->num_components == 1) ?
+   nir_def *xypos = (coord->num_components == 1) ?
                         nir_vec2(b, coord, nir_imm_int(b, 0)) :
                         nir_trim_vector(b, coord, 2);
    xypos = nir_iadd(b, xypos, offset);
@@ -144,10 +144,10 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo,
       /* Decompose z into a major (tmp.y) and a minor (tmp.x)
        * index.
        */
-      nir_ssa_def *z = nir_channel(b, coord, 2);
-      nir_ssa_def *z_x = nir_ubfe(b, z, nir_imm_int(b, 0),
+      nir_def *z = nir_channel(b, coord, 2);
+      nir_def *z_x = nir_ubfe(b, z, nir_imm_int(b, 0),
                                   nir_channel(b, tiling, 2));
-      nir_ssa_def *z_y = nir_ushr(b, z, nir_channel(b, tiling, 2));
+      nir_def *z_y = nir_ushr(b, z, nir_channel(b, tiling, 2));
 
       /* Take into account the horizontal (tmp.x) and vertical (tmp.y)
        * slice offset.
@@ -156,7 +156,7 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo,
                                              nir_channels(b, stride, 0xc)));
    }
 
-   nir_ssa_def *addr;
+   nir_def *addr;
    if (coord->num_components > 1) {
       /* Calculate the major/minor x and y indices.  In order to
        * accommodate both X and Y tiling, the Y-major tiling format is
@@ -173,9 +173,9 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo,
        */
 
       /* Calculate the minor x and y indices. */
-      nir_ssa_def *minor = nir_ubfe(b, xypos, nir_imm_int(b, 0),
+      nir_def *minor = nir_ubfe(b, xypos, nir_imm_int(b, 0),
                                        nir_trim_vector(b, tiling, 2));
-      nir_ssa_def *major = nir_ushr(b, xypos, nir_trim_vector(b, tiling, 2));
+      nir_def *major = nir_ushr(b, xypos, nir_trim_vector(b, tiling, 2));
 
       /* Calculate the texel index from the start of the tile row and the
        * vertical coordinate of the row.
@@ -184,7 +184,7 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo,
        *           (minor.y << tile.x) + minor.x
        *   tmp.y = major.y << tile.y
        */
-      nir_ssa_def *idx_x, *idx_y;
+      nir_def *idx_x, *idx_y;
       idx_x = nir_ishl(b, nir_channel(b, major, 0), nir_channel(b, tiling, 1));
       idx_x = nir_iadd(b, idx_x, nir_channel(b, minor, 1));
       idx_x = nir_ishl(b, idx_x, nir_channel(b, tiling, 0));
@@ -192,7 +192,7 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo,
       idx_y = nir_ishl(b, nir_channel(b, major, 1), nir_channel(b, tiling, 1));
 
       /* Add it to the start of the tile row. */
-      nir_ssa_def *idx;
+      nir_def *idx;
       idx = nir_imul(b, idx_y, nir_channel(b, stride, 1));
       idx = nir_iadd(b, idx, idx_x);
 
@@ -210,12 +210,12 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo,
           * be 0xff causing the relevant bits of both tmp.x and .y to be zero,
           * which effectively disables swizzling.
           */
-         nir_ssa_def *swizzle = load_image_param(b, deref, SWIZZLING);
-         nir_ssa_def *shift0 = nir_ushr(b, addr, nir_channel(b, swizzle, 0));
-         nir_ssa_def *shift1 = nir_ushr(b, addr, nir_channel(b, swizzle, 1));
+         nir_def *swizzle = load_image_param(b, deref, SWIZZLING);
+         nir_def *shift0 = nir_ushr(b, addr, nir_channel(b, swizzle, 0));
+         nir_def *shift1 = nir_ushr(b, addr, nir_channel(b, swizzle, 1));
 
          /* XOR tmp.x and tmp.y with bit 6 of the memory address. */
-         nir_ssa_def *bit = nir_iand(b, nir_ixor(b, shift0, shift1),
+         nir_def *bit = nir_iand(b, nir_ixor(b, shift0, shift1),
                                         nir_imm_int(b, 1 << 6));
          addr = nir_ixor(b, addr, bit);
       }
@@ -225,7 +225,7 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo,
        * offset may have been applied above to select a non-zero slice or
        * level of a higher-dimensional texture.
        */
-      nir_ssa_def *idx;
+      nir_def *idx;
       idx = nir_imul(b, nir_channel(b, xypos, 1), nir_channel(b, stride, 1));
       idx = nir_iadd(b, nir_channel(b, xypos, 0), idx);
       addr = nir_imul(b, idx, nir_channel(b, stride, 0));
@@ -257,9 +257,9 @@ get_format_info(enum isl_format fmt)
    };
 }
 
-static nir_ssa_def *
+static nir_def *
 convert_color_for_load(nir_builder *b, const struct intel_device_info *devinfo,
-                       nir_ssa_def *color,
+                       nir_def *color,
                        enum isl_format image_fmt, enum isl_format lower_fmt,
                        unsigned dest_components)
 {
@@ -342,7 +342,7 @@ expand_vec:
    if (color->num_components == dest_components)
       return color;
 
-   nir_ssa_def *comps[4];
+   nir_def *comps[4];
    for (unsigned i = 0; i < color->num_components; i++)
       comps[i] = nir_channel(b, color, i);
 
@@ -383,15 +383,15 @@ lower_image_load_instr(nir_builder *b,
       /* Use an undef to hold the uses of the load while we do the color
        * conversion.
        */
-      nir_ssa_def *placeholder = nir_ssa_undef(b, 4, 32);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, placeholder);
+      nir_def *placeholder = nir_undef(b, 4, 32);
+      nir_def_rewrite_uses(&intrin->dest.ssa, placeholder);
 
       intrin->num_components = isl_format_get_num_channels(lower_fmt);
       intrin->dest.ssa.num_components = intrin->num_components;
 
       b->cursor = nir_after_instr(&intrin->instr);
 
-      nir_ssa_def *color = convert_color_for_load(b, devinfo,
+      nir_def *color = convert_color_for_load(b, devinfo,
                                                   &intrin->dest.ssa,
                                                   image_fmt, lower_fmt,
                                                   dest_components);
@@ -404,7 +404,7 @@ lower_image_load_instr(nir_builder *b,
          /* Carry over the sparse component without modifying it with the
           * converted color.
           */
-         nir_ssa_def *sparse_color[NIR_MAX_VEC_COMPONENTS];
+         nir_def *sparse_color[NIR_MAX_VEC_COMPONENTS];
          for (unsigned i = 0; i < dest_components; i++)
             sparse_color[i] = nir_channel(b, color, i);
          sparse_color[dest_components] =
@@ -412,7 +412,7 @@ lower_image_load_instr(nir_builder *b,
          color = nir_vec(b, sparse_color, dest_components + 1);
       }
 
-      nir_ssa_def_rewrite_uses(placeholder, color);
+      nir_def_rewrite_uses(placeholder, color);
       nir_instr_remove(placeholder->parent_instr);
    } else {
       /* This code part is only useful prior to Gfx9, we do not have plans to
@@ -431,9 +431,9 @@ lower_image_load_instr(nir_builder *b,
 
       b->cursor = nir_instr_remove(&intrin->instr);
 
-      nir_ssa_def *coord = intrin->src[1].ssa;
+      nir_def *coord = intrin->src[1].ssa;
 
-      nir_ssa_def *do_load = image_coord_is_in_bounds(b, deref, coord);
+      nir_def *do_load = image_coord_is_in_bounds(b, deref, coord);
       if (devinfo->verx10 == 70) {
          /* Check whether the first stride component (i.e. the Bpp value)
           * is greater than four, which on Gfx7 indicates that a surface of
@@ -441,39 +441,39 @@ lower_image_load_instr(nir_builder *b,
           * to a surface of type other than RAW using untyped surface
           * messages causes a hang on IVB and VLV.
           */
-         nir_ssa_def *stride = load_image_param(b, deref, STRIDE);
-         nir_ssa_def *is_raw =
+         nir_def *stride = load_image_param(b, deref, STRIDE);
+         nir_def *is_raw =
             nir_igt_imm(b, nir_channel(b, stride, 0), 4);
          do_load = nir_iand(b, do_load, is_raw);
       }
       nir_push_if(b, do_load);
 
-      nir_ssa_def *addr = image_address(b, devinfo, deref, coord);
-      nir_ssa_def *load =
+      nir_def *addr = image_address(b, devinfo, deref, coord);
+      nir_def *load =
          nir_image_deref_load_raw_intel(b, image_fmtl->bpb / 32, 32,
                                         &deref->dest.ssa, addr);
 
       nir_push_else(b, NULL);
 
-      nir_ssa_def *zero = nir_imm_zero(b, load->num_components, 32);
+      nir_def *zero = nir_imm_zero(b, load->num_components, 32);
 
       nir_pop_if(b, NULL);
 
-      nir_ssa_def *value = nir_if_phi(b, load, zero);
+      nir_def *value = nir_if_phi(b, load, zero);
 
-      nir_ssa_def *color = convert_color_for_load(b, devinfo, value,
+      nir_def *color = convert_color_for_load(b, devinfo, value,
                                                   image_fmt, raw_fmt,
                                                   dest_components);
 
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, color);
+      nir_def_rewrite_uses(&intrin->dest.ssa, color);
    }
 
    return true;
 }
 
-static nir_ssa_def *
+static nir_def *
 convert_color_for_store(nir_builder *b, const struct intel_device_info *devinfo,
-                        nir_ssa_def *color,
+                        nir_def *color,
                         enum isl_format image_fmt, enum isl_format lower_fmt)
 {
    struct format_info image = get_format_info(image_fmt);
@@ -565,7 +565,7 @@ lower_image_store_instr(nir_builder *b,
       /* Color conversion goes before the store */
       b->cursor = nir_before_instr(&intrin->instr);
 
-      nir_ssa_def *color = convert_color_for_store(b, devinfo,
+      nir_def *color = convert_color_for_store(b, devinfo,
                                                    intrin->src[3].ssa,
                                                    image_fmt, lower_fmt);
       intrin->num_components = isl_format_get_num_channels(lower_fmt);
@@ -582,9 +582,9 @@ lower_image_store_instr(nir_builder *b,
 
       b->cursor = nir_instr_remove(&intrin->instr);
 
-      nir_ssa_def *coord = intrin->src[1].ssa;
+      nir_def *coord = intrin->src[1].ssa;
 
-      nir_ssa_def *do_store = image_coord_is_in_bounds(b, deref, coord);
+      nir_def *do_store = image_coord_is_in_bounds(b, deref, coord);
       if (devinfo->verx10 == 70) {
          /* Check whether the first stride component (i.e. the Bpp value)
           * is greater than four, which on Gfx7 indicates that a surface of
@@ -592,15 +592,15 @@ lower_image_store_instr(nir_builder *b,
           * to a surface of type other than RAW using untyped surface
           * messages causes a hang on IVB and VLV.
           */
-         nir_ssa_def *stride = load_image_param(b, deref, STRIDE);
-         nir_ssa_def *is_raw =
+         nir_def *stride = load_image_param(b, deref, STRIDE);
+         nir_def *is_raw =
             nir_igt_imm(b, nir_channel(b, stride, 0), 4);
          do_store = nir_iand(b, do_store, is_raw);
       }
       nir_push_if(b, do_store);
 
-      nir_ssa_def *addr = image_address(b, devinfo, deref, coord);
-      nir_ssa_def *color = convert_color_for_store(b, devinfo,
+      nir_def *addr = image_address(b, devinfo, deref, coord);
+      nir_def *color = convert_color_for_store(b, devinfo,
                                                    intrin->src[3].ssa,
                                                    image_fmt, raw_fmt);
 
@@ -632,24 +632,24 @@ lower_image_atomic_instr(nir_builder *b,
    b->cursor = nir_instr_remove(&intrin->instr);
 
    /* Use an undef to hold the uses of the load conversion. */
-   nir_ssa_def *placeholder = nir_ssa_undef(b, 4, 32);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, placeholder);
+   nir_def *placeholder = nir_undef(b, 4, 32);
+   nir_def_rewrite_uses(&intrin->dest.ssa, placeholder);
 
    /* Check the first component of the size field to find out if the
     * image is bound.  Necessary on IVB for typed atomics because
     * they don't seem to respect null surfaces and will happily
     * corrupt or read random memory when no image is bound.
     */
-   nir_ssa_def *size = load_image_param(b, deref, SIZE);
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *size = load_image_param(b, deref, SIZE);
+   nir_def *zero = nir_imm_int(b, 0);
    nir_push_if(b, nir_ine(b, nir_channel(b, size, 0), zero));
 
    nir_builder_instr_insert(b, &intrin->instr);
 
    nir_pop_if(b, NULL);
 
-   nir_ssa_def *result = nir_if_phi(b, &intrin->dest.ssa, zero);
-   nir_ssa_def_rewrite_uses(placeholder, result);
+   nir_def *result = nir_if_phi(b, &intrin->dest.ssa, zero);
+   nir_def_rewrite_uses(placeholder, result);
 
    return true;
 }
@@ -683,9 +683,9 @@ lower_image_size_instr(nir_builder *b,
 
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def *size = load_image_param(b, deref, SIZE);
+   nir_def *size = load_image_param(b, deref, SIZE);
 
-   nir_ssa_def *comps[4] = { NULL, NULL, NULL, NULL };
+   nir_def *comps[4] = { NULL, NULL, NULL, NULL };
 
    assert(nir_intrinsic_image_dim(intrin) != GLSL_SAMPLER_DIM_CUBE);
    unsigned coord_comps = glsl_get_sampler_coordinate_components(deref->type);
@@ -695,8 +695,8 @@ lower_image_size_instr(nir_builder *b,
    for (unsigned c = coord_comps; c < intrin->dest.ssa.num_components; ++c)
       comps[c] = nir_imm_int(b, 1);
 
-   nir_ssa_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, vec);
+   nir_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components);
+   nir_def_rewrite_uses(&intrin->dest.ssa, vec);
 
    return true;
 }
index bd22622..41c7ffb 100644
@@ -30,7 +30,7 @@
  */
 
 static inline bool
-are_all_uses_fadd(nir_ssa_def *def)
+are_all_uses_fadd(nir_def *def)
 {
    nir_foreach_use_including_if(use_src, def) {
       if (use_src->is_if)
@@ -200,7 +200,7 @@ brw_nir_opt_peephole_ffma_instr(nir_builder *b,
 
    unsigned bit_size = add->dest.dest.ssa.bit_size;
 
-   nir_ssa_def *mul_src[2];
+   nir_def *mul_src[2];
    mul_src[0] = mul->src[0].src.ssa;
    mul_src[1] = mul->src[1].src.ssa;
 
@@ -234,7 +234,7 @@ brw_nir_opt_peephole_ffma_instr(nir_builder *b,
 
    nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
                      add->dest.dest.ssa.num_components, bit_size);
-   nir_ssa_def_rewrite_uses(&add->dest.dest.ssa, &ffma->dest.dest.ssa);
+   nir_def_rewrite_uses(&add->dest.dest.ssa, &ffma->dest.dest.ssa);
 
    nir_builder_instr_insert(b, &ffma->instr);
    assert(list_is_empty(&add->dest.dest.ssa.uses));
index eddbdfa..bf354b4 100644
@@ -48,7 +48,7 @@ replace_imul_instr(nir_builder *b, nir_alu_instr *imul, unsigned small_val,
    nir_ssa_dest_init(&imul_32x16->instr, &imul_32x16->dest.dest,
                      imul->dest.dest.ssa.num_components, 32);
 
-   nir_ssa_def_rewrite_uses(&imul->dest.dest.ssa,
+   nir_def_rewrite_uses(&imul->dest.dest.ssa,
                             &imul_32x16->dest.dest.ssa);
 
    nir_builder_instr_insert(b, &imul_32x16->instr);
@@ -67,19 +67,19 @@ enum root_operation {
 
 static enum root_operation
 signed_integer_range_analysis(nir_shader *shader, struct hash_table *range_ht,
-                              nir_ssa_scalar scalar, int *lo, int *hi)
+                              nir_scalar scalar, int *lo, int *hi)
 {
-   if (nir_ssa_scalar_is_const(scalar)) {
-      *lo = nir_ssa_scalar_as_int(scalar);
+   if (nir_scalar_is_const(scalar)) {
+      *lo = nir_scalar_as_int(scalar);
       *hi = *lo;
       return non_unary;
    }
 
-   if (nir_ssa_scalar_is_alu(scalar)) {
-      switch (nir_ssa_scalar_alu_op(scalar)) {
+   if (nir_scalar_is_alu(scalar)) {
+      switch (nir_scalar_alu_op(scalar)) {
       case nir_op_iabs:
          signed_integer_range_analysis(shader, range_ht,
-                                       nir_ssa_scalar_chase_alu_src(scalar, 0),
+                                       nir_scalar_chase_alu_src(scalar, 0),
                                        lo, hi);
 
          if (*lo == INT32_MIN) {
@@ -100,7 +100,7 @@ signed_integer_range_analysis(nir_shader *shader, struct hash_table *range_ht,
       case nir_op_ineg: {
          const enum root_operation root =
             signed_integer_range_analysis(shader, range_ht,
-                                          nir_ssa_scalar_chase_alu_src(scalar, 0),
+                                          nir_scalar_chase_alu_src(scalar, 0),
                                           lo, hi);
 
          if (*lo == INT32_MIN) {
@@ -124,10 +124,10 @@ signed_integer_range_analysis(nir_shader *shader, struct hash_table *range_ht,
          int src1_lo, src1_hi;
 
          signed_integer_range_analysis(shader, range_ht,
-                                       nir_ssa_scalar_chase_alu_src(scalar, 0),
+                                       nir_scalar_chase_alu_src(scalar, 0),
                                        &src0_lo, &src0_hi);
          signed_integer_range_analysis(shader, range_ht,
-                                       nir_ssa_scalar_chase_alu_src(scalar, 1),
+                                       nir_scalar_chase_alu_src(scalar, 1),
                                        &src1_lo, &src1_hi);
 
          *lo = MAX2(src0_lo, src1_lo);
@@ -141,10 +141,10 @@ signed_integer_range_analysis(nir_shader *shader, struct hash_table *range_ht,
          int src1_lo, src1_hi;
 
          signed_integer_range_analysis(shader, range_ht,
-                                       nir_ssa_scalar_chase_alu_src(scalar, 0),
+                                       nir_scalar_chase_alu_src(scalar, 0),
                                        &src0_lo, &src0_hi);
          signed_integer_range_analysis(shader, range_ht,
-                                       nir_ssa_scalar_chase_alu_src(scalar, 1),
+                                       nir_scalar_chase_alu_src(scalar, 1),
                                        &src1_lo, &src1_hi);
 
          *lo = MIN2(src0_lo, src1_lo);
@@ -238,7 +238,7 @@ brw_nir_opt_peephole_imul32x16_instr(nir_builder *b,
    if (imul->dest.dest.ssa.num_components > 1)
       return false;
 
-   const nir_ssa_scalar imul_scalar = { &imul->dest.dest.ssa, 0 };
+   const nir_scalar imul_scalar = { &imul->dest.dest.ssa, 0 };
    int idx = -1;
    enum root_operation prev_root = invalid_root;
 
@@ -249,7 +249,7 @@ brw_nir_opt_peephole_imul32x16_instr(nir_builder *b,
       if (imul->src[i].src.ssa->parent_instr->type == nir_instr_type_load_const)
          continue;
 
-      nir_ssa_scalar scalar = nir_ssa_scalar_chase_alu_src(imul_scalar, i);
+      nir_scalar scalar = nir_scalar_chase_alu_src(imul_scalar, i);
       int lo = INT32_MIN;
       int hi = INT32_MAX;
 
index 245202d..9ddbfa6 100644
@@ -37,7 +37,7 @@ resize_deref(nir_builder *b, nir_deref_instr *deref,
        (deref->deref_type == nir_deref_type_array ||
         deref->deref_type == nir_deref_type_ptr_as_array)) {
       b->cursor = nir_before_instr(&deref->instr);
-      nir_ssa_def *idx;
+      nir_def *idx;
       if (nir_src_is_const(deref->arr.index)) {
          idx = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index), bit_size);
       } else {
@@ -78,7 +78,7 @@ lower_rt_io_derefs(nir_shader *shader)
 
    nir_builder b = nir_builder_at(nir_before_cf_list(&impl->body));
 
-   nir_ssa_def *call_data_addr = NULL;
+   nir_def *call_data_addr = NULL;
    if (num_shader_call_vars > 0) {
       assert(shader->scratch_size >= BRW_BTD_STACK_CALLEE_DATA_SIZE);
       call_data_addr =
@@ -88,15 +88,15 @@ lower_rt_io_derefs(nir_shader *shader)
    }
 
    gl_shader_stage stage = shader->info.stage;
-   nir_ssa_def *hit_attrib_addr = NULL;
+   nir_def *hit_attrib_addr = NULL;
    if (num_ray_hit_attrib_vars > 0) {
       assert(stage == MESA_SHADER_ANY_HIT ||
              stage == MESA_SHADER_CLOSEST_HIT ||
              stage == MESA_SHADER_INTERSECTION);
-      nir_ssa_def *hit_addr =
+      nir_def *hit_addr =
          brw_nir_rt_mem_hit_addr(&b, stage == MESA_SHADER_CLOSEST_HIT);
       /* The vec2 barycentrics are in 2nd and 3rd dwords of MemHit */
-      nir_ssa_def *bary_addr = nir_iadd_imm(&b, hit_addr, 4);
+      nir_def *bary_addr = nir_iadd_imm(&b, hit_addr, 4);
       hit_attrib_addr = nir_bcsel(&b, nir_load_leaf_procedural_intel(&b),
                                       brw_nir_rt_hit_attrib_data_addr(&b),
                                       bary_addr);
@@ -117,7 +117,7 @@ lower_rt_io_derefs(nir_shader *shader)
                   nir_build_deref_cast(&b, call_data_addr,
                                        nir_var_function_temp,
                                        deref->var->type, 0);
-               nir_ssa_def_rewrite_uses(&deref->dest.ssa,
+               nir_def_rewrite_uses(&deref->dest.ssa,
                                         &cast->dest.ssa);
                nir_instr_remove(&deref->instr);
                progress = true;
@@ -130,7 +130,7 @@ lower_rt_io_derefs(nir_shader *shader)
                   nir_build_deref_cast(&b, hit_attrib_addr,
                                        nir_var_function_temp,
                                        deref->type, 0);
-               nir_ssa_def_rewrite_uses(&deref->dest.ssa,
+               nir_def_rewrite_uses(&deref->dest.ssa,
                                         &cast->dest.ssa);
                nir_instr_remove(&deref->instr);
                progress = true;
@@ -221,7 +221,7 @@ lower_rt_io_and_scratch(nir_shader *nir)
 static void
 build_terminate_ray(nir_builder *b)
 {
-   nir_ssa_def *skip_closest_hit = nir_test_mask(b, nir_load_ray_flags(b),
+   nir_def *skip_closest_hit = nir_test_mask(b, nir_load_ray_flags(b),
       BRW_RT_RAY_FLAG_SKIP_CLOSEST_HIT_SHADER);
    nir_push_if(b, skip_closest_hit);
    {
@@ -242,7 +242,7 @@ build_terminate_ray(nir_builder *b)
        * size of a SBT handle.  The BINDLESS_SHADER_RECORD for a closest hit
        * shader is the first one in the SBT handle.
        */
-      nir_ssa_def *closest_hit =
+      nir_def *closest_hit =
          nir_iadd_imm(b, nir_load_shader_record_ptr(b),
                         -BRW_RT_SBT_HANDLE_SIZE);
 
@@ -302,7 +302,7 @@ lower_ray_walk_intrinsics(nir_shader *shader,
          case nir_intrinsic_accept_ray_intersection: {
             b.cursor = nir_instr_remove(&intrin->instr);
 
-            nir_ssa_def *terminate = nir_test_mask(&b, nir_load_ray_flags(&b),
+            nir_def *terminate = nir_test_mask(&b, nir_load_ray_flags(&b),
                BRW_RT_RAY_FLAG_TERMINATE_ON_FIRST_HIT);
             nir_push_if(&b, terminate);
             {
@@ -399,7 +399,7 @@ brw_nir_lower_combined_intersection_any_hit(nir_shader *intersection,
    lower_rt_io_and_scratch(intersection);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_load_uniform(nir_builder *b, unsigned offset,
                    unsigned num_components, unsigned bit_size)
 {
@@ -434,14 +434,14 @@ brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
     * raygen BSR address here; the global data we'll deal with later.
     */
    b.shader->num_uniforms = 32;
-   nir_ssa_def *raygen_param_bsr_addr =
+   nir_def *raygen_param_bsr_addr =
       load_trampoline_param(&b, raygen_bsr_addr, 1, 64);
-   nir_ssa_def *is_indirect =
+   nir_def *is_indirect =
       nir_i2b(&b, load_trampoline_param(&b, is_indirect, 1, 8));
-   nir_ssa_def *local_shift =
+   nir_def *local_shift =
       nir_u2u32(&b, load_trampoline_param(&b, local_group_size_log2, 3, 8));
 
-   nir_ssa_def *raygen_indirect_bsr_addr;
+   nir_def *raygen_indirect_bsr_addr;
    nir_push_if(&b, is_indirect);
    {
       raygen_indirect_bsr_addr =
@@ -452,27 +452,27 @@ brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
    }
    nir_pop_if(&b, NULL);
 
-   nir_ssa_def *raygen_bsr_addr =
+   nir_def *raygen_bsr_addr =
       nir_if_phi(&b, raygen_indirect_bsr_addr, raygen_param_bsr_addr);
 
-   nir_ssa_def *global_id = nir_load_workgroup_id_zero_base(&b);
-   nir_ssa_def *simd_channel = nir_load_subgroup_invocation(&b);
-   nir_ssa_def *local_x =
+   nir_def *global_id = nir_load_workgroup_id_zero_base(&b);
+   nir_def *simd_channel = nir_load_subgroup_invocation(&b);
+   nir_def *local_x =
       nir_ubfe(&b, simd_channel, nir_imm_int(&b, 0),
                   nir_channel(&b, local_shift, 0));
-   nir_ssa_def *local_y =
+   nir_def *local_y =
       nir_ubfe(&b, simd_channel, nir_channel(&b, local_shift, 0),
                   nir_channel(&b, local_shift, 1));
-   nir_ssa_def *local_z =
+   nir_def *local_z =
       nir_ubfe(&b, simd_channel,
                   nir_iadd(&b, nir_channel(&b, local_shift, 0),
                               nir_channel(&b, local_shift, 1)),
                   nir_channel(&b, local_shift, 2));
-   nir_ssa_def *launch_id =
+   nir_def *launch_id =
       nir_iadd(&b, nir_ishl(&b, global_id, local_shift),
                   nir_vec3(&b, local_x, local_y, local_z));
 
-   nir_ssa_def *launch_size = nir_load_ray_launch_size(&b);
+   nir_def *launch_size = nir_load_ray_launch_size(&b);
    nir_push_if(&b, nir_ball(&b, nir_ult(&b, launch_id, launch_size)));
    {
       nir_store_global(&b, brw_nir_rt_sw_hotzone_addr(&b, devinfo), 16,
@@ -518,9 +518,9 @@ brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
             continue;
 
          b.cursor = nir_before_instr(&intrin->instr);
-         nir_ssa_def *global_arg_addr =
+         nir_def *global_arg_addr =
             load_trampoline_param(&b, rt_disp_globals_addr, 1, 64);
-         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+         nir_def_rewrite_uses(&intrin->dest.ssa,
                                   global_arg_addr);
          nir_instr_remove(instr);
       }
index d63d772..3f8189e 100644
@@ -39,8 +39,8 @@
    ((b)->shader->info.stage == MESA_SHADER_FRAGMENT ? \
     ACCESS_INCLUDE_HELPERS : 0)
 
-static inline nir_ssa_def *
-brw_nir_rt_load(nir_builder *b, nir_ssa_def *addr, unsigned align,
+static inline nir_def *
+brw_nir_rt_load(nir_builder *b, nir_def *addr, unsigned align,
                 unsigned components, unsigned bit_size)
 {
    return nir_build_load_global(b, components, bit_size, addr,
@@ -49,8 +49,8 @@ brw_nir_rt_load(nir_builder *b, nir_ssa_def *addr, unsigned align,
 }
 
 static inline void
-brw_nir_rt_store(nir_builder *b, nir_ssa_def *addr, unsigned align,
-                 nir_ssa_def *value, unsigned write_mask)
+brw_nir_rt_store(nir_builder *b, nir_def *addr, unsigned align,
+                 nir_def *value, unsigned write_mask)
 {
    nir_build_store_global(b, value, addr,
                           .align_mul = align,
@@ -59,20 +59,20 @@ brw_nir_rt_store(nir_builder *b, nir_ssa_def *addr, unsigned align,
                           .access = is_access_for_builder(b));
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_load_const(nir_builder *b, unsigned components,
-                      nir_ssa_def *addr, nir_ssa_def *pred)
+                      nir_def *addr, nir_def *pred)
 {
    return nir_load_global_const_block_intel(b, components, addr, pred);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_load_btd_dss_id(nir_builder *b)
 {
    return nir_load_topology_id_intel(b, .base = BRW_TOPOLOGY_ID_DSS);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_load_num_simd_lanes_per_dss(nir_builder *b,
                                        const struct intel_device_info *devinfo)
 {
@@ -81,13 +81,13 @@ brw_nir_rt_load_num_simd_lanes_per_dss(nir_builder *b,
                          16 /* The RT computation is based off SIMD16 */);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_load_eu_thread_simd(nir_builder *b)
 {
    return nir_load_topology_id_intel(b, .base = BRW_TOPOLOGY_ID_EU_THREAD_SIMD);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_async_stack_id(nir_builder *b)
 {
    return nir_iadd(b, nir_umul_32x16(b, nir_load_ray_num_dss_rt_stacks_intel(b),
@@ -95,7 +95,7 @@ brw_nir_rt_async_stack_id(nir_builder *b)
                       nir_load_btd_stack_id_intel(b));
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_sync_stack_id(nir_builder *b)
 {
    return brw_load_eu_thread_simd(b);
@@ -105,11 +105,11 @@ brw_nir_rt_sync_stack_id(nir_builder *b)
  * memory read or write based on the scratch_base_ptr system value rather
  * than a load/store_scratch intrinsic.
  */
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_load_scratch(nir_builder *b, uint32_t offset, unsigned align,
                         unsigned num_components, unsigned bit_size)
 {
-   nir_ssa_def *addr =
+   nir_def *addr =
       nir_iadd_imm(b, nir_load_scratch_base_ptr(b, 1, 64, 1), offset);
    return brw_nir_rt_load(b, addr, MIN2(align, BRW_BTD_STACK_ALIGN),
                              num_components, bit_size);
@@ -117,16 +117,16 @@ brw_nir_rt_load_scratch(nir_builder *b, uint32_t offset, unsigned align,
 
 static inline void
 brw_nir_rt_store_scratch(nir_builder *b, uint32_t offset, unsigned align,
-                         nir_ssa_def *value, nir_component_mask_t write_mask)
+                         nir_def *value, nir_component_mask_t write_mask)
 {
-   nir_ssa_def *addr =
+   nir_def *addr =
       nir_iadd_imm(b, nir_load_scratch_base_ptr(b, 1, 64, 1), offset);
    brw_nir_rt_store(b, addr, MIN2(align, BRW_BTD_STACK_ALIGN),
                     value, write_mask);
 }
 
 static inline void
-brw_nir_btd_spawn(nir_builder *b, nir_ssa_def *record_addr)
+brw_nir_btd_spawn(nir_builder *b, nir_def *record_addr)
 {
    nir_btd_spawn_intel(b, nir_load_btd_global_arg_addr_intel(b), record_addr);
 }
@@ -145,20 +145,20 @@ brw_nir_btd_retire(nir_builder *b)
 static inline void
 brw_nir_btd_return(struct nir_builder *b)
 {
-   nir_ssa_def *resume_addr =
+   nir_def *resume_addr =
       brw_nir_rt_load_scratch(b, BRW_BTD_STACK_RESUME_BSR_ADDR_OFFSET,
                               8 /* align */, 1, 64);
    brw_nir_btd_spawn(b, resume_addr);
 }
 
 static inline void
-assert_def_size(nir_ssa_def *def, unsigned num_components, unsigned bit_size)
+assert_def_size(nir_def *def, unsigned num_components, unsigned bit_size)
 {
    assert(def->num_components == num_components);
    assert(def->bit_size == bit_size);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_num_rt_stacks(nir_builder *b,
                       const struct intel_device_info *devinfo)
 {
@@ -166,11 +166,11 @@ brw_nir_num_rt_stacks(nir_builder *b,
                           intel_device_info_dual_subslice_id_bound(devinfo));
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_sw_hotzone_addr(nir_builder *b,
                            const struct intel_device_info *devinfo)
 {
-   nir_ssa_def *offset32 =
+   nir_def *offset32 =
       nir_imul_imm(b, brw_nir_rt_async_stack_id(b),
                       BRW_RT_SIZEOF_HOTZONE);
 
@@ -182,9 +182,9 @@ brw_nir_rt_sw_hotzone_addr(nir_builder *b,
                       nir_i2i64(b, offset32));
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_sync_stack_addr(nir_builder *b,
-                           nir_ssa_def *base_mem_addr,
+                           nir_def *base_mem_addr,
                            const struct intel_device_info *devinfo)
 {
    /* For Ray queries (Synchronous Ray Tracing), the formula is similar but
@@ -197,7 +197,7 @@ brw_nir_rt_sync_stack_addr(nir_builder *b,
     * We assume that we can calculate a 32-bit offset first and then apply it
     * to the 64-bit base address (here by subtraction) at the end.
     */
-   nir_ssa_def *offset32 =
+   nir_def *offset32 =
       nir_imul(b,
                nir_iadd(b,
                         nir_imul(b, brw_load_btd_dss_id(b),
@@ -207,7 +207,7 @@ brw_nir_rt_sync_stack_addr(nir_builder *b,
    return nir_isub(b, base_mem_addr, nir_u2u64(b, offset32));
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_stack_addr(nir_builder *b)
 {
    /* From the BSpec "Address Computation for Memory Based Data Structures:
@@ -220,38 +220,38 @@ brw_nir_rt_stack_addr(nir_builder *b)
     * We assume that we can calculate a 32-bit offset first and then add it
     * to the 64-bit base address at the end.
     */
-   nir_ssa_def *offset32 =
+   nir_def *offset32 =
       nir_imul(b, brw_nir_rt_async_stack_id(b),
                   nir_load_ray_hw_stack_size_intel(b));
    return nir_iadd(b, nir_load_ray_base_mem_addr_intel(b),
                       nir_u2u64(b, offset32));
 }
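
As a reading aid, the stack address math above collapses to a few integer ops. A hypothetical scalar sketch in plain C, assuming the multiply operand cut off by the earlier hunk is the DSS ID, as the surrounding helpers suggest; the synchronous variant above subtracts its offset from the base instead of adding:

#include <stdint.h>

/* Sketch only: async stack id = num_dss_rt_stacks * dss_id + btd_stack_id
 * (the dss_id operand is an assumption; the hunk cuts it off). The offset
 * is computed in 32 bits and widened only for the final 64-bit add.
 */
static inline uint64_t
rt_async_stack_addr(uint64_t base_mem_addr, uint32_t num_dss_rt_stacks,
                    uint32_t dss_id, uint32_t btd_stack_id,
                    uint32_t hw_stack_size)
{
   uint32_t stack_id = num_dss_rt_stacks * dss_id + btd_stack_id;
   uint32_t offset32 = stack_id * hw_stack_size;
   return base_mem_addr + (uint64_t)offset32; /* sync variant: base - offset */
}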
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_mem_hit_addr_from_addr(nir_builder *b,
-                        nir_ssa_def *stack_addr,
+                        nir_def *stack_addr,
                         bool committed)
 {
    return nir_iadd_imm(b, stack_addr, committed ? 0 : BRW_RT_SIZEOF_HIT_INFO);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_mem_hit_addr(nir_builder *b, bool committed)
 {
    return nir_iadd_imm(b, brw_nir_rt_stack_addr(b),
                           committed ? 0 : BRW_RT_SIZEOF_HIT_INFO);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_hit_attrib_data_addr(nir_builder *b)
 {
    return nir_iadd_imm(b, brw_nir_rt_stack_addr(b),
                           BRW_RT_OFFSETOF_HIT_ATTRIB_DATA);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_mem_ray_addr(nir_builder *b,
-                        nir_ssa_def *stack_addr,
+                        nir_def *stack_addr,
                         enum brw_rt_bvh_level bvh_level)
 {
    /* From the BSpec "Address Computation for Memory Based Data Structures:
@@ -267,51 +267,51 @@ brw_nir_rt_mem_ray_addr(nir_builder *b,
    return nir_iadd_imm(b, stack_addr, offset);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_sw_stack_addr(nir_builder *b,
                          const struct intel_device_info *devinfo)
 {
-   nir_ssa_def *addr = nir_load_ray_base_mem_addr_intel(b);
+   nir_def *addr = nir_load_ray_base_mem_addr_intel(b);
 
-   nir_ssa_def *offset32 = nir_imul(b, brw_nir_num_rt_stacks(b, devinfo),
+   nir_def *offset32 = nir_imul(b, brw_nir_num_rt_stacks(b, devinfo),
                                        nir_load_ray_hw_stack_size_intel(b));
    addr = nir_iadd(b, addr, nir_u2u64(b, offset32));
 
-   nir_ssa_def *offset_in_stack =
+   nir_def *offset_in_stack =
       nir_imul(b, nir_u2u64(b, brw_nir_rt_async_stack_id(b)),
                   nir_u2u64(b, nir_load_ray_sw_stack_size_intel(b)));
 
    return nir_iadd(b, addr, offset_in_stack);
 }
 
-static inline nir_ssa_def *
-nir_unpack_64_4x16_split_z(nir_builder *b, nir_ssa_def *val)
+static inline nir_def *
+nir_unpack_64_4x16_split_z(nir_builder *b, nir_def *val)
 {
    return nir_unpack_32_2x16_split_x(b, nir_unpack_64_2x32_split_y(b, val));
 }
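
For reference, the helper above composes two unpack ops; in scalar terms, chunk z of a 64-bit value split as 4x16 is simply bits 32..47. A minimal illustrative equivalent:

#include <stdint.h>

/* Illustrative only: unpack_64_2x32_split_y selects bits 32..63, and
 * unpack_32_2x16_split_x keeps the low 16 of those, i.e. bits 32..47.
 */
static inline uint16_t
unpack_64_4x16_z(uint64_t val)
{
   return (uint16_t)(val >> 32);
}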
 
 struct brw_nir_rt_globals_defs {
-   nir_ssa_def *base_mem_addr;
-   nir_ssa_def *call_stack_handler_addr;
-   nir_ssa_def *hw_stack_size;
-   nir_ssa_def *num_dss_rt_stacks;
-   nir_ssa_def *hit_sbt_addr;
-   nir_ssa_def *hit_sbt_stride;
-   nir_ssa_def *miss_sbt_addr;
-   nir_ssa_def *miss_sbt_stride;
-   nir_ssa_def *sw_stack_size;
-   nir_ssa_def *launch_size;
-   nir_ssa_def *call_sbt_addr;
-   nir_ssa_def *call_sbt_stride;
-   nir_ssa_def *resume_sbt_addr;
+   nir_def *base_mem_addr;
+   nir_def *call_stack_handler_addr;
+   nir_def *hw_stack_size;
+   nir_def *num_dss_rt_stacks;
+   nir_def *hit_sbt_addr;
+   nir_def *hit_sbt_stride;
+   nir_def *miss_sbt_addr;
+   nir_def *miss_sbt_stride;
+   nir_def *sw_stack_size;
+   nir_def *launch_size;
+   nir_def *call_sbt_addr;
+   nir_def *call_sbt_stride;
+   nir_def *resume_sbt_addr;
 };
 
 static inline void
 brw_nir_rt_load_globals_addr(nir_builder *b,
                              struct brw_nir_rt_globals_defs *defs,
-                             nir_ssa_def *addr)
+                             nir_def *addr)
 {
-   nir_ssa_def *data;
+   nir_def *data;
    data = brw_nir_rt_load_const(b, 16, addr, nir_imm_true(b));
    defs->base_mem_addr = nir_pack_64_2x32(b, nir_trim_vector(b, data, 2));
 
@@ -354,19 +354,19 @@ brw_nir_rt_load_globals(nir_builder *b,
    brw_nir_rt_load_globals_addr(b, defs, nir_load_btd_global_arg_addr_intel(b));
 }
 
-static inline nir_ssa_def *
-brw_nir_rt_unpack_leaf_ptr(nir_builder *b, nir_ssa_def *vec2)
+static inline nir_def *
+brw_nir_rt_unpack_leaf_ptr(nir_builder *b, nir_def *vec2)
 {
    /* Hit record leaf pointers are 42-bit and assumed to be in 64B chunks.
     * This leaves 22 bits at the top for other stuff.
     */
-   nir_ssa_def *ptr64 = nir_imul_imm(b, nir_pack_64_2x32(b, vec2), 64);
+   nir_def *ptr64 = nir_imul_imm(b, nir_pack_64_2x32(b, vec2), 64);
 
    /* The top 16 bits (remember, we shifted by 6 already) contain garbage
     * that we need to get rid of.
     */
-   nir_ssa_def *ptr_lo = nir_unpack_64_2x32_split_x(b, ptr64);
-   nir_ssa_def *ptr_hi = nir_unpack_64_2x32_split_y(b, ptr64);
+   nir_def *ptr_lo = nir_unpack_64_2x32_split_x(b, ptr64);
+   nir_def *ptr_hi = nir_unpack_64_2x32_split_y(b, ptr64);
    ptr_hi = nir_extract_i16(b, ptr_hi, nir_imm_int(b, 0));
    return nir_pack_64_2x32_split(b, ptr_lo, ptr_hi);
 }
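
The unpack above is easier to follow in scalar form. A minimal sketch (illustrative, not driver code) of the same 42-bit, 64B-chunk decoding and the extract_i16-based sign extension:

#include <stdint.h>

/* Sketch only: the packed value holds ptr >> 6 in its low 42 bits, so
 * after the multiply by 64 bits 48..63 are garbage; extracting word 0
 * of the high dword as i16 sign-extends from bit 47 to clean them up.
 */
static inline uint64_t
unpack_leaf_ptr_scalar(uint64_t packed)
{
   uint64_t ptr64 = packed * 64;                 /* chunk index -> byte address */
   uint32_t ptr_lo = (uint32_t)ptr64;
   uint32_t ptr_hi = (uint32_t)(ptr64 >> 32);
   ptr_hi = (uint32_t)(int32_t)(int16_t)ptr_hi;  /* extract_i16(..., word 0) */
   return ((uint64_t)ptr_hi << 32) | ptr_lo;
}
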
@@ -391,34 +391,34 @@ brw_nir_rt_unpack_leaf_ptr(nir_builder *b, nir_ssa_def *vec2)
  *    - hitGroupRecPtr1 22      MSB of hit group record of the hit triangle (multiple of 32 bytes)
  */
 struct brw_nir_rt_mem_hit_defs {
-   nir_ssa_def *t;
-   nir_ssa_def *tri_bary; /**< Only valid for triangle geometry */
-   nir_ssa_def *aabb_hit_kind; /**< Only valid for AABB geometry */
-   nir_ssa_def *valid;
-   nir_ssa_def *leaf_type;
-   nir_ssa_def *prim_index_delta;
-   nir_ssa_def *prim_leaf_index;
-   nir_ssa_def *bvh_level;
-   nir_ssa_def *front_face;
-   nir_ssa_def *done; /**< Only for ray queries */
-   nir_ssa_def *prim_leaf_ptr;
-   nir_ssa_def *inst_leaf_ptr;
+   nir_def *t;
+   nir_def *tri_bary; /**< Only valid for triangle geometry */
+   nir_def *aabb_hit_kind; /**< Only valid for AABB geometry */
+   nir_def *valid;
+   nir_def *leaf_type;
+   nir_def *prim_index_delta;
+   nir_def *prim_leaf_index;
+   nir_def *bvh_level;
+   nir_def *front_face;
+   nir_def *done; /**< Only for ray queries */
+   nir_def *prim_leaf_ptr;
+   nir_def *inst_leaf_ptr;
 };
 
 static inline void
 brw_nir_rt_load_mem_hit_from_addr(nir_builder *b,
                                   struct brw_nir_rt_mem_hit_defs *defs,
-                                  nir_ssa_def *stack_addr,
+                                  nir_def *stack_addr,
                                   bool committed)
 {
-   nir_ssa_def *hit_addr =
+   nir_def *hit_addr =
       brw_nir_rt_mem_hit_addr_from_addr(b, stack_addr, committed);
 
-   nir_ssa_def *data = brw_nir_rt_load(b, hit_addr, 16, 4, 32);
+   nir_def *data = brw_nir_rt_load(b, hit_addr, 16, 4, 32);
    defs->t = nir_channel(b, data, 0);
    defs->aabb_hit_kind = nir_channel(b, data, 1);
    defs->tri_bary = nir_channels(b, data, 0x6);
-   nir_ssa_def *bitfield = nir_channel(b, data, 3);
+   nir_def *bitfield = nir_channel(b, data, 3);
    defs->prim_index_delta =
       nir_ubitfield_extract(b, bitfield, nir_imm_int(b, 0), nir_imm_int(b, 16));
    defs->valid = nir_i2b(b, nir_iand_imm(b, bitfield, 1u << 16));
@@ -449,8 +449,8 @@ brw_nir_rt_load_mem_hit(nir_builder *b,
 
 static inline void
 brw_nir_memcpy_global(nir_builder *b,
-                      nir_ssa_def *dst_addr, uint32_t dst_align,
-                      nir_ssa_def *src_addr, uint32_t src_align,
+                      nir_def *dst_addr, uint32_t dst_align,
+                      nir_def *src_addr, uint32_t src_align,
                       uint32_t size)
 {
    /* We're going to copy in 16B chunks */
@@ -459,7 +459,7 @@ brw_nir_memcpy_global(nir_builder *b,
    src_align = MIN2(src_align, 16);
 
    for (unsigned offset = 0; offset < size; offset += 16) {
-      nir_ssa_def *data =
+      nir_def *data =
          brw_nir_rt_load(b, nir_iadd_imm(b, src_addr, offset), 16,
                          4, 32);
       brw_nir_rt_store(b, nir_iadd_imm(b, dst_addr, offset), 16,
@@ -469,22 +469,22 @@ brw_nir_memcpy_global(nir_builder *b,
 
 static inline void
 brw_nir_memclear_global(nir_builder *b,
-                        nir_ssa_def *dst_addr, uint32_t dst_align,
+                        nir_def *dst_addr, uint32_t dst_align,
                         uint32_t size)
 {
    /* We're going to clear in 16B chunks */
    assert(size % 16 == 0);
    dst_align = MIN2(dst_align, 16);
 
-   nir_ssa_def *zero = nir_imm_ivec4(b, 0, 0, 0, 0);
+   nir_def *zero = nir_imm_ivec4(b, 0, 0, 0, 0);
    for (unsigned offset = 0; offset < size; offset += 16) {
       brw_nir_rt_store(b, nir_iadd_imm(b, dst_addr, offset), dst_align,
                        zero, 0xf /* write_mask */);
    }
 }
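
The clear loop above maps to an ordinary chunked store loop; a hedged scalar analogue, assuming size is a multiple of 16 as the assert requires:

#include <stdint.h>
#include <string.h>

/* Sketch only: one 16-byte zero store per iteration, mirroring the
 * vec4 stores emitted by the builder loop above.
 */
static inline void
memclear_16b_chunks(uint8_t *dst, uint32_t size)
{
   static const uint8_t zero[16] = {0};
   for (uint32_t offset = 0; offset < size; offset += 16)
      memcpy(dst + offset, zero, 16);
}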
 
-static inline nir_ssa_def *
-brw_nir_rt_query_done(nir_builder *b, nir_ssa_def *stack_addr)
+static inline nir_def *
+brw_nir_rt_query_done(nir_builder *b, nir_def *stack_addr)
 {
    struct brw_nir_rt_mem_hit_defs hit_in = {};
    brw_nir_rt_load_mem_hit_from_addr(b, &hit_in, stack_addr,
@@ -495,17 +495,17 @@ brw_nir_rt_query_done(nir_builder *b, nir_ssa_def *stack_addr)
 
 static inline void
 brw_nir_rt_set_dword_bit_at(nir_builder *b,
-                            nir_ssa_def *addr,
+                            nir_def *addr,
                             uint32_t addr_offset,
                             uint32_t bit)
 {
-   nir_ssa_def *dword_addr = nir_iadd_imm(b, addr, addr_offset);
-   nir_ssa_def *dword = brw_nir_rt_load(b, dword_addr, 4, 1, 32);
+   nir_def *dword_addr = nir_iadd_imm(b, addr, addr_offset);
+   nir_def *dword = brw_nir_rt_load(b, dword_addr, 4, 1, 32);
    brw_nir_rt_store(b, dword_addr, 4, nir_ior_imm(b, dword, 1u << bit), 0x1);
 }
 
 static inline void
-brw_nir_rt_query_mark_done(nir_builder *b, nir_ssa_def *stack_addr)
+brw_nir_rt_query_mark_done(nir_builder *b, nir_def *stack_addr)
 {
    brw_nir_rt_set_dword_bit_at(b,
                                brw_nir_rt_mem_hit_addr_from_addr(b, stack_addr,
@@ -517,9 +517,9 @@ brw_nir_rt_query_mark_done(nir_builder *b, nir_ssa_def *stack_addr)
  * bit is located.
  */
 static inline void
-brw_nir_rt_query_mark_init(nir_builder *b, nir_ssa_def *stack_addr)
+brw_nir_rt_query_mark_init(nir_builder *b, nir_def *stack_addr)
 {
-   nir_ssa_def *dword_addr;
+   nir_def *dword_addr;
 
    for (uint32_t i = 0; i < 2; i++) {
       dword_addr =
@@ -535,15 +535,15 @@ brw_nir_rt_query_mark_init(nir_builder *b, nir_ssa_def *stack_addr)
  * structure, just adding the valid bit.
  */
 static inline void
-brw_nir_rt_commit_hit_addr(nir_builder *b, nir_ssa_def *stack_addr)
+brw_nir_rt_commit_hit_addr(nir_builder *b, nir_def *stack_addr)
 {
-   nir_ssa_def *dst_addr =
+   nir_def *dst_addr =
       brw_nir_rt_mem_hit_addr_from_addr(b, stack_addr, true /* committed */);
-   nir_ssa_def *src_addr =
+   nir_def *src_addr =
       brw_nir_rt_mem_hit_addr_from_addr(b, stack_addr, false /* committed */);
 
    for (unsigned offset = 0; offset < BRW_RT_SIZEOF_HIT_INFO; offset += 16) {
-      nir_ssa_def *data =
+      nir_def *data =
          brw_nir_rt_load(b, nir_iadd_imm(b, src_addr, offset), 16, 4, 32);
 
       if (offset == 0) {
@@ -568,16 +568,16 @@ brw_nir_rt_commit_hit_addr(nir_builder *b, nir_ssa_def *stack_addr)
 static inline void
 brw_nir_rt_commit_hit(nir_builder *b)
 {
-   nir_ssa_def *stack_addr = brw_nir_rt_stack_addr(b);
+   nir_def *stack_addr = brw_nir_rt_stack_addr(b);
    brw_nir_rt_commit_hit_addr(b, stack_addr);
 }
 
 static inline void
-brw_nir_rt_generate_hit_addr(nir_builder *b, nir_ssa_def *stack_addr, nir_ssa_def *t_val)
+brw_nir_rt_generate_hit_addr(nir_builder *b, nir_def *stack_addr, nir_def *t_val)
 {
-   nir_ssa_def *committed_addr =
+   nir_def *committed_addr =
       brw_nir_rt_mem_hit_addr_from_addr(b, stack_addr, true /* committed */);
-   nir_ssa_def *potential_addr =
+   nir_def *potential_addr =
       brw_nir_rt_mem_hit_addr_from_addr(b, stack_addr, false /* committed */);
 
    /* Set:
@@ -585,7 +585,7 @@ brw_nir_rt_generate_hit_addr(nir_builder *b, nir_ssa_def *stack_addr, nir_ssa_de
     *   potential.t     = t_val;
     *   potential.valid = true;
     */
-   nir_ssa_def *potential_hit_dwords_0_3 =
+   nir_def *potential_hit_dwords_0_3 =
       brw_nir_rt_load(b, potential_addr, 16, 4, 32);
    potential_hit_dwords_0_3 =
       nir_vec4(b,
@@ -608,7 +608,7 @@ brw_nir_rt_generate_hit_addr(nir_builder *b, nir_ssa_def *stack_addr, nir_ssa_de
     *   committed.prim_leaf_index = 0;
     *   committed.done            = false;
     */
-   nir_ssa_def *committed_hit_dwords_0_3 =
+   nir_def *committed_hit_dwords_0_3 =
       brw_nir_rt_load(b, committed_addr, 16, 4, 32);
    committed_hit_dwords_0_3 =
       nir_vec4(b,
@@ -633,23 +633,23 @@ brw_nir_rt_generate_hit_addr(nir_builder *b, nir_ssa_def *stack_addr, nir_ssa_de
 }
 
 struct brw_nir_rt_mem_ray_defs {
-   nir_ssa_def *orig;
-   nir_ssa_def *dir;
-   nir_ssa_def *t_near;
-   nir_ssa_def *t_far;
-   nir_ssa_def *root_node_ptr;
-   nir_ssa_def *ray_flags;
-   nir_ssa_def *hit_group_sr_base_ptr;
-   nir_ssa_def *hit_group_sr_stride;
-   nir_ssa_def *miss_sr_ptr;
-   nir_ssa_def *shader_index_multiplier;
-   nir_ssa_def *inst_leaf_ptr;
-   nir_ssa_def *ray_mask;
+   nir_def *orig;
+   nir_def *dir;
+   nir_def *t_near;
+   nir_def *t_far;
+   nir_def *root_node_ptr;
+   nir_def *ray_flags;
+   nir_def *hit_group_sr_base_ptr;
+   nir_def *hit_group_sr_stride;
+   nir_def *miss_sr_ptr;
+   nir_def *shader_index_multiplier;
+   nir_def *inst_leaf_ptr;
+   nir_def *ray_mask;
 };
 
 static inline void
 brw_nir_rt_store_mem_ray_query_at_addr(nir_builder *b,
-                                       nir_ssa_def *ray_addr,
+                                       nir_def *ray_addr,
                                        const struct brw_nir_rt_mem_ray_defs *defs)
 {
    assert_def_size(defs->orig, 3, 32);
@@ -680,7 +680,7 @@ brw_nir_rt_store_mem_ray_query_at_addr(nir_builder *b,
       0x3 /* write mask */);
 
    /* leaf_ptr is optional */
-   nir_ssa_def *inst_leaf_ptr;
+   nir_def *inst_leaf_ptr;
    if (defs->inst_leaf_ptr) {
       inst_leaf_ptr = defs->inst_leaf_ptr;
    } else {
@@ -702,7 +702,7 @@ brw_nir_rt_store_mem_ray(nir_builder *b,
                          const struct brw_nir_rt_mem_ray_defs *defs,
                          enum brw_rt_bvh_level bvh_level)
 {
-   nir_ssa_def *ray_addr =
+   nir_def *ray_addr =
       brw_nir_rt_mem_ray_addr(b, brw_nir_rt_stack_addr(b), bvh_level);
 
    assert_def_size(defs->orig, 3, 32);
@@ -739,7 +739,7 @@ brw_nir_rt_store_mem_ray(nir_builder *b,
       ~0 /* write mask */);
 
    /* leaf_ptr is optional */
-   nir_ssa_def *inst_leaf_ptr;
+   nir_def *inst_leaf_ptr;
    if (defs->inst_leaf_ptr) {
       inst_leaf_ptr = defs->inst_leaf_ptr;
    } else {
@@ -767,14 +767,14 @@ brw_nir_rt_store_mem_ray(nir_builder *b,
 static inline void
 brw_nir_rt_load_mem_ray_from_addr(nir_builder *b,
                                   struct brw_nir_rt_mem_ray_defs *defs,
-                                  nir_ssa_def *ray_base_addr,
+                                  nir_def *ray_base_addr,
                                   enum brw_rt_bvh_level bvh_level)
 {
-   nir_ssa_def *ray_addr = brw_nir_rt_mem_ray_addr(b,
+   nir_def *ray_addr = brw_nir_rt_mem_ray_addr(b,
                                                    ray_base_addr,
                                                    bvh_level);
 
-   nir_ssa_def *data[4] = {
+   nir_def *data[4] = {
       brw_nir_rt_load(b, nir_iadd_imm(b, ray_addr,  0), 16, 4, 32),
       brw_nir_rt_load(b, nir_iadd_imm(b, ray_addr, 16), 16, 4, 32),
       brw_nir_rt_load(b, nir_iadd_imm(b, ray_addr, 32), 16, 4, 32),
@@ -824,20 +824,20 @@ brw_nir_rt_load_mem_ray(nir_builder *b,
 }
 
 struct brw_nir_rt_bvh_instance_leaf_defs {
-   nir_ssa_def *shader_index;
-   nir_ssa_def *contribution_to_hit_group_index;
-   nir_ssa_def *world_to_object[4];
-   nir_ssa_def *instance_id;
-   nir_ssa_def *instance_index;
-   nir_ssa_def *object_to_world[4];
+   nir_def *shader_index;
+   nir_def *contribution_to_hit_group_index;
+   nir_def *world_to_object[4];
+   nir_def *instance_id;
+   nir_def *instance_index;
+   nir_def *object_to_world[4];
 };
 
 static inline void
 brw_nir_rt_load_bvh_instance_leaf(nir_builder *b,
                                   struct brw_nir_rt_bvh_instance_leaf_defs *defs,
-                                  nir_ssa_def *leaf_addr)
+                                  nir_def *leaf_addr)
 {
-   nir_ssa_def *leaf_desc = brw_nir_rt_load(b, leaf_addr, 4, 2, 32);
+   nir_def *leaf_desc = brw_nir_rt_load(b, leaf_addr, 4, 2, 32);
 
    defs->shader_index =
       nir_iand_imm(b, nir_channel(b, leaf_desc, 0), (1 << 24) - 1);
@@ -856,7 +856,7 @@ brw_nir_rt_load_bvh_instance_leaf(nir_builder *b,
    defs->object_to_world[3] =
       brw_nir_rt_load(b, nir_iadd_imm(b, leaf_addr, 52), 4, 3, 32);
 
-   nir_ssa_def *data =
+   nir_def *data =
       brw_nir_rt_load(b, nir_iadd_imm(b, leaf_addr, 64), 4, 4, 32);
    defs->instance_id = nir_channel(b, data, 2);
    defs->instance_index = nir_channel(b, data, 3);
@@ -872,19 +872,19 @@ brw_nir_rt_load_bvh_instance_leaf(nir_builder *b,
 }
 
 struct brw_nir_rt_bvh_primitive_leaf_defs {
-   nir_ssa_def *shader_index;
-   nir_ssa_def *geom_mask;
-   nir_ssa_def *geom_index;
-   nir_ssa_def *type;
-   nir_ssa_def *geom_flags;
+   nir_def *shader_index;
+   nir_def *geom_mask;
+   nir_def *geom_index;
+   nir_def *type;
+   nir_def *geom_flags;
 };
 
 static inline void
 brw_nir_rt_load_bvh_primitive_leaf(nir_builder *b,
                                    struct brw_nir_rt_bvh_primitive_leaf_defs *defs,
-                                   nir_ssa_def *leaf_addr)
+                                   nir_def *leaf_addr)
 {
-   nir_ssa_def *desc = brw_nir_rt_load(b, leaf_addr, 4, 2, 32);
+   nir_def *desc = brw_nir_rt_load(b, leaf_addr, 4, 2, 32);
 
    defs->shader_index =
       nir_ubitfield_extract(b, nir_channel(b, desc, 0),
@@ -905,13 +905,13 @@ brw_nir_rt_load_bvh_primitive_leaf(nir_builder *b,
 }
 
 struct brw_nir_rt_bvh_primitive_leaf_positions_defs {
-   nir_ssa_def *positions[3];
+   nir_def *positions[3];
 };
 
 static inline void
 brw_nir_rt_load_bvh_primitive_leaf_positions(nir_builder *b,
                                              struct brw_nir_rt_bvh_primitive_leaf_positions_defs *defs,
-                                             nir_ssa_def *leaf_addr)
+                                             nir_def *leaf_addr)
 {
    for (unsigned i = 0; i < ARRAY_SIZE(defs->positions); i++) {
       defs->positions[i] =
@@ -919,9 +919,9 @@ brw_nir_rt_load_bvh_primitive_leaf_positions(nir_builder *b,
    }
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_load_primitive_id_from_hit(nir_builder *b,
-                                      nir_ssa_def *is_procedural,
+                                      nir_def *is_procedural,
                                       const struct brw_nir_rt_mem_hit_defs *defs)
 {
    if (!is_procedural) {
@@ -930,11 +930,11 @@ brw_nir_rt_load_primitive_id_from_hit(nir_builder *b,
                         BRW_RT_BVH_NODE_TYPE_PROCEDURAL);
    }
 
-   nir_ssa_def *prim_id_proc, *prim_id_quad;
+   nir_def *prim_id_proc, *prim_id_quad;
    nir_push_if(b, is_procedural);
    {
       /* For procedural leaves, the index is in dw[3]. */
-      nir_ssa_def *offset =
+      nir_def *offset =
          nir_iadd_imm(b, nir_ishl_imm(b, defs->prim_leaf_index, 2), 12);
       prim_id_proc = nir_load_global(b, nir_iadd(b, defs->prim_leaf_ptr,
                                                  nir_u2u64(b, offset)),
@@ -956,9 +956,9 @@ brw_nir_rt_load_primitive_id_from_hit(nir_builder *b,
    return nir_if_phi(b, prim_id_proc, prim_id_quad);
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 brw_nir_rt_acceleration_structure_to_root_node(nir_builder *b,
-                                               nir_ssa_def *as_addr)
+                                               nir_def *as_addr)
 {
    /* The HW memory structure in which we specify what acceleration structure
     * to traverse takes the address of the root node in the acceleration
@@ -972,7 +972,7 @@ brw_nir_rt_acceleration_structure_to_root_node(nir_builder *b,
     * TODO: we could optimize this by assuming that for a given version of the
     * BVH, we can find the root node at a given offset.
     */
-   nir_ssa_def *root_node_ptr, *null_node_ptr;
+   nir_def *root_node_ptr, *null_node_ptr;
    nir_push_if(b, nir_ieq_imm(b, as_addr, 0));
    {
       null_node_ptr = nir_imm_int64(b, 0);
index 4073bf6..11facb5 100644
@@ -72,7 +72,7 @@
  * (TF[INSIDE_*] == 1.0) to (TF[INSIDE_*] <= 1.0).
  */
 
-static inline nir_ssa_def *
+static inline nir_def *
 load_output(nir_builder *b, int num_components, int offset, int component)
 {
    return nir_load_output(b, num_components, 32, nir_imm_int(b, 0),
@@ -85,10 +85,10 @@ emit_quads_workaround(nir_builder *b, nir_block *block)
 {
    b->cursor = nir_after_block_before_jump(block);
 
-   nir_ssa_def *inner = load_output(b, 2, 0, 2);
-   nir_ssa_def *outer = load_output(b, 4, 1, 0);
+   nir_def *inner = load_output(b, 2, 0, 2);
+   nir_def *outer = load_output(b, 4, 1, 0);
 
-   nir_ssa_def *any_greater_than_1 =
+   nir_def *any_greater_than_1 =
        nir_ior(b, nir_bany(b, nir_fgt_imm(b, outer, 1.0f)),
                   nir_bany(b, nir_fgt_imm(b, inner, 1.0f)));
 
index a13c322..cc99506 100644
@@ -306,7 +306,7 @@ public:
    virtual void nir_emit_alu(nir_alu_instr *instr);
    virtual void nir_emit_jump(nir_jump_instr *instr);
    virtual void nir_emit_texture(nir_tex_instr *instr);
-   virtual void nir_emit_undef(nir_ssa_undef_instr *instr);
+   virtual void nir_emit_undef(nir_undef_instr *instr);
    virtual void nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr);
 
    dst_reg get_nir_dest(const nir_dest &dest, enum brw_reg_type type);
index f4dd649..10c0842 100644
@@ -164,7 +164,7 @@ vec4_visitor::nir_emit_instr(nir_instr *instr)
 }
 
 static dst_reg
-dst_reg_for_nir_reg(vec4_visitor *v, nir_ssa_def *handle,
+dst_reg_for_nir_reg(vec4_visitor *v, nir_def *handle,
                     unsigned base_offset, nir_src *indirect)
 {
    nir_intrinsic_instr *decl = nir_reg_get_decl(handle);
@@ -2227,7 +2227,7 @@ vec4_visitor::emit_mcs_fetch(const glsl_type *coordinate_type,
 }
 
 void
-vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
+vec4_visitor::nir_emit_undef(nir_undef_instr *instr)
 {
    nir_ssa_values[instr->def.index] =
       dst_reg(VGRF, alloc.allocate(DIV_ROUND_UP(instr->def.bit_size, 32)));
index e7e33db..92a64db 100644
@@ -60,7 +60,7 @@ lower_vulkan_descriptors_instr(nir_builder *b, nir_instr *instr, void *cb_data)
    uint32_t binding = nir_intrinsic_binding(res_index_intrin);
    assert(binding < bind_map->num_bindings);
 
-   nir_ssa_def *desc_value = NULL;
+   nir_def *desc_value = NULL;
    if (bind_map->bindings[binding].push_constant) {
       desc_value =
          nir_vec2(b,
@@ -93,7 +93,7 @@ lower_vulkan_descriptors_instr(nir_builder *b, nir_instr *instr, void *cb_data)
                   nir_imm_int(b, 0));
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc_value);
+   nir_def_rewrite_uses(&intrin->dest.ssa, desc_value);
 
    return true;
 }
@@ -121,7 +121,7 @@ lower_base_workgroup_id(nir_builder *b, nir_instr *instr, UNUSED void *data)
       return false;
 
    b->cursor = nir_instr_remove(&intrin->instr);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_imm_zero(b, 3, 32));
+   nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_zero(b, 3, 32));
    return true;
 }
 
@@ -137,7 +137,7 @@ lower_load_ubo_to_uniforms(nir_builder *b, nir_instr *instr, void *cb_data)
 
    b->cursor = nir_instr_remove(instr);
 
-   nir_ssa_def_rewrite_uses(
+   nir_def_rewrite_uses(
       &intrin->dest.ssa,
       nir_load_uniform(b,
                        intrin->dest.ssa.num_components,
index c1a4a30..08d43ba 100644
@@ -158,11 +158,11 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
    /* wait for all subgroups to finish */
    nir_barrier(&b, SCOPE_WORKGROUP);
 
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
+   nir_def *zero = nir_imm_int(&b, 0);
 
-   nir_ssa_def *local_invocation_index = nir_load_local_invocation_index(&b);
+   nir_def *local_invocation_index = nir_load_local_invocation_index(&b);
 
-   nir_ssa_def *cmp = nir_ieq(&b, local_invocation_index, zero);
+   nir_def *cmp = nir_ieq(&b, local_invocation_index, zero);
    nir_if *if_stmt = nir_push_if(&b, cmp);
    {
       nir_variable *primitive_count_var = NULL;
@@ -283,7 +283,7 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
          mapping[location].per_prim_deref = nir_build_deref_var(&b, var);
       }
 
-      nir_ssa_def *trueconst = nir_imm_true(&b);
+      nir_def *trueconst = nir_imm_true(&b);
 
       /*
        * for each Primitive (0 : primitiveCount)
@@ -300,7 +300,7 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
        */
 
       /* primitive count */
-      nir_ssa_def *primitive_count = nir_load_var(&b, primitive_count_var);
+      nir_def *primitive_count = nir_load_var(&b, primitive_count_var);
 
       /* primitive index */
       nir_variable *primitive_var =
@@ -332,8 +332,8 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
 
       nir_loop *loop = nir_push_loop(&b);
       {
-         nir_ssa_def *primitive = nir_load_deref(&b, primitive_deref);
-         nir_ssa_def *cmp = nir_ige(&b, primitive, primitive_count);
+         nir_def *primitive = nir_load_deref(&b, primitive_deref);
+         nir_def *cmp = nir_ige(&b, primitive, primitive_count);
 
          nir_if *loop_check = nir_push_if(&b, cmp);
          nir_jump(&b, nir_jump_break);
@@ -342,19 +342,19 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
          nir_deref_instr *primitive_indices_deref =
                nir_build_deref_var(&b, primitive_indices_var);
          nir_deref_instr *indexed_primitive_indices_deref;
-         nir_ssa_def *src_vertex;
-         nir_ssa_def *prim_indices;
+         nir_def *src_vertex;
+         nir_def *prim_indices;
 
          /* array of vectors; we have to extract the index out of the array deref */
          indexed_primitive_indices_deref = nir_build_deref_array(&b, primitive_indices_deref, primitive);
          prim_indices = nir_load_deref(&b, indexed_primitive_indices_deref);
          src_vertex = nir_channel(&b, prim_indices, provoking_vertex);
 
-         nir_ssa_def *dst_vertex = nir_load_deref(&b, vertex_deref);
+         nir_def *dst_vertex = nir_load_deref(&b, vertex_deref);
 
          nir_deref_instr *indexed_used_vertex_deref =
                         nir_build_deref_array(&b, used_vertex_deref, src_vertex);
-         nir_ssa_def *used_vertex = nir_load_deref(&b, indexed_used_vertex_deref);
+         nir_def *used_vertex = nir_load_deref(&b, indexed_used_vertex_deref);
          if (!dup_vertices)
             used_vertex = nir_imm_false(&b);
 
@@ -369,7 +369,7 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
             }
 
             /* replace one component of primitive indices vector */
-            nir_ssa_def *new_val =
+            nir_def *new_val =
                   nir_vector_insert_imm(&b, prim_indices, dst_vertex, provoking_vertex);
 
             /* and store complete vector */
@@ -452,7 +452,7 @@ anv_frag_update_derefs_instr(struct nir_builder *b, nir_instr *instr, void *data
       return false;
 
    nir_instr_remove(&deref->instr);
-   nir_ssa_def_rewrite_uses(&deref->dest.ssa, &new_derefs[location]->dest.ssa);
+   nir_def_rewrite_uses(&deref->dest.ssa, &new_derefs[location]->dest.ssa);
 
    return true;
 }
index 3e7cd59..487ecc6 100644
@@ -305,18 +305,18 @@ nir_deref_find_descriptor(nir_deref_instr *deref,
    return find_descriptor_for_index_src(intrin->src[0], state);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_load_descriptor_mem(nir_builder *b,
-                          nir_ssa_def *desc_addr, unsigned desc_offset,
+                          nir_def *desc_addr, unsigned desc_offset,
                           unsigned num_components, unsigned bit_size,
                           const struct apply_pipeline_layout_state *state)
 
 {
    switch (state->desc_addr_format) {
    case nir_address_format_64bit_global_32bit_offset: {
-      nir_ssa_def *base_addr =
+      nir_def *base_addr =
          nir_pack_64_2x32(b, nir_trim_vector(b, desc_addr, 2));
-      nir_ssa_def *offset32 =
+      nir_def *offset32 =
          nir_iadd_imm(b, nir_channel(b, desc_addr, 3), desc_offset);
 
       return nir_load_global_constant_offset(b, num_components, bit_size,
@@ -326,8 +326,8 @@ build_load_descriptor_mem(nir_builder *b,
    }
 
    case nir_address_format_32bit_index_offset: {
-      nir_ssa_def *surface_index = nir_channel(b, desc_addr, 0);
-      nir_ssa_def *offset32 =
+      nir_def *surface_index = nir_channel(b, desc_addr, 0);
+      nir_def *offset32 =
          nir_iadd_imm(b, nir_channel(b, desc_addr, 1), desc_offset);
 
       return nir_load_ubo(b, num_components, bit_size,
@@ -354,21 +354,21 @@ build_load_descriptor_mem(nir_builder *b,
  * Fortunately we can reuse the Auxiliary surface address field to stash our
  * buffer size and just load a vec4.
  */
-static nir_ssa_def *
+static nir_def *
 build_load_render_surface_state_address(nir_builder *b,
-                                        nir_ssa_def *desc_addr,
+                                        nir_def *desc_addr,
                                         struct apply_pipeline_layout_state *state)
 
 {
    const struct intel_device_info *devinfo = &state->pdevice->info;
 
-   nir_ssa_def *surface_addr =
+   nir_def *surface_addr =
       build_load_descriptor_mem(b, desc_addr,
                                 RENDER_SURFACE_STATE_SurfaceBaseAddress_start(devinfo) / 8,
                                 4, 32, state);
-   nir_ssa_def *addr_ldw = nir_channel(b, surface_addr, 0);
-   nir_ssa_def *addr_udw = nir_channel(b, surface_addr, 1);
-   nir_ssa_def *length = nir_channel(b, surface_addr, 3);
+   nir_def *addr_ldw = nir_channel(b, surface_addr, 0);
+   nir_def *addr_udw = nir_channel(b, surface_addr, 1);
+   nir_def *length = nir_channel(b, surface_addr, 3);
 
    return nir_vec4(b, addr_ldw, addr_udw, length, nir_imm_int(b, 0));
 }
@@ -380,10 +380,10 @@ build_load_render_surface_state_address(nir_builder *b,
  *
  * This is necessary for VK_EXT_image_sliced_view_of_3d.
  */
-static nir_ssa_def *
+static nir_def *
 build_load_storage_3d_image_depth(nir_builder *b,
-                                  nir_ssa_def *desc_addr,
-                                  nir_ssa_def *resinfo_depth,
+                                  nir_def *desc_addr,
+                                  nir_def *resinfo_depth,
                                   struct apply_pipeline_layout_state *state)
 
 {
@@ -395,11 +395,11 @@ build_load_storage_3d_image_depth(nir_builder *b,
          offsetof(struct anv_storage_image_descriptor, image_depth),
          1, 32, state);
    } else {
-      nir_ssa_def *data = build_load_descriptor_mem(
+      nir_def *data = build_load_descriptor_mem(
          b, desc_addr,
          RENDER_SURFACE_STATE_RenderTargetViewExtent_start(devinfo) / 8,
          1, 32, state);
-      nir_ssa_def *depth =
+      nir_def *depth =
          nir_ushr_imm(
             b, data,
             RENDER_SURFACE_STATE_RenderTargetViewExtent_start(devinfo) % 32);
@@ -438,10 +438,10 @@ build_load_storage_3d_image_depth(nir_builder *b,
  * The load_vulkan_descriptor intrinsic exists to provide a transition point
  * between these two forms of derefs: descriptor and memory.
  */
-static nir_ssa_def *
+static nir_def *
 build_res_index(nir_builder *b,
                 uint32_t set, uint32_t binding,
-                nir_ssa_def *array_index,
+                nir_def *array_index,
                 struct apply_pipeline_layout_state *state)
 {
    const struct anv_descriptor_set_binding_layout *bind_layout =
@@ -472,10 +472,10 @@ build_res_index(nir_builder *b,
    }
 
    assert(bind_layout->dynamic_offset_index < MAX_DYNAMIC_BUFFERS);
-      nir_ssa_def *dynamic_offset_index;
+      nir_def *dynamic_offset_index;
       if (bind_layout->dynamic_offset_index >= 0) {
          if (state->has_independent_sets) {
-            nir_ssa_def *dynamic_offset_start =
+            nir_def *dynamic_offset_start =
                nir_load_desc_set_dynamic_index_intel(b, nir_imm_int(b, set));
             dynamic_offset_index =
                nir_iadd_imm(b, dynamic_offset_start,
@@ -494,7 +494,7 @@ build_res_index(nir_builder *b,
    assert(bind_layout->descriptor_stride % 8 == 0);
    const uint32_t desc_stride = bind_layout->descriptor_stride / 8;
 
-      nir_ssa_def *packed =
+      nir_def *packed =
          nir_ior_imm(b,
                      dynamic_offset_index,
                      (desc_stride << 24) |
@@ -509,20 +509,20 @@ build_res_index(nir_builder *b,
 }
 
 struct res_index_defs {
-   nir_ssa_def *bti_idx;
-   nir_ssa_def *set_idx;
-   nir_ssa_def *dyn_offset_base;
-   nir_ssa_def *desc_offset_base;
-   nir_ssa_def *array_index;
-   nir_ssa_def *desc_stride;
+   nir_def *bti_idx;
+   nir_def *set_idx;
+   nir_def *dyn_offset_base;
+   nir_def *desc_offset_base;
+   nir_def *array_index;
+   nir_def *desc_stride;
 };
 
 static struct res_index_defs
-unpack_res_index(nir_builder *b, nir_ssa_def *index)
+unpack_res_index(nir_builder *b, nir_def *index)
 {
    struct res_index_defs defs;
 
-   nir_ssa_def *packed = nir_channel(b, index, 0);
+   nir_def *packed = nir_channel(b, index, 0);
    defs.desc_stride =
       nir_imul_imm(b, nir_extract_u8(b, packed, nir_imm_int(b, 3)), 8);
    defs.bti_idx = nir_extract_u8(b, packed, nir_imm_int(b, 2));
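
For orientation, the two fields visible in this hunk decode as plain byte extracts; a sketch assuming, per the pack site above (which stores descriptor_stride / 8 at bit 24), that the stride needs the *8:

#include <stdint.h>

/* Illustrative only: matches nir_extract_u8(packed, 3) * 8 and
 * nir_extract_u8(packed, 2); the remaining fields are cut off by the hunk.
 */
static inline void
unpack_res_index_fields(uint32_t packed,
                        uint32_t *desc_stride, uint32_t *bti_idx)
{
   *desc_stride = ((packed >> 24) & 0xff) * 8;
   *bti_idx = (packed >> 16) & 0xff;
}
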
@@ -560,8 +560,8 @@ is_binding_bindless(unsigned set, unsigned binding, bool sampler,
  * vulkan_resource_index intrinsic and we have to do it based on nothing but
  * the address format.
  */
-static nir_ssa_def *
-build_res_reindex(nir_builder *b, nir_ssa_def *orig, nir_ssa_def *delta)
+static nir_def *
+build_res_reindex(nir_builder *b, nir_def *orig, nir_def *delta)
 {
    return nir_vec4(b, nir_channel(b, orig, 0),
                       nir_channel(b, orig, 1),
@@ -579,15 +579,15 @@ build_res_reindex(nir_builder *b, nir_ssa_def *orig, nir_ssa_def *delta)
  * determine the descriptor stride for array descriptors.  The bind_layout is
  * optional for buffer descriptor types.
  */
-static nir_ssa_def *
+static nir_def *
 build_desc_addr_for_res_index(nir_builder *b,
                               const VkDescriptorType desc_type,
-                              nir_ssa_def *index, nir_address_format addr_format,
+                              nir_def *index, nir_address_format addr_format,
                               struct apply_pipeline_layout_state *state)
 {
    struct res_index_defs res = unpack_res_index(b, index);
 
-   nir_ssa_def *desc_offset = res.desc_offset_base;
+   nir_def *desc_offset = res.desc_offset_base;
    if (desc_type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
       /* Compute the actual descriptor offset.  For inline uniform blocks,
        * the array index is ignored as they are only allowed to be a single
@@ -603,7 +603,7 @@ build_desc_addr_for_res_index(nir_builder *b,
    case nir_address_format_64bit_bounded_global: {
       switch (state->desc_addr_format) {
       case nir_address_format_64bit_global_32bit_offset: {
-         nir_ssa_def *base_addr =
+         nir_def *base_addr =
             nir_load_desc_set_address_intel(b, res.set_idx);
          return nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_addr),
                             nir_unpack_64_2x32_split_y(b, base_addr),
@@ -629,10 +629,10 @@ build_desc_addr_for_res_index(nir_builder *b,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 build_desc_addr_for_binding(nir_builder *b,
                             unsigned set, unsigned binding,
-                            nir_ssa_def *array_index,
+                            nir_def *array_index,
                             const struct apply_pipeline_layout_state *state)
 {
    const struct anv_descriptor_set_binding_layout *bind_layout =
@@ -641,8 +641,8 @@ build_desc_addr_for_binding(nir_builder *b,
    switch (state->desc_addr_format) {
    case nir_address_format_64bit_global_32bit_offset:
    case nir_address_format_64bit_bounded_global: {
-      nir_ssa_def *set_addr = nir_load_desc_set_address_intel(b, nir_imm_int(b, set));
-      nir_ssa_def *desc_offset =
+      nir_def *set_addr = nir_load_desc_set_address_intel(b, nir_imm_int(b, set));
+      nir_def *desc_offset =
          nir_iadd_imm(b,
                       nir_imul_imm(b,
                                    array_index,
@@ -669,10 +669,10 @@ build_desc_addr_for_binding(nir_builder *b,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 build_surface_index_for_binding(nir_builder *b,
                                 unsigned set, unsigned binding,
-                                nir_ssa_def *array_index,
+                                nir_def *array_index,
                                 unsigned plane,
                                 bool non_uniform,
                                 const struct apply_pipeline_layout_state *state)
@@ -682,12 +682,12 @@ build_surface_index_for_binding(nir_builder *b,
    const bool is_bindless =
       is_binding_bindless(set, binding, false /* sampler */, state);
 
-   nir_ssa_def *set_offset, *surface_index;
+   nir_def *set_offset, *surface_index;
    if (is_bindless) {
       if (state->layout->type == ANV_PIPELINE_DESCRIPTOR_SET_LAYOUT_TYPE_INDIRECT) {
          set_offset = nir_imm_int(b, 0xdeaddead);
 
-         nir_ssa_def *desc_addr =
+         nir_def *desc_addr =
             build_desc_addr_for_binding(b, set, binding, array_index, state);
 
          surface_index =
@@ -744,17 +744,17 @@ build_surface_index_for_binding(nir_builder *b,
                                   BINDING_PROPERTY_PUSHABLE) ? nir_resource_intel_pushable : 0));
 }
 
-static nir_ssa_def *
+static nir_def *
 build_sampler_handle_for_binding(nir_builder *b,
                                  unsigned set, unsigned binding,
-                                 nir_ssa_def *array_index,
+                                 nir_def *array_index,
                                  unsigned plane,
                                  bool non_uniform,
                                  const struct apply_pipeline_layout_state *state)
 {
    const bool is_bindless =
       is_binding_bindless(set, binding, true /* sampler */, state);
-   nir_ssa_def *set_offset, *sampler_index;
+   nir_def *set_offset, *sampler_index;
 
    if (is_bindless) {
       const struct anv_descriptor_set_binding_layout *bind_layout =
@@ -763,13 +763,13 @@ build_sampler_handle_for_binding(nir_builder *b,
       if (state->layout->type == ANV_PIPELINE_DESCRIPTOR_SET_LAYOUT_TYPE_INDIRECT) {
          set_offset = nir_imm_int(b, 0xdeaddead);
 
-         nir_ssa_def *desc_addr =
+         nir_def *desc_addr =
             build_desc_addr_for_binding(b, set, binding, array_index, state);
 
          /* This is anv_sampled_image_descriptor; the sampler handle is always
           * in component 1.
           */
-         nir_ssa_def *desc_data =
+         nir_def *desc_data =
             build_load_descriptor_mem(b, desc_addr, 0, 2, 32, state);
 
          sampler_index = nir_channel(b, desc_data, 1);
@@ -817,15 +817,15 @@ build_sampler_handle_for_binding(nir_builder *b,
                                 nir_resource_intel_sampler);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_buffer_dynamic_offset_for_res_index(nir_builder *b,
-                                          nir_ssa_def *dyn_offset_base,
-                                          nir_ssa_def *array_index,
+                                          nir_def *dyn_offset_base,
+                                          nir_def *array_index,
                                           struct apply_pipeline_layout_state *state)
 {
-   nir_ssa_def *dyn_offset_idx = nir_iadd(b, dyn_offset_base, array_index);
+   nir_def *dyn_offset_idx = nir_iadd(b, dyn_offset_base, array_index);
 
-   nir_ssa_def *dyn_load =
+   nir_def *dyn_load =
       nir_load_push_constant(b, 1, 32, nir_imul_imm(b, dyn_offset_idx, 4),
                              .base = offsetof(struct anv_push_constants, dynamic_offsets),
                              .range = sizeof_field(struct anv_push_constants, dynamic_offsets));
@@ -841,10 +841,10 @@ build_buffer_dynamic_offset_for_res_index(nir_builder *b,
  *
  * See build_res_index for details about each resource index format.
  */
-static nir_ssa_def *
+static nir_def *
 build_indirect_buffer_addr_for_res_index(nir_builder *b,
                                          const VkDescriptorType desc_type,
-                                         nir_ssa_def *res_index,
+                                         nir_def *res_index,
                                          nir_address_format addr_format,
                                          struct apply_pipeline_layout_state *state)
 {
@@ -859,33 +859,33 @@ build_indirect_buffer_addr_for_res_index(nir_builder *b,
                          nir_imm_int(b, 0));
    }
 
-   nir_ssa_def *desc_addr =
+   nir_def *desc_addr =
       build_desc_addr_for_res_index(b, desc_type, res_index,
                                     addr_format, state);
 
-   nir_ssa_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 4, 32, state);
+   nir_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 4, 32, state);
 
    if (state->has_dynamic_buffers) {
      /* This shader has dynamic offsets and we have no way of knowing
       * (aside from the dynamic offset base index) if this buffer has a
        * dynamic offset.
        */
-      nir_ssa_def *dyn_offset_idx =
+      nir_def *dyn_offset_idx =
          nir_iadd(b, res.dyn_offset_base, res.array_index);
 
-      nir_ssa_def *dyn_load =
+      nir_def *dyn_load =
          nir_load_push_constant(b, 1, 32, nir_imul_imm(b, dyn_offset_idx, 4),
                                 .base = offsetof(struct anv_push_constants, dynamic_offsets),
                                 .range = MAX_DYNAMIC_BUFFERS * 4);
 
-      nir_ssa_def *dynamic_offset =
+      nir_def *dynamic_offset =
          nir_bcsel(b, nir_ieq_imm(b, res.dyn_offset_base, 0xff),
                       nir_imm_int(b, 0), dyn_load);
 
       /* The dynamic offset gets added to the base pointer so that we
        * have a sliding window range.
        */
-      nir_ssa_def *base_ptr =
+      nir_def *base_ptr =
          nir_pack_64_2x32(b, nir_trim_vector(b, desc, 2));
       base_ptr = nir_iadd(b, base_ptr, nir_u2u64(b, dynamic_offset));
       desc = nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_ptr),
@@ -904,10 +904,10 @@ build_indirect_buffer_addr_for_res_index(nir_builder *b,
                       nir_imm_int(b, 0));
 }
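
The "sliding window" comment above amounts to: only the 64-bit base of the (base, range) pair moves. A hedged sketch; the handling of the remaining descriptor channels is cut off by the hunk, so only the base adjustment is modeled:

#include <stdint.h>

struct addr_range {
   uint64_t base; /* 64-bit base pointer packed in desc channels 0-1 */
   uint32_t size; /* range; assumed untouched by the dynamic offset */
};

/* Sketch only: adding the dynamic offset slides the window's start. */
static inline struct addr_range
apply_dynamic_offset(struct addr_range desc, uint32_t dynamic_offset)
{
   desc.base += (uint64_t)dynamic_offset;
   return desc;
}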
 
-static nir_ssa_def *
+static nir_def *
 build_direct_buffer_addr_for_res_index(nir_builder *b,
                                        const VkDescriptorType desc_type,
-                                       nir_ssa_def *res_index,
+                                       nir_def *res_index,
                                        nir_address_format addr_format,
                                        struct apply_pipeline_layout_state *state)
 {
@@ -923,11 +923,11 @@ build_direct_buffer_addr_for_res_index(nir_builder *b,
                       nir_imm_int(b, 0));
    }
 
-   nir_ssa_def *desc_addr =
+   nir_def *desc_addr =
       build_desc_addr_for_res_index(b, desc_type, res_index,
                                     addr_format, state);
 
-   nir_ssa_def *addr =
+   nir_def *addr =
       build_load_render_surface_state_address(b, desc_addr, state);
 
    if (state->has_dynamic_buffers) {
@@ -937,14 +937,14 @@ build_direct_buffer_addr_for_res_index(nir_builder *b,
        * from the dynamic offset base index) if this buffer has a dynamic
        * offset.
        */
-      nir_ssa_def *dynamic_offset =
+      nir_def *dynamic_offset =
          build_buffer_dynamic_offset_for_res_index(
             b, res.dyn_offset_base, res.array_index, state);
 
       /* The dynamic offset gets added to the base pointer so that we
        * have a sliding window range.
        */
-      nir_ssa_def *base_ptr =
+      nir_def *base_ptr =
          nir_pack_64_2x32(b, nir_trim_vector(b, addr, 2));
       base_ptr = nir_iadd(b, base_ptr, nir_u2u64(b, dynamic_offset));
       addr = nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_ptr),
@@ -963,10 +963,10 @@ build_direct_buffer_addr_for_res_index(nir_builder *b,
                       nir_imm_int(b, 0));
 }
 
-static nir_ssa_def *
+static nir_def *
 build_buffer_addr_for_res_index(nir_builder *b,
                                 const VkDescriptorType desc_type,
-                                nir_ssa_def *res_index,
+                                nir_def *res_index,
                                 nir_address_format addr_format,
                                 struct apply_pipeline_layout_state *state)
 {
@@ -976,12 +976,12 @@ build_buffer_addr_for_res_index(nir_builder *b,
       return build_direct_buffer_addr_for_res_index(b, desc_type, res_index, addr_format, state);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_buffer_addr_for_binding(nir_builder *b,
                               const VkDescriptorType desc_type,
                               unsigned set,
                               unsigned binding,
-                              nir_ssa_def *res_index,
+                              nir_def *res_index,
                               nir_address_format addr_format,
                               struct apply_pipeline_layout_state *state)
 {
@@ -1011,7 +1011,7 @@ build_buffer_addr_for_binding(nir_builder *b,
  * The deref chain has to terminate at a variable with a descriptor_set and
  * binding set.  This is used for images, textures, and samplers.
  */
-static nir_ssa_def *
+static nir_def *
 build_load_var_deref_surface_handle(nir_builder *b, nir_deref_instr *deref,
                                     bool non_uniform,
                                     bool *out_is_bindless,
@@ -1025,7 +1025,7 @@ build_load_var_deref_surface_handle(nir_builder *b, nir_deref_instr *deref,
    *out_is_bindless =
       is_binding_bindless(set, binding, false /* sampler */, state);
 
-   nir_ssa_def *array_index;
+   nir_def *array_index;
    if (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
       assert(nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var);
@@ -1045,7 +1045,7 @@ build_load_var_deref_surface_handle(nir_builder *b, nir_deref_instr *deref,
  * hopes of better CSE.  This means the cursor is not where you left it when
  * this function returns.
  */
-static nir_ssa_def *
+static nir_def *
 build_res_index_for_chain(nir_builder *b, nir_intrinsic_instr *intrin,
                           nir_address_format addr_format,
                           uint32_t *set, uint32_t *binding,
@@ -1059,7 +1059,7 @@ build_res_index_for_chain(nir_builder *b, nir_intrinsic_instr *intrin,
    } else {
       assert(intrin->intrinsic == nir_intrinsic_vulkan_resource_reindex);
       nir_intrinsic_instr *parent = nir_src_as_intrinsic(intrin->src[0]);
-      nir_ssa_def *index =
+      nir_def *index =
          build_res_index_for_chain(b, parent, addr_format,
                                    set, binding, state);
 
@@ -1073,14 +1073,14 @@ build_res_index_for_chain(nir_builder *b, nir_intrinsic_instr *intrin,
  *
  * The cursor is not where you left it when this function returns.
  */
-static nir_ssa_def *
+static nir_def *
 build_buffer_addr_for_idx_intrin(nir_builder *b,
                                  nir_intrinsic_instr *idx_intrin,
                                  nir_address_format addr_format,
                                  struct apply_pipeline_layout_state *state)
 {
    uint32_t set = UINT32_MAX, binding = UINT32_MAX;
-   nir_ssa_def *res_index =
+   nir_def *res_index =
       build_res_index_for_chain(b, idx_intrin, addr_format,
                                 &set, &binding, state);
 
@@ -1099,14 +1099,14 @@ build_buffer_addr_for_idx_intrin(nir_builder *b,
  *
  * The cursor is not where you left it when this function returns.
  */
-static nir_ssa_def *
+static nir_def *
 build_buffer_addr_for_deref(nir_builder *b, nir_deref_instr *deref,
                             nir_address_format addr_format,
                             struct apply_pipeline_layout_state *state)
 {
    nir_deref_instr *parent = nir_deref_instr_parent(deref);
    if (parent) {
-      nir_ssa_def *addr =
+      nir_def *addr =
          build_buffer_addr_for_deref(b, parent, addr_format, state);
 
       b->cursor = nir_before_instr(&deref->instr);
@@ -1205,7 +1205,7 @@ try_lower_direct_buffer_intrinsic(nir_builder *b,
        !descriptor_has_bti(desc, state))
       return false;
 
-   nir_ssa_def *addr =
+   nir_def *addr =
       build_buffer_addr_for_deref(b, deref, addr_format, state);
 
    b->cursor = nir_before_instr(&intrin->instr);
@@ -1231,22 +1231,22 @@ lower_load_accel_struct_desc(nir_builder *b,
       nir_address_format_64bit_bounded_global;
 
    uint32_t set = UINT32_MAX, binding = UINT32_MAX;
-   nir_ssa_def *res_index =
+   nir_def *res_index =
       build_res_index_for_chain(b, idx_intrin, addr_format,
                                 &set, &binding, state);
 
    b->cursor = nir_before_instr(&load_desc->instr);
 
    struct res_index_defs res = unpack_res_index(b, res_index);
-   nir_ssa_def *desc_addr =
+   nir_def *desc_addr =
       build_desc_addr_for_binding(b, set, binding, res.array_index, state);
 
    /* Acceleration structure descriptors are always uint64_t */
-   nir_ssa_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state);
+   nir_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state);
 
    assert(load_desc->dest.ssa.bit_size == 64);
    assert(load_desc->dest.ssa.num_components == 1);
-   nir_ssa_def_rewrite_uses(&load_desc->dest.ssa, desc);
+   nir_def_rewrite_uses(&load_desc->dest.ssa, desc);
    nir_instr_remove(&load_desc->instr);
 
    return true;
@@ -1286,13 +1286,13 @@ lower_direct_buffer_instr(nir_builder *b, nir_instr *instr, void *_state)
       b->cursor = nir_before_instr(&intrin->instr);
 
       uint32_t set = UINT32_MAX, binding = UINT32_MAX;
-      nir_ssa_def *res_index =
+      nir_def *res_index =
          build_res_index_for_chain(b, idx_intrin, addr_format,
                                    &set, &binding, state);
 
       bool non_uniform = nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM;
 
-      nir_ssa_def *surface_index =
+      nir_def *surface_index =
          build_surface_index_for_binding(b, set, binding,
                                          nir_channel(b, res_index, 3),
                                          0 /* plane */,
@@ -1322,7 +1322,7 @@ lower_res_index_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 {
    b->cursor = nir_before_instr(&intrin->instr);
 
-   nir_ssa_def *index =
+   nir_def *index =
       build_res_index(b, nir_intrinsic_desc_set(intrin),
                          nir_intrinsic_binding(intrin),
                          intrin->src[0].ssa,
@@ -1330,7 +1330,7 @@ lower_res_index_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 
    assert(intrin->dest.ssa.bit_size == index->bit_size);
    assert(intrin->dest.ssa.num_components == index->num_components);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, index);
+   nir_def_rewrite_uses(&intrin->dest.ssa, index);
    nir_instr_remove(&intrin->instr);
 
    return true;
@@ -1342,13 +1342,13 @@ lower_res_reindex_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 {
    b->cursor = nir_before_instr(&intrin->instr);
 
-   nir_ssa_def *index =
+   nir_def *index =
       build_res_reindex(b, intrin->src[0].ssa,
                            intrin->src[1].ssa);
 
    assert(intrin->dest.ssa.bit_size == index->bit_size);
    assert(intrin->dest.ssa.num_components == index->num_components);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, index);
+   nir_def_rewrite_uses(&intrin->dest.ssa, index);
    nir_instr_remove(&intrin->instr);
 
    return true;
@@ -1363,14 +1363,14 @@ lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin,
    const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
    nir_address_format addr_format = addr_format_for_desc_type(desc_type, state);
 
-   nir_ssa_def *desc =
+   nir_def *desc =
       build_buffer_addr_for_res_index(b,
                                       desc_type, intrin->src[0].ssa,
                                       addr_format, state);
 
    assert(intrin->dest.ssa.bit_size == desc->bit_size);
    assert(intrin->dest.ssa.num_components == desc->num_components);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
+   nir_def_rewrite_uses(&intrin->dest.ssa, desc);
    nir_instr_remove(&intrin->instr);
 
    return true;
@@ -1388,7 +1388,7 @@ lower_get_ssbo_size(nir_builder *b, nir_intrinsic_instr *intrin,
    const nir_address_format addr_format =
       nir_address_format_64bit_bounded_global;
 
-   nir_ssa_def *desc_addr =
+   nir_def *desc_addr =
       nir_build_addr_iadd_imm(
          b,
          build_desc_addr_for_res_index(b,
@@ -1399,7 +1399,7 @@ lower_get_ssbo_size(nir_builder *b, nir_intrinsic_instr *intrin,
          nir_var_mem_ssbo,
          state->pdevice->isl_dev.ss.size);
 
-   nir_ssa_def *desc_range;
+   nir_def *desc_range;
    if (state->layout->type == ANV_PIPELINE_DESCRIPTOR_SET_LAYOUT_TYPE_INDIRECT) {
       /* Load the anv_address_range_descriptor */
       desc_range =
@@ -1412,8 +1412,8 @@ lower_get_ssbo_size(nir_builder *b, nir_intrinsic_instr *intrin,
          build_load_render_surface_state_address(b, desc_addr, state);
    }
 
-   nir_ssa_def *size = nir_channel(b, desc_range, 2);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, size);
+   nir_def *size = nir_channel(b, desc_range, 2);
+   nir_def_rewrite_uses(&intrin->dest.ssa, size);
    nir_instr_remove(&intrin->instr);
 
    return true;
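
/* For reference, the layout read above on the indirect descriptor path;
 * this mirrors struct anv_address_range_descriptor from the driver
 * headers (shown as a sketch, not part of this commit), and is why the
 * size is nir_channel(b, desc_range, 2). */
struct anv_address_range_descriptor_ref {
   uint64_t address;   /* 32-bit channels 0 and 1 */
   uint32_t range;     /* channel 2: buffer size in bytes */
   uint32_t stride;    /* channel 3 */
};
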
@@ -1429,7 +1429,7 @@ lower_image_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 
    bool non_uniform = nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM;
    bool is_bindless;
-   nir_ssa_def *handle =
+   nir_def *handle =
       build_load_var_deref_surface_handle(b, deref, non_uniform,
                                           &is_bindless, state);
    nir_rewrite_image_intrinsic(intrin, handle, is_bindless);
@@ -1450,7 +1450,7 @@ lower_image_size_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 
    bool non_uniform = nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM;
    bool is_bindless;
-   nir_ssa_def *handle =
+   nir_def *handle =
       build_load_var_deref_surface_handle(b, deref, non_uniform,
                                           &is_bindless, state);
    nir_rewrite_image_intrinsic(intrin, handle, is_bindless);
@@ -1459,7 +1459,7 @@ lower_image_size_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    const uint32_t set = var->data.descriptor_set;
    const uint32_t binding = var->data.binding;
 
-   nir_ssa_def *array_index;
+   nir_def *array_index;
    if (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
       assert(nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var);
@@ -1468,22 +1468,22 @@ lower_image_size_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
       array_index = nir_imm_int(b, 0);
    }
 
-   nir_ssa_def *desc_addr = build_desc_addr_for_binding(
+   nir_def *desc_addr = build_desc_addr_for_binding(
       b, set, binding, array_index, state);
 
    b->cursor = nir_after_instr(&intrin->instr);
 
-   nir_ssa_def *image_depth =
+   nir_def *image_depth =
       build_load_storage_3d_image_depth(b, desc_addr,
                                         nir_channel(b, &intrin->dest.ssa, 2),
                                         state);
 
-   nir_ssa_def *comps[4] = {};
+   nir_def *comps[4] = {};
    for (unsigned c = 0; c < intrin->dest.ssa.num_components; c++)
       comps[c] = c == 2 ? image_depth : nir_channel(b, &intrin->dest.ssa, c);
 
-   nir_ssa_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components);
-   nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa, vec, vec->parent_instr);
+   nir_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components);
+   nir_def_rewrite_uses_after(&intrin->dest.ssa, vec, vec->parent_instr);
 
    return true;
 }
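
/* The patch-one-channel idiom used above, as a stand-alone sketch
 * (hypothetical helper, not from this commit): rebuild the intrinsic's
 * result with one channel replaced, then rewrite only the uses that
 * follow the new vector so its own nir_channel() reads still see the
 * original def. */
static void
replace_result_channel(nir_builder *b, nir_intrinsic_instr *intrin,
                       unsigned channel, nir_def *repl)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned c = 0; c < intrin->dest.ssa.num_components; c++)
      comps[c] = c == channel ? repl : nir_channel(b, &intrin->dest.ssa, c);

   nir_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components);
   nir_def_rewrite_uses_after(&intrin->dest.ssa, vec, vec->parent_instr);
}
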
@@ -1498,7 +1498,7 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
     * by constant folding.
     */
    assert(!nir_src_is_const(intrin->src[0]));
-   nir_ssa_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
+   nir_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                       nir_intrinsic_base(intrin));
 
    unsigned load_size = intrin->dest.ssa.num_components *
@@ -1509,19 +1509,19 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
    unsigned max_offset = b->shader->constant_data_size - load_size;
    offset = nir_umin(b, offset, nir_imm_int(b, max_offset));
 
-   nir_ssa_def *const_data_addr = nir_pack_64_2x32_split(b,
+   nir_def *const_data_addr = nir_pack_64_2x32_split(b,
       nir_iadd(b,
          nir_load_reloc_const_intel(b, BRW_SHADER_RELOC_CONST_DATA_ADDR_LOW),
          offset),
       nir_load_reloc_const_intel(b, BRW_SHADER_RELOC_CONST_DATA_ADDR_HIGH));
 
-   nir_ssa_def *data =
+   nir_def *data =
       nir_load_global_constant(b, const_data_addr,
                                load_align,
                                intrin->dest.ssa.num_components,
                                intrin->dest.ssa.bit_size);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, data);
+   nir_def_rewrite_uses(&intrin->dest.ssa, data);
 
    return true;
 }
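
/* Plain-C model of the relocated address math above (illustration
 * only): the 32-bit offset is folded into the low dword before the high
 * dword is spliced in, which assumes the constant-data block never
 * crosses a 4 GiB boundary. */
static inline uint64_t
const_data_addr_ref(uint32_t reloc_lo, uint32_t reloc_hi, uint32_t offset)
{
   return ((uint64_t)reloc_hi << 32) | (uint32_t)(reloc_lo + offset);
}
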
@@ -1532,11 +1532,11 @@ lower_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *intrin,
 {
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def *base_workgroup_id =
+   nir_def *base_workgroup_id =
       nir_load_push_constant(b, 3, 32, nir_imm_int(b, 0),
                              .base = offsetof(struct anv_push_constants, cs.base_work_group_id),
                              .range = sizeof_field(struct anv_push_constants, cs.base_work_group_id));
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, base_workgroup_id);
+   nir_def_rewrite_uses(&intrin->dest.ssa, base_workgroup_id);
 
    return true;
 }
@@ -1559,7 +1559,7 @@ lower_tex_deref(nir_builder *b, nir_tex_instr *tex,
    const unsigned binding = var->data.binding;
    const bool bindless = is_binding_bindless(set, binding, is_sampler, state);
 
-   nir_ssa_def *array_index = NULL;
+   nir_def *array_index = NULL;
    if (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
 
@@ -1569,7 +1569,7 @@ lower_tex_deref(nir_builder *b, nir_tex_instr *tex,
    }
 
    nir_tex_src_type offset_src_type;
-   nir_ssa_def *index;
+   nir_def *index;
    if (deref_src_type == nir_tex_src_texture_deref) {
       index = build_surface_index_for_binding(b, set, binding, array_index,
                                               plane,
@@ -1609,8 +1609,8 @@ tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
    return plane;
 }
 
-static nir_ssa_def *
-build_def_array_select(nir_builder *b, nir_ssa_def **srcs, nir_ssa_def *idx,
+static nir_def *
+build_def_array_select(nir_builder *b, nir_def **srcs, nir_def *idx,
                        unsigned start, unsigned end)
 {
    if (start == end - 1) {
@@ -1649,11 +1649,11 @@ lower_ray_query_globals(nir_builder *b, nir_intrinsic_instr *intrin,
 {
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def *rq_globals =
+   nir_def *rq_globals =
       nir_load_push_constant(b, 1, 64, nir_imm_int(b, 0),
                              .base = offsetof(struct anv_push_constants, ray_query_globals),
                              .range = sizeof_field(struct anv_push_constants, ray_query_globals));
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, rq_globals);
+   nir_def_rewrite_uses(&intrin->dest.ssa, rq_globals);
 
    return true;
 }
index b6d047e..80d3918 100644
@@ -173,31 +173,31 @@ anv_nir_compute_push_layout(nir_shader *nir,
                case nir_intrinsic_load_desc_set_address_intel: {
                   assert(brw_shader_stage_requires_bindless_resources(nir->info.stage));
                   b->cursor = nir_before_instr(&intrin->instr);
-                  nir_ssa_def *pc_load = nir_load_uniform(b, 1, 32,
+                  nir_def *pc_load = nir_load_uniform(b, 1, 32,
                      nir_imul_imm(b, intrin->src[0].ssa, sizeof(uint32_t)),
                      .base = offsetof(struct anv_push_constants, desc_offsets),
                      .range = sizeof_field(struct anv_push_constants, desc_offsets),
                      .dest_type = nir_type_uint32);
                   pc_load = nir_iand_imm(b, pc_load, ANV_DESCRIPTOR_SET_OFFSET_MASK);
-                  nir_ssa_def *desc_addr =
+                  nir_def *desc_addr =
                      nir_pack_64_2x32_split(
                         b, pc_load,
                         nir_load_reloc_const_intel(
                            b, BRW_SHADER_RELOC_DESCRIPTORS_ADDR_HIGH));
-                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc_addr);
+                  nir_def_rewrite_uses(&intrin->dest.ssa, desc_addr);
                   break;
                }
 
                case nir_intrinsic_load_desc_set_dynamic_index_intel: {
                   b->cursor = nir_before_instr(&intrin->instr);
-                  nir_ssa_def *pc_load = nir_load_uniform(b, 1, 32,
+                  nir_def *pc_load = nir_load_uniform(b, 1, 32,
                      nir_imul_imm(b, intrin->src[0].ssa, sizeof(uint32_t)),
                      .base = offsetof(struct anv_push_constants, desc_offsets),
                      .range = sizeof_field(struct anv_push_constants, desc_offsets),
                      .dest_type = nir_type_uint32);
                   pc_load = nir_iand_imm(
                      b, pc_load, ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK);
-                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa, pc_load);
+                  nir_def_rewrite_uses(&intrin->dest.ssa, pc_load);
                   break;
                }
 
index 7b07a30..18f5c4a 100644
@@ -47,7 +47,7 @@ lower_patch_vertices_in_instr(nir_builder *b, nir_instr *instr, UNUSED void *_da
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def_rewrite_uses(
+   nir_def_rewrite_uses(
       &load->dest.ssa,
       nir_load_push_constant(
          b, 1, 32,
index b46df4d..1b80752 100644
@@ -42,11 +42,11 @@ struct lower_multiview_state {
 
    uint32_t view_mask;
 
-   nir_ssa_def *instance_id;
-   nir_ssa_def *view_index;
+   nir_def *instance_id;
+   nir_def *view_index;
 };
 
-static nir_ssa_def *
+static nir_def *
 build_instance_id(struct lower_multiview_state *state)
 {
    assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);
@@ -68,7 +68,7 @@ build_instance_id(struct lower_multiview_state *state)
    return state->instance_id;
 }
 
-static nir_ssa_def *
+static nir_def *
 build_view_index(struct lower_multiview_state *state)
 {
    assert(state->builder.shader->info.stage != MESA_SHADER_FRAGMENT);
@@ -90,7 +90,7 @@ build_view_index(struct lower_multiview_state *state)
           * id is given by instance_id % view_count.  We then have to convert
           * that to an actual view id.
           */
-         nir_ssa_def *compacted =
+         nir_def *compacted =
             nir_umod_imm(b, nir_load_instance_id(b),
                             util_bitcount(state->view_mask));
 
@@ -109,18 +109,18 @@ build_view_index(struct lower_multiview_state *state)
                remap |= (uint64_t)bit << (i++ * 4);
             }
 
-            nir_ssa_def *shift = nir_imul_imm(b, compacted, 4);
+            nir_def *shift = nir_imul_imm(b, compacted, 4);
 
             /* One of these days, when we have int64 everywhere, this will be
              * easier.
              */
-            nir_ssa_def *shifted;
+            nir_def *shifted;
             if (remap <= UINT32_MAX) {
                shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
             } else {
-               nir_ssa_def *shifted_low =
+               nir_def *shifted_low =
                   nir_ushr(b, nir_imm_int(b, remap), shift);
-               nir_ssa_def *shifted_high =
+               nir_def *shifted_high =
                   nir_ushr(b, nir_imm_int(b, remap >> 32),
                               nir_iadd_imm(b, shift, -32));
                shifted = nir_bcsel(b, nir_ilt_imm(b, shift, 32),
@@ -159,7 +159,7 @@ is_load_view_index(const nir_instr *instr, const void *data)
           nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_view_index;
 }
 
-static nir_ssa_def *
+static nir_def *
 replace_load_view_index_with_zero(struct nir_builder *b,
                                   nir_instr *instr, void *data)
 {
@@ -167,7 +167,7 @@ replace_load_view_index_with_zero(struct nir_builder *b,
    return nir_imm_zero(b, 1, 32);
 }
 
-static nir_ssa_def *
+static nir_def *
 replace_load_view_index_with_layer_id(struct nir_builder *b,
                                       nir_instr *instr, void *data)
 {
@@ -236,7 +236,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask,
              load->intrinsic != nir_intrinsic_load_view_index)
             continue;
 
-         nir_ssa_def *value;
+         nir_def *value;
          if (load->intrinsic == nir_intrinsic_load_instance_id) {
             value = build_instance_id(&state);
          } else {
@@ -244,7 +244,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask,
             value = build_view_index(&state);
          }
 
-         nir_ssa_def_rewrite_uses(&load->dest.ssa, value);
+         nir_def_rewrite_uses(&load->dest.ssa, value);
 
          nir_instr_remove(&load->instr);
       }
@@ -254,7 +254,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask,
     * available in the VS.  If it's not a fragment shader, we need to pass
     * the view index on to the next stage.
     */
-   nir_ssa_def *view_index = build_view_index(&state);
+   nir_def *view_index = build_view_index(&state);
 
    nir_builder *b = &state.builder;
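
/* Stand-alone model of the remap table built in build_view_index above
 * (illustrative): one 4-bit entry per set bit of the view mask.  For
 * view_mask = 0b1010 the packed table is remap = 0x31, so compacted
 * index 1 maps back to view 3. */
static uint32_t
view_for_compacted_index(uint64_t view_mask, uint32_t compacted)
{
   uint64_t remap = 0;
   unsigned i = 0;
   for (unsigned bit = 0; bit < 64; bit++) {
      if (view_mask & (1ull << bit))
         remap |= (uint64_t)bit << (i++ * 4);
   }
   return (remap >> (compacted * 4)) & 0xf;
}
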
 
index e0d8631..b219031 100644
@@ -109,8 +109,8 @@ lower_resource_intel(nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *set_offset = intrin->src[0].ssa;
-   nir_ssa_def *binding_offset = intrin->src[1].ssa;
+   nir_def *set_offset = intrin->src[0].ssa;
+   nir_def *binding_offset = intrin->src[1].ssa;
 
    /* When using indirect descriptor, the surface handles are loaded from the
     * descriptor buffer and do not need any offset.
index 202f675..b221534 100644
@@ -37,8 +37,8 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *base_addr = load->src[0].ssa;
-   nir_ssa_def *bound = NULL;
+   nir_def *base_addr = load->src[0].ssa;
+   nir_def *bound = NULL;
    if (load->intrinsic == nir_intrinsic_load_global_constant_bounded)
       bound = load->src[2].ssa;
 
@@ -46,7 +46,7 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
    assert(bit_size >= 8 && bit_size % 8 == 0);
    unsigned byte_size = bit_size / 8;
 
-   nir_ssa_def *val;
+   nir_def *val;
    if (!nir_src_is_divergent(load->src[0]) && nir_src_is_const(load->src[1])) {
       uint32_t offset = nir_src_as_uint(load->src[1]);
 
@@ -59,16 +59,16 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
       uint64_t aligned_offset = offset - suboffset;
 
       /* Load two just in case we go over a 64B boundary */
-      nir_ssa_def *data[2];
+      nir_def *data[2];
       for (unsigned i = 0; i < 2; i++) {
-         nir_ssa_def *pred;
+         nir_def *pred;
          if (bound) {
             pred = nir_igt_imm(b, bound, aligned_offset + i * 64 + 63);
          } else {
             pred = nir_imm_true(b);
          }
 
-         nir_ssa_def *addr = nir_iadd_imm(b, base_addr,
+         nir_def *addr = nir_iadd_imm(b, base_addr,
                                           aligned_offset + i * 64);
 
          data[i] = nir_load_global_const_block_intel(b, 16, addr, pred);
@@ -77,19 +77,19 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
       val = nir_extract_bits(b, data, 2, suboffset * 8,
                              load->num_components, bit_size);
    } else {
-      nir_ssa_def *offset = load->src[1].ssa;
-      nir_ssa_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset));
+      nir_def *offset = load->src[1].ssa;
+      nir_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset));
 
       if (bound) {
-         nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
+         nir_def *zero = nir_imm_zero(b, load->num_components, bit_size);
 
          unsigned load_size = byte_size * load->num_components;
-         nir_ssa_def *in_bounds =
+         nir_def *in_bounds =
             nir_ilt(b, nir_iadd_imm(b, offset, load_size - 1), bound);
 
          nir_push_if(b, in_bounds);
 
-         nir_ssa_def *load_val =
+         nir_def *load_val =
             nir_build_load_global_constant(b, load->dest.ssa.num_components,
                                            load->dest.ssa.bit_size, addr,
                                            .access = nir_intrinsic_access(load),
@@ -108,7 +108,7 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
       }
    }
 
-   nir_ssa_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->dest.ssa, val);
    nir_instr_remove(&load->instr);
 
    return true;
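
/* CPU-side model of the constant-offset path above (a sketch, with
 * hypothetical names): two aligned 64-byte block loads always cover an
 * unaligned span of up to 64 bytes; the final copy plays the role of
 * nir_extract_bits. */
#include <stdint.h>
#include <string.h>

static void
load_unaligned_span(const uint8_t *mem, uint64_t offset,
                    uint8_t *out, unsigned size /* <= 64 */)
{
   uint64_t suboffset = offset % 64;
   uint8_t blocks[128];

   memcpy(blocks, mem + (offset - suboffset), sizeof(blocks));
   memcpy(out, blocks + suboffset, size);
}
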
index 3ff6655..7eac3e4 100644
@@ -49,7 +49,7 @@ struct lower_set_vtx_and_prim_count_state {
 };
 
 static nir_variable *
-anv_nir_prim_count_store(nir_builder *b, nir_ssa_def *val)
+anv_nir_prim_count_store(nir_builder *b, nir_def *val)
 {
    nir_variable *primitive_count =
          nir_variable_create(b->shader,
@@ -59,9 +59,9 @@ anv_nir_prim_count_store(nir_builder *b, nir_ssa_def *val)
    primitive_count->data.location = VARYING_SLOT_PRIMITIVE_COUNT;
    primitive_count->data.interpolation = INTERP_MODE_NONE;
 
-   nir_ssa_def *local_invocation_index = nir_load_local_invocation_index(b);
+   nir_def *local_invocation_index = nir_load_local_invocation_index(b);
 
-   nir_ssa_def *cmp = nir_ieq_imm(b, local_invocation_index, 0);
+   nir_def *cmp = nir_ieq_imm(b, local_invocation_index, 0);
    nir_if *if_stmt = nir_push_if(b, cmp);
    {
       nir_deref_instr *prim_count_deref = nir_build_deref_var(b, primitive_count);
@@ -120,7 +120,7 @@ anv_nir_lower_set_vtx_and_prim_count(nir_shader *nir)
       nir_builder b;
       nir_function_impl *entrypoint = nir_shader_get_entrypoint(nir);
       b = nir_builder_at(nir_before_block(nir_start_block(entrypoint)));
-      nir_ssa_def *zero = nir_imm_int(&b, 0);
+      nir_def *zero = nir_imm_int(&b, 0);
       state.primitive_count = anv_nir_prim_count_store(&b, zero);
    }
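
/* The invocation-0 guard used by anv_nir_prim_count_store above, as a
 * minimal sketch (hypothetical helper): a single invocation performs
 * the store so the count is written exactly once per workgroup. */
static void
store_from_invocation_zero(nir_builder *b, nir_variable *var, nir_def *val)
{
   nir_push_if(b, nir_ieq_imm(b, nir_load_local_invocation_index(b), 0));
   nir_store_var(b, var, val, 0x1);
   nir_pop_if(b, NULL);
}
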
 
index 63eb7cb..fbe3d62 100644
@@ -232,15 +232,15 @@ nir_deref_find_descriptor(nir_deref_instr *deref,
    return find_descriptor_for_index_src(intrin->src[0], state);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_load_descriptor_mem(nir_builder *b,
-                          nir_ssa_def *desc_addr, unsigned desc_offset,
+                          nir_def *desc_addr, unsigned desc_offset,
                           unsigned num_components, unsigned bit_size,
                           struct apply_pipeline_layout_state *state)
 
 {
-   nir_ssa_def *surface_index = nir_channel(b, desc_addr, 0);
-   nir_ssa_def *offset32 =
+   nir_def *surface_index = nir_channel(b, desc_addr, 0);
+   nir_def *offset32 =
       nir_iadd_imm(b, nir_channel(b, desc_addr, 1), desc_offset);
 
    return nir_load_ubo(b, num_components, bit_size,
@@ -271,9 +271,9 @@ build_load_descriptor_mem(nir_builder *b,
  * The load_vulkan_descriptor intrinsic exists to provide a transition point
  * between these two forms of derefs: descriptor and memory.
  */
-static nir_ssa_def *
+static nir_def *
 build_res_index(nir_builder *b, uint32_t set, uint32_t binding,
-                nir_ssa_def *array_index, nir_address_format addr_format,
+                nir_def *array_index, nir_address_format addr_format,
                 struct apply_pipeline_layout_state *state)
 {
    const struct anv_descriptor_set_binding_layout *bind_layout =
@@ -323,19 +323,19 @@ build_res_index(nir_builder *b, uint32_t set, uint32_t binding,
 }
 
 struct res_index_defs {
-   nir_ssa_def *set_idx;
-   nir_ssa_def *dyn_offset_base;
-   nir_ssa_def *desc_offset_base;
-   nir_ssa_def *array_index;
-   nir_ssa_def *desc_stride;
+   nir_def *set_idx;
+   nir_def *dyn_offset_base;
+   nir_def *desc_offset_base;
+   nir_def *array_index;
+   nir_def *desc_stride;
 };
 
 static struct res_index_defs
-unpack_res_index(nir_builder *b, nir_ssa_def *index)
+unpack_res_index(nir_builder *b, nir_def *index)
 {
    struct res_index_defs defs;
 
-   nir_ssa_def *packed = nir_channel(b, index, 0);
+   nir_def *packed = nir_channel(b, index, 0);
    defs.desc_stride = nir_extract_u8(b, packed, nir_imm_int(b, 2));
    defs.set_idx = nir_extract_u8(b, packed, nir_imm_int(b, 1));
    defs.dyn_offset_base = nir_extract_u8(b, packed, nir_imm_int(b, 0));
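
/* Byte layout of channel 0 of the packed resource index, as decoded by
 * the nir_extract_u8 calls above (reference sketch): byte 0 holds
 * dyn_offset_base, byte 1 set_idx, byte 2 desc_stride. */
static inline void
unpack_res_index_ref(uint32_t packed, uint8_t *dyn_offset_base,
                     uint8_t *set_idx, uint8_t *desc_stride)
{
   *dyn_offset_base = packed & 0xff;
   *set_idx = (packed >> 8) & 0xff;
   *desc_stride = (packed >> 16) & 0xff;
}
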
@@ -355,8 +355,8 @@ unpack_res_index(nir_builder *b, nir_ssa_def *index)
  * vulkan_resource_index intrinsic and we have to do it based on nothing but
  * the address format.
  */
-static nir_ssa_def *
-build_res_reindex(nir_builder *b, nir_ssa_def *orig, nir_ssa_def *delta,
+static nir_def *
+build_res_reindex(nir_builder *b, nir_def *orig, nir_def *delta,
                   nir_address_format addr_format)
 {
    switch (addr_format) {
@@ -386,11 +386,11 @@ build_res_reindex(nir_builder *b, nir_ssa_def *orig, nir_ssa_def *delta,
  * determine the descriptor stride for array descriptors.  The bind_layout is
  * optional for buffer descriptor types.
  */
-static nir_ssa_def *
+static nir_def *
 build_desc_addr(nir_builder *b,
                 const struct anv_descriptor_set_binding_layout *bind_layout,
                 const VkDescriptorType desc_type,
-                nir_ssa_def *index, nir_address_format addr_format,
+                nir_def *index, nir_address_format addr_format,
                 struct apply_pipeline_layout_state *state)
 {
    switch (addr_format) {
@@ -398,7 +398,7 @@ build_desc_addr(nir_builder *b,
    case nir_address_format_64bit_bounded_global: {
       struct res_index_defs res = unpack_res_index(b, index);
 
-      nir_ssa_def *desc_offset = res.desc_offset_base;
+      nir_def *desc_offset = res.desc_offset_base;
       if (desc_type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
          /* Compute the actual descriptor offset.  For inline uniform blocks,
           * the array index is ignored as they are only allowed to be a single
@@ -428,10 +428,10 @@ build_desc_addr(nir_builder *b,
  *
  * See build_res_index for details about each resource index format.
  */
-static nir_ssa_def *
+static nir_def *
 build_buffer_addr_for_res_index(nir_builder *b,
                                 const VkDescriptorType desc_type,
-                                nir_ssa_def *res_index,
+                                nir_def *res_index,
                                 nir_address_format addr_format,
                                 struct apply_pipeline_layout_state *state)
 {
@@ -439,18 +439,18 @@ build_buffer_addr_for_res_index(nir_builder *b,
       assert(addr_format == nir_address_format_32bit_index_offset);
       return build_desc_addr(b, NULL, desc_type, res_index, addr_format, state);
    } else if (addr_format == nir_address_format_32bit_index_offset) {
-      nir_ssa_def *array_index = nir_channel(b, res_index, 0);
-      nir_ssa_def *packed = nir_channel(b, res_index, 1);
-      nir_ssa_def *surface_index = nir_extract_u16(b, packed, nir_imm_int(b, 0));
+      nir_def *array_index = nir_channel(b, res_index, 0);
+      nir_def *packed = nir_channel(b, res_index, 1);
+      nir_def *surface_index = nir_extract_u16(b, packed, nir_imm_int(b, 0));
 
       return nir_vec2(b, nir_iadd(b, surface_index, array_index),
                          nir_imm_int(b, 0));
    }
 
-   nir_ssa_def *desc_addr =
+   nir_def *desc_addr =
       build_desc_addr(b, NULL, desc_type, res_index, addr_format, state);
 
-   nir_ssa_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 4, 32, state);
+   nir_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 4, 32, state);
 
    if (state->has_dynamic_buffers) {
       struct res_index_defs res = unpack_res_index(b, res_index);
@@ -459,22 +459,22 @@ build_buffer_addr_for_res_index(nir_builder *b,
        * (save from the dynamic offset base index) if this buffer has a
        * dynamic offset.
        */
-      nir_ssa_def *dyn_offset_idx =
+      nir_def *dyn_offset_idx =
          nir_iadd(b, res.dyn_offset_base, res.array_index);
 
-      nir_ssa_def *dyn_load =
+      nir_def *dyn_load =
          nir_load_push_constant(b, 1, 32, nir_imul_imm(b, dyn_offset_idx, 4),
                                 .base = offsetof(struct anv_push_constants, dynamic_offsets),
                                 .range = MAX_DYNAMIC_BUFFERS * 4);
 
-      nir_ssa_def *dynamic_offset =
+      nir_def *dynamic_offset =
          nir_bcsel(b, nir_ieq_imm(b, res.dyn_offset_base, 0xff),
                       nir_imm_int(b, 0), dyn_load);
 
       /* The dynamic offset gets added to the base pointer so that we
        * have a sliding window range.
        */
-      nir_ssa_def *base_ptr =
+      nir_def *base_ptr =
          nir_pack_64_2x32(b, nir_trim_vector(b, desc, 2));
       base_ptr = nir_iadd(b, base_ptr, nir_u2u64(b, dynamic_offset));
       desc = nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_ptr),
@@ -498,7 +498,7 @@ build_buffer_addr_for_res_index(nir_builder *b,
  * The deref chain has to terminate at a variable with a descriptor_set and
  * binding set.  This is used for images, textures, and samplers.
  */
-static nir_ssa_def *
+static nir_def *
 build_load_var_deref_descriptor_mem(nir_builder *b, nir_deref_instr *deref,
                                     unsigned desc_offset,
                                     unsigned num_components, unsigned bit_size,
@@ -511,7 +511,7 @@ build_load_var_deref_descriptor_mem(nir_builder *b, nir_deref_instr *deref,
    const struct anv_descriptor_set_binding_layout *bind_layout =
          &state->layout->set[set].layout->binding[binding];
 
-   nir_ssa_def *array_index;
+   nir_def *array_index;
    if (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
       assert(nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var);
@@ -527,10 +527,10 @@ build_load_var_deref_descriptor_mem(nir_builder *b, nir_deref_instr *deref,
    const nir_address_format addr_format =
       nir_address_format_64bit_bounded_global;
 
-   nir_ssa_def *res_index =
+   nir_def *res_index =
       build_res_index(b, set, binding, array_index, addr_format, state);
 
-   nir_ssa_def *desc_addr =
+   nir_def *desc_addr =
       build_desc_addr(b, bind_layout, bind_layout->type,
                       res_index, addr_format, state);
 
@@ -545,7 +545,7 @@ build_load_var_deref_descriptor_mem(nir_builder *b, nir_deref_instr *deref,
  * hopes of better CSE.  This means the cursor is not where you left it when
  * this function returns.
  */
-static nir_ssa_def *
+static nir_def *
 build_res_index_for_chain(nir_builder *b, nir_intrinsic_instr *intrin,
                           nir_address_format addr_format,
                           uint32_t *set, uint32_t *binding,
@@ -560,7 +560,7 @@ build_res_index_for_chain(nir_builder *b, nir_intrinsic_instr *intrin,
    } else {
       assert(intrin->intrinsic == nir_intrinsic_vulkan_resource_reindex);
       nir_intrinsic_instr *parent = nir_src_as_intrinsic(intrin->src[0]);
-      nir_ssa_def *index =
+      nir_def *index =
          build_res_index_for_chain(b, parent, addr_format,
                                    set, binding, state);
 
@@ -574,14 +574,14 @@ build_res_index_for_chain(nir_builder *b, nir_intrinsic_instr *intrin,
  *
  * The cursor is not where you left it when this function returns.
  */
-static nir_ssa_def *
+static nir_def *
 build_buffer_addr_for_idx_intrin(nir_builder *b,
                                  nir_intrinsic_instr *idx_intrin,
                                  nir_address_format addr_format,
                                  struct apply_pipeline_layout_state *state)
 {
    uint32_t set = UINT32_MAX, binding = UINT32_MAX;
-   nir_ssa_def *res_index =
+   nir_def *res_index =
       build_res_index_for_chain(b, idx_intrin, addr_format,
                                 &set, &binding, state);
 
@@ -599,14 +599,14 @@ build_buffer_addr_for_idx_intrin(nir_builder *b,
  *
  * The cursor is not where you left it when this function returns.
  */
-static nir_ssa_def *
+static nir_def *
 build_buffer_addr_for_deref(nir_builder *b, nir_deref_instr *deref,
                             nir_address_format addr_format,
                             struct apply_pipeline_layout_state *state)
 {
    nir_deref_instr *parent = nir_deref_instr_parent(deref);
    if (parent) {
-      nir_ssa_def *addr =
+      nir_def *addr =
          build_buffer_addr_for_deref(b, parent, addr_format, state);
 
       b->cursor = nir_before_instr(&deref->instr);
@@ -661,7 +661,7 @@ try_lower_direct_buffer_intrinsic(nir_builder *b,
          addr_format = nir_address_format_32bit_index_offset;
    }
 
-   nir_ssa_def *addr =
+   nir_def *addr =
       build_buffer_addr_for_deref(b, deref, addr_format, state);
 
    b->cursor = nir_before_instr(&intrin->instr);
@@ -687,7 +687,7 @@ lower_load_accel_struct_desc(nir_builder *b,
       nir_address_format_64bit_bounded_global;
 
    uint32_t set = UINT32_MAX, binding = UINT32_MAX;
-   nir_ssa_def *res_index =
+   nir_def *res_index =
       build_res_index_for_chain(b, idx_intrin, addr_format,
                                 &set, &binding, state);
 
@@ -696,16 +696,16 @@ lower_load_accel_struct_desc(nir_builder *b,
 
    b->cursor = nir_before_instr(&load_desc->instr);
 
-   nir_ssa_def *desc_addr =
+   nir_def *desc_addr =
       build_desc_addr(b, bind_layout, bind_layout->type,
                       res_index, addr_format, state);
 
    /* Acceleration structure descriptors are always uint64_t */
-   nir_ssa_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state);
+   nir_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state);
 
    assert(load_desc->dest.ssa.bit_size == 64);
    assert(load_desc->dest.ssa.num_components == 1);
-   nir_ssa_def_rewrite_uses(&load_desc->dest.ssa, desc);
+   nir_def_rewrite_uses(&load_desc->dest.ssa, desc);
    nir_instr_remove(&load_desc->instr);
 
    return true;
@@ -747,7 +747,7 @@ lower_res_index_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_address_format addr_format =
       addr_format_for_desc_type(nir_intrinsic_desc_type(intrin), state);
 
-   nir_ssa_def *index =
+   nir_def *index =
       build_res_index(b, nir_intrinsic_desc_set(intrin),
                          nir_intrinsic_binding(intrin),
                          intrin->src[0].ssa,
@@ -755,7 +755,7 @@ lower_res_index_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 
    assert(intrin->dest.ssa.bit_size == index->bit_size);
    assert(intrin->dest.ssa.num_components == index->num_components);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, index);
+   nir_def_rewrite_uses(&intrin->dest.ssa, index);
    nir_instr_remove(&intrin->instr);
 
    return true;
@@ -770,14 +770,14 @@ lower_res_reindex_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_address_format addr_format =
       addr_format_for_desc_type(nir_intrinsic_desc_type(intrin), state);
 
-   nir_ssa_def *index =
+   nir_def *index =
       build_res_reindex(b, intrin->src[0].ssa,
                            intrin->src[1].ssa,
                            addr_format);
 
    assert(intrin->dest.ssa.bit_size == index->bit_size);
    assert(intrin->dest.ssa.num_components == index->num_components);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, index);
+   nir_def_rewrite_uses(&intrin->dest.ssa, index);
    nir_instr_remove(&intrin->instr);
 
    return true;
@@ -792,13 +792,13 @@ lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin,
    const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
    nir_address_format addr_format = addr_format_for_desc_type(desc_type, state);
 
-   nir_ssa_def *desc =
+   nir_def *desc =
       build_buffer_addr_for_res_index(b, desc_type, intrin->src[0].ssa,
                                       addr_format, state);
 
    assert(intrin->dest.ssa.bit_size == desc->bit_size);
    assert(intrin->dest.ssa.num_components == desc->num_components);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
+   nir_def_rewrite_uses(&intrin->dest.ssa, desc);
    nir_instr_remove(&intrin->instr);
 
    return true;
@@ -816,15 +816,15 @@ lower_get_ssbo_size(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_address_format addr_format =
       addr_format_for_desc_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, state);
 
-   nir_ssa_def *desc =
+   nir_def *desc =
       build_buffer_addr_for_res_index(b, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                                       intrin->src[0].ssa, addr_format, state);
 
    switch (addr_format) {
    case nir_address_format_64bit_global_32bit_offset:
    case nir_address_format_64bit_bounded_global: {
-      nir_ssa_def *size = nir_channel(b, desc, 2);
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, size);
+      nir_def *size = nir_channel(b, desc, 2);
+      nir_def_rewrite_uses(&intrin->dest.ssa, size);
       nir_instr_remove(&intrin->instr);
       break;
    }
@@ -869,14 +869,14 @@ lower_image_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 
       const unsigned param = nir_intrinsic_base(intrin);
 
-      nir_ssa_def *desc =
+      nir_def *desc =
          build_load_var_deref_descriptor_mem(b, deref, param * 16,
                                              intrin->dest.ssa.num_components,
                                              intrin->dest.ssa.bit_size, state);
 
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
+      nir_def_rewrite_uses(&intrin->dest.ssa, desc);
    } else {
-      nir_ssa_def *index = NULL;
+      nir_def *index = NULL;
       if (deref->deref_type != nir_deref_type_var) {
          assert(deref->deref_type == nir_deref_type_array);
          index = nir_ssa_for_src(b, deref->arr.index, 1);
@@ -901,10 +901,10 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
     * by constant folding.
     */
    assert(!nir_src_is_const(intrin->src[0]));
-   nir_ssa_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
+   nir_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                       nir_intrinsic_base(intrin));
 
-   nir_ssa_def *data;
+   nir_def *data;
    if (!anv_use_relocations(state->pdevice)) {
       unsigned load_size = intrin->dest.ssa.num_components *
                            intrin->dest.ssa.bit_size / 8;
@@ -914,7 +914,7 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
       unsigned max_offset = b->shader->constant_data_size - load_size;
       offset = nir_umin(b, offset, nir_imm_int(b, max_offset));
 
-      nir_ssa_def *const_data_base_addr = nir_pack_64_2x32_split(b,
+      nir_def *const_data_base_addr = nir_pack_64_2x32_split(b,
          nir_load_reloc_const_intel(b, BRW_SHADER_RELOC_CONST_DATA_ADDR_LOW),
          nir_load_reloc_const_intel(b, BRW_SHADER_RELOC_CONST_DATA_ADDR_HIGH));
 
@@ -924,7 +924,7 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
                                       intrin->dest.ssa.num_components,
                                       intrin->dest.ssa.bit_size);
    } else {
-      nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
+      nir_def *index = nir_imm_int(b, state->constants_offset);
 
       data = nir_load_ubo(b, intrin->num_components, intrin->dest.ssa.bit_size,
                           index, offset,
@@ -934,7 +934,7 @@ lower_load_constant(nir_builder *b, nir_intrinsic_instr *intrin,
                           .range = nir_intrinsic_range(intrin));
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, data);
+   nir_def_rewrite_uses(&intrin->dest.ssa, data);
 
    return true;
 }
@@ -945,11 +945,11 @@ lower_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *intrin,
 {
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def *base_workgroup_id =
+   nir_def *base_workgroup_id =
       nir_load_push_constant(b, 3, 32, nir_imm_int(b, 0),
                              .base = offsetof(struct anv_push_constants, cs.base_work_group_id),
                              .range = 3 * sizeof(uint32_t));
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, base_workgroup_id);
+   nir_def_rewrite_uses(&intrin->dest.ssa, base_workgroup_id);
 
    return true;
 }
@@ -981,12 +981,12 @@ lower_tex_deref(nir_builder *b, nir_tex_instr *tex,
    }
 
    nir_tex_src_type offset_src_type;
-   nir_ssa_def *index = NULL;
+   nir_def *index = NULL;
    if (binding_offset > MAX_BINDING_TABLE_SIZE) {
       const unsigned plane_offset =
          plane * sizeof(struct anv_sampled_image_descriptor);
 
-      nir_ssa_def *desc =
+      nir_def *desc =
          build_load_var_deref_descriptor_mem(b, deref, plane_offset,
                                              2, 32, state);
 
@@ -1065,8 +1065,8 @@ tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
    return plane;
 }
 
-static nir_ssa_def *
-build_def_array_select(nir_builder *b, nir_ssa_def **srcs, nir_ssa_def *idx,
+static nir_def *
+build_def_array_select(nir_builder *b, nir_def **srcs, nir_def *idx,
                        unsigned start, unsigned end)
 {
    if (start == end - 1) {
@@ -1108,7 +1108,7 @@ lower_gfx7_tex_swizzle(nir_builder *b, nir_tex_instr *tex, unsigned plane,
 
    const unsigned plane_offset =
       plane * sizeof(struct anv_texture_swizzle_descriptor);
-   nir_ssa_def *swiz =
+   nir_def *swiz =
       build_load_var_deref_descriptor_mem(b, deref, plane_offset,
                                           1, 32, state);
 
@@ -1118,8 +1118,8 @@ lower_gfx7_tex_swizzle(nir_builder *b, nir_tex_instr *tex, unsigned plane,
    assert(tex->dest.ssa.num_components == 4);
 
    /* Initializing to undef is ok; nir_opt_undef will clean it up. */
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
-   nir_ssa_def *comps[8];
+   nir_def *undef = nir_undef(b, 1, 32);
+   nir_def *comps[8];
    for (unsigned i = 0; i < ARRAY_SIZE(comps); i++)
       comps[i] = undef;
 
@@ -1133,15 +1133,15 @@ lower_gfx7_tex_swizzle(nir_builder *b, nir_tex_instr *tex, unsigned plane,
    comps[ISL_CHANNEL_SELECT_BLUE] = nir_channel(b, &tex->dest.ssa, 2);
    comps[ISL_CHANNEL_SELECT_ALPHA] = nir_channel(b, &tex->dest.ssa, 3);
 
-   nir_ssa_def *swiz_comps[4];
+   nir_def *swiz_comps[4];
    for (unsigned i = 0; i < 4; i++) {
-      nir_ssa_def *comp_swiz = nir_extract_u8(b, swiz, nir_imm_int(b, i));
+      nir_def *comp_swiz = nir_extract_u8(b, swiz, nir_imm_int(b, i));
       swiz_comps[i] = build_def_array_select(b, comps, comp_swiz, 0, 8);
    }
-   nir_ssa_def *swiz_tex_res = nir_vec(b, swiz_comps, 4);
+   nir_def *swiz_tex_res = nir_vec(b, swiz_comps, 4);
 
    /* Rewrite uses before we insert so we don't rewrite this use */
-   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa,
+   nir_def_rewrite_uses_after(&tex->dest.ssa,
                                   swiz_tex_res,
                                   swiz_tex_res->parent_instr);
 }
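
/* Plausible shape of build_def_array_select, whose base case appears
 * above (a sketch, not taken from this commit): a log-depth bcsel tree
 * that picks srcs[idx] without indirect indexing, used here to apply
 * the per-channel swizzle. */
static nir_def *
select_from_array(nir_builder *b, nir_def **srcs, nir_def *idx,
                  unsigned start, unsigned end)
{
   if (start == end - 1)
      return srcs[start];

   const unsigned mid = start + (end - start) / 2;
   return nir_bcsel(b, nir_ilt_imm(b, idx, mid),
                    select_from_array(b, srcs, idx, start, mid),
                    select_from_array(b, srcs, idx, mid, end));
}
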
index 16fae76..a409f1e 100644
@@ -42,11 +42,11 @@ struct lower_multiview_state {
 
    uint32_t view_mask;
 
-   nir_ssa_def *instance_id;
-   nir_ssa_def *view_index;
+   nir_def *instance_id;
+   nir_def *view_index;
 };
 
-static nir_ssa_def *
+static nir_def *
 build_instance_id(struct lower_multiview_state *state)
 {
    assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);
@@ -68,7 +68,7 @@ build_instance_id(struct lower_multiview_state *state)
    return state->instance_id;
 }
 
-static nir_ssa_def *
+static nir_def *
 build_view_index(struct lower_multiview_state *state)
 {
    assert(state->builder.shader->info.stage != MESA_SHADER_FRAGMENT);
@@ -90,7 +90,7 @@ build_view_index(struct lower_multiview_state *state)
           * id is given by instance_id % view_count.  We then have to convert
           * that to an actual view id.
           */
-         nir_ssa_def *compacted =
+         nir_def *compacted =
             nir_umod_imm(b, nir_load_instance_id(b),
                             util_bitcount(state->view_mask));
 
@@ -109,18 +109,18 @@ build_view_index(struct lower_multiview_state *state)
                remap |= (uint64_t)bit << (i++ * 4);
             }
 
-            nir_ssa_def *shift = nir_imul_imm(b, compacted, 4);
+            nir_def *shift = nir_imul_imm(b, compacted, 4);
 
             /* One of these days, when we have int64 everywhere, this will be
              * easier.
              */
-            nir_ssa_def *shifted;
+            nir_def *shifted;
             if (remap <= UINT32_MAX) {
                shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
             } else {
-               nir_ssa_def *shifted_low =
+               nir_def *shifted_low =
                   nir_ushr(b, nir_imm_int(b, remap), shift);
-               nir_ssa_def *shifted_high =
+               nir_def *shifted_high =
                   nir_ushr(b, nir_imm_int(b, remap >> 32),
                               nir_iadd_imm(b, shift, -32));
                shifted = nir_bcsel(b, nir_ilt_imm(b, shift, 32),
@@ -159,7 +159,7 @@ is_load_view_index(const nir_instr *instr, const void *data)
           nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_view_index;
 }
 
-static nir_ssa_def *
+static nir_def *
 replace_load_view_index_with_zero(struct nir_builder *b,
                                   nir_instr *instr, void *data)
 {
@@ -167,7 +167,7 @@ replace_load_view_index_with_zero(struct nir_builder *b,
    return nir_imm_zero(b, 1, 32);
 }
 
-static nir_ssa_def *
+static nir_def *
 replace_load_view_index_with_layer_id(struct nir_builder *b,
                                       nir_instr *instr, void *data)
 {
@@ -211,7 +211,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
              load->intrinsic != nir_intrinsic_load_view_index)
             continue;
 
-         nir_ssa_def *value;
+         nir_def *value;
          if (load->intrinsic == nir_intrinsic_load_instance_id) {
             value = build_instance_id(&state);
          } else {
@@ -219,7 +219,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
             value = build_view_index(&state);
          }
 
-         nir_ssa_def_rewrite_uses(&load->dest.ssa, value);
+         nir_def_rewrite_uses(&load->dest.ssa, value);
 
          nir_instr_remove(&load->instr);
       }
@@ -229,7 +229,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
     * available in the VS.  If it's not a fragment shader, we need to pass
     * the view index on to the next stage.
     */
-   nir_ssa_def *view_index = build_view_index(&state);
+   nir_def *view_index = build_view_index(&state);
 
    nir_builder *b = &state.builder;
 
index c9d0ef7..5d715fb 100644
@@ -37,8 +37,8 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *base_addr = load->src[0].ssa;
-   nir_ssa_def *bound = NULL;
+   nir_def *base_addr = load->src[0].ssa;
+   nir_def *bound = NULL;
    if (load->intrinsic == nir_intrinsic_load_global_constant_bounded)
       bound = load->src[2].ssa;
 
@@ -46,7 +46,7 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
    assert(bit_size >= 8 && bit_size % 8 == 0);
    unsigned byte_size = bit_size / 8;
 
-   nir_ssa_def *val;
+   nir_def *val;
    if (nir_src_is_const(load->src[1])) {
       uint32_t offset = nir_src_as_uint(load->src[1]);
 
@@ -59,16 +59,16 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
       uint64_t aligned_offset = offset - suboffset;
 
       /* Load two just in case we go over a 64B boundary */
-      nir_ssa_def *data[2];
+      nir_def *data[2];
       for (unsigned i = 0; i < 2; i++) {
-         nir_ssa_def *pred;
+         nir_def *pred;
          if (bound) {
             pred = nir_igt_imm(b, bound, aligned_offset + i * 64 + 63);
          } else {
             pred = nir_imm_true(b);
          }
 
-         nir_ssa_def *addr = nir_iadd_imm(b, base_addr,
+         nir_def *addr = nir_iadd_imm(b, base_addr,
                                           aligned_offset + i * 64);
 
          data[i] = nir_load_global_const_block_intel(b, 16, addr, pred);
@@ -77,19 +77,19 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
       val = nir_extract_bits(b, data, 2, suboffset * 8,
                              load->num_components, bit_size);
    } else {
-      nir_ssa_def *offset = load->src[1].ssa;
-      nir_ssa_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset));
+      nir_def *offset = load->src[1].ssa;
+      nir_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset));
 
       if (bound) {
-         nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
+         nir_def *zero = nir_imm_zero(b, load->num_components, bit_size);
 
          unsigned load_size = byte_size * load->num_components;
-         nir_ssa_def *in_bounds =
+         nir_def *in_bounds =
             nir_ilt(b, nir_iadd_imm(b, offset, load_size - 1), bound);
 
          nir_push_if(b, in_bounds);
 
-         nir_ssa_def *load_val =
+         nir_def *load_val =
             nir_build_load_global_constant(b, load->dest.ssa.num_components,
                                            load->dest.ssa.bit_size, addr,
                                            .access = nir_intrinsic_access(load),
@@ -108,7 +108,7 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
       }
    }
 
-   nir_ssa_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->dest.ssa, val);
    nir_instr_remove(&load->instr);
 
    return true;
index 76b7538..47b6e93 100644
 
 struct ycbcr_state {
    nir_builder *builder;
-   nir_ssa_def *image_size;
+   nir_def *image_size;
    nir_tex_instr *origin_tex;
    nir_deref_instr *tex_deref;
    const struct vk_ycbcr_conversion *conversion;
 };
 
 /* TODO: we should probably replace this with a push constant/uniform. */
-static nir_ssa_def *
+static nir_def *
 get_texture_size(struct ycbcr_state *state, nir_deref_instr *texture)
 {
    if (state->image_size)
@@ -64,10 +64,10 @@ get_texture_size(struct ycbcr_state *state, nir_deref_instr *texture)
    return state->image_size;
 }
 
-static nir_ssa_def *
+static nir_def *
 implicit_downsampled_coord(nir_builder *b,
-                           nir_ssa_def *value,
-                           nir_ssa_def *max_value,
+                           nir_def *value,
+                           nir_def *max_value,
                            int div_scale)
 {
    return nir_fadd(b,
@@ -78,15 +78,15 @@ implicit_downsampled_coord(nir_builder *b,
                                      max_value)));
 }
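
/* Reference model of the body elided by the hunk above (an assumption
 * about the upstream source, not shown in this diff): nudge the
 * coordinate by a fraction of a chroma texel, e.g. half a texel when
 * div_scale == 2. */
static float
implicit_downsampled_coord_ref(float coord, float tex_size, int div_scale)
{
   return coord + 1.0f / (div_scale * tex_size);
}
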
 
-static nir_ssa_def *
+static nir_def *
 implicit_downsampled_coords(struct ycbcr_state *state,
-                            nir_ssa_def *old_coords,
+                            nir_def *old_coords,
                             const struct anv_format_plane *plane_format)
 {
    nir_builder *b = state->builder;
    const struct vk_ycbcr_conversion *conversion = state->conversion;
-   nir_ssa_def *image_size = get_texture_size(state, state->tex_deref);
-   nir_ssa_def *comp[4] = { NULL, };
+   nir_def *image_size = get_texture_size(state, state->tex_deref);
+   nir_def *comp[4] = { NULL, };
    int c;
 
    for (c = 0; c < ARRAY_SIZE(conversion->state.chroma_offsets); c++) {
@@ -108,7 +108,7 @@ implicit_downsampled_coords(struct ycbcr_state *state,
    return nir_vec(b, comp, old_coords->num_components);
 }
 
-static nir_ssa_def *
+static nir_def *
 create_plane_tex_instr_implicit(struct ycbcr_state *state,
                                 uint32_t plane)
 {
@@ -262,10 +262,10 @@ anv_nir_lower_ycbcr_textures_instr(nir_builder *builder,
    uint8_t y_bpc = y_isl_layout->channels_array[0].bits;
 
    /* |ycbcr_comp| holds components in the order : Cr-Y-Cb */
-   nir_ssa_def *zero = nir_imm_float(builder, 0.0f);
-   nir_ssa_def *one = nir_imm_float(builder, 1.0f);
+   nir_def *zero = nir_imm_float(builder, 0.0f);
+   nir_def *one = nir_imm_float(builder, 1.0f);
    /* Use extra 2 channels for following swizzle */
-   nir_ssa_def *ycbcr_comp[5] = { zero, zero, zero, one, zero };
+   nir_def *ycbcr_comp[5] = { zero, zero, zero, one, zero };
 
    uint8_t ycbcr_bpcs[5];
    memset(ycbcr_bpcs, y_bpc, sizeof(ycbcr_bpcs));
@@ -277,7 +277,7 @@ anv_nir_lower_ycbcr_textures_instr(nir_builder *builder,
     */
    for (uint32_t p = 0; p < format->n_planes; p++) {
       const struct anv_format_plane *plane_format = &format->planes[p];
-      nir_ssa_def *plane_sample = create_plane_tex_instr_implicit(&state, p);
+      nir_def *plane_sample = create_plane_tex_instr_implicit(&state, p);
 
       for (uint32_t pc = 0; pc < 4; pc++) {
          enum isl_channel_select ycbcr_swizzle =
@@ -296,7 +296,7 @@ anv_nir_lower_ycbcr_textures_instr(nir_builder *builder,
    }
 
    /* Now remaps components to the order specified by the conversion. */
-   nir_ssa_def *swizzled_comp[4] = { NULL, };
+   nir_def *swizzled_comp[4] = { NULL, };
    uint32_t swizzled_bpcs[4] = { 0, };
 
    for (uint32_t i = 0; i < ARRAY_SIZE(state.conversion->state.mapping); i++) {
@@ -320,7 +320,7 @@ anv_nir_lower_ycbcr_textures_instr(nir_builder *builder,
       }
    }
 
-   nir_ssa_def *result = nir_vec(builder, swizzled_comp, 4);
+   nir_def *result = nir_vec(builder, swizzled_comp, 4);
    if (state.conversion->state.ycbcr_model != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY) {
       result = nir_convert_ycbcr_to_rgb(builder,
                                         state.conversion->state.ycbcr_model,
@@ -329,7 +329,7 @@ anv_nir_lower_ycbcr_textures_instr(nir_builder *builder,
                                         swizzled_bpcs);
    }
 
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, result);
+   nir_def_rewrite_uses(&tex->dest.ssa, result);
    nir_instr_remove(&tex->instr);
 
    return true;
index 40a8b9b..ba63f9d 100644
@@ -333,12 +333,12 @@ struct texenv_fragment_program {
 
    nir_variable *sampler_vars[MAX_TEXTURE_COORD_UNITS];
 
-   nir_ssa_def *src_texture[MAX_TEXTURE_COORD_UNITS];
+   nir_def *src_texture[MAX_TEXTURE_COORD_UNITS];
    /* ssa-def containing each texture unit's sampled texture color,
     * else NULL.
     */
 
-   nir_ssa_def *src_previous;   /**< Color from previous stage */
+   nir_def *src_previous;   /**< Color from previous stage */
 };
 
 static nir_variable *
@@ -374,7 +374,7 @@ register_state_var(struct texenv_fragment_program *p,
    return var;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_state_var(struct texenv_fragment_program *p,
                gl_state_index s0,
                gl_state_index s1,
@@ -386,7 +386,7 @@ load_state_var(struct texenv_fragment_program *p,
    return nir_load_var(p->b, var);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_input(struct texenv_fragment_program *p, gl_varying_slot slot,
            const struct glsl_type *type)
 {
@@ -399,7 +399,7 @@ load_input(struct texenv_fragment_program *p, gl_varying_slot slot,
    return nir_load_var(p->b, var);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_current_attrib(struct texenv_fragment_program *p, GLuint attrib)
 {
    return load_state_var(p, STATE_CURRENT_ATTRIB_MAYBE_VP_CLAMPED,
@@ -407,7 +407,7 @@ get_current_attrib(struct texenv_fragment_program *p, GLuint attrib)
                          glsl_vec4_type());
 }
 
-static nir_ssa_def *
+static nir_def *
 get_gl_Color(struct texenv_fragment_program *p)
 {
    if (p->state->inputs_available & VARYING_BIT_COL0) {
@@ -417,7 +417,7 @@ get_gl_Color(struct texenv_fragment_program *p)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 get_source(struct texenv_fragment_program *p,
            GLuint src, GLuint unit)
 {
@@ -462,13 +462,13 @@ get_source(struct texenv_fragment_program *p,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_combine_source(struct texenv_fragment_program *p,
                     GLuint unit,
                     GLuint source,
                     GLuint operand)
 {
-   nir_ssa_def *src;
+   nir_def *src;
 
    src = get_source(p, source, unit);
 
@@ -480,7 +480,7 @@ emit_combine_source(struct texenv_fragment_program *p,
       return src->num_components == 1 ? src : nir_channel(p->b, src, 3);
 
    case TEXENV_OPR_ONE_MINUS_ALPHA: {
-      nir_ssa_def *scalar =
+      nir_def *scalar =
          src->num_components == 1 ? src : nir_channel(p->b, src, 3);
 
       return nir_fsub_imm(p->b, 1.0, scalar);
@@ -535,8 +535,8 @@ static GLboolean args_match( const struct state_key *key, GLuint unit )
    return GL_TRUE;
 }
 
-static nir_ssa_def *
-smear(nir_builder *b, nir_ssa_def *val)
+static nir_def *
+smear(nir_builder *b, nir_def *val)
 {
    if (val->num_components != 1)
       return val;
@@ -544,15 +544,15 @@ smear(nir_builder *b, nir_ssa_def *val)
    return nir_replicate(b, val, 4);
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_combine(struct texenv_fragment_program *p,
              GLuint unit,
              GLuint nr,
              GLuint mode,
              const struct gl_tex_env_argument *opt)
 {
-   nir_ssa_def *src[MAX_COMBINER_TERMS];
-   nir_ssa_def *tmp0, *tmp1;
+   nir_def *src[MAX_COMBINER_TERMS];
+   nir_def *tmp0, *tmp1;
    GLuint i;
 
    assert(nr <= MAX_COMBINER_TERMS);
@@ -619,7 +619,7 @@ emit_combine(struct texenv_fragment_program *p,
 /**
  * Generate instructions for one texture unit's env/combiner mode.
  */
-static nir_ssa_def *
+static nir_def *
 emit_texenv(struct texenv_fragment_program *p, GLuint unit)
 {
    const struct state_key *key = p->state;
@@ -662,7 +662,7 @@ emit_texenv(struct texenv_fragment_program *p, GLuint unit)
    else
       alpha_saturate = GL_FALSE;
 
-   nir_ssa_def *val;
+   nir_def *val;
 
    /* Emit the RGB and A combine ops
     */
@@ -697,7 +697,7 @@ emit_texenv(struct texenv_fragment_program *p, GLuint unit)
       val = smear(p->b, val);
       if (rgb_saturate)
          val = nir_fsat(p->b, val);
-      nir_ssa_def *rgb = val;
+      nir_def *rgb = val;
 
       val = emit_combine(p, unit,
                          key->unit[unit].NumArgsA,
@@ -709,7 +709,7 @@ emit_texenv(struct texenv_fragment_program *p, GLuint unit)
 
       if (alpha_saturate)
          val = nir_fsat(p->b, val);
-      nir_ssa_def *a = val;
+      nir_def *a = val;
 
       val = nir_vector_insert_imm(p->b, rgb, a, 3);
    }
@@ -717,7 +717,7 @@ emit_texenv(struct texenv_fragment_program *p, GLuint unit)
    /* Deal with the final shift:
     */
    if (alpha_shift || rgb_shift) {
-      nir_ssa_def *shift;
+      nir_def *shift;
 
       if (rgb_shift == alpha_shift) {
          shift = nir_imm_float(p->b, (float)(1 << rgb_shift));
@@ -747,7 +747,7 @@ load_texture(struct texenv_fragment_program *p, GLuint unit)
       return;
 
    const GLuint texTarget = p->state->unit[unit].source_index;
-   nir_ssa_def *texcoord;
+   nir_def *texcoord;
 
    if (!(p->state->inputs_available & (VARYING_BIT_TEX0 << unit))) {
       texcoord = get_current_attrib(p, VERT_ATTRIB_TEX0 + unit);
@@ -804,7 +804,7 @@ load_texture(struct texenv_fragment_program *p, GLuint unit)
    tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
                                      &deref->dest.ssa);
 
-   nir_ssa_def *src2 =
+   nir_def *src2 =
       nir_channels(p->b, texcoord,
                    nir_component_mask(tex->coord_components));
    tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord, src2);
@@ -814,7 +814,7 @@ load_texture(struct texenv_fragment_program *p, GLuint unit)
 
    if (p->state->unit[unit].shadow) {
       tex->is_shadow = true;
-      nir_ssa_def *src4 =
+      nir_def *src4 =
          nir_channel(p->b, texcoord, tex->coord_components);
       tex->src[4] = nir_tex_src_for_ssa(nir_tex_src_comparator, src4);
    }
@@ -899,12 +899,12 @@ emit_instructions(struct texenv_fragment_program *p)
       }
    }
 
-   nir_ssa_def *cf = get_source(p, TEXENV_SRC_PREVIOUS, 0);
+   nir_def *cf = get_source(p, TEXENV_SRC_PREVIOUS, 0);
 
    if (key->separate_specular) {
-      nir_ssa_def *spec_result = cf;
+      nir_def *spec_result = cf;
 
-      nir_ssa_def *secondary;
+      nir_def *secondary;
       if (p->state->inputs_available & VARYING_BIT_COL1)
          secondary = load_input(p, VARYING_SLOT_COL1, glsl_vec4_type());
       else
index 96ce175..d698550 100644
@@ -285,10 +285,10 @@ struct tnl_program {
 
    nir_builder *b;
 
-   nir_ssa_def *eye_position;
-   nir_ssa_def *eye_position_z;
-   nir_ssa_def *eye_position_normalized;
-   nir_ssa_def *transformed_normal;
+   nir_def *eye_position;
+   nir_def *eye_position_z;
+   nir_def *eye_position_normalized;
+   nir_def *transformed_normal;
 
    GLuint materials;
    GLuint color_materials;
@@ -317,7 +317,7 @@ register_state_var(struct tnl_program *p,
    return var;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_state_var(struct tnl_program *p,
                gl_state_index s0,
                gl_state_index s1,
@@ -329,7 +329,7 @@ load_state_var(struct tnl_program *p,
    return nir_load_var(p->b, var);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_state_vec4(struct tnl_program *p,
                 gl_state_index s0,
                 gl_state_index s1,
@@ -340,14 +340,14 @@ load_state_vec4(struct tnl_program *p,
 }
 
 static void
-load_state_mat4(struct tnl_program *p, nir_ssa_def *out[4],
+load_state_mat4(struct tnl_program *p, nir_def *out[4],
                 gl_state_index state_index, unsigned tex_index)
 {
    for (int i = 0; i < 4; ++i)
       out[i] = load_state_vec4(p, state_index, tex_index, i, i);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_input(struct tnl_program *p, gl_vert_attrib attr,
            const struct glsl_type *type)
 {
@@ -360,7 +360,7 @@ load_input(struct tnl_program *p, gl_vert_attrib attr,
       return load_state_var(p, STATE_CURRENT_ATTRIB, attr, 0, 0, type);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_input_vec4(struct tnl_program *p, gl_vert_attrib attr)
 {
    return load_input(p, attr, glsl_vec4_type());
@@ -378,7 +378,7 @@ register_output(struct tnl_program *p, gl_varying_slot slot,
 
 static void
 store_output_vec4_masked(struct tnl_program *p, gl_varying_slot slot,
-                         nir_ssa_def *value, unsigned mask)
+                         nir_def *value, unsigned mask)
 {
    assert(mask <= 0xf);
    nir_variable *var = register_output(p, slot, glsl_vec4_type());
@@ -387,24 +387,24 @@ store_output_vec4_masked(struct tnl_program *p, gl_varying_slot slot,
 
 static void
 store_output_vec4(struct tnl_program *p, gl_varying_slot slot,
-                  nir_ssa_def *value)
+                  nir_def *value)
 {
    store_output_vec4_masked(p, slot, value, 0xf);
 }
 
 static void
 store_output_float(struct tnl_program *p, gl_varying_slot slot,
-                   nir_ssa_def *value)
+                   nir_def *value)
 {
    nir_variable *var = register_output(p, slot, glsl_float_type());
    nir_store_var(p->b, var, value, 0x1);
 }
 
 
-static nir_ssa_def *
+static nir_def *
 emit_matrix_transform_vec4(nir_builder *b,
-                           nir_ssa_def *mat[4],
-                           nir_ssa_def *src)
+                           nir_def *mat[4],
+                           nir_def *src)
 {
    return nir_vec4(b,
                    nir_fdot4(b, src, mat[0]),
@@ -413,12 +413,12 @@ emit_matrix_transform_vec4(nir_builder *b,
                    nir_fdot4(b, src, mat[3]));
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_transpose_matrix_transform_vec4(nir_builder *b,
-                                     nir_ssa_def *mat[4],
-                                     nir_ssa_def *src)
+                                     nir_def *mat[4],
+                                     nir_def *src)
 {
-   nir_ssa_def *result;
+   nir_def *result;
    result = nir_fmul(b, nir_channel(b, src, 0), mat[0]);
    result = nir_fmad(b, nir_channel(b, src, 1), mat[1], result);
    result = nir_fmad(b, nir_channel(b, src, 2), mat[2], result);
@@ -426,10 +426,10 @@ emit_transpose_matrix_transform_vec4(nir_builder *b,
    return result;
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_matrix_transform_vec3(nir_builder *b,
-                           nir_ssa_def *mat[3],
-                           nir_ssa_def *src)
+                           nir_def *mat[3],
+                           nir_def *src)
 {
    return nir_vec3(b,
                    nir_fdot3(b, src, mat[0]),
@@ -437,10 +437,10 @@ emit_matrix_transform_vec3(nir_builder *b,
                    nir_fdot3(b, src, mat[2]));
 }
 
-static nir_ssa_def *
-emit_normalize_vec3(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+emit_normalize_vec3(nir_builder *b, nir_def *src)
 {
-   nir_ssa_def *tmp = nir_frsq(b, nir_fdot3(b, src, src));
+   nir_def *tmp = nir_frsq(b, nir_fdot3(b, src, src));
    return nir_fmul(b, src, tmp);
 }
 
@@ -448,23 +448,23 @@ static void
 emit_passthrough(struct tnl_program *p, gl_vert_attrib attr,
                  gl_varying_slot varying)
 {
-   nir_ssa_def *val = load_input_vec4(p, attr);
+   nir_def *val = load_input_vec4(p, attr);
    store_output_vec4(p, varying, val);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_eye_position(struct tnl_program *p)
 {
    if (!p->eye_position) {
-      nir_ssa_def *pos =
+      nir_def *pos =
          load_input_vec4(p, VERT_ATTRIB_POS);
       if (p->mvp_with_dp4) {
-         nir_ssa_def *modelview[4];
+         nir_def *modelview[4];
          load_state_mat4(p, modelview, STATE_MODELVIEW_MATRIX, 0);
          p->eye_position =
             emit_matrix_transform_vec4(p->b, modelview, pos);
       } else {
-         nir_ssa_def *modelview[4];
+         nir_def *modelview[4];
          load_state_mat4(p, modelview,
                          STATE_MODELVIEW_MATRIX_TRANSPOSE, 0);
          p->eye_position =
@@ -475,24 +475,24 @@ get_eye_position(struct tnl_program *p)
    return p->eye_position;
 }
 
-static nir_ssa_def *
+static nir_def *
 get_eye_position_z(struct tnl_program *p)
 {
    return nir_channel(p->b, get_eye_position(p), 2);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_eye_position_normalized(struct tnl_program *p)
 {
    if (!p->eye_position_normalized) {
-      nir_ssa_def *eye = get_eye_position(p);
+      nir_def *eye = get_eye_position(p);
       p->eye_position_normalized = emit_normalize_vec3(p->b, eye);
    }
 
    return p->eye_position_normalized;
 }
 
-static nir_ssa_def *
+static nir_def *
 get_transformed_normal(struct tnl_program *p)
 {
    if (!p->transformed_normal &&
@@ -503,12 +503,12 @@ get_transformed_normal(struct tnl_program *p)
          load_input(p, VERT_ATTRIB_NORMAL,
                     glsl_vector_type(GLSL_TYPE_FLOAT, 3));
    } else if (!p->transformed_normal) {
-      nir_ssa_def *normal =
+      nir_def *normal =
          load_input(p, VERT_ATTRIB_NORMAL,
                     glsl_vector_type(GLSL_TYPE_FLOAT, 3));
 
       if (p->state->need_eye_coords) {
-         nir_ssa_def *mvinv[4];
+         nir_def *mvinv[4];
          load_state_mat4(p, mvinv, STATE_MODELVIEW_MATRIX_INVTRANS, 0);
          normal = emit_matrix_transform_vec3(p->b, mvinv, normal);
       }
@@ -518,7 +518,7 @@ get_transformed_normal(struct tnl_program *p)
       if (p->state->normalize)
          normal = emit_normalize_vec3(p->b, normal);
       else if (p->state->need_eye_coords == p->state->rescale_normals) {
-         nir_ssa_def *scale =
+         nir_def *scale =
             load_state_var(p, STATE_NORMAL_SCALE, 0, 0, 0,
                            glsl_float_type());
          normal = nir_fmul(p->b, normal, scale);
@@ -567,7 +567,7 @@ static void set_material_flags( struct tnl_program *p )
 }
 
 
-static nir_ssa_def *
+static nir_def *
 get_material(struct tnl_program *p, GLuint side,
              GLuint property)
 {
@@ -601,17 +601,17 @@ get_material(struct tnl_program *p, GLuint side,
  * lift it out of the main loop.  That way the programs created here
  * would be independent of the vertex_buffer details.
  */
-static nir_ssa_def *
+static nir_def *
 get_scenecolor(struct tnl_program *p, GLuint side)
 {
    if (p->materials & SCENE_COLOR_BITS(side)) {
-      nir_ssa_def *lm_ambient =
+      nir_def *lm_ambient =
          load_state_vec4(p, STATE_LIGHTMODEL_AMBIENT, 0, 0, 0);
-      nir_ssa_def *material_emission =
+      nir_def *material_emission =
          get_material(p, side, STATE_EMISSION);
-      nir_ssa_def *material_ambient =
+      nir_def *material_ambient =
          get_material(p, side, STATE_AMBIENT);
-      nir_ssa_def *material_diffuse =
+      nir_def *material_diffuse =
          get_material(p, side, STATE_DIFFUSE);
 
       // rgb: material_emission + material_ambient * lm_ambient
@@ -629,7 +629,7 @@ get_scenecolor(struct tnl_program *p, GLuint side)
       return load_state_vec4(p, STATE_LIGHTMODEL_SCENECOLOR, side, 0, 0);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_lightprod(struct tnl_program *p, GLuint light,
               GLuint side, GLuint property, bool *is_state_light)
 {
@@ -644,26 +644,26 @@ get_lightprod(struct tnl_program *p, GLuint light,
 }
 
 
-static nir_ssa_def *
+static nir_def *
 calculate_light_attenuation(struct tnl_program *p,
                             GLuint i,
-                            nir_ssa_def *VPpli,
-                            nir_ssa_def *dist)
+                            nir_def *VPpli,
+                            nir_def *dist)
 {
-   nir_ssa_def *attenuation = NULL;
-   nir_ssa_def *att = NULL;
+   nir_def *attenuation = NULL;
+   nir_def *att = NULL;
 
    /* Calculate spot attenuation:
     */
    if (!p->state->unit[i].light_spotcutoff_is_180) {
-       nir_ssa_def *spot_dir_norm =
+       nir_def *spot_dir_norm =
          load_state_vec4(p, STATE_LIGHT_SPOT_DIR_NORMALIZED, i, 0, 0);
       attenuation =
          load_state_vec4(p, STATE_LIGHT, i, STATE_ATTENUATION, 0);
 
-      nir_ssa_def *spot = nir_fdot3(p->b, nir_fneg(p->b, VPpli),
+      nir_def *spot = nir_fdot3(p->b, nir_fneg(p->b, VPpli),
                                     spot_dir_norm);
-      nir_ssa_def *cmp = nir_flt(p->b, nir_channel(p->b, spot_dir_norm, 3),
+      nir_def *cmp = nir_flt(p->b, nir_channel(p->b, spot_dir_norm, 3),
                                  spot);
       spot = nir_fpow(p->b, spot, nir_channel(p->b, attenuation, 3));
       att = nir_bcsel(p->b, cmp, spot, nir_imm_zero(p->b, 1, 32));
@@ -686,7 +686,7 @@ calculate_light_attenuation(struct tnl_program *p,
       dist = nir_frcp(p->b, dist);
 
       /* 1, d, d*d */
-      nir_ssa_def *tmp = nir_vec3(p->b,
+      nir_def *tmp = nir_vec3(p->b,
          nir_imm_float(p->b, 1.0f),
          dist,
          nir_fmul(p->b, dist, dist)
@@ -701,20 +701,20 @@ calculate_light_attenuation(struct tnl_program *p,
    return att;
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_lit(nir_builder *b,
-         nir_ssa_def *src)
+         nir_def *src)
 {
-   nir_ssa_def *zero = nir_imm_zero(b, 1, 32);
-   nir_ssa_def *one = nir_imm_float(b, 1.0f);
-   nir_ssa_def *src_x = nir_channel(b, src, 0);
-   nir_ssa_def *src_y = nir_channel(b, src, 1);
-   nir_ssa_def *src_w = nir_channel(b, src, 3);
+   nir_def *zero = nir_imm_zero(b, 1, 32);
+   nir_def *one = nir_imm_float(b, 1.0f);
+   nir_def *src_x = nir_channel(b, src, 0);
+   nir_def *src_y = nir_channel(b, src, 1);
+   nir_def *src_w = nir_channel(b, src, 3);
 
-   nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, src_w,
+   nir_def *wclamp = nir_fmax(b, nir_fmin(b, src_w,
                                               nir_imm_float(b, 128.0f)),
                                   nir_imm_float(b, -128.0f));
-   nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src_y, zero), wclamp);
+   nir_def *pow = nir_fpow(b, nir_fmax(b, src_y, zero), wclamp);
 
    return nir_vec4(b,
                    one,
@@ -731,19 +731,19 @@ emit_lit(nir_builder *b,
  *   lit.y = MAX(0, dots.x)
  *   lit.z = SLT(0, dots.x)
  */
-static nir_ssa_def *
+static nir_def *
 emit_degenerate_lit(nir_builder *b,
-                    nir_ssa_def *dots)
+                    nir_def *dots)
 {
-   nir_ssa_def *id = nir_imm_vec4(b, 0.0f, 0.0f, 0.0f, 1.0f);
+   nir_def *id = nir_imm_vec4(b, 0.0f, 0.0f, 0.0f, 1.0f);
 
    /* Note that lit.x & lit.w will not be examined.  Note also that
     * dots.xyzw == dots.xxxx.
     */
 
-   nir_ssa_def *zero = nir_imm_zero(b, 1, 32);
-   nir_ssa_def *dots_x = nir_channel(b, dots, 0);
-   nir_ssa_def *tmp = nir_fmax(b, id, dots);
+   nir_def *zero = nir_imm_zero(b, 1, 32);
+   nir_def *dots_x = nir_channel(b, dots, 0);
+   nir_def *tmp = nir_fmax(b, id, dots);
    return nir_vector_insert_imm(b, tmp, nir_slt(b, zero, dots_x), 2);
 }
 
@@ -757,11 +757,11 @@ static void build_lighting( struct tnl_program *p )
    const GLboolean twoside = p->state->light_twoside;
    const GLboolean separate = p->state->separate_specular;
    GLuint nr_lights = 0;
-   nir_ssa_def *lit = NULL;
-   nir_ssa_def *dots = nir_imm_zero(p->b, 4, 32);
-   nir_ssa_def *normal = get_transformed_normal(p);
-   nir_ssa_def *_col0 = NULL, *_col1 = NULL;
-   nir_ssa_def *_bfc0 = NULL, *_bfc1 = NULL;
+   nir_def *lit = NULL;
+   nir_def *dots = nir_imm_zero(p->b, 4, 32);
+   nir_def *normal = get_transformed_normal(p);
+   nir_def *_col0 = NULL, *_col1 = NULL;
+   nir_def *_bfc0 = NULL, *_bfc1 = NULL;
    GLuint i;
 
    /*
@@ -780,8 +780,8 @@ static void build_lighting( struct tnl_program *p )
 
    {
       if (!p->state->material_shininess_is_zero) {
-         nir_ssa_def *shininess = get_material(p, 0, STATE_SHININESS);
-         nir_ssa_def *tmp = nir_channel(p->b, shininess, 0);
+         nir_def *shininess = get_material(p, 0, STATE_SHININESS);
+         nir_def *tmp = nir_channel(p->b, shininess, 0);
          dots = nir_vector_insert_imm(p->b, dots, tmp, 3);
       }
 
@@ -795,8 +795,8 @@ static void build_lighting( struct tnl_program *p )
          /* Note that we negate the back-face specular exponent here.
           * The negation will be undone later in the back-face code below.
           */
-         nir_ssa_def *shininess = get_material(p, 1, STATE_SHININESS);
-         nir_ssa_def *tmp = nir_channel(p->b, shininess, 0);
+         nir_def *shininess = get_material(p, 1, STATE_SHININESS);
+         nir_def *tmp = nir_channel(p->b, shininess, 0);
          tmp = nir_fneg(p->b, tmp);
          dots = nir_vector_insert_imm(p->b, dots, tmp, 2);
       }
@@ -825,8 +825,8 @@ static void build_lighting( struct tnl_program *p )
    /* Declare light products first to place them sequentially next to each
     * other for optimal constant uploads.
     */
-   nir_ssa_def *lightprod_front[MAX_LIGHTS][3];
-   nir_ssa_def *lightprod_back[MAX_LIGHTS][3];
+   nir_def *lightprod_front[MAX_LIGHTS][3];
+   nir_def *lightprod_back[MAX_LIGHTS][3];
    bool lightprod_front_is_state_light[MAX_LIGHTS][3];
    bool lightprod_back_is_state_light[MAX_LIGHTS][3];
 
@@ -877,19 +877,19 @@ static void build_lighting( struct tnl_program *p )
 
    for (i = 0; i < MAX_LIGHTS; i++) {
       if (p->state->unit[i].light_enabled) {
-         nir_ssa_def *half = NULL;
-         nir_ssa_def *att = NULL, *VPpli = NULL;
-         nir_ssa_def *dist = NULL;
+         nir_def *half = NULL;
+         nir_def *att = NULL, *VPpli = NULL;
+         nir_def *dist = NULL;
 
          if (p->state->unit[i].light_eyepos3_is_zero) {
             VPpli = load_state_var(p, STATE_LIGHT_POSITION_NORMALIZED,
                                    i, 0, 0,
                                    glsl_vector_type(GLSL_TYPE_FLOAT, 3));
          } else {
-            nir_ssa_def *Ppli =
+            nir_def *Ppli =
                load_state_vec4(p, STATE_LIGHT_POSITION, i, 0, 0);
 
-            nir_ssa_def *V = get_eye_position(p);
+            nir_def *V = get_eye_position(p);
             VPpli = nir_fsub(p->b, Ppli, V);
 
             /* Normalize VPpli.  The dist value is also used in
@@ -907,7 +907,7 @@ static void build_lighting( struct tnl_program *p )
           */
          if (!p->state->material_shininess_is_zero) {
             if (p->state->light_local_viewer) {
-               nir_ssa_def *eye_hat = get_eye_position_normalized(p);
+               nir_def *eye_hat = get_eye_position_normalized(p);
                half = emit_normalize_vec3(p->b,
                                           nir_fsub(p->b, VPpli, eye_hat));
             } else if (p->state->unit[i].light_eyepos3_is_zero) {
@@ -916,7 +916,7 @@ static void build_lighting( struct tnl_program *p )
                                  i, 0, 0,
                                  glsl_vector_type(GLSL_TYPE_FLOAT, 3));
             } else {
-               nir_ssa_def *tmp =
+               nir_def *tmp =
                   nir_fadd(p->b,
                            VPpli,
                            nir_imm_vec3(p->b, 0.0f, 0.0f, 1.0f));
@@ -926,7 +926,7 @@ static void build_lighting( struct tnl_program *p )
 
          /* Calculate dot products:
           */
-         nir_ssa_def *dot = nir_fdot3(p->b, normal, VPpli);
+         nir_def *dot = nir_fdot3(p->b, normal, VPpli);
          if (p->state->material_shininess_is_zero) {
             dots = nir_replicate(p->b, dot, 4);
          } else {
@@ -943,16 +943,16 @@ static void build_lighting( struct tnl_program *p )
             */
             for (int j = 0; j < 3; j++) {
                if (lightprod_front_is_state_light[i][j]) {
-                  nir_ssa_def *material =
+                  nir_def *material =
                      get_material(p, 0, STATE_AMBIENT + j);
                   lightprod_front[i][j] =
                      nir_fmul(p->b, lightprod_front[i][j], material);
                }
             }
 
-            nir_ssa_def *ambient = lightprod_front[i][0];
-            nir_ssa_def *diffuse = lightprod_front[i][1];
-            nir_ssa_def *specular = lightprod_front[i][2];
+            nir_def *ambient = lightprod_front[i][0];
+            nir_def *diffuse = lightprod_front[i][1];
+            nir_def *specular = lightprod_front[i][2];
 
             if (att) {
                /* light is attenuated by distance */
@@ -980,23 +980,23 @@ static void build_lighting( struct tnl_program *p )
          }
          /* Back face lighting:
           */
-         nir_ssa_def *old_dots = dots;
+         nir_def *old_dots = dots;
          if (twoside) {
             /* Transform STATE_LIGHT into STATE_LIGHTPROD if needed. This isn't done in
             * get_lightprod to avoid using too many temps.
             */
             for (int j = 0; j < 3; j++) {
                if (lightprod_back_is_state_light[i][j]) {
-                  nir_ssa_def *material =
+                  nir_def *material =
                      get_material(p, 1, STATE_AMBIENT + j);
                   lightprod_back[i][j] =
                      nir_fmul(p->b, lightprod_back[i][j], material);
                }
             }
 
-            nir_ssa_def *ambient = lightprod_back[i][0];
-            nir_ssa_def *diffuse = lightprod_back[i][1];
-            nir_ssa_def *specular = lightprod_back[i][2];
+            nir_def *ambient = lightprod_back[i][0];
+            nir_def *diffuse = lightprod_back[i][1];
+            nir_def *specular = lightprod_back[i][2];
 
             /* For the back face we need to negate the X and Y component
              * dot products.  dots.Z has the negated back-face specular
@@ -1004,7 +1004,7 @@ static void build_lighting( struct tnl_program *p )
              * negation makes the back-face specular term positive again.
              */
             unsigned swiz_xywz[] = {0, 1, 3, 2};
-            nir_ssa_def *dots =
+            nir_def *dots =
                nir_fneg(p->b, nir_swizzle(p->b, old_dots, swiz_xywz, 4));
 
             if (att) {
@@ -1048,7 +1048,7 @@ static void build_lighting( struct tnl_program *p )
 
 static void build_fog( struct tnl_program *p )
 {
-   nir_ssa_def *fog;
+   nir_def *fog;
    switch (p->state->fog_distance_mode) {
    case FDM_EYE_RADIAL:
       /* Z = sqrt(Xe*Xe + Ye*Ye + Ze*Ze) */
@@ -1073,13 +1073,13 @@ static void build_fog( struct tnl_program *p )
 }
 
 
-static nir_ssa_def *
+static nir_def *
 build_reflect_texgen(struct tnl_program *p)
 {
-   nir_ssa_def *normal = get_transformed_normal(p);
-   nir_ssa_def *eye_hat = get_eye_position_normalized(p);
+   nir_def *normal = get_transformed_normal(p);
+   nir_def *eye_hat = get_eye_position_normalized(p);
    /* n.u */
-   nir_ssa_def *tmp = nir_fdot3(p->b, normal, eye_hat);
+   nir_def *tmp = nir_fdot3(p->b, normal, eye_hat);
    /* 2n.u */
    tmp = nir_fadd(p->b, tmp, tmp);
    /* (-2n.u)n + u */
@@ -1087,11 +1087,11 @@ build_reflect_texgen(struct tnl_program *p)
 }
 
 
-static nir_ssa_def *
+static nir_def *
 build_sphere_texgen(struct tnl_program *p)
 {
-   nir_ssa_def *normal = get_transformed_normal(p);
-   nir_ssa_def *eye_hat = get_eye_position_normalized(p);
+   nir_def *normal = get_transformed_normal(p);
+   nir_def *eye_hat = get_eye_position_normalized(p);
 
    /* Could share the above calculations, but it would be
     * a fairly odd state for someone to set (both sphere and
@@ -1102,11 +1102,11 @@ build_sphere_texgen(struct tnl_program *p)
     */
 
    /* n.u */
-   nir_ssa_def *tmp = nir_fdot3(p->b, normal, eye_hat);
+   nir_def *tmp = nir_fdot3(p->b, normal, eye_hat);
    /* 2n.u */
    tmp = nir_fadd(p->b, tmp, tmp);
    /* (-2n.u)n + u */
-   nir_ssa_def *r = nir_fmad(p->b, nir_fneg(p->b, tmp), normal, eye_hat);
+   nir_def *r = nir_fmad(p->b, nir_fneg(p->b, tmp), normal, eye_hat);
    /* r + 0,0,1 */
    tmp = nir_fadd(p->b, r, nir_imm_vec4(p->b, 0.0f, 0.0f, 1.0f, 0.0f));
    /* rx^2 + ry^2 + (rz+1)^2 */
@@ -1114,7 +1114,7 @@ build_sphere_texgen(struct tnl_program *p)
    /* 2/m */
    tmp = nir_frsq(p->b, tmp);
    /* 1/m */
-   nir_ssa_def *inv_m = nir_fmul_imm(p->b, tmp, 0.5f);
+   nir_def *inv_m = nir_fmul_imm(p->b, tmp, 0.5f);
    /* r/m + 1/2 */
    return nir_fmad(p->b, r, inv_m, nir_imm_float(p->b, 0.5f));
 }
@@ -1131,14 +1131,14 @@ static void build_texture_transform( struct tnl_program *p )
       if (p->state->unit[i].coord_replace)
          continue;
 
-      nir_ssa_def *texcoord;
+      nir_def *texcoord;
       if (p->state->unit[i].texgen_enabled) {
          GLuint copy_mask = 0;
          GLuint sphere_mask = 0;
          GLuint reflect_mask = 0;
          GLuint normal_mask = 0;
          GLuint modes[4];
-         nir_ssa_def *comps[4];
+         nir_def *comps[4];
 
          modes[0] = p->state->unit[i].texgen_mode0;
          modes[1] = p->state->unit[i].texgen_mode1;
@@ -1148,16 +1148,16 @@ static void build_texture_transform( struct tnl_program *p )
          for (j = 0; j < 4; j++) {
             switch (modes[j]) {
             case TXG_OBJ_LINEAR: {
-               nir_ssa_def *obj = load_input_vec4(p, VERT_ATTRIB_POS);
-               nir_ssa_def *plane =
+               nir_def *obj = load_input_vec4(p, VERT_ATTRIB_POS);
+               nir_def *plane =
                   load_state_vec4(p, STATE_TEXGEN, i,
                                   STATE_TEXGEN_OBJECT_S + j, 0);
                comps[j] = nir_fdot4(p->b, obj, plane);
                break;
             }
             case TXG_EYE_LINEAR: {
-               nir_ssa_def *eye = get_eye_position(p);
-               nir_ssa_def *plane =
+               nir_def *eye = get_eye_position(p);
+               nir_def *plane =
                   load_state_vec4(p, STATE_TEXGEN, i,
                                   STATE_TEXGEN_EYE_S + j, 0);
                comps[j] = nir_fdot4(p->b, eye, plane);
@@ -1178,28 +1178,28 @@ static void build_texture_transform( struct tnl_program *p )
          }
 
          if (sphere_mask) {
-            nir_ssa_def *sphere = build_sphere_texgen(p);
+            nir_def *sphere = build_sphere_texgen(p);
             for (j = 0; j < 4; j++)
                if (sphere_mask & (1 << j))
                   comps[j] = nir_channel(p->b, sphere, j);
          }
 
          if (reflect_mask) {
-            nir_ssa_def *reflect = build_reflect_texgen(p);
+            nir_def *reflect = build_reflect_texgen(p);
             for (j = 0; j < 4; j++)
                if (reflect_mask & (1 << j))
                   comps[j] = nir_channel(p->b, reflect, j);
          }
 
          if (normal_mask) {
-            nir_ssa_def *normal = get_transformed_normal(p);
+            nir_def *normal = get_transformed_normal(p);
             for (j = 0; j < 4; j++)
                if (normal_mask & (1 << j))
                   comps[j] = nir_channel(p->b, normal, j);
          }
 
          if (copy_mask) {
-            nir_ssa_def *in = load_input_vec4(p, VERT_ATTRIB_TEX0 + i);
+            nir_def *in = load_input_vec4(p, VERT_ATTRIB_TEX0 + i);
             for (j = 0; j < 4; j++)
                if (copy_mask & (1 << j))
                   comps[j] = nir_channel(p->b, in, j);
@@ -1210,7 +1210,7 @@ static void build_texture_transform( struct tnl_program *p )
          texcoord = load_input_vec4(p, VERT_ATTRIB_TEX0 + i);
 
       if (p->state->unit[i].texmat_enabled) {
-         nir_ssa_def *texmat[4];
+         nir_def *texmat[4];
          if (p->mvp_with_dp4) {
             load_state_mat4(p, texmat, STATE_TEXTURE_MATRIX, i);
             texcoord =
@@ -1234,17 +1234,17 @@ static void build_texture_transform( struct tnl_program *p )
  */
 static void build_atten_pointsize( struct tnl_program *p )
 {
-   nir_ssa_def *eye = get_eye_position_z(p);
-   nir_ssa_def *in_size =
+   nir_def *eye = get_eye_position_z(p);
+   nir_def *in_size =
       load_state_vec4(p, STATE_POINT_SIZE_CLAMPED, 0, 0, 0);
-   nir_ssa_def *att =
+   nir_def *att =
       load_state_vec4(p, STATE_POINT_ATTENUATION, 0, 0, 0);
 
    /* dist = |eyez| */
-   nir_ssa_def *dist = nir_fabs(p->b, eye);
+   nir_def *dist = nir_fabs(p->b, eye);
 
    /* p1 + dist * (p2 + dist * p3); */
-   nir_ssa_def *factor = nir_fmad(p->b, dist, nir_channel(p->b, att, 2),
+   nir_def *factor = nir_fmad(p->b, dist, nir_channel(p->b, att, 2),
                                               nir_channel(p->b, att, 1));
    factor = nir_fmad(p->b, dist, factor, nir_channel(p->b, att, 0));
 
@@ -1252,7 +1252,7 @@ static void build_atten_pointsize( struct tnl_program *p )
    factor = nir_frsq(p->b, factor);
 
    /* pointSize / sqrt(factor) */
-   nir_ssa_def *size = nir_fmul(p->b, factor,
+   nir_def *size = nir_fmul(p->b, factor,
                                 nir_channel(p->b, in_size, 0));
 
 #if 1
@@ -1272,7 +1272,7 @@ static void build_atten_pointsize( struct tnl_program *p )
  */
 static void build_array_pointsize( struct tnl_program *p )
 {
-   nir_ssa_def *val = load_input(p, VERT_ATTRIB_POINT_SIZE,
+   nir_def *val = load_input(p, VERT_ATTRIB_POINT_SIZE,
                                  glsl_float_type());
    store_output_float(p, VARYING_SLOT_PSIZ, val);
 }
index f48b704..b33827f 100644
@@ -57,17 +57,17 @@ struct ptn_compile {
    nir_variable *output_vars[VARYING_SLOT_MAX];
    nir_variable *sysval_vars[SYSTEM_VALUE_MAX];
    nir_variable *sampler_vars[32]; /* matches number of bits in TexSrcUnit */
-   nir_ssa_def **output_regs;
-   nir_ssa_def **temp_regs;
+   nir_def **output_regs;
+   nir_def **temp_regs;
 
-   nir_ssa_def *addr_reg;
+   nir_def *addr_reg;
 };
 
 #define SWIZ(X, Y, Z, W) \
    (unsigned[4]){ SWIZZLE_##X, SWIZZLE_##Y, SWIZZLE_##Z, SWIZZLE_##W }
 #define ptn_channel(b, src, ch) nir_channel(b, src, SWIZZLE_##ch)
 
-static nir_ssa_def *
+static nir_def *
 ptn_get_src(struct ptn_compile *c, const struct prog_src_register *prog_src)
 {
    nir_builder *b = &c->build;
@@ -128,7 +128,7 @@ ptn_get_src(struct ptn_compile *c, const struct prog_src_register *prog_src)
 
          nir_deref_instr *deref = nir_build_deref_var(b, c->parameters);
 
-         nir_ssa_def *index = nir_imm_int(b, prog_src->Index);
+         nir_def *index = nir_imm_int(b, prog_src->Index);
 
          /* Add the address register. Note this is (uniquely) a scalar, so the
           * component sizes match.
@@ -153,7 +153,7 @@ ptn_get_src(struct ptn_compile *c, const struct prog_src_register *prog_src)
       abort();
    }
 
-   nir_ssa_def *def;
+   nir_def *def;
    if (!HAS_EXTENDED_SWIZZLE(prog_src->Swizzle) &&
        (prog_src->Negate == NEGATE_NONE || prog_src->Negate == NEGATE_XYZW)) {
       /* The simple non-SWZ case. */
@@ -168,7 +168,7 @@ ptn_get_src(struct ptn_compile *c, const struct prog_src_register *prog_src)
       /* The SWZ instruction allows per-component zero/one swizzles, and also
        * per-component negation.
        */
-      nir_ssa_def *chans[4];
+      nir_def *chans[4];
       for (int i = 0; i < 4; i++) {
          int swizzle = GET_SWZ(prog_src->Swizzle, i);
          if (swizzle == SWIZZLE_ZERO) {
@@ -201,10 +201,10 @@ ptn_get_src(struct ptn_compile *c, const struct prog_src_register *prog_src)
  *  dst.z = 2^{src.x}
  *  dst.w = 1.0
  */
-static nir_ssa_def *
-ptn_exp(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ptn_exp(nir_builder *b, nir_def **src)
 {
-   nir_ssa_def *srcx = ptn_channel(b, src[0], X);
+   nir_def *srcx = ptn_channel(b, src[0], X);
 
    return nir_vec4(b, nir_fexp2(b, nir_ffloor(b, srcx)),
                       nir_fsub(b, srcx, nir_ffloor(b, srcx)),
@@ -218,11 +218,11 @@ ptn_exp(nir_builder *b, nir_ssa_def **src)
  *  dst.z = \log_2{|src.x|}
  *  dst.w = 1.0
  */
-static nir_ssa_def *
-ptn_log(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ptn_log(nir_builder *b, nir_def **src)
 {
-   nir_ssa_def *abs_srcx = nir_fabs(b, ptn_channel(b, src[0], X));
-   nir_ssa_def *log2 = nir_flog2(b, abs_srcx);
+   nir_def *abs_srcx = nir_fabs(b, ptn_channel(b, src[0], X));
+   nir_def *log2 = nir_flog2(b, abs_srcx);
 
    return nir_vec4(b, nir_ffloor(b, log2),
                       nir_fdiv(b, abs_srcx, nir_fexp2(b, nir_ffloor(b, log2))),
@@ -236,8 +236,8 @@ ptn_log(nir_builder *b, nir_ssa_def **src)
  *   dst.z = src0.z
  *   dst.w = src1.w
  */
-static nir_ssa_def *
-ptn_dst(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ptn_dst(nir_builder *b, nir_def **src)
 {
    return nir_vec4(b, nir_imm_float(b, 1.0),
                       nir_fmul(b, ptn_channel(b, src[0], Y),
@@ -252,17 +252,17 @@ ptn_dst(nir_builder *b, nir_ssa_def **src)
  *  dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0))} : 0
  *  dst.w = 1.0
  */
-static nir_ssa_def *
-ptn_lit(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ptn_lit(nir_builder *b, nir_def **src)
 {
-   nir_ssa_def *src0_y = ptn_channel(b, src[0], Y);
-   nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, ptn_channel(b, src[0], W),
+   nir_def *src0_y = ptn_channel(b, src[0], Y);
+   nir_def *wclamp = nir_fmax(b, nir_fmin(b, ptn_channel(b, src[0], W),
                                               nir_imm_float(b, 128.0)),
                                   nir_imm_float(b, -128.0));
-   nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
+   nir_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
                                wclamp);
 
-   nir_ssa_def *z = nir_bcsel(b, nir_fle_imm(b, ptn_channel(b, src[0], X), 0.0),
+   nir_def *z = nir_bcsel(b, nir_fle_imm(b, ptn_channel(b, src[0], X), 0.0),
                               nir_imm_float(b, 0.0), pow);
 
    return nir_vec4(b, nir_imm_float(b, 1.0),
@@ -278,8 +278,8 @@ ptn_lit(nir_builder *b, nir_ssa_def **src)
  *   dst.z = 0.0
  *   dst.w = 1.0
  */
-static nir_ssa_def *
-ptn_scs(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ptn_scs(nir_builder *b, nir_def **src)
 {
    return nir_vec4(b, nir_fcos(b, ptn_channel(b, src[0], X)),
                       nir_fsin(b, ptn_channel(b, src[0], X)),
@@ -287,10 +287,10 @@ ptn_scs(nir_builder *b, nir_ssa_def **src)
                       nir_imm_float(b, 1.0));
 }
 
-static nir_ssa_def *
-ptn_xpd(nir_builder *b, nir_ssa_def **src)
+static nir_def *
+ptn_xpd(nir_builder *b, nir_def **src)
 {
-   nir_ssa_def *vec =
+   nir_def *vec =
       nir_fsub(b, nir_fmul(b, nir_swizzle(b, src[0], SWIZ(Y, Z, X, W), 3),
                               nir_swizzle(b, src[1], SWIZ(Z, X, Y, W), 3)),
                   nir_fmul(b, nir_swizzle(b, src[1], SWIZ(Y, Z, X, W), 3),
@@ -303,11 +303,11 @@ ptn_xpd(nir_builder *b, nir_ssa_def **src)
 }
 
 static void
-ptn_kil(nir_builder *b, nir_ssa_def **src)
+ptn_kil(nir_builder *b, nir_def **src)
 {
    /* flt must be exact, because NaN shouldn't discard. (apps rely on this) */
    b->exact = true;
-   nir_ssa_def *cmp = nir_bany(b, nir_flt_imm(b, src[0], 0.0));
+   nir_def *cmp = nir_bany(b, nir_flt_imm(b, src[0], 0.0));
    b->exact = false;
 
    nir_discard_if(b, cmp);
@@ -353,8 +353,8 @@ _mesa_texture_index_to_sampler_dim(gl_texture_index index, bool *is_array)
    unreachable("unknown texture target");
 }
 
-static nir_ssa_def *
-ptn_tex(struct ptn_compile *c, nir_ssa_def **src,
+static nir_def *
+ptn_tex(struct ptn_compile *c, nir_def **src,
         struct prog_instruction *prog_inst)
 {
    nir_builder *b = &c->build;
@@ -526,12 +526,12 @@ ptn_emit_instruction(struct ptn_compile *c, struct prog_instruction *prog_inst)
    if (op == OPCODE_END)
       return;
 
-   nir_ssa_def *src[3];
+   nir_def *src[3];
    for (i = 0; i < 3; i++) {
       src[i] = ptn_get_src(c, &prog_inst->SrcReg[i]);
    }
 
-   nir_ssa_def *dst = NULL;
+   nir_def *dst = NULL;
    if (c->error)
       return;
 
@@ -672,7 +672,7 @@ ptn_emit_instruction(struct ptn_compile *c, struct prog_instruction *prog_inst)
    const struct prog_dst_register *prog_dst = &prog_inst->DstReg;
    assert(!prog_dst->RelAddr);
 
-   nir_ssa_def *reg = NULL;
+   nir_def *reg = NULL;
    unsigned write_mask = prog_dst->WriteMask;
 
    switch (prog_dst->File) {
@@ -716,7 +716,7 @@ ptn_add_output_stores(struct ptn_compile *c)
    nir_builder *b = &c->build;
 
    nir_foreach_shader_out_variable(var, b->shader) {
-      nir_ssa_def *src = nir_load_reg(b, c->output_regs[var->data.location]);
+      nir_def *src = nir_load_reg(b, c->output_regs[var->data.location]);
       if (c->prog->Target == GL_FRAGMENT_PROGRAM_ARB &&
           var->data.location == FRAG_RESULT_DEPTH) {
          /* result.depth has this strange convention of being the .z component of
@@ -799,7 +799,7 @@ setup_registers_and_variables(struct ptn_compile *c)
 
    /* Create output registers and variables. */
    int max_outputs = util_last_bit64(c->prog->info.outputs_written);
-   c->output_regs = rzalloc_array(c, nir_ssa_def *, max_outputs);
+   c->output_regs = rzalloc_array(c, nir_def *, max_outputs);
 
    uint64_t outputs_written = c->prog->info.outputs_written;
    while (outputs_written) {
@@ -809,7 +809,7 @@ setup_registers_and_variables(struct ptn_compile *c)
        * for the outputs and emit stores to the real outputs at the end of
        * the shader.
        */
-      nir_ssa_def *reg = nir_decl_reg(b, 4, 32, 0);
+      nir_def *reg = nir_decl_reg(b, 4, 32, 0);
 
       const struct glsl_type *type;
       if ((c->prog->Target == GL_FRAGMENT_PROGRAM_ARB && i == FRAG_RESULT_DEPTH) ||
@@ -830,7 +830,7 @@ setup_registers_and_variables(struct ptn_compile *c)
    }
 
    /* Create temporary registers. */
-   c->temp_regs = rzalloc_array(c, nir_ssa_def *,
+   c->temp_regs = rzalloc_array(c, nir_def *,
                                 c->prog->arb.NumTemporaries);
 
    for (unsigned i = 0; i < c->prog->arb.NumTemporaries; i++) {
index b2ee11b..018b6f0 100644
@@ -39,13 +39,13 @@ struct st_translate {
    nir_builder *b;
    struct ati_fragment_shader *atifs;
 
-   nir_ssa_def *temps[MAX_PROGRAM_TEMPS];
+   nir_def *temps[MAX_PROGRAM_TEMPS];
 
    nir_variable *fragcolor;
    nir_variable *constants;
    nir_variable *samplers[MAX_TEXTURE_UNITS];
 
-   nir_ssa_def *inputs[VARYING_SLOT_MAX];
+   nir_def *inputs[VARYING_SLOT_MAX];
 
    unsigned current_pass;
 
@@ -54,30 +54,30 @@ struct st_translate {
    bool error;
 };
 
-static nir_ssa_def *
-nir_channel_vec4(nir_builder *b, nir_ssa_def *src, unsigned channel)
+static nir_def *
+nir_channel_vec4(nir_builder *b, nir_def *src, unsigned channel)
 {
    unsigned swizzle[4] = { channel, channel, channel, channel };
    return nir_swizzle(b, src, swizzle, 4);
 }
 
-static nir_ssa_def *
+static nir_def *
 nir_imm_vec4_float(nir_builder *b, float f)
 {
    return nir_imm_vec4(b, f, f, f, f);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_temp(struct st_translate *t, unsigned index)
 {
    if (!t->temps[index])
-      t->temps[index] = nir_ssa_undef(t->b, 4, 32);
+      t->temps[index] = nir_undef(t->b, 4, 32);
    return t->temps[index];
 }
 
-static nir_ssa_def *
+static nir_def *
 apply_swizzle(struct st_translate *t,
-              struct nir_ssa_def *src, GLuint swizzle)
+              struct nir_def *src, GLuint swizzle)
 {
    /* From the ATI_fs spec:
     *
@@ -97,10 +97,10 @@ apply_swizzle(struct st_translate *t,
       static unsigned xywz[4] = { 0, 1, 3, 2 };
       return nir_swizzle(t->b, src, xywz, 4);
    } else {
-      nir_ssa_def *rcp = nir_frcp(t->b, nir_channel(t->b, src,
+      nir_def *rcp = nir_frcp(t->b, nir_channel(t->b, src,
                                                     swizzle == GL_SWIZZLE_STR_DR_ATI ? 2 : 3));
 
-      nir_ssa_def *st_mul = nir_fmul(t->b, nir_trim_vector(t->b, src, 2), rcp);
+      nir_def *st_mul = nir_fmul(t->b, nir_trim_vector(t->b, src, 2), rcp);
 
       return nir_vec4(t->b,
                       nir_channel(t->b, st_mul, 0),
@@ -110,7 +110,7 @@ apply_swizzle(struct st_translate *t,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 load_input(struct st_translate *t, gl_varying_slot slot)
 {
    if (!t->inputs[slot]) {
@@ -124,7 +124,7 @@ load_input(struct st_translate *t, gl_varying_slot slot)
    return t->inputs[slot];
 }
 
-static nir_ssa_def *
+static nir_def *
 atifs_load_uniform(struct st_translate *t, int index)
 {
    nir_deref_instr *deref = nir_build_deref_array(t->b,
@@ -133,7 +133,7 @@ atifs_load_uniform(struct st_translate *t, int index)
    return nir_load_deref(t->b, deref);
 }
 
-static struct nir_ssa_def *
+static struct nir_def *
 get_source(struct st_translate *t, GLenum src_type)
 {
    if (src_type >= GL_REG_0_ATI && src_type <= GL_REG_5_ATI) {
@@ -167,7 +167,7 @@ get_source(struct st_translate *t, GLenum src_type)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 prepare_argument(struct st_translate *t, const struct atifs_instruction *inst,
                  const unsigned argId, bool alpha)
 {
@@ -178,7 +178,7 @@ prepare_argument(struct st_translate *t, const struct atifs_instruction *inst,
 
    const struct atifragshader_src_register *srcReg = &inst->SrcReg[alpha][argId];
 
-   nir_ssa_def *src = get_source(t, srcReg->Index);
+   nir_def *src = get_source(t, srcReg->Index);
 
    switch (srcReg->argRep) {
    case GL_NONE:
@@ -211,12 +211,12 @@ prepare_argument(struct st_translate *t, const struct atifs_instruction *inst,
    return src;
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_arith_inst(struct st_translate *t,
                 const struct atifs_instruction *inst,
                 bool alpha)
 {
-   nir_ssa_def *src[3] = {0};
+   nir_def *src[3] = {0};
    for (int i = 0; i < inst->ArgCount[alpha]; i++)
       src[i] = prepare_argument(t, inst, i, alpha);
 
@@ -269,9 +269,9 @@ emit_arith_inst(struct st_translate *t,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 emit_dstmod(struct st_translate *t,
-            struct nir_ssa_def *dst, GLuint dstMod)
+            struct nir_def *dst, GLuint dstMod)
 {
    switch (dstMod & ~GL_SATURATE_BIT_ATI) {
    case GL_2X_BIT_ATI:
@@ -315,7 +315,7 @@ compile_setupinst(struct st_translate *t,
 
    GLuint pass_tex = texinst->src;
 
-   nir_ssa_def *coord;
+   nir_def *coord;
 
    if (pass_tex >= GL_TEXTURE0_ARB && pass_tex <= GL_TEXTURE7_ARB) {
       unsigned attr = pass_tex - GL_TEXTURE0_ARB;
@@ -331,7 +331,7 @@ compile_setupinst(struct st_translate *t,
          coord = nir_imm_vec4_float(t->b, 0.0f);
       }
    } else {
-      coord = nir_ssa_undef(t->b, 4, 32);
+      coord = nir_undef(t->b, 4, 32);
    }
    coord = apply_swizzle(t, coord, texinst->swizzle);
 
@@ -393,7 +393,7 @@ compile_instruction(struct st_translate *t,
          continue;
 
       /* Execute the op */
-      nir_ssa_def *result = emit_arith_inst(t, inst, optype);
+      nir_def *result = emit_arith_inst(t, inst, optype);
       result = emit_dstmod(t, result, inst->DstReg[optype].dstMod);
 
       /* Do the writemask */
@@ -516,7 +516,7 @@ st_nir_lower_atifs_samplers_instr(nir_builder *b, nir_instr *instr, void *data)
     * accidentally enables a cube array).
     */
    if (coord_components != tex->coord_components) {
-      nir_ssa_def *coords = nir_ssa_for_src(b, tex->src[coords_idx].src, tex->coord_components);
+      nir_def *coords = nir_ssa_for_src(b, tex->src[coords_idx].src, tex->coord_components);
       nir_instr_rewrite_src_ssa(instr, &tex->src[coords_idx].src,
                                 nir_resize_vector(b, coords, coord_components));
       tex->coord_components = coord_components;
index bfbccec..0a86551 100644
  */
 #define USE_DRAWPIXELS_CACHE 1
 
-static nir_ssa_def *
+static nir_def *
 sample_via_nir(nir_builder *b, nir_variable *texcoord,
                const char *name, int sampler, enum glsl_base_type base_type,
                nir_alu_type alu_type)
@@ -160,7 +160,7 @@ make_drawpix_z_stencil_program_nir(struct st_context *st,
       nir_variable *out =
          nir_create_variable_with_location(b.shader, nir_var_shader_out,
                                            FRAG_RESULT_DEPTH, glsl_float_type());
-      nir_ssa_def *depth = sample_via_nir(&b, texcoord, "depth", 0,
+      nir_def *depth = sample_via_nir(&b, texcoord, "depth", 0,
                                           GLSL_TYPE_FLOAT, nir_type_float32);
       nir_store_var(&b, out, depth, 0x1);
 
@@ -176,7 +176,7 @@ make_drawpix_z_stencil_program_nir(struct st_context *st,
       nir_variable *out =
          nir_create_variable_with_location(b.shader, nir_var_shader_out,
                                            FRAG_RESULT_STENCIL, glsl_uint_type());
-      nir_ssa_def *stencil = sample_via_nir(&b, texcoord, "stencil", 1,
+      nir_def *stencil = sample_via_nir(&b, texcoord, "stencil", 1,
                                             GLSL_TYPE_UINT, nir_type_uint32);
       nir_store_var(&b, out, stencil, 0x1);
    }
@@ -199,9 +199,9 @@ make_drawpix_zs_to_color_program_nir(struct st_context *st,
                                         VARYING_SLOT_TEX0, glsl_vec_type(2));
 
    /* Sample depth and stencil */
-   nir_ssa_def *depth = sample_via_nir(&b, texcoord, "depth", 0,
+   nir_def *depth = sample_via_nir(&b, texcoord, "depth", 0,
                                        GLSL_TYPE_FLOAT, nir_type_float32);
-   nir_ssa_def *stencil = sample_via_nir(&b, texcoord, "stencil", 1,
+   nir_def *stencil = sample_via_nir(&b, texcoord, "stencil", 1,
                                          GLSL_TYPE_UINT, nir_type_uint32);
 
    /* Create the variable to store the output color */
@@ -209,29 +209,29 @@ make_drawpix_zs_to_color_program_nir(struct st_context *st,
       nir_create_variable_with_location(b.shader, nir_var_shader_out,
                                         FRAG_RESULT_COLOR, glsl_vec_type(4));
 
-   nir_ssa_def *shifted_depth = nir_fmul(&b,nir_f2f64(&b, depth), nir_imm_double(&b,0xffffff));
-   nir_ssa_def *int_depth = nir_f2u32(&b,shifted_depth);
+   nir_def *shifted_depth = nir_fmul(&b,nir_f2f64(&b, depth), nir_imm_double(&b,0xffffff));
+   nir_def *int_depth = nir_f2u32(&b,shifted_depth);
 
-   nir_ssa_def *ds[4];
+   nir_def *ds[4];
    ds[0] = nir_ubitfield_extract(&b, stencil, nir_imm_int(&b, 0), nir_imm_int(&b,8));
    ds[1] = nir_ubitfield_extract(&b, int_depth, nir_imm_int(&b, 0), nir_imm_int(&b,8));
    ds[2] = nir_ubitfield_extract(&b, int_depth, nir_imm_int(&b, 8), nir_imm_int(&b,8));
    ds[3] = nir_ubitfield_extract(&b, int_depth, nir_imm_int(&b, 16), nir_imm_int(&b,8));
 
-   nir_ssa_def *ds_comp[4];
+   nir_def *ds_comp[4];
    ds_comp[0] = nir_fsat(&b, nir_fmul_imm(&b, nir_u2f32(&b, ds[3]), 1.0/255.0));
    ds_comp[1] = nir_fsat(&b, nir_fmul_imm(&b, nir_u2f32(&b, ds[2]), 1.0/255.0));
    ds_comp[2] = nir_fsat(&b, nir_fmul_imm(&b, nir_u2f32(&b, ds[1]), 1.0/255.0));
    ds_comp[3] = nir_fsat(&b, nir_fmul_imm(&b, nir_u2f32(&b, ds[0]), 1.0/255.0));
 
-   nir_ssa_def *unpacked_ds = nir_vec4(&b, ds_comp[0], ds_comp[1], ds_comp[2], ds_comp[3]);
+   nir_def *unpacked_ds = nir_vec4(&b, ds_comp[0], ds_comp[1], ds_comp[2], ds_comp[3]);
 
    if (rgba) {
       nir_store_var(&b, color_out, unpacked_ds, 0xf);
    }
    else {
       unsigned zyxw[4] = { 2, 1, 0, 3 };
-      nir_ssa_def *swizzled_ds= nir_swizzle(&b, unpacked_ds, zyxw, 4);
+      nir_def *swizzled_ds= nir_swizzle(&b, unpacked_ds, zyxw, 4);
       nir_store_var(&b, color_out, swizzled_ds, 0xf);
    }
 
index b58a0ba..7b08948 100644
@@ -66,18 +66,18 @@ struct geometry_constant {
       var->data.location_frac = (offset >> 2) & 0x3;             \
    } while (0)
 
-static nir_ssa_def *
-has_nan_or_inf(nir_builder *b, nir_ssa_def *v)
+static nir_def *
+has_nan_or_inf(nir_builder *b, nir_def *v)
 {
-   nir_ssa_def *nan = nir_bany_fnequal4(b, v, v);
+   nir_def *nan = nir_bany_fnequal4(b, v, v);
 
-   nir_ssa_def *inf = nir_bany(b, nir_feq_imm(b, nir_fabs(b, v), INFINITY));
+   nir_def *inf = nir_bany(b, nir_feq_imm(b, nir_fabs(b, v), INFINITY));
 
    return nir_ior(b, nan, inf);
 }
 
 static void
-return_if_true(nir_builder *b, nir_ssa_def *cond)
+return_if_true(nir_builder *b, nir_def *cond)
 {
    nir_if *if_cond = nir_push_if(b, cond);
    nir_jump(b, nir_jump_return);
@@ -85,7 +85,7 @@ return_if_true(nir_builder *b, nir_ssa_def *cond)
 }
 
 static void
-get_input_vertices(nir_builder *b, nir_ssa_def **v)
+get_input_vertices(nir_builder *b, nir_def **v)
 {
    const int num_in_vert = b->shader->info.gs.vertices_in;
 
@@ -94,17 +94,17 @@ get_input_vertices(nir_builder *b, nir_ssa_def **v)
       "gl_Position");
    in_pos->data.location = VARYING_SLOT_POS;
 
-   nir_ssa_def *is_nan_or_inf = NULL;
+   nir_def *is_nan_or_inf = NULL;
    for (int i = 0; i < num_in_vert; i++) {
       v[i] = nir_load_array_var_imm(b, in_pos, i);
-      nir_ssa_def *r = has_nan_or_inf(b, v[i]);
+      nir_def *r = has_nan_or_inf(b, v[i]);
       is_nan_or_inf = i ? nir_ior(b, is_nan_or_inf, r) : r;
    }
    return_if_true(b, is_nan_or_inf);
 }
 
 static void
-face_culling(nir_builder *b, nir_ssa_def **v, bool packed)
+face_culling(nir_builder *b, nir_def **v, bool packed)
 {
    /* use the z value of the face normal to determine if the face points to us:
     *   Nz = (x1 - x0) * (y2 - y0) - (y1 - y0) * (x2 - x0)
@@ -119,41 +119,41 @@ face_culling(nir_builder *b, nir_ssa_def **v, bool packed)
     * we only care about the sign of the det, but also need to count the sign of
     * w0/w1/w2 as a negative w would change the direction of Nz < 0
     */
-   nir_ssa_def *y1w2 = nir_fmul(b, nir_channel(b, v[1], 1), nir_channel(b, v[2], 3));
-   nir_ssa_def *y2w1 = nir_fmul(b, nir_channel(b, v[2], 1), nir_channel(b, v[1], 3));
-   nir_ssa_def *y2w0 = nir_fmul(b, nir_channel(b, v[2], 1), nir_channel(b, v[0], 3));
-   nir_ssa_def *y0w2 = nir_fmul(b, nir_channel(b, v[0], 1), nir_channel(b, v[2], 3));
-   nir_ssa_def *y0w1 = nir_fmul(b, nir_channel(b, v[0], 1), nir_channel(b, v[1], 3));
-   nir_ssa_def *y1w0 = nir_fmul(b, nir_channel(b, v[1], 1), nir_channel(b, v[0], 3));
-   nir_ssa_def *t0 = nir_fmul(b, nir_channel(b, v[0], 0), nir_fsub(b, y1w2, y2w1));
-   nir_ssa_def *t1 = nir_fmul(b, nir_channel(b, v[1], 0), nir_fsub(b, y2w0, y0w2));
-   nir_ssa_def *t2 = nir_fmul(b, nir_channel(b, v[2], 0), nir_fsub(b, y0w1, y1w0));
-   nir_ssa_def *det = nir_fadd(b, nir_fadd(b, t0, t1), t2);
+   nir_def *y1w2 = nir_fmul(b, nir_channel(b, v[1], 1), nir_channel(b, v[2], 3));
+   nir_def *y2w1 = nir_fmul(b, nir_channel(b, v[2], 1), nir_channel(b, v[1], 3));
+   nir_def *y2w0 = nir_fmul(b, nir_channel(b, v[2], 1), nir_channel(b, v[0], 3));
+   nir_def *y0w2 = nir_fmul(b, nir_channel(b, v[0], 1), nir_channel(b, v[2], 3));
+   nir_def *y0w1 = nir_fmul(b, nir_channel(b, v[0], 1), nir_channel(b, v[1], 3));
+   nir_def *y1w0 = nir_fmul(b, nir_channel(b, v[1], 1), nir_channel(b, v[0], 3));
+   nir_def *t0 = nir_fmul(b, nir_channel(b, v[0], 0), nir_fsub(b, y1w2, y2w1));
+   nir_def *t1 = nir_fmul(b, nir_channel(b, v[1], 0), nir_fsub(b, y2w0, y0w2));
+   nir_def *t2 = nir_fmul(b, nir_channel(b, v[2], 0), nir_fsub(b, y0w1, y1w0));
+   nir_def *det = nir_fadd(b, nir_fadd(b, t0, t1), t2);
 
    /* invert det sign once any vertex has w < 0 */
-   nir_ssa_def *n0 = nir_flt_imm(b, nir_channel(b, v[0], 3), 0);
-   nir_ssa_def *n1 = nir_flt_imm(b, nir_channel(b, v[1], 3), 0);
-   nir_ssa_def *n2 = nir_flt_imm(b, nir_channel(b, v[2], 3), 0);
-   nir_ssa_def *cond = nir_ixor(b, nir_ixor(b, n0, n1), n2);
+   nir_def *n0 = nir_flt_imm(b, nir_channel(b, v[0], 3), 0);
+   nir_def *n1 = nir_flt_imm(b, nir_channel(b, v[1], 3), 0);
+   nir_def *n2 = nir_flt_imm(b, nir_channel(b, v[2], 3), 0);
+   nir_def *cond = nir_ixor(b, nir_ixor(b, n0, n1), n2);
    det = nir_bcsel(b, cond, nir_fneg(b, det), det);
 
    nir_variable *culling_config = nir_variable_create(
       b->shader, nir_var_uniform, glsl_uint_type(), "culling_config");
    set_uniform_location(culling_config, culling_config, packed);
-   nir_ssa_def *config = nir_i2b(b, nir_load_var(b, culling_config));
+   nir_def *config = nir_i2b(b, nir_load_var(b, culling_config));
 
    /* det < 0 means z points to the camera */
-   nir_ssa_def *zero = nir_imm_zero(b, 1, det->bit_size);
-   nir_ssa_def *is_zero = nir_feq(b, det, zero);
-   nir_ssa_def *is_neg = nir_flt(b, det, zero);
-   nir_ssa_def *cull = nir_ixor(b, is_neg, config);
+   nir_def *zero = nir_imm_zero(b, 1, det->bit_size);
+   nir_def *is_zero = nir_feq(b, det, zero);
+   nir_def *is_neg = nir_flt(b, det, zero);
+   nir_def *cull = nir_ixor(b, is_neg, config);
    return_if_true(b, nir_ior(b, is_zero, cull));
 }
 
 static void
-fast_frustum_culling(nir_builder *b, nir_ssa_def **v)
+fast_frustum_culling(nir_builder *b, nir_def **v)
 {
-   nir_ssa_def *cull = NULL;
+   nir_def *cull = NULL;
 
    /* there are six culling planes for the visible volume:
     *   1.  x + w = 0
@@ -167,14 +167,14 @@ fast_frustum_culling(nir_builder *b, nir_ssa_def **v)
     * any plane, the primitive must be invisible.
     */
    for (int i = 0; i < 6; i++) {
-      nir_ssa_def *outside = NULL;
+      nir_def *outside = NULL;
 
       for (int j = 0; j < b->shader->info.gs.vertices_in; j++) {
-         nir_ssa_def *c = nir_channel(b, v[j], i >> 1);
+         nir_def *c = nir_channel(b, v[j], i >> 1);
          if (i & 1)
             c = nir_fneg(b, c);
 
-         nir_ssa_def *r = nir_flt(b, nir_channel(b, v[j], 3), c);
+         nir_def *r = nir_flt(b, nir_channel(b, v[j], 3), c);
          outside = j ? nir_iand(b, outside, r) : r;
       }
 
@@ -184,11 +184,11 @@ fast_frustum_culling(nir_builder *b, nir_ssa_def **v)
    return_if_true(b, cull);
 }
 
-static nir_ssa_def *
-get_intersection(nir_builder *b, nir_ssa_def *v1, nir_ssa_def *v2,
-                 nir_ssa_def *d1, nir_ssa_def *d2)
+static nir_def *
+get_intersection(nir_builder *b, nir_def *v1, nir_def *v2,
+                 nir_def *d1, nir_def *d2)
 {
-   nir_ssa_def *factor = nir_fdiv(b, d1, nir_fsub(b, d1, d2));
+   nir_def *factor = nir_fdiv(b, d1, nir_fsub(b, d1, d2));
    return nir_fmad(b, nir_fsub(b, v2, v1), factor, v1);
 }
 
@@ -199,7 +199,7 @@ get_intersection(nir_builder *b, nir_ssa_def *v1, nir_ssa_def *v2,
                                                                         \
    nir_loop *name = nir_push_loop(b);                                   \
    {                                                                    \
-      nir_ssa_def *idx = nir_load_var(b, name##_index);                 \
+      nir_def *idx = nir_load_var(b, name##_index);                 \
       nir_if *if_in_loop = nir_push_if(b, nir_ilt(b, idx, max));
 
 #define end_for_loop(name)                                              \
@@ -212,7 +212,7 @@ get_intersection(nir_builder *b, nir_ssa_def *v1, nir_ssa_def *v2,
 
 static void
 clip_with_plane(nir_builder *b, nir_variable *vert, nir_variable *num_vert,
-                int max_vert, nir_ssa_def *plane)
+                int max_vert, nir_def *plane)
 {
    nir_variable *all_clipped = nir_local_variable_create(
       b->impl, glsl_bool_type(), "all_clipped");
@@ -221,14 +221,14 @@ clip_with_plane(nir_builder *b, nir_variable *vert, nir_variable *num_vert,
    nir_variable *dist = nir_local_variable_create(
       b->impl, glsl_array_type(glsl_float_type(), max_vert, 0), "dist");
 
-   nir_ssa_def *num = nir_load_var(b, num_vert);
+   nir_def *num = nir_load_var(b, num_vert);
    begin_for_loop(dist_loop, num)
    {
-      nir_ssa_def *v = nir_load_array_var(b, vert, idx);
-      nir_ssa_def *d = nir_fdot(b, v, plane);
+      nir_def *v = nir_load_array_var(b, vert, idx);
+      nir_def *d = nir_fdot(b, v, plane);
       nir_store_array_var(b, dist, idx, d, 1);
 
-      nir_ssa_def *clipped = nir_flt_imm(b, d, 0);
+      nir_def *clipped = nir_flt_imm(b, d, 0);
       nir_store_var(b, all_clipped,
                     nir_iand(b, nir_load_var(b, all_clipped), clipped), 1);
    }
@@ -265,15 +265,15 @@ clip_with_plane(nir_builder *b, nir_variable *vert, nir_variable *num_vert,
 
    begin_for_loop(vert_loop, num)
    {
-      nir_ssa_def *di = nir_load_array_var(b, dist, idx);
+      nir_def *di = nir_load_array_var(b, dist, idx);
       nir_if *if_clipped = nir_push_if(b, nir_flt_imm(b, di, 0));
       {
          /* - case, we need to take care of sign change and insert vertex */
 
-         nir_ssa_def *prev = nir_bcsel(b, nir_ieq_imm(b, idx, 0),
+         nir_def *prev = nir_bcsel(b, nir_ieq_imm(b, idx, 0),
                                        nir_iadd_imm(b, num, -1),
                                        nir_iadd_imm(b, idx, -1));
-         nir_ssa_def *dp = nir_load_array_var(b, dist, prev);
+         nir_def *dp = nir_load_array_var(b, dist, prev);
          nir_if *prev_if = nir_push_if(b, nir_fgt_imm(b, dp, 0));
          {
             /* +- case, replace - with inserted vertex
@@ -281,21 +281,21 @@ clip_with_plane(nir_builder *b, nir_variable *vert, nir_variable *num_vert,
              * but need to save vert[idx] when vert_index==idx
              */
 
-            nir_ssa_def *vi = nir_load_array_var(b, vert, idx);
+            nir_def *vi = nir_load_array_var(b, vert, idx);
             nir_store_var(b, saved, vi, 0xf);
 
-            nir_ssa_def *vp = nir_load_array_var(b, vert, prev);
-            nir_ssa_def *iv = get_intersection(b, vp, vi, dp, di);
-            nir_ssa_def *index = nir_load_var(b, vert_index);
+            nir_def *vp = nir_load_array_var(b, vert, prev);
+            nir_def *iv = get_intersection(b, vp, vi, dp, di);
+            nir_def *index = nir_load_var(b, vert_index);
             nir_store_array_var(b, vert, index, iv, 0xf);
 
             nir_store_var(b, vert_index, nir_iadd_imm(b, index, 1), 1);
          }
          nir_pop_if(b, prev_if);
 
-         nir_ssa_def *next = nir_bcsel(b, nir_ieq(b, idx, nir_iadd_imm(b, num, -1)),
+         nir_def *next = nir_bcsel(b, nir_ieq(b, idx, nir_iadd_imm(b, num, -1)),
                                        nir_imm_int(b, 0), nir_iadd_imm(b, idx, 1));
-         nir_ssa_def *dn = nir_load_array_var(b, dist, next);
+         nir_def *dn = nir_load_array_var(b, dist, next);
          nir_if *next_if = nir_push_if(b, nir_fgt_imm(b, dn, 0));
          {
             /* -+ case, may grow array:
@@ -305,12 +305,12 @@ clip_with_plane(nir_builder *b, nir_variable *vert, nir_variable *num_vert,
              *     no need to save last -, because + case won't use - value.
              */
 
-            nir_ssa_def *index = nir_load_var(b, vert_index);
-            nir_ssa_def *vi = nir_bcsel(b, nir_flt(b, idx, index),
+            nir_def *index = nir_load_var(b, vert_index);
+            nir_def *vi = nir_bcsel(b, nir_flt(b, idx, index),
                                         nir_load_var(b, saved),
                                         nir_load_array_var(b, vert, idx));
-            nir_ssa_def *vn = nir_load_array_var(b, vert, next);
-            nir_ssa_def *iv = get_intersection(b, vn, vi, dn, di);
+            nir_def *vn = nir_load_array_var(b, vert, next);
+            nir_def *iv = get_intersection(b, vn, vi, dn, di);
 
             nir_store_var(b, saved, nir_load_array_var(b, vert, index), 0xf);
             nir_store_array_var(b, vert, index, iv, 0xf);
@@ -327,8 +327,8 @@ clip_with_plane(nir_builder *b, nir_variable *vert, nir_variable *num_vert,
           *   vert_index < idx: array trim case
           */
 
-         nir_ssa_def *index = nir_load_var(b, vert_index);
-         nir_ssa_def *vi = nir_bcsel(b, nir_flt(b, idx, index),
+         nir_def *index = nir_load_var(b, vert_index);
+         nir_def *vi = nir_bcsel(b, nir_flt(b, idx, index),
                                      nir_load_var(b, saved),
                                      nir_load_array_var(b, vert, idx));
 
@@ -344,7 +344,7 @@ clip_with_plane(nir_builder *b, nir_variable *vert, nir_variable *num_vert,
    nir_copy_var(b, num_vert, vert_index);
 }
 
-static nir_ssa_def *
+static nir_def *
 get_user_clip_plane(nir_builder *b, int index, bool packed)
 {
    char name[16];
@@ -358,7 +358,7 @@ get_user_clip_plane(nir_builder *b, int index, bool packed)
 }
 
 static void
-get_depth_range_transform(nir_builder *b, bool packed, nir_ssa_def **trans)
+get_depth_range_transform(nir_builder *b, bool packed, nir_def **trans)
 {
    nir_variable *depth_scale = nir_variable_create(
       b->shader, nir_var_uniform, glsl_float_type(), "depth_scale");
@@ -372,27 +372,27 @@ get_depth_range_transform(nir_builder *b, bool packed, nir_ssa_def **trans)
    trans[1] = nir_load_var(b, depth_transport);
 }
 
-static nir_ssa_def *
-get_window_space_depth(nir_builder *b, nir_ssa_def *v, nir_ssa_def **trans)
+static nir_def *
+get_window_space_depth(nir_builder *b, nir_def *v, nir_def **trans)
 {
-   nir_ssa_def *z = nir_channel(b, v, 2);
-   nir_ssa_def *w = nir_channel(b, v, 3);
+   nir_def *z = nir_channel(b, v, 2);
+   nir_def *w = nir_channel(b, v, 3);
 
    /* do perspective division, if w==0, xyz must be 0 too (otherwise can't pass
     * the clip test), 0/0=NaN, but we want it to be the nearest point.
     */
-   nir_ssa_def *c = nir_feq_imm(b, w, 0);
-   nir_ssa_def *d = nir_bcsel(b, c, nir_imm_float(b, -1), nir_fdiv(b, z, w));
+   nir_def *c = nir_feq_imm(b, w, 0);
+   nir_def *d = nir_bcsel(b, c, nir_imm_float(b, -1), nir_fdiv(b, z, w));
 
    /* map [-1, 1] to [near, far] set by glDepthRange(near, far) */
    return nir_fmad(b, trans[0], d, trans[1]);
 }
 
 static void
-update_result_buffer(nir_builder *b, nir_ssa_def *dmin, nir_ssa_def *dmax,
+update_result_buffer(nir_builder *b, nir_def *dmin, nir_def *dmax,
                      bool offset_from_attribute, bool packed)
 {
-   nir_ssa_def *offset;
+   nir_def *offset;
    if (offset_from_attribute) {
       nir_variable *in_offset = nir_variable_create(
          b->shader, nir_var_shader_in,
@@ -411,7 +411,7 @@ update_result_buffer(nir_builder *b, nir_ssa_def *dmin, nir_ssa_def *dmax,
                        glsl_array_type(glsl_uint_type(), 0, 0), "result");
    /* driver_location = 0 (slot 0) */
 
-   nir_ssa_def *ssbo = nir_imm_int(b, 0);
+   nir_def *ssbo = nir_imm_int(b, 0);
    nir_ssbo_atomic(b, 32, ssbo, offset, nir_imm_int(b, 1),
                    .atomic_op = nir_atomic_op_xchg);
    nir_ssbo_atomic(b, 32, ssbo, nir_iadd_imm(b, offset, 4), dmin,
@@ -425,27 +425,27 @@ build_point_nir_shader(nir_builder *b, union state_key state, bool packed)
 {
    assert(b->shader->info.gs.vertices_in == 1);
 
-   nir_ssa_def *v;
+   nir_def *v;
    get_input_vertices(b, &v);
 
    fast_frustum_culling(b, &v);
 
-   nir_ssa_def *outside = NULL;
+   nir_def *outside = NULL;
    for (int i = 0; i < state.num_user_clip_planes; i++) {
-      nir_ssa_def *p = get_user_clip_plane(b, i, packed);
-      nir_ssa_def *d = nir_fdot(b, v, p);
-      nir_ssa_def *r = nir_flt_imm(b, d, 0);
+      nir_def *p = get_user_clip_plane(b, i, packed);
+      nir_def *d = nir_fdot(b, v, p);
+      nir_def *r = nir_flt_imm(b, d, 0);
       outside = i ? nir_ior(b, outside, r) : r;
    }
    if (outside)
       return_if_true(b, outside);
 
-   nir_ssa_def *trans[2];
+   nir_def *trans[2];
    get_depth_range_transform(b, packed, trans);
 
-   nir_ssa_def *depth = get_window_space_depth(b, v, trans);
-   nir_ssa_def *fdepth = nir_fmul_imm(b, depth, 4294967295.0);
-   nir_ssa_def *idepth = nir_f2uN(b, fdepth, 32);
+   nir_def *depth = get_window_space_depth(b, v, trans);
+   nir_def *fdepth = nir_fmul_imm(b, depth, 4294967295.0);
+   nir_def *idepth = nir_f2uN(b, fdepth, 32);
 
    update_result_buffer(b, idepth, idepth, state.result_offset_from_attribute, packed);
 }
@@ -456,7 +456,7 @@ create_clip_planes(nir_builder *b, int num_clip_planes, bool packed)
    nir_variable *clip_planes = nir_local_variable_create(
       b->impl, glsl_array_type(glsl_vec4_type(), num_clip_planes, 0), "clip_planes");
 
-   nir_ssa_def *unit_clip_planes[6] = {
+   nir_def *unit_clip_planes[6] = {
       nir_imm_vec4(b,  1,  0,  0,  1),
       nir_imm_vec4(b, -1,  0,  0,  1),
       nir_imm_vec4(b,  0,  1,  0,  1),
@@ -468,7 +468,7 @@ create_clip_planes(nir_builder *b, int num_clip_planes, bool packed)
       nir_store_array_var_imm(b, clip_planes, i, unit_clip_planes[i], 0xf);
 
    for (int i = 6; i < num_clip_planes; i++) {
-      nir_ssa_def *p = get_user_clip_plane(b, i - 6, packed);
+      nir_def *p = get_user_clip_plane(b, i - 6, packed);
       nir_store_array_var_imm(b, clip_planes, i, p, 0xf);
    }
 
@@ -480,7 +480,7 @@ build_line_nir_shader(nir_builder *b, union state_key state, bool packed)
 {
    assert(b->shader->info.gs.vertices_in == 2);
 
-   nir_ssa_def *v[2];
+   nir_def *v[2];
    get_input_vertices(b, v);
 
    fast_frustum_culling(b, v);
@@ -496,19 +496,19 @@ build_line_nir_shader(nir_builder *b, union state_key state, bool packed)
 
    begin_for_loop(clip_loop, nir_imm_int(b, num_clip_planes))
    {
-      nir_ssa_def *plane = nir_load_array_var(b, clip_planes, idx);
-      nir_ssa_def *v0 = nir_load_var(b, vert0);
-      nir_ssa_def *v1 = nir_load_var(b, vert1);
-      nir_ssa_def *d0 = nir_fdot(b, v0, plane);
-      nir_ssa_def *d1 = nir_fdot(b, v1, plane);
-      nir_ssa_def *n0 = nir_flt_imm(b, d0, 0);
-      nir_ssa_def *n1 = nir_flt_imm(b, d1, 0);
+      nir_def *plane = nir_load_array_var(b, clip_planes, idx);
+      nir_def *v0 = nir_load_var(b, vert0);
+      nir_def *v1 = nir_load_var(b, vert1);
+      nir_def *d0 = nir_fdot(b, v0, plane);
+      nir_def *d1 = nir_fdot(b, v1, plane);
+      nir_def *n0 = nir_flt_imm(b, d0, 0);
+      nir_def *n1 = nir_flt_imm(b, d1, 0);
 
       return_if_true(b, nir_iand(b, n0, n1));
 
       nir_if *clip_if = nir_push_if(b, nir_ior(b, n0, n1));
       {
-         nir_ssa_def *iv = get_intersection(b, v0, v1, d0, d1);
+         nir_def *iv = get_intersection(b, v0, v1, d0, d1);
          nir_store_var(b, vert0, nir_bcsel(b, n0, iv, v0), 0xf);
          nir_store_var(b, vert1, nir_bcsel(b, n1, iv, v1), 0xf);
       }
@@ -516,20 +516,20 @@ build_line_nir_shader(nir_builder *b, union state_key state, bool packed)
    }
    end_for_loop(clip_loop)
 
-   nir_ssa_def *trans[2];
+   nir_def *trans[2];
    get_depth_range_transform(b, packed, trans);
 
-   nir_ssa_def *d0 = get_window_space_depth(b, nir_load_var(b, vert0), trans);
-   nir_ssa_def *d1 = get_window_space_depth(b, nir_load_var(b, vert1), trans);
+   nir_def *d0 = get_window_space_depth(b, nir_load_var(b, vert0), trans);
+   nir_def *d1 = get_window_space_depth(b, nir_load_var(b, vert1), trans);
 
-   nir_ssa_def *dmin = nir_fmin(b, d0, d1);
-   nir_ssa_def *dmax = nir_fmax(b, d0, d1);
+   nir_def *dmin = nir_fmin(b, d0, d1);
+   nir_def *dmax = nir_fmax(b, d0, d1);
 
-   nir_ssa_def *fdmin = nir_fmul_imm(b, dmin, 4294967295.0);
-   nir_ssa_def *idmin = nir_f2uN(b, fdmin, 32);
+   nir_def *fdmin = nir_fmul_imm(b, dmin, 4294967295.0);
+   nir_def *idmin = nir_f2uN(b, fdmin, 32);
 
-   nir_ssa_def *fdmax = nir_fmul_imm(b, dmax, 4294967295.0);
-   nir_ssa_def *idmax = nir_f2uN(b, fdmax, 32);
+   nir_def *fdmax = nir_fmul_imm(b, dmax, 4294967295.0);
+   nir_def *idmax = nir_f2uN(b, fdmax, 32);
 
    update_result_buffer(b, idmin, idmax, state.result_offset_from_attribute, packed);
 }
@@ -540,7 +540,7 @@ build_planar_primitive_nir_shader(nir_builder *b, union state_key state, bool pa
    const int num_in_vert = b->shader->info.gs.vertices_in;
    assert(num_in_vert == 3 || num_in_vert == 4);
 
-   nir_ssa_def *v[4];
+   nir_def *v[4];
    get_input_vertices(b, v);
 
    if (state.face_culling_enabled)
@@ -569,12 +569,12 @@ build_planar_primitive_nir_shader(nir_builder *b, union state_key state, bool pa
    /* accurate clipping with all clip planes */
    begin_for_loop(clip_loop, nir_imm_int(b, num_clip_planes))
    {
-      nir_ssa_def *plane = nir_load_array_var(b, clip_planes, idx);
+      nir_def *plane = nir_load_array_var(b, clip_planes, idx);
       clip_with_plane(b, vert, num_vert, max_vert, plane);
    }
    end_for_loop(clip_loop)
 
-   nir_ssa_def *trans[2];
+   nir_def *trans[2];
    get_depth_range_transform(b, packed, trans);
 
    nir_variable *dmin =
@@ -587,18 +587,18 @@ build_planar_primitive_nir_shader(nir_builder *b, union state_key state, bool pa
 
    begin_for_loop(depth_loop, nir_load_var(b, num_vert))
    {
-      nir_ssa_def *vtx = nir_load_array_var(b, vert, idx);
-      nir_ssa_def *depth = get_window_space_depth(b, vtx, trans);
+      nir_def *vtx = nir_load_array_var(b, vert, idx);
+      nir_def *depth = get_window_space_depth(b, vtx, trans);
       nir_store_var(b, dmin, nir_fmin(b, nir_load_var(b, dmin), depth), 1);
       nir_store_var(b, dmax, nir_fmax(b, nir_load_var(b, dmax), depth), 1);
    }
    end_for_loop(depth_loop)
 
-   nir_ssa_def *fdmin = nir_fmul_imm(b, nir_load_var(b, dmin), 4294967295.0);
-   nir_ssa_def *idmin = nir_f2uN(b, fdmin, 32);
+   nir_def *fdmin = nir_fmul_imm(b, nir_load_var(b, dmin), 4294967295.0);
+   nir_def *idmin = nir_f2uN(b, fdmin, 32);
 
-   nir_ssa_def *fdmax = nir_fmul_imm(b, nir_load_var(b, dmax), 4294967295.0);
-   nir_ssa_def *idmax = nir_f2uN(b, fdmax, 32);
+   nir_def *fdmax = nir_fmul_imm(b, nir_load_var(b, dmax), 4294967295.0);
+   nir_def *idmax = nir_f2uN(b, fdmax, 32);
 
    update_result_buffer(b, idmin, idmax, state.result_offset_from_attribute, packed);
 }
index 703ce51..c625a5f 100644
@@ -151,7 +151,7 @@ st_nir_make_clearcolor_shader(struct st_context *st)
    b.shader->num_uniforms = 1;
 
    /* Read clear color from constant buffer */
-   nir_ssa_def *clear_color = nir_load_uniform(&b, 4, 32, nir_imm_int(&b,0),
+   nir_def *clear_color = nir_load_uniform(&b, 4, 32, nir_imm_int(&b,0),
                                                .range = 16,
                                                .dest_type = nir_type_float32);
 
index cef41b4..2a14dd0 100644
@@ -202,7 +202,7 @@ lower_builtin_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *def = nir_load_var(b, new_var);
+   nir_def *def = nir_load_var(b, new_var);
 
    /* swizzle the result: */
    unsigned swiz[NIR_MAX_VEC_COMPONENTS] = {0};
@@ -213,7 +213,7 @@ lower_builtin_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
    def = nir_swizzle(b, def, swiz, intrin->num_components);
 
    /* and rewrite uses of original instruction: */
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, def);
+   nir_def_rewrite_uses(&intrin->dest.ssa, def);
 
    /* at this point intrin should be unused.  We need to remove it
     * (rather than waiting for DCE pass) to avoid dangling reference
index 1d68356..76aea99 100644
@@ -7,13 +7,13 @@
 #include "nir_builtin_builder.h"
 #include "st_nir.h"
 
-static nir_ssa_def *
-fog_result(nir_builder *b, nir_ssa_def *color, enum gl_fog_mode fog_mode, struct gl_program_parameter_list *paramList)
+static nir_def *
+fog_result(nir_builder *b, nir_def *color, enum gl_fog_mode fog_mode, struct gl_program_parameter_list *paramList)
 {
    nir_shader *s = b->shader;
    nir_variable *fogc_var =
       nir_create_variable_with_location(s, nir_var_shader_in, VARYING_SLOT_FOGC, glsl_float_type());
-   nir_ssa_def *fogc = nir_load_var(b, fogc_var);
+   nir_def *fogc = nir_load_var(b, fogc_var);
    s->info.inputs_read |= VARYING_BIT_FOGC;
 
    static const gl_state_index16 fog_params_tokens[STATE_LENGTH] = {STATE_FOG_PARAMS_OPTIMIZED};
@@ -21,14 +21,14 @@ fog_result(nir_builder *b, nir_ssa_def *color, enum gl_fog_mode fog_mode, struct
 
    nir_variable *fog_params_var = st_nir_state_variable_create(s, glsl_vec4_type(), fog_params_tokens);
    fog_params_var->data.driver_location = _mesa_add_state_reference(paramList, fog_params_tokens);
-   nir_ssa_def *params = nir_load_var(b, fog_params_var);
+   nir_def *params = nir_load_var(b, fog_params_var);
 
    nir_variable *fog_color_var = st_nir_state_variable_create(s, glsl_vec4_type(), fog_color_tokens);
    fog_color_var->data.driver_location = _mesa_add_state_reference(paramList, fog_color_tokens);
-   nir_ssa_def *fog_color = nir_load_var(b, fog_color_var);
+   nir_def *fog_color = nir_load_var(b, fog_color_var);
 
    /* compute the 1 component fog factor f */
-   nir_ssa_def *f = NULL;
+   nir_def *f = NULL;
    switch (fog_mode) {
    case FOG_LINEAR:
       /* f = (end - z) / (end - start)
@@ -94,10 +94,10 @@ st_nir_lower_fog_instr(nir_builder *b, nir_instr *instr, void *_state)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *color = nir_ssa_for_src(b, intr->src[0], intr->num_components);
+   nir_def *color = nir_ssa_for_src(b, intr->src[0], intr->num_components);
    color = nir_resize_vector(b, color, 4);
 
-   nir_ssa_def *fog = fog_result(b, color, state->fog_mode, state->paramList);
+   nir_def *fog = fog_result(b, color, state->fog_mode, state->paramList);
 
    /* retain the non-fog-blended alpha value for color */
    color = nir_vector_insert_imm(b, fog, nir_channel(b, color, 3), 3);
@@ -137,7 +137,7 @@ st_nir_lower_fog(nir_shader *s, enum gl_fog_mode fog_mode, struct gl_program_par
        */
       assert(!glsl_type_is_array(color_var->type));
 
-      nir_ssa_def *color = nir_load_var(&b, color_var);
+      nir_def *color = nir_load_var(&b, color_var);
       color = fog_result(&b, color, fog_mode, paramList);
       nir_store_var(&b, color_var, color, 0x7);
 
index 3b9f802..c57dae2 100644
@@ -23,7 +23,7 @@ st_nir_lower_position_invariant(struct nir_shader *s, bool aos,
    nir_function_impl *impl = nir_shader_get_entrypoint(s);
    nir_builder b = nir_builder_at(nir_before_block(nir_start_block(impl)));
 
-   nir_ssa_def *mvp[4];
+   nir_def *mvp[4];
    for (int i = 0; i < 4; i++) {
       gl_state_index16 tokens[STATE_LENGTH] = {
           aos ? STATE_MVP_MATRIX : STATE_MVP_MATRIX_TRANSPOSE, 0, i, i};
@@ -32,12 +32,12 @@ st_nir_lower_position_invariant(struct nir_shader *s, bool aos,
       mvp[i] = nir_load_var(&b, var);
    }
 
-   nir_ssa_def *result;
-   nir_ssa_def *in_pos = nir_load_var(&b, nir_get_variable_with_location(s, nir_var_shader_in,
+   nir_def *result;
+   nir_def *in_pos = nir_load_var(&b, nir_get_variable_with_location(s, nir_var_shader_in,
                                                                          VERT_ATTRIB_POS, glsl_vec4_type()));
    s->info.inputs_read |= VERT_BIT_POS;
    if (aos) {
-      nir_ssa_def *chans[4];
+      nir_def *chans[4];
       for (int i = 0; i < 4; i++)
          chans[i] = nir_fdot4(&b, mvp[i], in_pos);
       result = nir_vec4(&b, chans[0], chans[1], chans[2], chans[3]);
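
The aos branch above is four row dot products; a minimal C sketch of that layout (illustrative only, not Mesa code):

   /* AOS layout: each mvp[i] holds one matrix row, so the transformed
    * position is four dot products (one nir_fdot4 per component). */
   static void
   transform_pos_aos(const float mvp[4][4], const float pos[4], float out[4])
   {
      for (int i = 0; i < 4; i++)
         out[i] = mvp[i][0] * pos[0] + mvp[i][1] * pos[1] +
                  mvp[i][2] * pos[2] + mvp[i][3] * pos[3];
   }
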
index 8b7ac4f..96e2dca 100644
@@ -145,7 +145,7 @@ lower_tex_src_plane_block(nir_builder *b, lower_tex_src_state *state, nir_block
             assert(samp);
 
             nir_deref_instr *tex_deref_instr = nir_build_deref_var(b, samp);
-            nir_ssa_def *tex_deref = &tex_deref_instr->dest.ssa;
+            nir_def *tex_deref = &tex_deref_instr->dest.ssa;
 
             nir_instr_rewrite_src(&tex->instr,
                                   &tex->src[tex_index].src,
index aa40f94..2caf266 100644
@@ -353,7 +353,7 @@ st_pbo_create_gs(struct st_context *st)
    b.shader->info.outputs_written |= VARYING_BIT_LAYER;
 
    for (int i = 0; i < 3; ++i) {
-      nir_ssa_def *pos = nir_load_array_var_imm(&b, in_pos, i);
+      nir_def *pos = nir_load_array_var_imm(&b, in_pos, i);
 
       nir_store_var(&b, out_pos, nir_vector_insert_imm(&b, pos, nir_imm_float(&b, 0.0), 2), 0xf);
       /* out_layer.x = f2i(in_pos[i].z) */
@@ -412,13 +412,13 @@ create_fs(struct st_context *st, bool download,
                                                   "st/pbo download FS" :
                                                   "st/pbo upload FS");
 
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
+   nir_def *zero = nir_imm_int(&b, 0);
 
    /* param = [ -xoffset + skip_pixels, -yoffset, stride, image_height ] */
    nir_variable *param_var =
       nir_variable_create(b.shader, nir_var_uniform, glsl_vec4_type(), "param");
    b.shader->num_uniforms += 4;
-   nir_ssa_def *param = nir_load_var(&b, param_var);
+   nir_def *param = nir_load_var(&b, param_var);
 
    nir_variable *fragcoord;
    if (pos_is_sysval)
@@ -427,14 +427,14 @@ create_fs(struct st_context *st, bool download,
    else
       fragcoord = nir_create_variable_with_location(b.shader, nir_var_shader_in,
                                                     VARYING_SLOT_POS, glsl_vec4_type());
-   nir_ssa_def *coord = nir_load_var(&b, fragcoord);
+   nir_def *coord = nir_load_var(&b, fragcoord);
 
    /* When st->pbo.layers == false, it is guaranteed we only have a single
     * layer. But we still need the "layer" variable to add the "array"
     * coordinate to the texture. Hence we set layer to zero when array texture
     * is used in case only a single layer is required.
     */
-   nir_ssa_def *layer = NULL;
+   nir_def *layer = NULL;
    if (!download || target == PIPE_TEXTURE_1D_ARRAY ||
                     target == PIPE_TEXTURE_2D_ARRAY ||
                     target == PIPE_TEXTURE_3D ||
@@ -453,12 +453,12 @@ create_fs(struct st_context *st, bool download,
    }
 
    /* offset_pos = param.xy + f2i(coord.xy) */
-   nir_ssa_def *offset_pos =
+   nir_def *offset_pos =
       nir_iadd(&b, nir_channels(&b, param, TGSI_WRITEMASK_XY),
                nir_f2i32(&b, nir_channels(&b, coord, TGSI_WRITEMASK_XY)));
 
    /* addr = offset_pos.x + offset_pos.y * stride */
-   nir_ssa_def *pbo_addr =
+   nir_def *pbo_addr =
       nir_iadd(&b, nir_channel(&b, offset_pos, 0),
                nir_imul(&b, nir_channel(&b, offset_pos, 1),
                         nir_channel(&b, param, 2)));
@@ -468,7 +468,7 @@ create_fs(struct st_context *st, bool download,
                           nir_imul(&b, layer, nir_channel(&b, param, 3)));
    }
 
-   nir_ssa_def *texcoord;
+   nir_def *texcoord;
    if (download) {
       texcoord = nir_f2i32(&b, nir_channels(&b, coord, TGSI_WRITEMASK_XY));
 
@@ -478,7 +478,7 @@ create_fs(struct st_context *st, bool download,
       }
 
       if (layer) {
-         nir_ssa_def *src_layer = layer;
+         nir_def *src_layer = layer;
 
          if (target == PIPE_TEXTURE_3D) {
             nir_variable *layer_offset_var =
@@ -486,7 +486,7 @@ create_fs(struct st_context *st, bool download,
                                    glsl_int_type(), "layer_offset");
             b.shader->num_uniforms += 1;
             layer_offset_var->data.driver_location = 4;
-            nir_ssa_def *layer_offset = nir_load_var(&b, layer_offset_var);
+            nir_def *layer_offset = nir_load_var(&b, layer_offset_var);
 
             src_layer = nir_iadd(&b, layer, layer_offset);
          }
@@ -529,7 +529,7 @@ create_fs(struct st_context *st, bool download,
    tex->src[2].src = nir_src_for_ssa(texcoord);
    nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
    nir_builder_instr_insert(&b, &tex->instr);
-   nir_ssa_def *result = &tex->dest.ssa;
+   nir_def *result = &tex->dest.ssa;
 
    if (conversion == ST_PBO_CONVERT_SINT_TO_UINT)
       result = nir_imax(&b, result, zero);
index 3297166..a02ed0e 100644
@@ -142,24 +142,24 @@ get_convert_format(struct gl_context *ctx,
 
 
 struct pbo_shader_data {
-   nir_ssa_def *offset;
-   nir_ssa_def *range;
-   nir_ssa_def *invert;
-   nir_ssa_def *blocksize;
-   nir_ssa_def *alignment;
-   nir_ssa_def *dst_bit_size;
-   nir_ssa_def *channels;
-   nir_ssa_def *normalized;
-   nir_ssa_def *integer;
-   nir_ssa_def *clamp_uint;
-   nir_ssa_def *r11g11b10_or_sint;
-   nir_ssa_def *r9g9b9e5;
-   nir_ssa_def *bits1;
-   nir_ssa_def *bits2;
-   nir_ssa_def *bits3;
-   nir_ssa_def *bits4;
-   nir_ssa_def *swap;
-   nir_ssa_def *bits; //vec4
+   nir_def *offset;
+   nir_def *range;
+   nir_def *invert;
+   nir_def *blocksize;
+   nir_def *alignment;
+   nir_def *dst_bit_size;
+   nir_def *channels;
+   nir_def *normalized;
+   nir_def *integer;
+   nir_def *clamp_uint;
+   nir_def *r11g11b10_or_sint;
+   nir_def *r9g9b9e5;
+   nir_def *bits1;
+   nir_def *bits2;
+   nir_def *bits3;
+   nir_def *bits4;
+   nir_def *swap;
+   nir_def *bits; //vec4
 };
 
 
@@ -208,13 +208,13 @@ struct pbo_data {
 #define STRUCT_BLOCK(offset, ...) \
    do { \
       assert(offset % 8 == 0); \
-      nir_ssa_def *block##offset = nir_u2u32(b, nir_extract_bits(b, &ubo_load, 1, (offset), 1, 8)); \
+      nir_def *block##offset = nir_u2u32(b, nir_extract_bits(b, &ubo_load, 1, (offset), 1, 8)); \
       __VA_ARGS__ \
    } while (0)
 #define STRUCT_MEMBER(blockoffset, name, offset, size, op, clamp) \
    do { \
       assert(offset + size <= 8); \
-      nir_ssa_def *val = nir_iand_imm(b, block##blockoffset, u_bit_consecutive(offset, size)); \
+      nir_def *val = nir_iand_imm(b, block##blockoffset, u_bit_consecutive(offset, size)); \
       if (offset) \
          val = nir_ushr_imm(b, val, offset); \
       sd->name = op; \
@@ -235,7 +235,7 @@ static void
 init_pbo_shader_data(nir_builder *b, struct pbo_shader_data *sd, unsigned coord_components)
 {
    nir_variable *ubo = nir_variable_create(b->shader, nir_var_uniform, glsl_uvec4_type(), "offset");
-   nir_ssa_def *ubo_load = nir_load_var(b, ubo);
+   nir_def *ubo_load = nir_load_var(b, ubo);
 
    sd->offset = nir_u2u32(b, nir_extract_bits(b, &ubo_load, 1, STRUCT_OFFSET(x), 2, 16));
    if (coord_components == 1)
@@ -353,8 +353,8 @@ fill_pbo_data(struct pbo_data *pd, enum pipe_format src_format, enum pipe_format
    return weird_packed ? 1 : dst_desc->nr_channels;
 }
 
-static nir_ssa_def *
-get_buffer_offset(nir_builder *b, nir_ssa_def *coord, struct pbo_shader_data *sd)
+static nir_def *
+get_buffer_offset(nir_builder *b, nir_def *coord, struct pbo_shader_data *sd)
 {
 /* from _mesa_image_offset():
       offset = topOfImage
@@ -362,13 +362,13 @@ get_buffer_offset(nir_builder *b, nir_ssa_def *coord, struct pbo_shader_data *sd
                + (skiprows + row) * bytes_per_row
                + (skipimages + img) * bytes_per_image;
  */
-   nir_ssa_def *bytes_per_row = nir_imul(b, nir_channel(b, sd->range, 0), sd->blocksize);
+   nir_def *bytes_per_row = nir_imul(b, nir_channel(b, sd->range, 0), sd->blocksize);
    bytes_per_row = nir_bcsel(b, nir_ult_imm(b, sd->alignment, 2),
                              bytes_per_row,
                              nir_iand(b,
                                       nir_iadd_imm(b, nir_iadd(b, bytes_per_row, sd->alignment), -1),
                                       nir_inot(b, nir_iadd_imm(b, sd->alignment, -1))));
-   nir_ssa_def *bytes_per_image = nir_imul(b, bytes_per_row, nir_channel(b, sd->range, 1));
+   nir_def *bytes_per_image = nir_imul(b, bytes_per_row, nir_channel(b, sd->range, 1));
    bytes_per_row = nir_bcsel(b, sd->invert,
                              nir_ineg(b, bytes_per_row),
                              bytes_per_row);
@@ -380,7 +380,7 @@ get_buffer_offset(nir_builder *b, nir_ssa_def *coord, struct pbo_shader_data *sd
 }
 
 static inline void
-write_ssbo(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset)
+write_ssbo(nir_builder *b, nir_def *pixel, nir_def *buffer_offset)
 {
    nir_store_ssbo(b, pixel, nir_imm_zero(b, 1, 32), buffer_offset,
                   .align_mul = pixel->bit_size / 8,
@@ -388,7 +388,7 @@ write_ssbo(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset)
 }
 
 static void
-write_conversion(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset, struct pbo_shader_data *sd)
+write_conversion(nir_builder *b, nir_def *pixel, nir_def *buffer_offset, struct pbo_shader_data *sd)
 {
    nir_push_if(b, nir_ilt_imm(b, sd->dst_bit_size, 32));
       nir_push_if(b, nir_ieq_imm(b, sd->dst_bit_size, 16));
@@ -401,8 +401,8 @@ write_conversion(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset,
    nir_pop_if(b, NULL);
 }
 
-static nir_ssa_def *
-swap2(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+swap2(nir_builder *b, nir_def *src)
 {
    /* dst[i] = (src[i] >> 8) | ((src[i] << 8) & 0xff00); */
    return nir_ior(b,
@@ -410,8 +410,8 @@ swap2(nir_builder *b, nir_ssa_def *src)
                   nir_iand_imm(b, nir_ishl_imm(b, src, 8), 0xff00));
 }
 
-static nir_ssa_def *
-swap4(nir_builder *b, nir_ssa_def *src)
+static nir_def *
+swap4(nir_builder *b, nir_def *src)
 {
    /* a = (b >> 24) | ((b >> 8) & 0xff00) | ((b << 8) & 0xff0000) | ((b << 24) & 0xff000000); */
    return nir_ior(b,
@@ -429,7 +429,7 @@ swap4(nir_builder *b, nir_ssa_def *src)
 
 /* explode the cf to handle channel counts in the shader */
 static void
-grab_components(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset, struct pbo_shader_data *sd, bool weird_packed)
+grab_components(nir_builder *b, nir_def *pixel, nir_def *buffer_offset, struct pbo_shader_data *sd, bool weird_packed)
 {
    if (weird_packed) {
       nir_push_if(b, nir_ieq_imm(b, sd->bits1, 32));
@@ -459,23 +459,23 @@ grab_components(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset,
 
 /* if byteswap is enabled, handle that and then write the components */
 static void
-handle_swap(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset,
+handle_swap(nir_builder *b, nir_def *pixel, nir_def *buffer_offset,
             struct pbo_shader_data *sd, unsigned num_components, bool weird_packed)
 {
    nir_push_if(b, sd->swap); {
       nir_push_if(b, nir_ieq_imm(b, nir_udiv_imm(b, sd->blocksize, num_components), 2)); {
          /* this is a single high/low swap per component */
-         nir_ssa_def *components[4];
+         nir_def *components[4];
          for (unsigned i = 0; i < 4; i++)
             components[i] = swap2(b, nir_channel(b, pixel, i));
-         nir_ssa_def *v = nir_vec(b, components, 4);
+         nir_def *v = nir_vec(b, components, 4);
          grab_components(b, v, buffer_offset, sd, weird_packed);
       } nir_push_else(b, NULL); {
          /* this is a pair of high/low swaps for each half of the component */
-         nir_ssa_def *components[4];
+         nir_def *components[4];
          for (unsigned i = 0; i < 4; i++)
             components[i] = swap4(b, nir_channel(b, pixel, i));
-         nir_ssa_def *v = nir_vec(b, components, 4);
+         nir_def *v = nir_vec(b, components, 4);
          grab_components(b, v, buffer_offset, sd, weird_packed);
       } nir_pop_if(b, NULL);
    } nir_push_else(b, NULL); {
@@ -484,10 +484,10 @@ handle_swap(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset,
    } nir_pop_if(b, NULL);
 }
 
-static nir_ssa_def *
+static nir_def *
 check_for_weird_packing(nir_builder *b, struct pbo_shader_data *sd, unsigned component)
 {
-   nir_ssa_def *c = nir_channel(b, sd->bits, component - 1);
+   nir_def *c = nir_channel(b, sd->bits, component - 1);
 
    return nir_bcsel(b,
                     nir_ige_imm(b, sd->channels, component),
@@ -498,14 +498,14 @@ check_for_weird_packing(nir_builder *b, struct pbo_shader_data *sd, unsigned com
 }
 
 /* convenience function for clamping signed integers */
-static inline nir_ssa_def *
-nir_imin_imax(nir_builder *build, nir_ssa_def *src, nir_ssa_def *clamp_to_min, nir_ssa_def *clamp_to_max)
+static inline nir_def *
+nir_imin_imax(nir_builder *build, nir_def *src, nir_def *clamp_to_min, nir_def *clamp_to_max)
 {
    return nir_imax(build, nir_imin(build, src, clamp_to_min), clamp_to_max);
 }
 
-static inline nir_ssa_def *
-nir_format_float_to_unorm_with_factor(nir_builder *b, nir_ssa_def *f, nir_ssa_def *factor)
+static inline nir_def *
+nir_format_float_to_unorm_with_factor(nir_builder *b, nir_def *f, nir_def *factor)
 {
    /* Clamp to the range [0, 1] */
    f = nir_fsat(b, f);
@@ -513,8 +513,8 @@ nir_format_float_to_unorm_with_factor(nir_builder *b, nir_ssa_def *f, nir_ssa_de
    return nir_f2u32(b, nir_fround_even(b, nir_fmul(b, f, factor)));
 }
 
-static inline nir_ssa_def *
-nir_format_float_to_snorm_with_factor(nir_builder *b, nir_ssa_def *f, nir_ssa_def *factor)
+static inline nir_def *
+nir_format_float_to_snorm_with_factor(nir_builder *b, nir_def *f, nir_def *factor)
 {
    /* Clamp to the range [-1, 1] */
    f = nir_fmin(b, nir_fmax(b, f, nir_imm_float(b, -1)), nir_imm_float(b, 1));
@@ -522,13 +522,13 @@ nir_format_float_to_snorm_with_factor(nir_builder *b, nir_ssa_def *f, nir_ssa_de
    return nir_f2i32(b, nir_fround_even(b, nir_fmul(b, f, factor)));
 }
 
-static nir_ssa_def *
-clamp_and_mask(nir_builder *b, nir_ssa_def *src, nir_ssa_def *channels)
+static nir_def *
+clamp_and_mask(nir_builder *b, nir_def *src, nir_def *channels)
 {
-   nir_ssa_def *one = nir_imm_ivec4(b, 1, 0, 0, 0);
-   nir_ssa_def *two = nir_imm_ivec4(b, 1, 1, 0, 0);
-   nir_ssa_def *three = nir_imm_ivec4(b, 1, 1, 1, 0);
-   nir_ssa_def *four = nir_imm_ivec4(b, 1, 1, 1, 1);
+   nir_def *one = nir_imm_ivec4(b, 1, 0, 0, 0);
+   nir_def *two = nir_imm_ivec4(b, 1, 1, 0, 0);
+   nir_def *three = nir_imm_ivec4(b, 1, 1, 1, 0);
+   nir_def *four = nir_imm_ivec4(b, 1, 1, 1, 1);
    /* avoid underflow by clamping to channel count */
    src = nir_bcsel(b,
                    nir_ieq(b, channels, one),
@@ -545,12 +545,12 @@ clamp_and_mask(nir_builder *b, nir_ssa_def *src, nir_ssa_def *channels)
 }
 
 static void
-convert_swap_write(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset,
+convert_swap_write(nir_builder *b, nir_def *pixel, nir_def *buffer_offset,
                    unsigned num_components,
                    struct pbo_shader_data *sd)
 {
 
-   nir_ssa_def *weird_packed = nir_ior(b,
+   nir_def *weird_packed = nir_ior(b,
                                        nir_ior(b,
                                                check_for_weird_packing(b, sd, 4),
                                                check_for_weird_packing(b, sd, 3)),
@@ -564,7 +564,7 @@ convert_swap_write(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offse
                handle_swap(b, nir_pad_vec4(b, nir_format_pack_r9g9b9e5(b, pixel)), buffer_offset, sd, 1, true);
             nir_push_else(b, NULL);
                nir_push_if(b, nir_ieq_imm(b, sd->bits1, 32)); { //PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
-                  nir_ssa_def *pack[2];
+                  nir_def *pack[2];
                   pack[0] = nir_format_pack_uint_unmasked_ssa(b, nir_channel(b, pixel, 0), nir_channel(b, sd->bits, 0));
                   pack[1] = nir_format_pack_uint_unmasked_ssa(b, nir_channels(b, pixel, 6), nir_channels(b, sd->bits, 6));
                   handle_swap(b, nir_pad_vec4(b, nir_vec2(b, pack[0], pack[1])), buffer_offset, sd, 2, true);
@@ -586,13 +586,13 @@ convert_swap_write(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offse
 }
 
 static void
-do_shader_conversion(nir_builder *b, nir_ssa_def *pixel,
+do_shader_conversion(nir_builder *b, nir_def *pixel,
                      unsigned num_components,
-                     nir_ssa_def *coord, struct pbo_shader_data *sd)
+                     nir_def *coord, struct pbo_shader_data *sd)
 {
-   nir_ssa_def *buffer_offset = get_buffer_offset(b, coord, sd);
+   nir_def *buffer_offset = get_buffer_offset(b, coord, sd);
 
-   nir_ssa_def *signed_bit_mask = clamp_and_mask(b, sd->bits, sd->channels);
+   nir_def *signed_bit_mask = clamp_and_mask(b, sd->bits, sd->channels);
 
 #define CONVERT_SWAP_WRITE(PIXEL) \
    convert_swap_write(b, PIXEL, buffer_offset, num_components, sd);
@@ -645,18 +645,18 @@ create_conversion_shader(struct st_context *st, enum pipe_texture_target target,
    struct pbo_shader_data sd;
    init_pbo_shader_data(&b, &sd, coord_components);
 
-   nir_ssa_def *bsize = nir_imm_ivec4(&b,
+   nir_def *bsize = nir_imm_ivec4(&b,
                                       b.shader->info.workgroup_size[0],
                                       b.shader->info.workgroup_size[1],
                                       b.shader->info.workgroup_size[2],
                                       0);
-   nir_ssa_def *wid = nir_load_workgroup_id(&b, 32);
-   nir_ssa_def *iid = nir_load_local_invocation_id(&b);
-   nir_ssa_def *tile = nir_imul(&b, wid, bsize);
-   nir_ssa_def *global_id = nir_iadd(&b, tile, iid);
-   nir_ssa_def *start = nir_iadd(&b, nir_trim_vector(&b, global_id, 2), sd.offset);
+   nir_def *wid = nir_load_workgroup_id(&b, 32);
+   nir_def *iid = nir_load_local_invocation_id(&b);
+   nir_def *tile = nir_imul(&b, wid, bsize);
+   nir_def *global_id = nir_iadd(&b, tile, iid);
+   nir_def *start = nir_iadd(&b, nir_trim_vector(&b, global_id, 2), sd.offset);
 
-   nir_ssa_def *coord;
+   nir_def *coord;
    if (coord_components < 3)
       coord = start;
    else {
@@ -667,11 +667,11 @@ create_conversion_shader(struct st_context *st, enum pipe_texture_target target,
                            nir_channel(&b, global_id, 2));
    }
    coord = nir_trim_vector(&b, coord, coord_components);
-   nir_ssa_def *offset = coord_components > 2 ?
+   nir_def *offset = coord_components > 2 ?
                          nir_pad_vector_imm_int(&b, sd.offset, 0, 3) :
                          nir_trim_vector(&b, sd.offset, coord_components);
-   nir_ssa_def *range = nir_trim_vector(&b, sd.range, coord_components);
-   nir_ssa_def *max = nir_iadd(&b, offset, range);
+   nir_def *range = nir_trim_vector(&b, sd.range, coord_components);
+   nir_def *max = nir_iadd(&b, offset, range);
    nir_push_if(&b, nir_ball(&b, nir_ilt(&b, coord, max)));
    nir_tex_instr *txf = nir_tex_instr_create(b.shader, 3);
    txf->is_array = glsl_sampler_type_is_array(sampler->type);
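
The swap2()/swap4() builders earlier in this file emit the classic byte swaps; their scalar C equivalents (illustrative only, not Mesa code) are:

   #include <stdint.h>

   /* Swap the two bytes of a 16-bit value held in the low half of a
    * u32 (upper bits assumed zero, as in the shader). */
   static uint32_t
   swap2_scalar(uint32_t x)
   {
      return (x >> 8) | ((x << 8) & 0xff00);
   }

   /* Full 32-bit byte swap. */
   static uint32_t
   swap4_scalar(uint32_t x)
   {
      return (x >> 24) | ((x >> 8) & 0x0000ff00) |
             ((x << 8) & 0x00ff0000) | ((x << 24) & 0xff000000);
   }
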
index f610bbf..041c724 100644
@@ -124,7 +124,7 @@ clc_lower_input_image_deref(nir_builder *b, struct clc_image_lower_context *cont
    };
 
    int image_bindings[IMAGE_TYPE_COUNT] = {-1, -1, -1};
-   nir_ssa_def *format_deref_dest = NULL, *order_deref_dest = NULL;
+   nir_def *format_deref_dest = NULL, *order_deref_dest = NULL;
 
    nir_variable *in_var = nir_deref_instr_get_variable(context->deref);
 
@@ -203,7 +203,7 @@ clc_lower_input_image_deref(nir_builder *b, struct clc_image_lower_context *cont
 
             case nir_intrinsic_image_deref_format:
             case nir_intrinsic_image_deref_order: {
-               nir_ssa_def **cached_deref = intrinsic->intrinsic == nir_intrinsic_image_deref_format ?
+               nir_def **cached_deref = intrinsic->intrinsic == nir_intrinsic_image_deref_format ?
                   &format_deref_dest : &order_deref_dest;
                if (!*cached_deref) {
                   nir_variable *new_input = nir_variable_create(b->shader, nir_var_uniform, glsl_uint_type(), NULL);
@@ -218,7 +218,7 @@ clc_lower_input_image_deref(nir_builder *b, struct clc_image_lower_context *cont
                }
 
                /* No actual intrinsic needed here, just reference the loaded variable */
-               nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, *cached_deref);
+               nir_def_rewrite_uses(&intrinsic->dest.ssa, *cached_deref);
                nir_instr_remove(&intrinsic->instr);
                break;
             }
@@ -310,8 +310,8 @@ clc_lower_64bit_semantics(nir_shader *nir)
                intrinsic->dest.ssa.bit_size = 32;
                b.cursor = nir_after_instr(instr);
 
-               nir_ssa_def *i64 = nir_u2u64(&b, &intrinsic->dest.ssa);
-               nir_ssa_def_rewrite_uses_after(
+               nir_def *i64 = nir_u2u64(&b, &intrinsic->dest.ssa);
+               nir_def_rewrite_uses_after(
                   &intrinsic->dest.ssa,
                   i64,
                   i64->parent_instr);
@@ -359,14 +359,14 @@ clc_lower_nonnormalized_samplers(nir_shader *nir,
 
             int coords_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
             assert(coords_idx != -1);
-            nir_ssa_def *coords =
+            nir_def *coords =
                nir_ssa_for_src(&b, tex->src[coords_idx].src, tex->coord_components);
 
-            nir_ssa_def *txs = nir_i2f32(&b, nir_get_texture_size(&b, tex));
+            nir_def *txs = nir_i2f32(&b, nir_get_texture_size(&b, tex));
 
             // Normalize coords for tex
-            nir_ssa_def *scale = nir_frcp(&b, txs);
-            nir_ssa_def *comps[4];
+            nir_def *scale = nir_frcp(&b, txs);
+            nir_def *comps[4];
             for (unsigned i = 0; i < coords->num_components; ++i) {
                comps[i] = nir_channel(&b, coords, i);
                if (tex->is_array && i == coords->num_components - 1) {
@@ -383,7 +383,7 @@ clc_lower_nonnormalized_samplers(nir_shader *nir,
                   comps[i] = nir_fadd_imm(&b, nir_ffloor(&b, comps[i]), 0.5f);
                comps[i] = nir_fmul(&b, comps[i], nir_channel(&b, scale, i));
             }
-            nir_ssa_def *normalized_coords = nir_vec(&b, comps, coords->num_components);
+            nir_def *normalized_coords = nir_vec(&b, comps, coords->num_components);
             nir_instr_rewrite_src(&tex->instr,
                                   &tex->src[coords_idx].src,
                                   nir_src_for_ssa(normalized_coords));
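
The normalization in the hunk above is a multiply by the reciprocal texture size, skipping the array coordinate, with one branch (its condition is elided by the hunk) first snapping to the texel center. A standalone sketch (illustrative C, not Mesa code):

   #include <math.h>
   #include <stdbool.h>

   static float
   normalize_coord(float coord, float size, bool snap_to_texel_center)
   {
      if (snap_to_texel_center)
         coord = floorf(coord) + 0.5f;   /* nir_ffloor + nir_fadd_imm */
      return coord * (1.0f / size);      /* nir_frcp + nir_fmul */
   }
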
index adebbc8..3324379 100644
@@ -31,7 +31,7 @@
 #include "clc_compiler.h"
 #include "../compiler/dxil_nir.h"
 
-static nir_ssa_def *
+static nir_def *
 load_ubo(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var, unsigned offset)
 {
    return nir_load_ubo(b,
@@ -51,9 +51,9 @@ lower_load_base_global_invocation_id(nir_builder *b, nir_intrinsic_instr *intr,
 {
    b->cursor = nir_after_instr(&intr->instr);
 
-   nir_ssa_def *offset = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
+   nir_def *offset = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
                                                          global_offset_x));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, offset);
+   nir_def_rewrite_uses(&intr->dest.ssa, offset);
    nir_instr_remove(&intr->instr);
    return true;
 }
@@ -64,9 +64,9 @@ lower_load_work_dim(nir_builder *b, nir_intrinsic_instr *intr,
 {
    b->cursor = nir_after_instr(&intr->instr);
 
-   nir_ssa_def *dim = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
+   nir_def *dim = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
                                                       work_dim));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, dim);
+   nir_def_rewrite_uses(&intr->dest.ssa, dim);
    nir_instr_remove(&intr->instr);
    return true;
 }
@@ -77,10 +77,10 @@ lower_load_num_workgroups(nir_builder *b, nir_intrinsic_instr *intr,
 {
    b->cursor = nir_after_instr(&intr->instr);
 
-   nir_ssa_def *count =
+   nir_def *count =
       load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
                                       group_count_total_x));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, count);
+   nir_def_rewrite_uses(&intr->dest.ssa, count);
    nir_instr_remove(&intr->instr);
    return true;
 }
@@ -91,10 +91,10 @@ lower_load_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *intr,
 {
    b->cursor = nir_after_instr(&intr->instr);
 
-   nir_ssa_def *offset =
+   nir_def *offset =
       load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
                                       group_id_offset_x));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, offset);
+   nir_def_rewrite_uses(&intr->dest.ssa, offset);
    nir_instr_remove(&intr->instr);
    return true;
 }
@@ -168,16 +168,16 @@ lower_load_kernel_input(nir_builder *b, nir_intrinsic_instr *intr,
 
    const struct glsl_type *type =
       glsl_vector_type(base_type, nir_dest_num_components(intr->dest));
-   nir_ssa_def *ptr = nir_vec2(b, nir_imm_int(b, var->data.binding),
+   nir_def *ptr = nir_vec2(b, nir_imm_int(b, var->data.binding),
                                   nir_u2uN(b, intr->src[0].ssa, 32));
    nir_deref_instr *deref = nir_build_deref_cast(b, ptr, nir_var_mem_ubo, type,
                                                     bit_size / 8);
    deref->cast.align_mul = nir_intrinsic_align_mul(intr);
    deref->cast.align_offset = nir_intrinsic_align_offset(intr);
 
-   nir_ssa_def *result =
+   nir_def *result =
       nir_load_deref(b, deref);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);
+   nir_def_rewrite_uses(&intr->dest.ssa, result);
    nir_instr_remove(&intr->instr);
    return true;
 }
@@ -229,7 +229,7 @@ bool
 clc_lower_printf_base(nir_shader *nir, unsigned uav_id)
 {
    nir_variable *printf_var = NULL;
-   nir_ssa_def *printf_deref = NULL;
+   nir_def *printf_deref = NULL;
    nir_foreach_function_impl(impl, nir) {
       nir_builder b = nir_builder_at(nir_before_block(nir_start_block(impl)));
       bool progress = false;
@@ -247,7 +247,7 @@ clc_lower_printf_base(nir_shader *nir, unsigned uav_id)
                nir_deref_instr *deref = nir_build_deref_var(&b, printf_var);
                printf_deref = &deref->dest.ssa;
             }
-            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, printf_deref);
+            nir_def_rewrite_uses(&intrin->dest.ssa, printf_deref);
             progress = true;
          }
       }
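
The clc lowerings above all share one shape; with the renamed helpers, the skeleton of such a pass body looks roughly like this (a sketch with a hypothetical callback name, not any specific Mesa function):

   static bool
   lower_one_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
   {
      b->cursor = nir_after_instr(&intr->instr);

      /* Build the replacement value (whatever the pass computes). */
      nir_def *replacement = nir_imm_int(b, 0);

      /* Point all users at it, then drop the old instruction rather
       * than waiting for a DCE pass. */
      nir_def_rewrite_uses(&intr->dest.ssa, replacement);
      nir_instr_remove(&intr->instr);
      return true;
   }
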
index b9aadcd..e0fd42c 100644
@@ -39,9 +39,9 @@ cl_type_size_align(const struct glsl_type *type, unsigned *size,
    *align = glsl_get_cl_alignment(type);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_comps_to_vec(nir_builder *b, unsigned src_bit_size,
-                  nir_ssa_def **src_comps, unsigned num_src_comps,
+                  nir_def **src_comps, unsigned num_src_comps,
                   unsigned dst_bit_size)
 {
    if (src_bit_size == dst_bit_size)
@@ -51,14 +51,14 @@ load_comps_to_vec(nir_builder *b, unsigned src_bit_size,
 
    unsigned num_dst_comps = DIV_ROUND_UP(num_src_comps * src_bit_size, dst_bit_size);
    unsigned comps_per_dst = dst_bit_size / src_bit_size;
-   nir_ssa_def *dst_comps[4];
+   nir_def *dst_comps[4];
 
    for (unsigned i = 0; i < num_dst_comps; i++) {
       unsigned src_offs = i * comps_per_dst;
 
       dst_comps[i] = nir_u2uN(b, src_comps[src_offs], dst_bit_size);
       for (unsigned j = 1; j < comps_per_dst && src_offs + j < num_src_comps; j++) {
-         nir_ssa_def *tmp = nir_ishl_imm(b, nir_u2uN(b, src_comps[src_offs + j], dst_bit_size),
+         nir_def *tmp = nir_ishl_imm(b, nir_u2uN(b, src_comps[src_offs + j], dst_bit_size),
                                          j * src_bit_size);
          dst_comps[i] = nir_ior(b, dst_comps[i], tmp);
       }
@@ -76,14 +76,14 @@ lower_32b_offset_load(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *v
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *offset = intr->src[0].ssa;
+   nir_def *offset = intr->src[0].ssa;
    if (intr->intrinsic == nir_intrinsic_load_shared)
       offset = nir_iadd_imm(b, offset, nir_intrinsic_base(intr));
    else
       offset = nir_u2u32(b, offset);
-   nir_ssa_def *index = nir_ushr_imm(b, offset, 2);
-   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
-   nir_ssa_def *comps_32bit[NIR_MAX_VEC_COMPONENTS * 2];
+   nir_def *index = nir_ushr_imm(b, offset, 2);
+   nir_def *comps[NIR_MAX_VEC_COMPONENTS];
+   nir_def *comps_32bit[NIR_MAX_VEC_COMPONENTS * 2];
 
    /* We need to split loads in 32-bit accesses because the buffer
     * is an i32 array and DXIL does not support type casts.
@@ -96,40 +96,40 @@ lower_32b_offset_load(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *v
    for (unsigned i = 0; i < num_32bit_comps; i += num_comps_per_pass) {
       unsigned num_vec32_comps = MIN2(num_32bit_comps - i, 4);
       unsigned num_dest_comps = num_vec32_comps * 32 / bit_size;
-      nir_ssa_def *vec32 = nir_vec(b, &comps_32bit[i], num_vec32_comps);
+      nir_def *vec32 = nir_vec(b, &comps_32bit[i], num_vec32_comps);
 
       /* If we have 16 bits or less to load we need to adjust the u32 value so
        * we can always extract the LSB.
        */
       if (num_bits <= 16) {
-         nir_ssa_def *shift =
+         nir_def *shift =
             nir_imul_imm(b, nir_iand_imm(b, offset, 3), 8);
          vec32 = nir_ushr(b, vec32, shift);
       }
 
       /* And now comes the pack/unpack step to match the original type. */
       unsigned dest_index = i * 32 / bit_size;
-      nir_ssa_def *temp_vec = nir_extract_bits(b, &vec32, 1, 0, num_dest_comps, bit_size);
+      nir_def *temp_vec = nir_extract_bits(b, &vec32, 1, 0, num_dest_comps, bit_size);
       for (unsigned comp = 0; comp < num_dest_comps; ++comp, ++dest_index)
          comps[dest_index] = nir_channel(b, temp_vec, comp);
    }
 
-   nir_ssa_def *result = nir_vec(b, comps, num_components);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);
+   nir_def *result = nir_vec(b, comps, num_components);
+   nir_def_rewrite_uses(&intr->dest.ssa, result);
    nir_instr_remove(&intr->instr);
 
    return true;
 }
 
 static void
-lower_masked_store_vec32(nir_builder *b, nir_ssa_def *offset, nir_ssa_def *index,
-                         nir_ssa_def *vec32, unsigned num_bits, nir_variable *var, unsigned alignment)
+lower_masked_store_vec32(nir_builder *b, nir_def *offset, nir_def *index,
+                         nir_def *vec32, unsigned num_bits, nir_variable *var, unsigned alignment)
 {
-   nir_ssa_def *mask = nir_imm_int(b, (1 << num_bits) - 1);
+   nir_def *mask = nir_imm_int(b, (1 << num_bits) - 1);
 
    /* If we have small alignments, we need to place them correctly in the u32 component. */
    if (alignment <= 2) {
-      nir_ssa_def *shift =
+      nir_def *shift =
          nir_imul_imm(b, nir_iand_imm(b, offset, 3), 8);
 
       vec32 = nir_ishl(b, vec32, shift);
@@ -143,9 +143,9 @@ lower_masked_store_vec32(nir_builder *b, nir_ssa_def *offset, nir_ssa_def *index
       nir_deref_atomic(b, 32, &deref->dest.ssa, vec32, .atomic_op = nir_atomic_op_ior);
    } else {
       /* For scratch, since we don't need atomics, just generate the read-modify-write in NIR */
-      nir_ssa_def *load = nir_load_array_var(b, var, index);
+      nir_def *load = nir_load_array_var(b, var, index);
 
-      nir_ssa_def *new_val = nir_ior(b, vec32,
+      nir_def *new_val = nir_ior(b, vec32,
                                      nir_iand(b,
                                               nir_inot(b, mask),
                                               load));
@@ -163,12 +163,12 @@ lower_32b_offset_store(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *offset = intr->src[1].ssa;
+   nir_def *offset = intr->src[1].ssa;
    if (intr->intrinsic == nir_intrinsic_store_shared)
       offset = nir_iadd_imm(b, offset, nir_intrinsic_base(intr));
    else
       offset = nir_u2u32(b, offset);
-   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+   nir_def *comps[NIR_MAX_VEC_COMPONENTS];
 
    unsigned comp_idx = 0;
    for (unsigned i = 0; i < num_components; i++)
@@ -178,10 +178,10 @@ lower_32b_offset_store(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *
    for (unsigned i = 0; i < num_bits; i += step) {
       /* For each 4byte chunk (or smaller) we generate a 32bit scalar store. */
       unsigned substore_num_bits = MIN2(num_bits - i, step);
-      nir_ssa_def *local_offset = nir_iadd_imm(b, offset, i / 8);
-      nir_ssa_def *vec32 = load_comps_to_vec(b, bit_size, &comps[comp_idx],
+      nir_def *local_offset = nir_iadd_imm(b, offset, i / 8);
+      nir_def *vec32 = load_comps_to_vec(b, bit_size, &comps[comp_idx],
                                              substore_num_bits / bit_size, 32);
-      nir_ssa_def *index = nir_ushr_imm(b, local_offset, 2);
+      nir_def *index = nir_ushr_imm(b, local_offset, 2);
 
       /* For anything less than 32bits we need to use the masked version of the
        * intrinsic to preserve data living in the same 32bit slot. */
@@ -272,7 +272,7 @@ dxil_nir_lower_constant_to_temp(nir_shader *nir)
                   deref->dest.ssa.bit_size = 32;
                   if (deref->deref_type == nir_deref_type_array) {
                      b.cursor = nir_before_instr(instr);
-                     nir_src_rewrite_ssa(&deref->arr.index, nir_u2u32(&b, deref->arr.index.ssa));
+                     nir_src_rewrite(&deref->arr.index, nir_u2u32(&b, deref->arr.index.ssa));
                   }
                }
             }
@@ -319,12 +319,12 @@ flatten_var_arrays(nir_builder *b, nir_instr *instr, void *data)
    assert(path.path[0]->deref_type == nir_deref_type_var);
    b->cursor = nir_before_instr(&path.path[0]->instr);
    nir_deref_instr *new_var_deref = nir_build_deref_var(b, var);
-   nir_ssa_def *index = NULL;
+   nir_def *index = NULL;
    for (unsigned level = 1; path.path[level]; ++level) {
       nir_deref_instr *arr_deref = path.path[level];
       assert(arr_deref->deref_type == nir_deref_type_array);
       b->cursor = nir_before_instr(&arr_deref->instr);
-      nir_ssa_def *val = nir_imul_imm(b, arr_deref->arr.index.ssa,
+      nir_def *val = nir_imul_imm(b, arr_deref->arr.index.ssa,
                                       glsl_get_component_slots(arr_deref->type));
       if (index) {
          index = nir_iadd(b, index, val);
@@ -337,25 +337,25 @@ flatten_var_arrays(nir_builder *b, nir_instr *instr, void *data)
    if (vector_comps > 1) {
       b->cursor = nir_before_instr(instr);
       if (intr->intrinsic == nir_intrinsic_load_deref) {
-         nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS];
+         nir_def *components[NIR_MAX_VEC_COMPONENTS];
          for (unsigned i = 0; i < vector_comps; ++i) {
-            nir_ssa_def *final_index = index ? nir_iadd_imm(b, index, i) : nir_imm_int(b, i);
+            nir_def *final_index = index ? nir_iadd_imm(b, index, i) : nir_imm_int(b, i);
             nir_deref_instr *comp_deref = nir_build_deref_array(b, new_var_deref, final_index);
             components[i] = nir_load_deref(b, comp_deref);
          }
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, components, vector_comps));
+         nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, components, vector_comps));
       } else if (intr->intrinsic == nir_intrinsic_store_deref) {
          for (unsigned i = 0; i < vector_comps; ++i) {
             if (((1 << i) & nir_intrinsic_write_mask(intr)) == 0)
                continue;
-            nir_ssa_def *final_index = index ? nir_iadd_imm(b, index, i) : nir_imm_int(b, i);
+            nir_def *final_index = index ? nir_iadd_imm(b, index, i) : nir_imm_int(b, i);
             nir_deref_instr *comp_deref = nir_build_deref_array(b, new_var_deref, final_index);
             nir_store_deref(b, comp_deref, nir_channel(b, intr->src[1].ssa, i), 1);
          }
       }
       nir_instr_remove(instr);
    } else {
-      nir_src_rewrite_ssa(&intr->src[0], &nir_build_deref_array(b, new_var_deref, index)->dest.ssa);
+      nir_src_rewrite(&intr->src[0], &nir_build_deref_array(b, new_var_deref, index)->dest.ssa);
    }
 
    nir_deref_path_finish(&path);
@@ -462,13 +462,13 @@ lower_deref_bit_size(nir_builder *b, nir_instr *instr, void *data)
       if (intr->intrinsic == nir_intrinsic_load_deref) {
          intr->dest.ssa.bit_size = glsl_get_bit_size(var_scalar_type);
          b->cursor = nir_after_instr(instr);
-         nir_ssa_def *downcast = nir_type_convert(b, &intr->dest.ssa, new_type, old_type, nir_rounding_mode_undef);
-         nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, downcast, downcast->parent_instr);
+         nir_def *downcast = nir_type_convert(b, &intr->dest.ssa, new_type, old_type, nir_rounding_mode_undef);
+         nir_def_rewrite_uses_after(&intr->dest.ssa, downcast, downcast->parent_instr);
       }
       else {
          b->cursor = nir_before_instr(instr);
-         nir_ssa_def *upcast = nir_type_convert(b, intr->src[1].ssa, old_type, new_type, nir_rounding_mode_undef);
-         nir_src_rewrite_ssa(&intr->src[1], upcast);
+         nir_def *upcast = nir_type_convert(b, intr->src[1].ssa, old_type, new_type, nir_rounding_mode_undef);
+         nir_src_rewrite(&intr->src[1], upcast);
       }
 
       while (deref->deref_type == nir_deref_type_array) {
@@ -488,12 +488,12 @@ lower_deref_bit_size(nir_builder *b, nir_instr *instr, void *data)
                                                       nir_iadd_imm(b, deref->arr.index.ssa, 1));
       b->cursor = nir_before_instr(instr);
       if (intr->intrinsic == nir_intrinsic_load_deref) {
-         nir_ssa_def *src1 = nir_load_deref(b, deref);
-         nir_ssa_def *src2 = nir_load_deref(b, deref2);
-         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_pack_64_2x32_split(b, src1, src2));
+         nir_def *src1 = nir_load_deref(b, deref);
+         nir_def *src2 = nir_load_deref(b, deref2);
+         nir_def_rewrite_uses(&intr->dest.ssa, nir_pack_64_2x32_split(b, src1, src2));
       } else {
-         nir_ssa_def *src1 = nir_unpack_64_2x32_split_x(b, intr->src[1].ssa);
-         nir_ssa_def *src2 = nir_unpack_64_2x32_split_y(b, intr->src[1].ssa);
+         nir_def *src1 = nir_unpack_64_2x32_split_x(b, intr->src[1].ssa);
+         nir_def *src2 = nir_unpack_64_2x32_split_y(b, intr->src[1].ssa);
          nir_store_deref(b, deref, src1, 1);
          nir_store_deref(b, deref, src2, 1);
       }
@@ -619,12 +619,12 @@ lower_shared_atomic(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var
 {
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_iadd_imm(b, intr->src[0].ssa, nir_intrinsic_base(intr));
-   nir_ssa_def *index = nir_ushr_imm(b, offset, 2);
+   nir_def *index = nir_ushr_imm(b, offset, 2);
 
    nir_deref_instr *deref = nir_build_deref_array(b, nir_build_deref_var(b, var), index);
-   nir_ssa_def *result;
+   nir_def *result;
    if (intr->intrinsic == nir_intrinsic_shared_atomic_swap)
       result = nir_deref_atomic_swap(b, 32, &deref->dest.ssa, intr->src[1].ssa, intr->src[2].ssa,
                                      .atomic_op = nir_intrinsic_atomic_op(intr));
@@ -632,7 +632,7 @@ lower_shared_atomic(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var
       result = nir_deref_atomic(b, 32, &deref->dest.ssa, intr->src[1].ssa,
                                 .atomic_op = nir_intrinsic_atomic_op(intr));
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);
+   nir_def_rewrite_uses(&intr->dest.ssa, result);
    nir_instr_remove(&intr->instr);
    return true;
 }
@@ -713,11 +713,11 @@ lower_deref_ssbo(nir_builder *b, nir_deref_instr *deref)
       /* We turn all deref_var into deref_cast and build a pointer value based on
        * the var binding which encodes the UAV id.
        */
-      nir_ssa_def *ptr = nir_imm_int64(b, (uint64_t)var->data.binding << 32);
+      nir_def *ptr = nir_imm_int64(b, (uint64_t)var->data.binding << 32);
       nir_deref_instr *deref_cast =
          nir_build_deref_cast(b, ptr, nir_var_mem_ssbo, deref->type,
                               glsl_get_explicit_stride(var->type));
-      nir_ssa_def_rewrite_uses(&deref->dest.ssa,
+      nir_def_rewrite_uses(&deref->dest.ssa,
                                &deref_cast->dest.ssa);
       nir_instr_remove(&deref->instr);
 
@@ -781,7 +781,7 @@ lower_alu_deref_srcs(nir_builder *b, nir_alu_instr *alu)
       if (root_deref->deref_type != nir_deref_type_cast)
          continue;
 
-      nir_ssa_def *ptr =
+      nir_def *ptr =
          nir_iadd(b, root_deref->parent.ssa,
                      nir_build_deref_offset(b, deref, cl_type_size_align));
       nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(ptr));
@@ -830,7 +830,7 @@ cast_phi(nir_builder *b, nir_phi_instr *phi, unsigned new_bit_size)
 
       b->cursor = nir_after_instr_and_phis(src->src.ssa->parent_instr);
 
-      nir_ssa_def *cast = nir_u2uN(b, src->src.ssa, new_bit_size);
+      nir_def *cast = nir_u2uN(b, src->src.ssa, new_bit_size);
 
       nir_phi_instr_add_src(lowered, src->pred, nir_src_for_ssa(cast));
    }
@@ -842,9 +842,9 @@ cast_phi(nir_builder *b, nir_phi_instr *phi, unsigned new_bit_size)
    nir_builder_instr_insert(b, &lowered->instr);
 
    b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
-   nir_ssa_def *result = nir_u2uN(b, &lowered->dest.ssa, old_bit_size);
+   nir_def *result = nir_u2uN(b, &lowered->dest.ssa, old_bit_size);
 
-   nir_ssa_def_rewrite_uses(&phi->dest.ssa, result);
+   nir_def_rewrite_uses(&phi->dest.ssa, result);
    nir_instr_remove(&phi->instr);
 }
 
@@ -1002,7 +1002,7 @@ dxil_nir_split_clip_cull_distance_instr(nir_builder *b,
       new_intermediate_deref = nir_build_deref_array(b, new_intermediate_deref, parent->arr.index.ssa);
    }
    nir_deref_instr *new_array_deref = nir_build_deref_array(b, new_intermediate_deref, nir_imm_int(b, total_index % 4));
-   nir_ssa_def_rewrite_uses(&deref->dest.ssa, &new_array_deref->dest.ssa);
+   nir_def_rewrite_uses(&deref->dest.ssa, &new_array_deref->dest.ssa);
    return true;
 }
 
@@ -1048,10 +1048,10 @@ dxil_nir_lower_double_math_instr(nir_builder *b,
          unsigned num_components = nir_op_infos[alu->op].input_sizes[i];
          if (!num_components)
             num_components = alu->dest.dest.ssa.num_components;
-         nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS];
+         nir_def *components[NIR_MAX_VEC_COMPONENTS];
          for (unsigned c = 0; c < num_components; ++c) {
-            nir_ssa_def *packed_double = nir_channel(b, alu->src[i].src.ssa, alu->src[i].swizzle[c]);
-            nir_ssa_def *unpacked_double = nir_unpack_64_2x32(b, packed_double);
+            nir_def *packed_double = nir_channel(b, alu->src[i].src.ssa, alu->src[i].swizzle[c]);
+            nir_def *unpacked_double = nir_unpack_64_2x32(b, packed_double);
             components[c] = nir_pack_double_2x32_dxil(b, unpacked_double);
             alu->src[i].swizzle[c] = c;
          }
@@ -1063,14 +1063,14 @@ dxil_nir_lower_double_math_instr(nir_builder *b,
    if (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) == nir_type_float &&
        alu->dest.dest.ssa.bit_size == 64) {
       b->cursor = nir_after_instr(&alu->instr);
-      nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS];
+      nir_def *components[NIR_MAX_VEC_COMPONENTS];
       for (unsigned c = 0; c < alu->dest.dest.ssa.num_components; ++c) {
-         nir_ssa_def *packed_double = nir_channel(b, &alu->dest.dest.ssa, c);
-         nir_ssa_def *unpacked_double = nir_unpack_double_2x32_dxil(b, packed_double);
+         nir_def *packed_double = nir_channel(b, &alu->dest.dest.ssa, c);
+         nir_def *unpacked_double = nir_unpack_double_2x32_dxil(b, packed_double);
          components[c] = nir_pack_64_2x32(b, unpacked_double);
       }
-      nir_ssa_def *repacked_dvec = nir_vec(b, components, alu->dest.dest.ssa.num_components);
-      nir_ssa_def_rewrite_uses_after(&alu->dest.dest.ssa, repacked_dvec, repacked_dvec->parent_instr);
+      nir_def *repacked_dvec = nir_vec(b, components, alu->dest.dest.ssa.num_components);
+      nir_def_rewrite_uses_after(&alu->dest.dest.ssa, repacked_dvec, repacked_dvec->parent_instr);
       progress = true;
    }
 
@@ -1128,7 +1128,7 @@ lower_system_value_to_zero_filter(const nir_instr* instr, const void* cb_state)
    return false;
 }
 
-static nir_ssa_def*
+static nir_def*
 lower_system_value_to_zero_instr(nir_builder* b, nir_instr* instr, void* _state)
 {
    return nir_imm_int(b, 0);
@@ -1156,8 +1156,8 @@ lower_load_local_group_size(nir_builder *b, nir_intrinsic_instr *intr)
       nir_const_value_for_int(b->shader->info.workgroup_size[1], 32),
       nir_const_value_for_int(b->shader->info.workgroup_size[2], 32)
    };
-   nir_ssa_def *size = nir_build_imm(b, 3, 32, v);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, size);
+   nir_def *size = nir_build_imm(b, 3, 32, v);
+   nir_def_rewrite_uses(&intr->dest.ssa, size);
    nir_instr_remove(&intr->instr);
 }
 
@@ -1423,7 +1423,7 @@ lower_sysval_to_load_input_impl(nir_builder *b, nir_instr *instr, void *data)
       ? 32 : intr->dest.ssa.bit_size;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *result = nir_load_input(b, intr->dest.ssa.num_components, bit_size, nir_imm_int(b, 0),
+   nir_def *result = nir_load_input(b, intr->dest.ssa.num_components, bit_size, nir_imm_int(b, 0),
       .base = var->data.driver_location, .dest_type = dest_type);
 
    /* The nir_type_uint32 is really a nir_type_bool32, but that type is very
@@ -1433,7 +1433,7 @@ lower_sysval_to_load_input_impl(nir_builder *b, nir_instr *instr, void *data)
    if (sysval == SYSTEM_VALUE_FRONT_FACE)
       result = nir_ine_imm(b, result, 0);
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);
+   nir_def_rewrite_uses(&intr->dest.ssa, result);
    return true;
 }
 
@@ -1610,14 +1610,14 @@ lower_ubo_array_one_to_static(struct nir_builder *b, nir_instr *inst,
    // Indexing out of bounds on an array of UBOs is considered undefined
    // behavior. Therefore, we just hardcode the index to 0.
    uint8_t bit_size = index->dest.ssa.bit_size;
-   nir_ssa_def *zero = nir_imm_intN_t(b, 0, bit_size);
-   nir_ssa_def *dest =
+   nir_def *zero = nir_imm_intN_t(b, 0, bit_size);
+   nir_def *dest =
       nir_vulkan_resource_index(b, index->num_components, bit_size, zero,
                                 .desc_set = nir_intrinsic_desc_set(index),
                                 .binding = nir_intrinsic_binding(index),
                                 .desc_type = nir_intrinsic_desc_type(index));
 
-   nir_ssa_def_rewrite_uses(&index->dest.ssa, dest);
+   nir_def_rewrite_uses(&index->dest.ssa, dest);
 
    return true;
 }
@@ -1641,7 +1641,7 @@ is_fquantize2f16(const nir_instr *instr, const void *data)
    return alu->op == nir_op_fquantize2f16;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_fquantize2f16(struct nir_builder *b, nir_instr *instr, void *data)
 {
    /*
@@ -1672,19 +1672,19 @@ lower_fquantize2f16(struct nir_builder *b, nir_instr *instr, void *data)
     *      return round(val);
     */
    nir_alu_instr *alu = nir_instr_as_alu(instr);
-   nir_ssa_def *src =
+   nir_def *src =
       nir_ssa_for_src(b, alu->src[0].src, nir_src_num_components(alu->src[0].src));
 
-   nir_ssa_def *neg_inf_cond =
+   nir_def *neg_inf_cond =
       nir_flt_imm(b, src, -65504.0f);
-   nir_ssa_def *pos_inf_cond =
+   nir_def *pos_inf_cond =
       nir_fgt_imm(b, src, 65504.0f);
-   nir_ssa_def *zero_cond =
+   nir_def *zero_cond =
       nir_flt_imm(b, nir_fabs(b, src), ldexpf(1.0, -14));
-   nir_ssa_def *zero = nir_iand_imm(b, src, 1 << 31);
-   nir_ssa_def *round = nir_iand_imm(b, src, ~BITFIELD_MASK(13));
+   nir_def *zero = nir_iand_imm(b, src, 1 << 31);
+   nir_def *round = nir_iand_imm(b, src, ~BITFIELD_MASK(13));
 
-   nir_ssa_def *res =
+   nir_def *res =
       nir_bcsel(b, neg_inf_cond, nir_imm_float(b, -INFINITY), round);
    res = nir_bcsel(b, pos_inf_cond, nir_imm_float(b, INFINITY), res);
    res = nir_bcsel(b, zero_cond, zero, res);
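
For reference, the bcsel chain above implements these scalar semantics (a
plain-C sketch; the mask step truncates the mantissa to f16's 10 bits rather
than rounding to nearest-even, matching the code, and NaN falls through the
masking path just as it does above):

   #include <math.h>
   #include <stdint.h>
   #include <string.h>

   static float
   fquantize2f16_ref(float val)
   {
      uint32_t bits;
      memcpy(&bits, &val, sizeof(bits));

      if (val < -65504.0f)                /* below the most negative finite f16 */
         return -INFINITY;
      if (val > 65504.0f)                 /* above the largest finite f16 */
         return INFINITY;
      if (fabsf(val) < ldexpf(1.0f, -14)) /* f16 denormal range: flush to +/-0 */
         bits &= 1u << 31;                /* keep only the sign bit */
      else
         bits &= ~0x1fffu;                /* drop the low 23 - 10 = 13 mantissa bits */

      memcpy(&val, &bits, sizeof(val));
      return val;
   }
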
@@ -1847,14 +1847,14 @@ update_writes(struct nir_builder *b, nir_instr *instr, void *_state)
    if (io.location != VARYING_SLOT_POS)
       return false;
 
-   nir_ssa_def *src = intr->src[0].ssa;
+   nir_def *src = intr->src[0].ssa;
    unsigned write_mask = nir_intrinsic_write_mask(intr);
    if (src->num_components == 4 && write_mask == 0xf)
       return false;
 
    b->cursor = nir_before_instr(instr);
    unsigned first_comp = nir_intrinsic_component(intr);
-   nir_ssa_def *channels[4] = { NULL, NULL, NULL, NULL };
+   nir_def *channels[4] = { NULL, NULL, NULL, NULL };
    assert(first_comp + src->num_components <= ARRAY_SIZE(channels));
    for (unsigned i = 0; i < src->num_components; ++i)
       if (write_mask & (1 << i))
@@ -1894,7 +1894,7 @@ is_sample_pos(const nir_instr *instr, const void *_data)
    return intr->intrinsic == nir_intrinsic_load_sample_pos;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_sample_pos(nir_builder *b, nir_instr *instr, void *_data)
 {
    return nir_load_sample_pos_from_id(b, 32, nir_load_sample_id(b));
@@ -1920,19 +1920,19 @@ lower_subgroup_id(nir_builder *b, nir_instr *instr, void *data)
        b->shader->info.workgroup_size[2] == 1) {
       /* When using Nx1x1 groups, use a simple stable algorithm
        * which is almost guaranteed to be correct. */
-      nir_ssa_def *subgroup_id = nir_udiv(b, nir_load_local_invocation_index(b), nir_load_subgroup_size(b));
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, subgroup_id);
+      nir_def *subgroup_id = nir_udiv(b, nir_load_local_invocation_index(b), nir_load_subgroup_size(b));
+      nir_def_rewrite_uses(&intr->dest.ssa, subgroup_id);
       return true;
    }
 
-   nir_ssa_def **subgroup_id = (nir_ssa_def **)data;
+   nir_def **subgroup_id = (nir_def **)data;
    if (*subgroup_id == NULL) {
       nir_variable *subgroup_id_counter = nir_variable_create(b->shader, nir_var_mem_shared, glsl_uint_type(), "dxil_SubgroupID_counter");
       nir_variable *subgroup_id_local = nir_local_variable_create(b->impl, glsl_uint_type(), "dxil_SubgroupID_local");
       nir_store_var(b, subgroup_id_local, nir_imm_int(b, 0), 1);
 
       nir_deref_instr *counter_deref = nir_build_deref_var(b, subgroup_id_counter);
-      nir_ssa_def *tid = nir_load_local_invocation_index(b);
+      nir_def *tid = nir_load_local_invocation_index(b);
       nir_if *nif = nir_push_if(b, nir_ieq_imm(b, tid, 0));
       nir_store_deref(b, counter_deref, nir_imm_int(b, 0), 1);
       nir_pop_if(b, nif);
@@ -1944,22 +1944,22 @@ lower_subgroup_id(nir_builder *b, nir_instr *instr, void *data)
                          .memory_modes = nir_var_mem_shared);
 
       nif = nir_push_if(b, nir_elect(b, 1));
-      nir_ssa_def *subgroup_id_first_thread = nir_deref_atomic(b, 32, &counter_deref->dest.ssa, nir_imm_int(b, 1),
+      nir_def *subgroup_id_first_thread = nir_deref_atomic(b, 32, &counter_deref->dest.ssa, nir_imm_int(b, 1),
                                                                .atomic_op = nir_atomic_op_iadd);
       nir_store_var(b, subgroup_id_local, subgroup_id_first_thread, 1);
       nir_pop_if(b, nif);
 
-      nir_ssa_def *subgroup_id_loaded = nir_load_var(b, subgroup_id_local);
+      nir_def *subgroup_id_loaded = nir_load_var(b, subgroup_id_local);
       *subgroup_id = nir_read_first_invocation(b, subgroup_id_loaded);
    }
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, *subgroup_id);
+   nir_def_rewrite_uses(&intr->dest.ssa, *subgroup_id);
    return true;
 }
 
 bool
 dxil_nir_lower_subgroup_id(nir_shader *s)
 {
-   nir_ssa_def *subgroup_id = NULL;
+   nir_def *subgroup_id = NULL;
    return nir_shader_instructions_pass(s, lower_subgroup_id, nir_metadata_none, &subgroup_id);
 }
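
The general path above works because wave packing inside a workgroup is not
otherwise observable: thread 0 zeroes a shared counter, one elected lane per
wave atomically increments it, and nir_read_first_invocation broadcasts the
ticket to the rest of the wave. The Nx1x1 fast path collapses to integer math
(a sketch, assuming the linear invocation layout the comment relies on):

   /* For an Nx1x1 workgroup, invocation i belongs to wave i / wave_size. */
   static unsigned
   subgroup_id_1d(unsigned local_invocation_index, unsigned subgroup_size)
   {
      return local_invocation_index / subgroup_size;
   }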
 
@@ -1973,14 +1973,14 @@ lower_num_subgroups(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *subgroup_size = nir_load_subgroup_size(b);
-   nir_ssa_def *size_minus_one = nir_iadd_imm(b, subgroup_size, -1);
-   nir_ssa_def *workgroup_size_vec = nir_load_workgroup_size(b);
-   nir_ssa_def *workgroup_size = nir_imul(b, nir_channel(b, workgroup_size_vec, 0),
+   nir_def *subgroup_size = nir_load_subgroup_size(b);
+   nir_def *size_minus_one = nir_iadd_imm(b, subgroup_size, -1);
+   nir_def *workgroup_size_vec = nir_load_workgroup_size(b);
+   nir_def *workgroup_size = nir_imul(b, nir_channel(b, workgroup_size_vec, 0),
                                              nir_imul(b, nir_channel(b, workgroup_size_vec, 1),
                                                          nir_channel(b, workgroup_size_vec, 2)));
-   nir_ssa_def *ret = nir_idiv(b, nir_iadd(b, workgroup_size, size_minus_one), subgroup_size);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, ret);
+   nir_def *ret = nir_idiv(b, nir_iadd(b, workgroup_size, size_minus_one), subgroup_size);
+   nir_def_rewrite_uses(&intr->dest.ssa, ret);
    return true;
 }
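
The idiv above is a ceiling divide: an 8x8x1 workgroup (64 invocations) with a
subgroup size of 32 gives (64 + 31) / 32 = 2 subgroups, while 65 invocations
would give 3. In scalar form (a sketch):

   static unsigned
   num_subgroups_ref(unsigned wg_x, unsigned wg_y, unsigned wg_z,
                     unsigned subgroup_size)
   {
      unsigned workgroup_size = wg_x * wg_y * wg_z;
      /* DIV_ROUND_UP(workgroup_size, subgroup_size) */
      return (workgroup_size + subgroup_size - 1) / subgroup_size;
   }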
 
@@ -2014,7 +2014,7 @@ static void
 split_unaligned_load(nir_builder *b, nir_intrinsic_instr *intrin, unsigned alignment)
 {
    enum gl_access_qualifier access = nir_intrinsic_access(intrin);
-   nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS * NIR_MAX_VEC_COMPONENTS * sizeof(int64_t) / 8];
+   nir_def *srcs[NIR_MAX_VEC_COMPONENTS * NIR_MAX_VEC_COMPONENTS * sizeof(int64_t) / 8];
    unsigned comp_size = intrin->dest.ssa.bit_size / 8;
    unsigned num_comps = intrin->dest.ssa.num_components;
 
@@ -2031,8 +2031,8 @@ split_unaligned_load(nir_builder *b, nir_intrinsic_instr *intrin, unsigned align
       srcs[i] = nir_load_deref_with_access(b, elem, access);
    }
 
-   nir_ssa_def *new_dest = nir_extract_bits(b, srcs, num_loads, 0, num_comps, intrin->dest.ssa.bit_size);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, new_dest);
+   nir_def *new_dest = nir_extract_bits(b, srcs, num_loads, 0, num_comps, intrin->dest.ssa.bit_size);
+   nir_def_rewrite_uses(&intrin->dest.ssa, new_dest);
    nir_instr_remove(&intrin->instr);
 }
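
The byte math, with hypothetical numbers: a vec4 32-bit load (16 bytes)
through a deref that is only 4-byte aligned becomes DIV_ROUND_UP(16, 4) = 4
dword loads at offsets 0, 4, 8 and 12, which nir_extract_bits then stitches
back into the original vector. A scalar model of the split:

   #include <stdint.h>
   #include <string.h>

   /* out must hold num_loads * alignment bytes; any tail beyond
    * comp_size * num_comps is fetched but ignored, just as the NIR
    * version trims it with nir_extract_bits. */
   static void
   split_unaligned_load_ref(const uint8_t *ptr, unsigned comp_size,
                            unsigned num_comps, unsigned alignment,
                            uint8_t *out)
   {
      unsigned total = comp_size * num_comps;
      unsigned num_loads = (total + alignment - 1) / alignment;
      for (unsigned i = 0; i < num_loads; i++)
         memcpy(&out[i * alignment], &ptr[i * alignment], alignment);
   }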
 
@@ -2041,7 +2041,7 @@ split_unaligned_store(nir_builder *b, nir_intrinsic_instr *intrin, unsigned alig
 {
    enum gl_access_qualifier access = nir_intrinsic_access(intrin);
 
-   nir_ssa_def *value = intrin->src[1].ssa;
+   nir_def *value = intrin->src[1].ssa;
    unsigned comp_size = value->bit_size / 8;
    unsigned num_comps = value->num_components;
 
@@ -2054,7 +2054,7 @@ split_unaligned_store(nir_builder *b, nir_intrinsic_instr *intrin, unsigned alig
 
    unsigned num_stores = DIV_ROUND_UP(comp_size * num_comps, alignment);
    for (unsigned i = 0; i < num_stores; ++i) {
-      nir_ssa_def *substore_val = nir_extract_bits(b, &value, 1, i * alignment * 8, 1, alignment * 8);
+      nir_def *substore_val = nir_extract_bits(b, &value, 1, i * alignment * 8, 1, alignment * 8);
       nir_deref_instr *elem = nir_build_deref_ptr_as_array(b, cast, nir_imm_intN_t(b, i, cast->dest.ssa.bit_size));
       nir_store_deref_with_access(b, elem, substore_val, ~0, access);
    }
@@ -2094,7 +2094,7 @@ dxil_nir_split_unaligned_loads_stores(nir_shader *shader, nir_variable_mode mode
             if (alignment >= req_align)
                continue;
 
-            nir_ssa_def *val;
+            nir_def *val;
             if (intrin->intrinsic == nir_intrinsic_load_deref) {
                val = &intrin->dest.ssa;
             } else {
@@ -2133,9 +2133,9 @@ lower_inclusive_to_exclusive(nir_builder *b, nir_intrinsic_instr *intr)
    intr->intrinsic = nir_intrinsic_exclusive_scan;
    nir_intrinsic_set_reduction_op(intr, op);
 
-   nir_ssa_def *final_val = nir_build_alu2(b, nir_intrinsic_reduction_op(intr),
+   nir_def *final_val = nir_build_alu2(b, nir_intrinsic_reduction_op(intr),
                                            &intr->dest.ssa, intr->src[0].ssa);
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, final_val, final_val->parent_instr);
+   nir_def_rewrite_uses_after(&intr->dest.ssa, final_val, final_val->parent_instr);
 }
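
The rewrite uses the scan identity inclusive_i = op(exclusive_i, src_i): the
intrinsic is retargeted to an exclusive scan in place, then each lane combines
the result with its own input once more. For iadd over lane inputs
{3, 1, 4, 1}, the exclusive scan is {0, 3, 4, 8} and the fixup yields the
inclusive {3, 4, 8, 9}. A scalar model with iadd standing in for the
reduction op:

   static void
   inclusive_from_exclusive_iadd(const int *src, int *inclusive, int n)
   {
      int accum = 0;
      for (int i = 0; i < n; i++) {
         int exclusive = accum;             /* what the rewritten intrinsic yields */
         inclusive[i] = exclusive + src[i]; /* the nir_build_alu2 fixup */
         accum += src[i];
      }
   }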
 
 static bool
@@ -2166,9 +2166,9 @@ lower_subgroup_scan(nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_before_instr(instr);
    nir_op op = nir_intrinsic_reduction_op(intr);
-   nir_ssa_def *subgroup_id = nir_load_subgroup_invocation(b);
-   nir_ssa_def *active_threads = nir_ballot(b, 4, 32, nir_imm_true(b));
-   nir_ssa_def *base_value;
+   nir_def *subgroup_id = nir_load_subgroup_invocation(b);
+   nir_def *active_threads = nir_ballot(b, 4, 32, nir_imm_true(b));
+   nir_def *base_value;
    uint32_t bit_size = intr->dest.ssa.bit_size;
    if (op == nir_op_iand || op == nir_op_umin)
       base_value = nir_imm_intN_t(b, ~0ull, bit_size);
@@ -2191,12 +2191,12 @@ lower_subgroup_scan(nir_builder *b, nir_instr *instr, void *data)
    nir_store_var(b, loop_counter_var, nir_imm_int(b, 0), 1);
    nir_store_var(b, result_var, base_value, 1);
    nir_loop *loop = nir_push_loop(b);
-   nir_ssa_def *loop_counter = nir_load_var(b, loop_counter_var);
+   nir_def *loop_counter = nir_load_var(b, loop_counter_var);
    nir_if *nif = nir_push_if(b, intr->intrinsic == nir_intrinsic_inclusive_scan ?
       nir_ige(b, subgroup_id, loop_counter) :
       nir_ilt(b, loop_counter, subgroup_id));
    nir_if *if_active_thread = nir_push_if(b, nir_ballot_bitfield_extract(b, 32, active_threads, loop_counter));
-   nir_ssa_def *result = nir_build_alu2(b, op,
+   nir_def *result = nir_build_alu2(b, op,
                                         nir_load_var(b, result_var),
                                         nir_read_invocation(b, intr->src[0].ssa, loop_counter));
    nir_store_var(b, result_var, result, 1);
@@ -2208,7 +2208,7 @@ lower_subgroup_scan(nir_builder *b, nir_instr *instr, void *data)
    nir_pop_loop(b, loop);
 
    result = nir_load_var(b, result_var);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, result);
+   nir_def_rewrite_uses(&intr->dest.ssa, result);
    return true;
 }
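
The loop is a straightforward O(subgroup_size) scan: every lane walks the lane
ids, folding in the values of active lanes ordered before it, or up to and
including itself for the inclusive variant, starting from the operation's
identity (~0 for iand/umin above). A scalar model with iadd standing in for
the reduction op:

   #include <stdbool.h>

   static int
   subgroup_scan_ref(const int *vals, const bool *active, int subgroup_size,
                     int lane, bool inclusive, int identity)
   {
      int result = identity;
      for (int i = 0; i < subgroup_size; i++) {
         bool counted = inclusive ? i <= lane : i < lane;
         if (counted && active[i])
            result += vals[i];   /* stand-in for nir_build_alu2(op, ...) */
      }
      return result;
   }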
 
@@ -2237,9 +2237,9 @@ lower_load_face(nir_builder *b, nir_instr *instr, void *data)
    b->cursor = nir_before_instr(&intr->instr);
 
    nir_variable *var = data;
-   nir_ssa_def *load = nir_ine_imm(b, nir_load_var(b, var), 0);
+   nir_def *load = nir_ine_imm(b, nir_load_var(b, var), 0);
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, load);
+   nir_def_rewrite_uses(&intr->dest.ssa, load);
    nir_instr_remove(instr);
    return true;
 }
@@ -2276,7 +2276,7 @@ move_consts(nir_builder *b, nir_instr *instr, void *data)
                                                                          load_const->def.bit_size);
             memcpy(new_load->value, load_const->value, sizeof(load_const->value[0]) * load_const->def.num_components);
             nir_builder_instr_insert(b, &new_load->instr);
-            nir_src_rewrite_ssa(src, &new_load->def);
+            nir_src_rewrite(src, &new_load->def);
             progress = true;
          }
       }
index 3e07e4e..52bbaab 100644 (file)
@@ -95,76 +95,76 @@ lower_int_cubmap_to_array_filter(const nir_instr *instr,
 
 typedef struct {
    bool image;
-   nir_ssa_def *rx;
-   nir_ssa_def *ry;
-   nir_ssa_def *rz;
-   nir_ssa_def *arx;
-   nir_ssa_def *ary;
-   nir_ssa_def *arz;
-   nir_ssa_def *array;
+   nir_def *rx;
+   nir_def *ry;
+   nir_def *rz;
+   nir_def *arx;
+   nir_def *ary;
+   nir_def *arz;
+   nir_def *array;
 } coord_t;
 
 
 /* This is taken from sp_tex_sample:convert_cube */
-static nir_ssa_def *
+static nir_def *
 evaluate_face_x(nir_builder *b, coord_t *coord)
 {
-   nir_ssa_def *sign = nir_fsign(b, coord->rx);
-   nir_ssa_def *positive = nir_fge_imm(b, coord->rx, 0.0);
-   nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arx);
+   nir_def *sign = nir_fsign(b, coord->rx);
+   nir_def *positive = nir_fge_imm(b, coord->rx, 0.0);
+   nir_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arx);
 
-   nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
-   nir_ssa_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
-   nir_ssa_def *face = nir_bcsel(b, positive, nir_imm_float(b, 0.0), nir_imm_float(b, 1.0));
+   nir_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
+   nir_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
+   nir_def *face = nir_bcsel(b, positive, nir_imm_float(b, 0.0), nir_imm_float(b, 1.0));
 
    if (coord->array)
       face = nir_fadd(b, face, coord->array);
 
    return coord->image ?
-      nir_vec4(b, x,y, face, nir_ssa_undef(b, 1, 32)) :
+      nir_vec4(b, x,y, face, nir_undef(b, 1, 32)) :
       nir_vec3(b, x,y, face);
 }
 
-static nir_ssa_def *
+static nir_def *
 evaluate_face_y(nir_builder *b, coord_t *coord)
 {
-   nir_ssa_def *sign = nir_fsign(b, coord->ry);
-   nir_ssa_def *positive = nir_fge_imm(b, coord->ry, 0.0);
-   nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, 0.5), coord->ary);
+   nir_def *sign = nir_fsign(b, coord->ry);
+   nir_def *positive = nir_fge_imm(b, coord->ry, 0.0);
+   nir_def *ima = nir_fdiv(b, nir_imm_float(b, 0.5), coord->ary);
 
-   nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, ima, coord->rx), 0.5);
-   nir_ssa_def *y = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
-   nir_ssa_def *face = nir_bcsel(b, positive, nir_imm_float(b, 2.0), nir_imm_float(b, 3.0));
+   nir_def *x = nir_fadd_imm(b, nir_fmul(b, ima, coord->rx), 0.5);
+   nir_def *y = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5);
+   nir_def *face = nir_bcsel(b, positive, nir_imm_float(b, 2.0), nir_imm_float(b, 3.0));
 
    if (coord->array)
       face = nir_fadd(b, face, coord->array);
    
    return coord->image ?
-      nir_vec4(b, x,y, face, nir_ssa_undef(b, 1, 32)) :
+      nir_vec4(b, x,y, face, nir_undef(b, 1, 32)) :
       nir_vec3(b, x,y, face);
 }
 
-static nir_ssa_def *
+static nir_def *
 evaluate_face_z(nir_builder *b, coord_t *coord)
 {
-   nir_ssa_def *sign = nir_fsign(b, coord->rz);
-   nir_ssa_def *positive = nir_fge_imm(b, coord->rz, 0.0);
-   nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arz);
+   nir_def *sign = nir_fsign(b, coord->rz);
+   nir_def *positive = nir_fge_imm(b, coord->rz, 0.0);
+   nir_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arz);
 
-   nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), nir_fneg(b, coord->rx)), 0.5);
-   nir_ssa_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
-   nir_ssa_def *face = nir_bcsel(b, positive, nir_imm_float(b, 4.0), nir_imm_float(b, 5.0));
+   nir_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), nir_fneg(b, coord->rx)), 0.5);
+   nir_def *y = nir_fadd_imm(b, nir_fmul(b, ima, coord->ry), 0.5);
+   nir_def *face = nir_bcsel(b, positive, nir_imm_float(b, 4.0), nir_imm_float(b, 5.0));
 
    if (coord->array)
       face = nir_fadd(b, face, coord->array);
    
    return coord->image ?
-      nir_vec4(b, x,y, face, nir_ssa_undef(b, 1, 32)) :
+      nir_vec4(b, x,y, face, nir_undef(b, 1, 32)) :
       nir_vec3(b, x,y, face);
 }
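
For reference, the three helpers implement the standard cube-map projection:
pick the major axis, divide the two minor components by its magnitude, and
remap from [-1, 1] to [0, 1], with the -0.5 scale and the axis sign folded
into ima. The X-major case in scalar form (a sketch; Y and Z are analogous):

   #include <math.h>

   static void
   face_x_ref(float rx, float ry, float rz,
              float *x, float *y, float *face)
   {
      float sign = rx >= 0.0f ? 1.0f : -1.0f;
      float ima = -0.5f / fabsf(rx);
      *x = sign * ima * rz + 0.5f;      /* sc = -sign(rx) * rz */
      *y = ima * ry + 0.5f;             /* tc = -ry */
      *face = rx >= 0.0f ? 0.0f : 1.0f; /* +X is face 0, -X is face 1 */
   }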
 
-static nir_ssa_def *
-create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coord, nir_texop op)
+static nir_def *
+create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_def *coord, nir_texop op)
 {
    nir_tex_instr *array_tex;
 
@@ -194,8 +194,8 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *
    return &array_tex->dest.ssa;
 }
 
-static nir_ssa_def *
-handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *face, nir_ssa_def *array_slice_cube_base, nir_ssa_def *tex_size)
+static nir_def *
+handle_cube_edge(nir_builder *b, nir_def *x, nir_def *y, nir_def *face, nir_def *array_slice_cube_base, nir_def *tex_size)
 {
    enum cube_remap
    {
@@ -263,22 +263,22 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
       cube_remap_pos_y
    };
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
+   nir_def *zero = nir_imm_int(b, 0);
    
    /* Doesn't matter since the texture is square */
    tex_size = nir_channel(b, tex_size, 0);
 
-   nir_ssa_def *x_on = nir_iand(b, nir_ige(b, x, zero), nir_ige(b, tex_size, x));
-   nir_ssa_def *y_on = nir_iand(b, nir_ige(b, y, zero), nir_ige(b, tex_size, y));
-   nir_ssa_def *one_on = nir_ixor(b, x_on, y_on);
+   nir_def *x_on = nir_iand(b, nir_ige(b, x, zero), nir_ige(b, tex_size, x));
+   nir_def *y_on = nir_iand(b, nir_ige(b, y, zero), nir_ige(b, tex_size, y));
+   nir_def *one_on = nir_ixor(b, x_on, y_on);
 
    /* If the sample did not fall off the face in either dimension, then set output = input */
-   nir_ssa_def *x_result = x;
-   nir_ssa_def *y_result = y;
-   nir_ssa_def *face_result = face;
+   nir_def *x_result = x;
+   nir_def *y_result = y;
+   nir_def *face_result = face;
 
    /* otherwise, if the sample fell off the face in either the X or the Y direction, remap to the new face */
-   nir_ssa_def *remap_predicates[4] =
+   nir_def *remap_predicates[4] =
    {
       nir_iand(b, one_on, nir_ilt(b, x, zero)),
       nir_iand(b, one_on, nir_ilt(b, tex_size, x)),
@@ -286,7 +286,7 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
       nir_iand(b, one_on, nir_ilt(b, tex_size, y)),
    };
 
-   nir_ssa_def *remap_array[cube_remap_size];
+   nir_def *remap_array[cube_remap_size];
 
    remap_array[cube_remap_zero] = zero;
    remap_array[cube_remap_x] = x;
@@ -301,7 +301,7 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
 
       /* For each possible original face */
       for (unsigned j = 0; j < 6; j++) {
-         nir_ssa_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq_imm(b, face, j));
+         nir_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq_imm(b, face, j));
 
          x_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_x], x_result);
          y_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_y], y_result);
@@ -312,48 +312,48 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
    return nir_vec3(b, x_result, y_result, nir_iadd(b, face_result, array_slice_cube_base));
 }
 
-static nir_ssa_def *
-handle_cube_gather(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coord)
+static nir_def *
+handle_cube_gather(nir_builder *b, nir_tex_instr *tex, nir_def *coord)
 {
-   nir_ssa_def *tex_size = nir_get_texture_size(b, tex);
+   nir_def *tex_size = nir_get_texture_size(b, tex);
 
    /* nir_get_texture_size puts the cursor before the tex op */
    b->cursor = nir_after_instr(coord->parent_instr);
 
-   nir_ssa_def *const_05 = nir_imm_float(b, 0.5f);
-   nir_ssa_def *texel_coords = nir_fmul(b, nir_trim_vector(b, coord, 2),
+   nir_def *const_05 = nir_imm_float(b, 0.5f);
+   nir_def *texel_coords = nir_fmul(b, nir_trim_vector(b, coord, 2),
                                         nir_i2f32(b, nir_trim_vector(b, tex_size, 2)));
 
-   nir_ssa_def *x_orig = nir_channel(b, texel_coords, 0);
-   nir_ssa_def *y_orig = nir_channel(b, texel_coords, 1);
+   nir_def *x_orig = nir_channel(b, texel_coords, 0);
+   nir_def *y_orig = nir_channel(b, texel_coords, 1);
 
-   nir_ssa_def *x_pos = nir_f2i32(b, nir_fadd(b, x_orig, const_05));
-   nir_ssa_def *x_neg = nir_f2i32(b, nir_fsub(b, x_orig, const_05));
-   nir_ssa_def *y_pos = nir_f2i32(b, nir_fadd(b, y_orig, const_05));
-   nir_ssa_def *y_neg = nir_f2i32(b, nir_fsub(b, y_orig, const_05));
-   nir_ssa_def *coords[4][2] = {
+   nir_def *x_pos = nir_f2i32(b, nir_fadd(b, x_orig, const_05));
+   nir_def *x_neg = nir_f2i32(b, nir_fsub(b, x_orig, const_05));
+   nir_def *y_pos = nir_f2i32(b, nir_fadd(b, y_orig, const_05));
+   nir_def *y_neg = nir_f2i32(b, nir_fsub(b, y_orig, const_05));
+   nir_def *coords[4][2] = {
       { x_neg, y_pos },
       { x_pos, y_pos },
       { x_pos, y_neg },
       { x_neg, y_neg },
    };
 
-   nir_ssa_def *array_slice_2d = nir_f2i32(b, nir_channel(b, coord, 2));
-   nir_ssa_def *face = nir_imod_imm(b, array_slice_2d, 6);
-   nir_ssa_def *array_slice_cube_base = nir_isub(b, array_slice_2d, face);
+   nir_def *array_slice_2d = nir_f2i32(b, nir_channel(b, coord, 2));
+   nir_def *face = nir_imod_imm(b, array_slice_2d, 6);
+   nir_def *array_slice_cube_base = nir_isub(b, array_slice_2d, face);
 
-   nir_ssa_def *channels[4];
+   nir_def *channels[4];
    for (unsigned i = 0; i < 4; ++i) {
-      nir_ssa_def *final_coord = handle_cube_edge(b, coords[i][0], coords[i][1], face, array_slice_cube_base, tex_size);
-      nir_ssa_def *sampled_val = create_array_tex_from_cube_tex(b, tex, final_coord, nir_texop_txf);
+      nir_def *final_coord = handle_cube_edge(b, coords[i][0], coords[i][1], face, array_slice_cube_base, tex_size);
+      nir_def *sampled_val = create_array_tex_from_cube_tex(b, tex, final_coord, nir_texop_txf);
       channels[i] = nir_channel(b, sampled_val, tex->component);
    }
 
    return nir_vec(b, channels, 4);
 }
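
The four offsets form tg4's 2x2 footprint around the unnormalized coordinate,
in gather component order (-x,+y), (+x,+y), (+x,-y), (-x,-y); each corner is
edge-corrected and fetched with txf above. The corner math in scalar form
(a sketch; f2i32 truncates toward zero, as the casts do here):

   #include <string.h>

   static void
   gather_corners(float x, float y, int corner[4][2])
   {
      int x_neg = (int)(x - 0.5f), x_pos = (int)(x + 0.5f);
      int y_neg = (int)(y - 0.5f), y_pos = (int)(y + 0.5f);
      int c[4][2] = { { x_neg, y_pos }, { x_pos, y_pos },
                      { x_pos, y_neg }, { x_neg, y_neg } };
      memcpy(corner, c, sizeof(c));
   }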
 
-static nir_ssa_def *
-lower_cube_coords(nir_builder *b, nir_ssa_def *coord, bool is_array, bool is_image)
+static nir_def *
+lower_cube_coords(nir_builder *b, nir_def *coord, bool is_array, bool is_image)
 {
    coord_t coords;
    coords.image = is_image;
@@ -367,43 +367,43 @@ lower_cube_coords(nir_builder *b, nir_ssa_def *coord, bool is_array, bool is_ima
    if (is_array)
       coords.array = nir_fmul_imm(b, nir_channel(b, coord, 3), 6.0f);
 
-   nir_ssa_def *use_face_x = nir_iand(b,
+   nir_def *use_face_x = nir_iand(b,
                                       nir_fge(b, coords.arx, coords.ary),
                                       nir_fge(b, coords.arx, coords.arz));
 
    nir_if *use_face_x_if = nir_push_if(b, use_face_x);
-   nir_ssa_def *face_x_coord = evaluate_face_x(b, &coords);
+   nir_def *face_x_coord = evaluate_face_x(b, &coords);
    nir_if *use_face_x_else = nir_push_else(b, use_face_x_if);
 
-   nir_ssa_def *use_face_y = nir_iand(b,
+   nir_def *use_face_y = nir_iand(b,
                                       nir_fge(b, coords.ary, coords.arx),
                                       nir_fge(b, coords.ary, coords.arz));
 
    nir_if *use_face_y_if = nir_push_if(b, use_face_y);
-   nir_ssa_def *face_y_coord = evaluate_face_y(b, &coords);
+   nir_def *face_y_coord = evaluate_face_y(b, &coords);
    nir_if *use_face_y_else = nir_push_else(b, use_face_y_if);
 
-   nir_ssa_def *face_z_coord = evaluate_face_z(b, &coords);
+   nir_def *face_z_coord = evaluate_face_z(b, &coords);
 
    nir_pop_if(b, use_face_y_else);
-   nir_ssa_def *face_y_or_z_coord = nir_if_phi(b, face_y_coord, face_z_coord);
+   nir_def *face_y_or_z_coord = nir_if_phi(b, face_y_coord, face_z_coord);
    nir_pop_if(b, use_face_x_else);
 
    // This contains in xy the normalized sample coordinates, and in z the face index
-   nir_ssa_def *coord_and_face = nir_if_phi(b, face_x_coord, face_y_or_z_coord);
+   nir_def *coord_and_face = nir_if_phi(b, face_x_coord, face_y_or_z_coord);
 
    return coord_and_face;
 }
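
The if/else ladder picks the major axis by comparing absolute components, X
winning ties against Y and Z, and Y against Z; the two nir_if_phi merges then
select the matching face computation. As a scalar decision function
(a sketch):

   #include <math.h>

   static int
   major_axis(float rx, float ry, float rz)
   {
      float ax = fabsf(rx), ay = fabsf(ry), az = fabsf(rz);
      if (ax >= ay && ax >= az)
         return 0;   /* evaluate_face_x */
      if (ay >= ax && ay >= az)
         return 1;   /* evaluate_face_y */
      return 2;      /* evaluate_face_z */
   }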
 
-static nir_ssa_def *
+static nir_def *
 lower_cube_sample(nir_builder *b, nir_tex_instr *tex)
 {
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
    assert(coord_index >= 0);
 
    /* Evaluate the face and the xy coordinates for a 2D tex op */
-   nir_ssa_def *coord = tex->src[coord_index].src.ssa;
-   nir_ssa_def *coord_and_face = lower_cube_coords(b, coord, tex->is_array, false);
+   nir_def *coord = tex->src[coord_index].src.ssa;
+   nir_def *coord_and_face = lower_cube_coords(b, coord, tex->is_array, false);
 
    if (tex->op == nir_texop_tg4)
       return handle_cube_gather(b, tex, coord_and_face);
@@ -411,7 +411,7 @@ lower_cube_sample(nir_builder *b, nir_tex_instr *tex)
       return create_array_tex_from_cube_tex(b, tex, coord_and_face, tex->op);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_cube_image_load_store_atomic(nir_builder *b, nir_intrinsic_instr *intr)
 {
    b->cursor = nir_before_instr(&intr->instr);
@@ -421,29 +421,29 @@ lower_cube_image_load_store_atomic(nir_builder *b, nir_intrinsic_instr *intr)
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_cube_txs(nir_builder *b, nir_tex_instr *tex)
 {
    b->cursor = nir_after_instr(&tex->instr);
    if (!tex->is_array)
       return nir_trim_vector(b, &tex->dest.ssa, 2);
 
-   nir_ssa_def *array_dim = nir_channel(b, &tex->dest.ssa, 2);
-   nir_ssa_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
+   nir_def *array_dim = nir_channel(b, &tex->dest.ssa, 2);
+   nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
    return nir_vec3(b, nir_channel(b, &tex->dest.ssa, 0),
                       nir_channel(b, &tex->dest.ssa, 1),
                       cube_array_dim);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_cube_image_size(nir_builder *b, nir_intrinsic_instr *intr)
 {
    b->cursor = nir_after_instr(&intr->instr);
    if (!nir_intrinsic_image_array(intr))
       return nir_trim_vector(b, &intr->dest.ssa, 2);
 
-   nir_ssa_def *array_dim = nir_channel(b, &intr->dest.ssa, 2);
-   nir_ssa_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
+   nir_def *array_dim = nir_channel(b, &intr->dest.ssa, 2);
+   nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
    return nir_vec3(b, nir_channel(b, &intr->dest.ssa, 0),
                       nir_channel(b, &intr->dest.ssa, 1),
                       cube_array_dim);
@@ -482,7 +482,7 @@ make_2darray_from_cubemap_with_array(const struct glsl_type *type, bool is_image
       return make_2darray_sampler_from_cubemap(type);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_int_cubemap_to_array_tex(nir_builder *b, nir_tex_instr *tex)
 {
    switch (tex->op) {
@@ -500,7 +500,7 @@ lower_int_cubemap_to_array_tex(nir_builder *b, nir_tex_instr *tex)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_cube_image_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
 {
    if (intr->intrinsic == nir_intrinsic_image_size ||
@@ -510,7 +510,7 @@ lower_cube_image_intrinsic(nir_builder *b, nir_intrinsic_instr *intr)
       return lower_cube_image_load_store_atomic(b, intr);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_cube_image_deref(nir_builder *b, nir_deref_instr *deref)
 {
    deref->type = make_2darray_from_cubemap_with_array(
@@ -519,7 +519,7 @@ lower_cube_image_deref(nir_builder *b, nir_deref_instr *deref)
    return NIR_LOWER_INSTR_PROGRESS;
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_int_cubmap_to_array_impl(nir_builder *b, nir_instr *instr,
                                void *options)
 {
index 1b8d55a..94ae274 100644 (file)
@@ -49,7 +49,7 @@ lower_sample_to_txf_for_integer_tex_filter(const nir_instr *instr,
    return (tex->dest_type & (nir_type_int | nir_type_uint));
 }
 
-static nir_ssa_def *
+static nir_def *
 dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
 {
    nir_tex_instr *tql;
@@ -85,7 +85,7 @@ dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
     * check for is_array though, in the worst case we create an additional
     * move that the optimization will remove later again. */
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
-   nir_ssa_def *ssa_src = nir_trim_vector(b, tex->src[coord_index].src.ssa,
+   nir_def *ssa_src = nir_trim_vector(b, tex->src[coord_index].src.ssa,
                                           coord_components);
    nir_src src = nir_src_for_ssa(ssa_src);
    nir_src_copy(&tql->src[0].src, &src, &tql->instr);
@@ -113,19 +113,19 @@ dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
 }
 
 typedef struct {
-   nir_ssa_def *coords;
-   nir_ssa_def *use_border_color;
+   nir_def *coords;
+   nir_def *use_border_color;
 } wrap_result_t;
 
 typedef struct {
-   nir_ssa_def *lod;
-   nir_ssa_def *size;
+   nir_def *lod;
+   nir_def *size;
    int ncoord_comp;
    wrap_result_t wrap[3];
 } wrap_lower_param_t;
 
 static void
-wrap_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
+wrap_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
 {
    /* clamp(coord, 0, size - 1) */
    wrap_params->coords = nir_fmin(b, nir_fadd_imm(b, size, -1.0f),
@@ -133,7 +133,7 @@ wrap_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size
 }
 
 static void
-wrap_repeat(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
+wrap_repeat(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
 {
    /* mod(coord, size)
     * This instruction must be exact, otherwise certain sizes result in
@@ -142,8 +142,8 @@ wrap_repeat(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
    nir_instr_as_alu(wrap_params->coords->parent_instr)->exact = true;
 }
 
-static nir_ssa_def *
-mirror(nir_builder *b, nir_ssa_def *coord)
+static nir_def *
+mirror(nir_builder *b, nir_def *coord)
 {
    /* coord if >= 0, otherwise -(1 + coord) */
    return nir_bcsel(b, nir_fge_imm(b, coord, 0.0f), coord,
@@ -151,17 +151,17 @@ mirror(nir_builder *b, nir_ssa_def *coord)
 }
 
 static void
-wrap_mirror_repeat(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
+wrap_mirror_repeat(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
 {
    /* (size - 1) - mirror(mod(coord, 2 * size) - size) */
-   nir_ssa_def *coord_mod2size = nir_fmod(b, wrap_params->coords, nir_fmul_imm(b, size, 2.0f));
+   nir_def *coord_mod2size = nir_fmod(b, wrap_params->coords, nir_fmul_imm(b, size, 2.0f));
    nir_instr_as_alu(coord_mod2size->parent_instr)->exact = true;
-   nir_ssa_def *a = nir_fsub(b, coord_mod2size, size);
+   nir_def *a = nir_fsub(b, coord_mod2size, size);
    wrap_params->coords = nir_fsub(b, nir_fadd_imm(b, size, -1.0f), mirror(b, a));
 }
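
Worked through with size = 4: coord 5 gives fmod(5, 8) = 5, then 5 - 4 = 1,
mirror(1) = 1, and 3 - 1 = 2, i.e. the texel sequence 0,1,2,3,3,2,1,0
repeating; coord -1 lands on texel 0. A scalar sketch (C's fmodf keeps the
dividend's sign, so negative inputs need one correction step to match NIR's
fmod):

   #include <math.h>

   static float
   mirror_ref(float a)
   {
      return a >= 0.0f ? a : -(1.0f + a);
   }

   static float
   wrap_mirror_repeat_ref(float coord, float size)
   {
      float m = fmodf(coord, 2.0f * size);
      if (m < 0.0f)
         m += 2.0f * size;   /* NIR's fmod already returns this */
      return (size - 1.0f) - mirror_ref(m - size);
   }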
 
 static void
-wrap_mirror_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
+wrap_mirror_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
 {
    /* clamp(mirror(coord), 0, size - 1) */
    wrap_params->coords = nir_fmin(b, nir_fadd_imm(b, size, -1.0f),
@@ -169,19 +169,19 @@ wrap_mirror_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_de
 }
 
 static void
-wrap_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
+wrap_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
 {
-   nir_ssa_def *is_low = nir_flt_imm(b, wrap_params->coords, 0.0);
-   nir_ssa_def *is_high = nir_fge(b, wrap_params->coords, size);
+   nir_def *is_low = nir_flt_imm(b, wrap_params->coords, 0.0);
+   nir_def *is_high = nir_fge(b, wrap_params->coords, size);
    wrap_params->use_border_color = nir_ior(b, is_low, is_high);
 }
 
 static void
-wrap_mirror_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
+wrap_mirror_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_def *size)
 {
    /* We have to take care of the boundaries */
-   nir_ssa_def *is_low = nir_flt(b, wrap_params->coords, nir_fmul_imm(b, size, -1.0));
-   nir_ssa_def *is_high = nir_flt(b, nir_fmul_imm(b, size, 2.0), wrap_params->coords);
+   nir_def *is_low = nir_flt(b, wrap_params->coords, nir_fmul_imm(b, size, -1.0));
+   nir_def *is_high = nir_flt(b, nir_fmul_imm(b, size, 2.0), wrap_params->coords);
    wrap_params->use_border_color = nir_ior(b, is_low, is_high);
 
    /* Within the boundaries this acts like mirror_repeat */
@@ -190,8 +190,8 @@ wrap_mirror_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size)
 }
 
 static wrap_result_t
-wrap_coords(nir_builder *b, nir_ssa_def *coords, enum pipe_tex_wrap wrap,
-            nir_ssa_def *size)
+wrap_coords(nir_builder *b, nir_def *coords, enum pipe_tex_wrap wrap,
+            nir_def *size)
 {
    wrap_result_t result = {coords, nir_imm_false(b)};
 
@@ -220,7 +220,7 @@ wrap_coords(nir_builder *b, nir_ssa_def *coords, enum pipe_tex_wrap wrap,
    return result;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_bordercolor(nir_builder *b, nir_tex_instr *tex, const dxil_wrap_sampler_state *active_state,
                  const dxil_texture_swizzle_state *tex_swizzle)
 {
@@ -303,10 +303,10 @@ create_txf_from_tex(nir_builder *b, nir_tex_instr *tex)
    return txf;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_texel(nir_builder *b, nir_tex_instr *tex, wrap_lower_param_t *params)
 {
-   nir_ssa_def *texcoord = NULL;
+   nir_def *texcoord = NULL;
 
    /* Put coordinates back together */
    switch (tex->coord_components) {
@@ -335,14 +335,14 @@ load_texel(nir_builder *b, nir_tex_instr *tex, wrap_lower_param_t *params)
 typedef struct {
    const dxil_wrap_sampler_state *aws;
    float max_bias;
-   nir_ssa_def *size;
+   nir_def *size;
    int ncoord_comp;
 } lod_params;
 
-static nir_ssa_def *
+static nir_def *
 evalute_active_lod(nir_builder *b, nir_tex_instr *tex, lod_params *params)
 {
-   static nir_ssa_def *lod = NULL;
+   static nir_def *lod = NULL;
 
    /* Later we use min_lod for clamping the LOD to a legal value */
    float min_lod = MAX2(params->aws->min_lod, 0.0f);
@@ -357,12 +357,12 @@ evalute_active_lod(nir_builder *b, nir_tex_instr *tex, lod_params *params)
       int ddy_index = nir_tex_instr_src_index(tex, nir_tex_src_ddy);
       assert(ddx_index >= 0 && ddy_index >= 0);
 
-      nir_ssa_def *grad = nir_fmax(b,
+      nir_def *grad = nir_fmax(b,
                                    tex->src[ddx_index].src.ssa,
                                    tex->src[ddy_index].src.ssa);
 
-      nir_ssa_def *r = nir_fmul(b, grad, nir_i2f32(b, params->size));
-      nir_ssa_def *rho = nir_channel(b, r, 0);
+      nir_def *r = nir_fmul(b, grad, nir_i2f32(b, params->size));
+      nir_def *rho = nir_channel(b, r, 0);
       for (int i = 1; i < params->ncoord_comp; ++i)
          rho = nir_fmax(b, rho, nir_channel(b, r, i));
       lod = nir_flog2(b, rho);
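
This is the usual isotropic LOD estimate: scale each gradient component by the
texture size in texels, take the largest, and log2 the result. For a 256-texel
axis sampled with a derivative of 1/128 texel per pixel step, rho = 256/128 = 2
and lod = 1. A scalar sketch (assuming the ddx/ddy max has already been folded
into grad, as the nir_fmax above does):

   #include <math.h>

   static float
   lod_from_grad(const float *grad, const int *size, int ncoord_comp)
   {
      float rho = grad[0] * (float)size[0];
      for (int i = 1; i < ncoord_comp; i++) {
         float r = grad[i] * (float)size[i];
         if (r > rho)
            rho = r;
      }
      return log2f(rho);
   }
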
@@ -391,7 +391,7 @@ evalute_active_lod(nir_builder *b, nir_tex_instr *tex, lod_params *params)
     * in compatibility contexts and as bias_texobj in core contexts, hence the
     * implementation here is the same in both cases.
     */
-   nir_ssa_def *lod_bias = nir_imm_float(b, params->aws->lod_bias);
+   nir_def *lod_bias = nir_imm_float(b, params->aws->lod_bias);
 
    if (unlikely(tex->op == nir_texop_txb)) {
       int bias_index = nir_tex_instr_src_index(tex, nir_tex_src_bias);
@@ -418,7 +418,7 @@ evalute_active_lod(nir_builder *b, nir_tex_instr *tex, lod_params *params)
 }
 
 
-static nir_ssa_def *
+static nir_def *
 lower_sample_to_txf_for_integer_tex_impl(nir_builder *b, nir_instr *instr,
                                          void *options)
 {
@@ -443,14 +443,14 @@ lower_sample_to_txf_for_integer_tex_impl(nir_builder *b, nir_instr *instr,
    b->cursor = nir_before_instr(instr);
 
    int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
-   nir_ssa_def *old_coord = tex->src[coord_index].src.ssa;
+   nir_def *old_coord = tex->src[coord_index].src.ssa;
    params.ncoord_comp = tex->coord_components;
    if (tex->is_array)
       params.ncoord_comp -= 1;
 
    /* This helper always queries the texture size at LOD 0; DirectX doesn't
     * support querying the size at any other LOD */
-   nir_ssa_def *size0 = nir_get_texture_size(b, tex);
+   nir_def *size0 = nir_get_texture_size(b, tex);
 
    params.lod = nir_imm_int(b, 0);
 
@@ -470,16 +470,16 @@ lower_sample_to_txf_for_integer_tex_impl(nir_builder *b, nir_instr *instr,
       params.size = nir_i2f32(b, size0);
    }
 
-   nir_ssa_def *new_coord = old_coord;
+   nir_def *new_coord = old_coord;
    if (!active_wrap_state->is_nonnormalized_coords) {
       /* Evaluate the integer lookup coordinates for the requested LOD, don't touch the
        * array index */
       if (!tex->is_array) {
          new_coord = nir_fmul(b, params.size, old_coord);
       } else {
-         nir_ssa_def *array_index = nir_channel(b, old_coord, params.ncoord_comp);
+         nir_def *array_index = nir_channel(b, old_coord, params.ncoord_comp);
          int mask = (1 << params.ncoord_comp) - 1;
-         nir_ssa_def *coord = nir_fmul(b, nir_channels(b, params.size, mask),
+         nir_def *coord = nir_fmul(b, nir_channels(b, params.size, mask),
                                           nir_channels(b, old_coord, mask));
          switch (params.ncoord_comp) {
          case 1:
@@ -496,7 +496,7 @@ lower_sample_to_txf_for_integer_tex_impl(nir_builder *b, nir_instr *instr,
       }
    }
 
-   nir_ssa_def *coord_help[3];
+   nir_def *coord_help[3];
    for (int i = 0; i < params.ncoord_comp; ++i)
       coord_help[i] = nir_ffloor(b, nir_channel(b, new_coord, i));
 
@@ -507,12 +507,12 @@ lower_sample_to_txf_for_integer_tex_impl(nir_builder *b, nir_instr *instr,
    /* Correct the texture coordinates for the offsets. */
    int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
    if (offset_index >= 0) {
-      nir_ssa_def *offset = tex->src[offset_index].src.ssa;
+      nir_def *offset = tex->src[offset_index].src.ssa;
       for (int i = 0; i < params.ncoord_comp; ++i)
          coord_help[i] = nir_fadd(b, coord_help[i], nir_i2f32(b, nir_channel(b, offset, i)));
    }
 
-   nir_ssa_def *use_border_color = nir_imm_false(b);
+   nir_def *use_border_color = nir_imm_false(b);
 
    if (!active_wrap_state->skip_boundary_conditions) {
 
@@ -544,9 +544,9 @@ lower_sample_to_txf_for_integer_tex_impl(nir_builder *b, nir_instr *instr,
                                                  &states->tex_swizzles[tex->sampler_index]:
                                                  &one2one;
 
-   nir_ssa_def *border_color = load_bordercolor(b, tex, active_wrap_state, swizzle);
+   nir_def *border_color = load_bordercolor(b, tex, active_wrap_state, swizzle);
    nir_if *border_else = nir_push_else(b, border_if);
-   nir_ssa_def *sampler_color = load_texel(b, tex, &params);
+   nir_def *sampler_color = load_texel(b, tex, &params);
    nir_pop_if(b, border_else);
 
    return nir_if_phi(b, border_color, sampler_color);
index 711ad99..0e55d9b 100644 (file)
@@ -48,25 +48,25 @@ lower_vs_vertex_conversion_filter(const nir_instr *instr, const void *options)
          (get_input_target_format(var, options) != PIPE_FORMAT_NONE);
 }
 
-typedef  nir_ssa_def *
-(*shift_right_func)(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1);
+typedef  nir_def *
+(*shift_right_func)(nir_builder *build, nir_def *src0, nir_def *src1);
 
 /* decoding the signed vs unsigned scaled format is handled
  * by applying the signed or unsigned shift right function
  * accordingly */
-static nir_ssa_def *
-from_10_10_10_2_scaled(nir_builder *b, nir_ssa_def *src,
-                       nir_ssa_def *lshift, shift_right_func shr)
+static nir_def *
+from_10_10_10_2_scaled(nir_builder *b, nir_def *src,
+                       nir_def *lshift, shift_right_func shr)
 {
-   nir_ssa_def *rshift = nir_imm_ivec4(b, 22, 22, 22, 30);
+   nir_def *rshift = nir_imm_ivec4(b, 22, 22, 22, 30);
    return nir_i2f32(b, shr(b, nir_ishl(b, src, lshift), rshift));
 }
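
In scalar terms the shift pair isolates one packed field: shift it up to the
top of the 32-bit word, then back down, arithmetically for the signed variants
(sign-extending the 10- or 2-bit field) or logically for the unsigned ones.
With RGBA ordering, R occupies bits 0..9, hence lshift 22 / rshift 22, and A
occupies bits 30..31, hence lshift 0 / rshift 30. The signed case as a sketch:

   #include <stdint.h>

   /* Right-shifting a negative int32_t is arithmetic on the targets that
    * matter here, matching nir_ishr. */
   static float
   decode_signed_field(uint32_t packed, unsigned lshift, unsigned rshift)
   {
      int32_t field = (int32_t)(packed << lshift) >> rshift;
      return (float)field;   /* the nir_i2f32 step */
   }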
 
-static nir_ssa_def *
-from_10_10_10_2_snorm(nir_builder *b, nir_ssa_def *src, nir_ssa_def *lshift)
+static nir_def *
+from_10_10_10_2_snorm(nir_builder *b, nir_def *src, nir_def *lshift)
 {
-   nir_ssa_def *split = from_10_10_10_2_scaled(b, src, lshift, nir_ishr);
-   nir_ssa_def *scale_rgb = nir_imm_vec4(b,
+   nir_def *split = from_10_10_10_2_scaled(b, src, lshift, nir_ishr);
+   nir_def *scale_rgb = nir_imm_vec4(b,
                                          1.0f / 0x1ff,
                                          1.0f / 0x1ff,
                                          1.0f / 0x1ff,
@@ -74,11 +74,11 @@ from_10_10_10_2_snorm(nir_builder *b, nir_ssa_def *src, nir_ssa_def *lshift)
    return nir_fmul(b, split, scale_rgb);
 }
 
-static nir_ssa_def *
-from_10_10_10_2_unorm(nir_builder *b, nir_ssa_def *src, nir_ssa_def *lshift)
+static nir_def *
+from_10_10_10_2_unorm(nir_builder *b, nir_def *src, nir_def *lshift)
 {
-   nir_ssa_def *split = from_10_10_10_2_scaled(b, src, lshift, nir_ushr);
-   nir_ssa_def *scale_rgb = nir_imm_vec4(b,
+   nir_def *split = from_10_10_10_2_scaled(b, src, lshift, nir_ushr);
+   nir_def *scale_rgb = nir_imm_vec4(b,
                                          1.0f / 0x3ff,
                                          1.0f / 0x3ff,
                                          1.0f / 0x3ff,
@@ -86,19 +86,19 @@ from_10_10_10_2_unorm(nir_builder *b, nir_ssa_def *src, nir_ssa_def *lshift)
    return nir_fmul(b, split, scale_rgb);
 }
 
-inline static nir_ssa_def *
+inline static nir_def *
 lshift_rgba(nir_builder *b)
 {
    return nir_imm_ivec4(b, 22, 12, 2, 0);
 }
 
-inline static nir_ssa_def *
+inline static nir_def *
 lshift_bgra(nir_builder *b)
 {
    return nir_imm_ivec4(b, 2, 12, 22, 0);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_vs_vertex_conversion_impl(nir_builder *b, nir_instr *instr, void *options)
 {
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
@@ -115,7 +115,7 @@ lower_vs_vertex_conversion_impl(nir_builder *b, nir_instr *instr, void *options)
          return NULL;
       return nir_vector_insert_imm(b, &intr->dest.ssa, nir_imm_int(b, 1), 3);
    } else {
-      nir_ssa_def *src = nir_channel(b, &intr->dest.ssa, 0);
+      nir_def *src = nir_channel(b, &intr->dest.ssa, 0);
 
       switch (fmt) {
       case PIPE_FORMAT_R10G10B10A2_SNORM:
index 2cf1577..7cad89b 100644 (file)
@@ -128,7 +128,7 @@ get_cursor_for_instr_without_cf(nir_instr *instr)
 }
 
 struct tcs_patch_loop_state {
-   nir_ssa_def *deref, *count;
+   nir_def *deref, *count;
    nir_cursor begin_cursor, end_cursor, insert_cursor;
    nir_loop *loop;
 };
@@ -228,7 +228,7 @@ dxil_nir_split_tess_ctrl(nir_shader *nir, nir_function **patch_const_func)
             continue;
          nir_foreach_use_including_if_safe(src, &intr->dest.ssa) {
             b.cursor = nir_before_src(src);
-            nir_src_rewrite_ssa(src, nir_load_invocation_id(&b));
+            nir_src_rewrite(src, nir_load_invocation_id(&b));
          }
          nir_instr_remove(instr);
       }
@@ -254,7 +254,7 @@ dxil_nir_split_tess_ctrl(nir_shader *nir, nir_function **patch_const_func)
                b.cursor = state.begin_cursor = get_cursor_for_instr_without_cf(instr);
                start_tcs_loop(&b, &state, loop_var_deref);
             }
-            nir_ssa_def_rewrite_uses(&intr->dest.ssa, state.count);
+            nir_def_rewrite_uses(&intr->dest.ssa, state.count);
             break;
          }
          case nir_intrinsic_barrier:
@@ -313,7 +313,7 @@ remove_tess_level_accesses(nir_builder *b, nir_instr *instr, void *_data)
    } else {
       b->cursor = nir_after_instr(instr);
       assert(nir_dest_num_components(intr->dest) == 1);
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_ssa_undef(b, 1, intr->dest.ssa.bit_size));
+      nir_def_rewrite_uses(&intr->dest.ssa, nir_undef(b, 1, intr->dest.ssa.bit_size));
    }
    return true;
 }
index 68ccd7d..3dd00f3 100644 (file)
@@ -2120,7 +2120,7 @@ bitcast_to_float(struct ntd_context *ctx, unsigned bit_size,
 }
 
 static bool
-is_phi_src(nir_ssa_def *ssa)
+is_phi_src(nir_def *ssa)
 {
    nir_foreach_use(src, ssa)
       if (src->parent_instr->type == nir_instr_type_phi)
@@ -2129,7 +2129,7 @@ is_phi_src(nir_ssa_def *ssa)
 }
 
 static void
-store_ssa_def(struct ntd_context *ctx, nir_ssa_def *ssa, unsigned chan,
+store_ssa_def(struct ntd_context *ctx, nir_def *ssa, unsigned chan,
               const struct dxil_value *value)
 {
    assert(ssa->index < ctx->num_defs);
@@ -2188,7 +2188,7 @@ store_alu_dest(struct ntd_context *ctx, nir_alu_instr *alu, unsigned chan,
 }
 
 static const struct dxil_value *
-get_src_ssa(struct ntd_context *ctx, const nir_ssa_def *ssa, unsigned chan)
+get_src_ssa(struct ntd_context *ctx, const nir_def *ssa, unsigned chan)
 {
    assert(ssa->index < ctx->num_defs);
    assert(chan < ssa->num_components);
@@ -2298,8 +2298,8 @@ emit_shift(struct ntd_context *ctx, nir_alu_instr *alu,
                             dxil_module_get_int_const(&ctx->mod, shift_mask, op0_bit_size),
                             0);
    } else {
-      uint64_t val = nir_ssa_scalar_as_uint(
-         nir_ssa_scalar_chase_alu_src(nir_get_ssa_scalar(&alu->dest.dest.ssa, 0), 1));
+      uint64_t val = nir_scalar_as_uint(
+         nir_scalar_chase_alu_src(nir_get_ssa_scalar(&alu->dest.dest.ssa, 0), 1));
       op1 = dxil_module_get_int_const(&ctx->mod, val & shift_mask, op0_bit_size);
    }
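
The AND mirrors how the pass sanitizes shift counts: the count is reduced
modulo the operand width, so a 32-bit shift by 33 behaves as a shift by 1.
In scalar form (a sketch):

   #include <stdint.h>

   static uint32_t
   shl_masked32(uint32_t x, uint32_t count)
   {
      return x << (count & 31u);   /* shift_mask = bit_size - 1 */
   }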
 
@@ -2906,8 +2906,8 @@ emit_alu(struct ntd_context *ctx, nir_alu_instr *alu)
    case nir_op_udiv:
       if (nir_src_is_const(alu->src[1].src)) {
          /* It's illegal to emit a literal divide by 0 in DXIL */
-         nir_ssa_scalar divisor = nir_ssa_scalar_chase_alu_src(nir_get_ssa_scalar(&alu->dest.dest.ssa, 0), 1);
-         if (nir_ssa_scalar_as_int(divisor) == 0) {
+         nir_scalar divisor = nir_scalar_chase_alu_src(nir_get_ssa_scalar(&alu->dest.dest.ssa, 0), 1);
+         if (nir_scalar_as_int(divisor) == 0) {
             store_alu_dest(ctx, alu, 0, dxil_module_get_int_const(&ctx->mod, 0, nir_dest_bit_size(alu->dest.dest)));
             return true;
          }
@@ -3123,7 +3123,7 @@ static bool
 emit_load_global_invocation_id(struct ntd_context *ctx,
                                     nir_intrinsic_instr *intr)
 {
-   nir_component_mask_t comps = nir_ssa_def_components_read(&intr->dest.ssa);
+   nir_component_mask_t comps = nir_def_components_read(&intr->dest.ssa);
 
    for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
       if (comps & (1 << i)) {
@@ -3145,7 +3145,7 @@ static bool
 emit_load_local_invocation_id(struct ntd_context *ctx,
                               nir_intrinsic_instr *intr)
 {
-   nir_component_mask_t comps = nir_ssa_def_components_read(&intr->dest.ssa);
+   nir_component_mask_t comps = nir_def_components_read(&intr->dest.ssa);
 
    for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
       if (comps & (1 << i)) {
@@ -3180,7 +3180,7 @@ static bool
 emit_load_local_workgroup_id(struct ntd_context *ctx,
                               nir_intrinsic_instr *intr)
 {
-   nir_component_mask_t comps = nir_ssa_def_components_read(&intr->dest.ssa);
+   nir_component_mask_t comps = nir_def_components_read(&intr->dest.ssa);
 
    for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
       if (comps & (1 << i)) {
@@ -4947,7 +4947,7 @@ emit_intrinsic(struct ntd_context *ctx, nir_intrinsic_instr *intr)
 }
 
 static const struct dxil_type *
-dxil_type_for_const(struct ntd_context *ctx, nir_ssa_def *def)
+dxil_type_for_const(struct ntd_context *ctx, nir_def *def)
 {
    if (BITSET_TEST(ctx->int_types, def->index) ||
        !BITSET_TEST(ctx->float_types, def->index))
@@ -5634,7 +5634,7 @@ emit_tex(struct ntd_context *ctx, nir_tex_instr *instr)
 }
 
 static bool
-emit_undefined(struct ntd_context *ctx, nir_ssa_undef_instr *undef)
+emit_undefined(struct ntd_context *ctx, nir_undef_instr *undef)
 {
    for (unsigned i = 0; i < undef->def.num_components; ++i)
       store_ssa_def(ctx, &undef->def, i, dxil_module_get_int32_const(&ctx->mod, 0));
@@ -5909,7 +5909,7 @@ emit_function(struct ntd_context *ctx, nir_function *func, nir_function_impl *im
    if (!ctx->phis)
       return false;
 
-   nir_gather_ssa_types(impl, ctx->float_types, ctx->int_types);
+   nir_gather_types(impl, ctx->float_types, ctx->int_types);
 
    if (!emit_scratch(ctx, impl))
       return false;
index f6e4137..b824b87 100644 (file)
@@ -234,7 +234,7 @@ lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
    builder->cursor = nir_after_instr(instr);
    nir_address_format ubo_format = nir_address_format_32bit_index_offset;
 
-   nir_ssa_def *index = nir_vulkan_resource_index(
+   nir_def *index = nir_vulkan_resource_index(
       builder, nir_address_format_num_components(ubo_format),
       nir_address_format_bit_size(ubo_format),
       nir_imm_int(builder, 0),
@@ -242,12 +242,12 @@ lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
       .binding = conf->runtime_data_cbv.base_shader_register,
       .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
 
-   nir_ssa_def *load_desc = nir_load_vulkan_descriptor(
+   nir_def *load_desc = nir_load_vulkan_descriptor(
       builder, nir_address_format_num_components(ubo_format),
       nir_address_format_bit_size(ubo_format),
       index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
 
-   nir_ssa_def *load_data = nir_load_ubo(
+   nir_def *load_data = nir_load_ubo(
       builder, 
       nir_dest_num_components(intrin->dest),
       nir_dest_bit_size(intrin->dest),
@@ -258,7 +258,7 @@ lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
       .range_base = offset,
       .range = nir_dest_bit_size(intrin->dest) * nir_dest_num_components(intrin->dest) / 8);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, load_data);
+   nir_def_rewrite_uses(&intrin->dest.ssa, load_data);
    nir_instr_remove(instr);
    return true;
 }
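
The fetch is the usual three-step Vulkan-descriptor indirection: a
vulkan_resource_index for the (set, binding) pair, a load_vulkan_descriptor to
turn it into a buffer handle, then a load_ubo at the byte offset of the field.
Condensed as a hypothetical helper (the name, the .align choices, and the
channel-0 handle extraction are illustrative, not the pass's exact code):

   static nir_def *
   load_runtime_data(nir_builder *b, unsigned desc_set, unsigned binding,
                     unsigned offset, unsigned num_components)
   {
      nir_address_format fmt = nir_address_format_32bit_index_offset;

      nir_def *index = nir_vulkan_resource_index(
         b, nir_address_format_num_components(fmt),
         nir_address_format_bit_size(fmt), nir_imm_int(b, 0),
         .desc_set = desc_set, .binding = binding,
         .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

      nir_def *desc = nir_load_vulkan_descriptor(
         b, nir_address_format_num_components(fmt),
         nir_address_format_bit_size(fmt), index,
         .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

      return nir_load_ubo(b, num_components, 32, nir_channel(b, desc, 0),
                          nir_imm_int(b, offset),
                          .align_mul = 4, .align_offset = 0,
                          .range_base = offset, .range = num_components * 4);
   }
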
@@ -323,20 +323,20 @@ lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
    builder->cursor = nir_after_instr(instr);
    nir_address_format ubo_format = data->ubo_format;
 
-   nir_ssa_def *index = nir_vulkan_resource_index(
+   nir_def *index = nir_vulkan_resource_index(
       builder, nir_address_format_num_components(ubo_format),
       nir_address_format_bit_size(ubo_format),
       nir_imm_int(builder, 0),
       .desc_set = data->desc_set, .binding = data->binding,
       .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
 
-   nir_ssa_def *load_desc = nir_load_vulkan_descriptor(
+   nir_def *load_desc = nir_load_vulkan_descriptor(
       builder, nir_address_format_num_components(ubo_format),
       nir_address_format_bit_size(ubo_format),
       index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
 
-   nir_ssa_def *offset = nir_ssa_for_src(builder, intrin->src[0], 1);
-   nir_ssa_def *load_data = nir_load_ubo(
+   nir_def *offset = nir_ssa_for_src(builder, intrin->src[0], 1);
+   nir_def *load_data = nir_load_ubo(
       builder, 
       nir_dest_num_components(intrin->dest),
       nir_dest_bit_size(intrin->dest), 
@@ -347,7 +347,7 @@ lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
       .range_base = base,
       .range = range);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, load_data);
+   nir_def_rewrite_uses(&intrin->dest.ssa, load_data);
    nir_instr_remove(instr);
    return true;
 }
@@ -406,10 +406,10 @@ lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
 
    const struct dxil_spirv_runtime_conf *rt_conf = data->rt_conf;
 
-   nir_ssa_def *pos = nir_ssa_for_src(builder, intrin->src[1], 4);
-   nir_ssa_def *y_pos = nir_channel(builder, pos, 1);
-   nir_ssa_def *z_pos = nir_channel(builder, pos, 2);
-   nir_ssa_def *y_flip_mask = NULL, *z_flip_mask = NULL, *dyn_yz_flip_mask = NULL;
+   nir_def *pos = nir_ssa_for_src(builder, intrin->src[1], 4);
+   nir_def *y_pos = nir_channel(builder, pos, 1);
+   nir_def *z_pos = nir_channel(builder, pos, 2);
+   nir_def *y_flip_mask = NULL, *z_flip_mask = NULL, *dyn_yz_flip_mask = NULL;
 
    if (rt_conf->yz_flip.mode & DXIL_SPIRV_YZ_FLIP_CONDITIONAL) {
       // conditional YZ-flip. The flip bitmask is passed through the vertex
@@ -418,7 +418,7 @@ lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
          offsetof(struct dxil_spirv_vertex_runtime_data, yz_flip_mask);
       nir_address_format ubo_format = nir_address_format_32bit_index_offset;
 
-      nir_ssa_def *index = nir_vulkan_resource_index(
+      nir_def *index = nir_vulkan_resource_index(
          builder, nir_address_format_num_components(ubo_format),
          nir_address_format_bit_size(ubo_format),
          nir_imm_int(builder, 0),
@@ -426,7 +426,7 @@ lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
          .binding = rt_conf->runtime_data_cbv.base_shader_register,
          .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
 
-      nir_ssa_def *load_desc = nir_load_vulkan_descriptor(
+      nir_def *load_desc = nir_load_vulkan_descriptor(
          builder, nir_address_format_num_components(ubo_format),
          nir_address_format_bit_size(ubo_format),
          index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
@@ -455,14 +455,14 @@ lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
    /* TODO: Multi-viewport */
 
    if (y_flip_mask) {
-      nir_ssa_def *flip = nir_test_mask(builder, y_flip_mask, 1);
+      nir_def *flip = nir_test_mask(builder, y_flip_mask, 1);
 
      // Y-flip => pos.y = -pos.y
       y_pos = nir_bcsel(builder, flip, nir_fneg(builder, y_pos), y_pos);
    }
 
    if (z_flip_mask) {
-      nir_ssa_def *flip = nir_test_mask(builder, z_flip_mask, 1);
+      nir_def *flip = nir_test_mask(builder, z_flip_mask, 1);
 
       // Z-flip => pos.z = -pos.z + 1.0f
       z_pos = nir_bcsel(builder, flip,
@@ -470,7 +470,7 @@ lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
                         z_pos);
    }
 
-   nir_ssa_def *def = nir_vec4(builder,
+   nir_def *def = nir_vec4(builder,
                                nir_channel(builder, pos, 0),
                                y_pos,
                                z_pos,
@@ -517,7 +517,7 @@ discard_psiz_access(struct nir_builder *builder, nir_instr *instr,
    builder->cursor = nir_before_instr(instr);
 
    if (intrin->intrinsic == nir_intrinsic_load_deref)
-      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_imm_float(builder, 1.0));
+      nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_float(builder, 1.0));
 
    nir_instr_remove(instr);
    return true;
@@ -595,9 +595,9 @@ kill_undefined_varyings(struct nir_builder *b,
     * since that would remove the store instruction, and would make it tricky to satisfy
     * the DXIL requirements of writing all position components.
     */
-   nir_ssa_def *zero = nir_imm_zero(b, nir_dest_num_components(intr->dest),
+   nir_def *zero = nir_imm_zero(b, nir_dest_num_components(intr->dest),
                                        nir_dest_bit_size(intr->dest));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, zero);
+   nir_def_rewrite_uses(&intr->dest.ssa, zero);
    nir_instr_remove(instr);
    return true;
 }
@@ -708,7 +708,7 @@ write_pntc_with_pos(nir_builder *b, nir_instr *instr, void *_data)
    if (!var || var->data.location != VARYING_SLOT_POS)
       return false;
 
-   nir_ssa_def *pos = intr->src[1].ssa;
+   nir_def *pos = intr->src[1].ssa;
 
    unsigned offset =
       offsetof(struct dxil_spirv_vertex_runtime_data, viewport_width) - 4;
@@ -717,7 +717,7 @@ write_pntc_with_pos(nir_builder *b, nir_instr *instr, void *_data)
    nir_address_format ubo_format = nir_address_format_32bit_index_offset;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *index = nir_vulkan_resource_index(
+   nir_def *index = nir_vulkan_resource_index(
       b, nir_address_format_num_components(ubo_format),
       nir_address_format_bit_size(ubo_format),
       nir_imm_int(b, 0),
@@ -725,12 +725,12 @@ write_pntc_with_pos(nir_builder *b, nir_instr *instr, void *_data)
       .binding = data->conf->runtime_data_cbv.base_shader_register,
       .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
 
-   nir_ssa_def *load_desc = nir_load_vulkan_descriptor(
+   nir_def *load_desc = nir_load_vulkan_descriptor(
       b, nir_address_format_num_components(ubo_format),
       nir_address_format_bit_size(ubo_format),
       index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
 
-   nir_ssa_def *transform = nir_channels(b,
+   nir_def *transform = nir_channels(b,
                                          nir_load_ubo(b, 4, 32,
                                                       nir_channel(b, load_desc, 0),
                                                       nir_imm_int(b, offset),
@@ -738,9 +738,9 @@ write_pntc_with_pos(nir_builder *b, nir_instr *instr, void *_data)
                                                       .range_base = offset,
                                                       .range = 16),
                                          0x6);
-   nir_ssa_def *point_center_in_clip = nir_fmul(b, nir_trim_vector(b, pos, 2),
+   nir_def *point_center_in_clip = nir_fmul(b, nir_trim_vector(b, pos, 2),
                                                 nir_frcp(b, nir_channel(b, pos, 3)));
-   nir_ssa_def *point_center =
+   nir_def *point_center =
       nir_fmul(b, nir_fadd_imm(b,
                                nir_fmul(b, point_center_in_clip,
                                         nir_vec2(b, nir_imm_float(b, 0.5), nir_imm_float(b, -0.5f))),
@@ -785,12 +785,12 @@ lower_pntc_read(nir_builder *b, nir_instr *instr, void *data)
    if (!var || var->data.location != VARYING_SLOT_PNTC)
       return false;
 
-   nir_ssa_def *point_center = &intr->dest.ssa;
+   nir_def *point_center = &intr->dest.ssa;
    nir_variable *pos_var = (nir_variable *)data;
 
    b->cursor = nir_after_instr(instr);
 
-   nir_ssa_def *pos;
+   nir_def *pos;
    if (var->data.sample == pos_var->data.sample)
       pos = nir_load_var(b, pos_var);
    else if (var->data.sample)
@@ -802,10 +802,10 @@ lower_pntc_read(nir_builder *b, nir_instr *instr, void *data)
                                        &nir_build_deref_var(b, pos_var)->dest.ssa,
                                        nir_imm_zero(b, 2, 32));
 
-   nir_ssa_def *pntc = nir_fadd_imm(b,
+   nir_def *pntc = nir_fadd_imm(b,
                                     nir_fsub(b, nir_trim_vector(b, pos, 2), nir_trim_vector(b, point_center, 2)),
                                     0.5);
-   nir_ssa_def_rewrite_uses_after(point_center, pntc, pntc->parent_instr);
+   nir_def_rewrite_uses_after(point_center, pntc, pntc->parent_instr);
    return true;
 }
 
@@ -841,8 +841,8 @@ lower_view_index_to_rt_layer_instr(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *layer = intr->src[1].ssa;
-   nir_ssa_def *new_layer = nir_iadd(b, layer,
+   nir_def *layer = intr->src[1].ssa;
+   nir_def *new_layer = nir_iadd(b, layer,
                                      nir_load_view_index(b));
    nir_instr_rewrite_src_ssa(instr, &intr->src[1], new_layer);
    return true;
index cd74569..cb0d411 100644
@@ -41,17 +41,17 @@ type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
    *align = *size;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_vulkan_ssbo(nir_builder *b, unsigned buf_idx,
-                 nir_ssa_def *offset, unsigned num_comps)
+                 nir_def *offset, unsigned num_comps)
 {
-   nir_ssa_def *res_index =
+   nir_def *res_index =
       nir_vulkan_resource_index(b, 2, 32,
                                 nir_imm_int(b, 0),
                                 .desc_set = 0,
                                 .binding = buf_idx,
                                 .desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
-   nir_ssa_def *descriptor =
+   nir_def *descriptor =
       nir_load_vulkan_descriptor(b, 2, 32, res_index,
                                  .desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
    return nir_load_ssbo(b, num_comps, 32,
@@ -62,7 +62,7 @@ load_vulkan_ssbo(nir_builder *b, unsigned buf_idx,
                         .access = ACCESS_NON_WRITEABLE | ACCESS_CAN_REORDER);
 }
 
-static nir_ssa_def *
+static nir_def *
 lower_deref_to_index(nir_builder *b, nir_deref_instr *deref, bool is_sampler_handle,
                      struct dxil_spirv_nir_lower_bindless_options *options)
 {
@@ -79,11 +79,11 @@ lower_deref_to_index(nir_builder *b, nir_deref_instr *deref, bool is_sampler_han
    if (remap.descriptor_set == ~0)
       return NULL;
 
-   nir_ssa_def *index_in_ubo =
+   nir_def *index_in_ubo =
       nir_iadd_imm(b,
                    nir_build_deref_offset(b, deref, type_size_align_1),
                    remap.binding);
-   nir_ssa_def *offset = nir_imul_imm(b, index_in_ubo, descriptor_size);
+   nir_def *offset = nir_imul_imm(b, index_in_ubo, descriptor_size);
    if (is_sampler_handle)
       offset = nir_iadd_imm(b, offset, 4);
    return load_vulkan_ssbo(b,
@@ -105,12 +105,12 @@ lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *intr,
 
    options->remap_binding(&remap, options->callback_context);
    b->cursor = nir_before_instr(&intr->instr);
-   nir_ssa_def *index = intr->src[0].ssa;
-   nir_ssa_def *index_in_ubo = nir_iadd_imm(b, index, remap.binding);
-   nir_ssa_def *res_idx =
+   nir_def *index = intr->src[0].ssa;
+   nir_def *index_in_ubo = nir_iadd_imm(b, index, remap.binding);
+   nir_def *res_idx =
       load_vulkan_ssbo(b, remap.descriptor_set, nir_imul_imm(b, index_in_ubo, descriptor_size), 2);
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, res_idx);
+   nir_def_rewrite_uses(&intr->dest.ssa, res_idx);
    return true;
 }
 
@@ -126,7 +126,7 @@ lower_bindless_tex_src(nir_builder *b, nir_tex_instr *tex,
 
    b->cursor = nir_before_instr(&tex->instr);
    nir_deref_instr *deref = nir_src_as_deref(tex->src[index].src);
-   nir_ssa_def *handle = lower_deref_to_index(b, deref, is_sampler_handle, options);
+   nir_def *handle = lower_deref_to_index(b, deref, is_sampler_handle, options);
    if (!handle)
       return false;
 
@@ -148,7 +148,7 @@ lower_bindless_image_intr(nir_builder *b, nir_intrinsic_instr *intr, struct dxil
 {
    b->cursor = nir_before_instr(&intr->instr);
    nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
-   nir_ssa_def *handle = lower_deref_to_index(b, deref, false, options);
+   nir_def *handle = lower_deref_to_index(b, deref, false, options);
    if (!handle)
       return false;
 
index 69cf5f5..861d086 100644
@@ -30,7 +30,7 @@
 #include "dxil_nir.h"
 #include "vk_nir_convert_ycbcr.h"
 
-static nir_ssa_def *
+static nir_def *
 dzn_nir_create_bo_desc(nir_builder *b,
                        nir_variable_mode mode,
                        uint32_t desc_set,
@@ -64,7 +64,7 @@ dzn_nir_create_bo_desc(nir_builder *b,
       VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER :
       VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    nir_address_format addr_format = nir_address_format_32bit_index_offset;
-   nir_ssa_def *index =
+   nir_def *index =
       nir_vulkan_resource_index(b,
                                 nir_address_format_num_components(addr_format),
                                 nir_address_format_bit_size(addr_format),
@@ -73,7 +73,7 @@ dzn_nir_create_bo_desc(nir_builder *b,
                                 .binding = binding,
                                 .desc_type = desc_type);
 
-   nir_ssa_def *desc =
+   nir_def *desc =
       nir_load_vulkan_descriptor(b,
                                  nir_address_format_num_components(addr_format),
                                  nir_address_format_bit_size(addr_format),
@@ -127,11 +127,11 @@ dzn_nir_indirect_draw_shader(enum dzn_indirect_draw_type type)
                                      type_str[type]);
    b.shader->info.internal = true;
 
-   nir_ssa_def *params_desc =
+   nir_def *params_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ubo, 0, 0, "params", 0);
-   nir_ssa_def *draw_buf_desc =
+   nir_def *draw_buf_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ssbo, 0, 1, "draw_buf", ACCESS_NON_WRITEABLE);
-   nir_ssa_def *exec_buf_desc =
+   nir_def *exec_buf_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ssbo, 0, 2, "exec_buf", ACCESS_NON_READABLE);
 
    unsigned params_size;
@@ -140,24 +140,24 @@ dzn_nir_indirect_draw_shader(enum dzn_indirect_draw_type type)
    else
       params_size = sizeof(struct dzn_indirect_draw_rewrite_params);
 
-   nir_ssa_def *params =
+   nir_def *params =
       nir_load_ubo(&b, params_size / 4, 32,
                    params_desc, nir_imm_int(&b, 0),
                    .align_mul = 4, .align_offset = 0, .range_base = 0, .range = ~0);
 
-   nir_ssa_def *draw_stride = nir_channel(&b, params, 0);
-   nir_ssa_def *exec_stride =
+   nir_def *draw_stride = nir_channel(&b, params, 0);
+   nir_def *exec_stride =
       triangle_fan ?
       nir_imm_int(&b, sizeof(struct dzn_indirect_triangle_fan_draw_exec_params)) :
       nir_imm_int(&b, sizeof(struct dzn_indirect_draw_exec_params));
-   nir_ssa_def *index =
+   nir_def *index =
       nir_channel(&b, nir_load_global_invocation_id(&b, 32), 0);
 
    if (indirect_count) {
-      nir_ssa_def *count_buf_desc =
+      nir_def *count_buf_desc =
          dzn_nir_create_bo_desc(&b, nir_var_mem_ssbo, 0, 3, "count_buf", ACCESS_NON_WRITEABLE);
 
-      nir_ssa_def *draw_count =
+      nir_def *draw_count =
          nir_load_ssbo(&b, 1, 32, count_buf_desc, nir_imm_int(&b, 0), .align_mul = 4);
 
       nir_push_if(&b, nir_ieq_imm(&b, index, 0));
@@ -169,27 +169,27 @@ dzn_nir_indirect_draw_shader(enum dzn_indirect_draw_type type)
       nir_push_if(&b, nir_ult(&b, index, draw_count));
    }
 
-   nir_ssa_def *draw_offset = nir_imul(&b, draw_stride, index);
+   nir_def *draw_offset = nir_imul(&b, draw_stride, index);
 
    /* The first entry contains the indirect count */
-   nir_ssa_def *exec_offset =
+   nir_def *exec_offset =
       indirect_count ?
       nir_imul(&b, exec_stride, nir_iadd_imm(&b, index, 1)) : 
       nir_imul(&b, exec_stride, index);
 
-   nir_ssa_def *draw_info1 =
+   nir_def *draw_info1 =
       nir_load_ssbo(&b, 4, 32, draw_buf_desc, draw_offset, .align_mul = 4);
-   nir_ssa_def *draw_info2 =
+   nir_def *draw_info2 =
       indexed ?
       nir_load_ssbo(&b, 1, 32, draw_buf_desc,
                     nir_iadd_imm(&b, draw_offset, 16), .align_mul = 4) :
       nir_imm_int(&b, 0);
 
-   nir_ssa_def *first_vertex = nir_channel(&b, draw_info1, indexed ? 3 : 2);
-   nir_ssa_def *base_instance =
+   nir_def *first_vertex = nir_channel(&b, draw_info1, indexed ? 3 : 2);
+   nir_def *base_instance =
       indexed ? draw_info2 : nir_channel(&b, draw_info1, 3);
 
-   nir_ssa_def *exec_vals[8] = {
+   nir_def *exec_vals[8] = {
       first_vertex,
       base_instance,
       index,
@@ -197,7 +197,7 @@ dzn_nir_indirect_draw_shader(enum dzn_indirect_draw_type type)
 
    if (triangle_fan) {
       /* Patch {vertex,index}_count and first_index */
-      nir_ssa_def *triangle_count =
+      nir_def *triangle_count =
          nir_usub_sat(&b, nir_channel(&b, draw_info1, 0), nir_imm_int(&b, 2));
       exec_vals[3] = nir_imul_imm(&b, triangle_count, 3);
       exec_vals[4] = nir_channel(&b, draw_info1, 1);
@@ -205,20 +205,20 @@ dzn_nir_indirect_draw_shader(enum dzn_indirect_draw_type type)
       exec_vals[6] = first_vertex;
       exec_vals[7] = base_instance;
 
-      nir_ssa_def *triangle_fan_exec_buf_desc =
+      nir_def *triangle_fan_exec_buf_desc =
          dzn_nir_create_bo_desc(&b, nir_var_mem_ssbo, 0, 4,
                                 "triangle_fan_exec_buf",
                                 ACCESS_NON_READABLE);
-      nir_ssa_def *triangle_fan_index_buf_stride = nir_channel(&b, params, 1);
-      nir_ssa_def *triangle_fan_index_buf_addr_lo =
+      nir_def *triangle_fan_index_buf_stride = nir_channel(&b, params, 1);
+      nir_def *triangle_fan_index_buf_addr_lo =
          nir_iadd(&b, nir_channel(&b, params, 2),
                   nir_imul(&b, triangle_fan_index_buf_stride, index));
 
-      nir_ssa_def *triangle_fan_exec_vals[9] = { 0 };
+      nir_def *triangle_fan_exec_vals[9] = { 0 };
       uint32_t triangle_fan_exec_param_count = 0;
-      nir_ssa_def *addr_lo_overflow =
+      nir_def *addr_lo_overflow =
          nir_ult(&b, triangle_fan_index_buf_addr_lo, nir_channel(&b, params, 2));
-      nir_ssa_def *triangle_fan_index_buf_addr_hi =
+      nir_def *triangle_fan_index_buf_addr_hi =
          nir_iadd(&b, nir_channel(&b, params, 3),
                   nir_bcsel(&b, addr_lo_overflow, nir_imm_int(&b, 1), nir_imm_int(&b, 0)));
 
@@ -230,16 +230,16 @@ dzn_nir_indirect_draw_shader(enum dzn_indirect_draw_type type)
          triangle_fan_exec_vals[triangle_fan_exec_param_count++] = nir_channel(&b, draw_info1, 0);
          uint32_t index_count_offset =
             offsetof(struct dzn_indirect_triangle_fan_draw_exec_params, indexed_draw.index_count);
-         nir_ssa_def *exec_buf_start =
+         nir_def *exec_buf_start =
             nir_load_ubo(&b, 2, 32,
                          params_desc, nir_imm_int(&b, 16),
                          .align_mul = 4, .align_offset = 0, .range_base = 0, .range = ~0);
-         nir_ssa_def *exec_buf_start_lo =
+         nir_def *exec_buf_start_lo =
             nir_iadd(&b, nir_imm_int(&b, index_count_offset),
                      nir_iadd(&b, nir_channel(&b, exec_buf_start, 0),
                               nir_imul(&b, exec_stride, index)));
          addr_lo_overflow = nir_ult(&b, exec_buf_start_lo, nir_channel(&b, exec_buf_start, 0));
-         nir_ssa_def *exec_buf_start_hi =
+         nir_def *exec_buf_start_hi =
             nir_iadd(&b, nir_channel(&b, exec_buf_start, 0),
                      nir_bcsel(&b, addr_lo_overflow, nir_imm_int(&b, 1), nir_imm_int(&b, 0)));
          triangle_fan_exec_vals[triangle_fan_exec_param_count++] = exec_buf_start_lo;
@@ -258,9 +258,9 @@ dzn_nir_indirect_draw_shader(enum dzn_indirect_draw_type type)
          prim_restart ?
          sizeof(struct dzn_indirect_triangle_fan_prim_restart_rewrite_index_exec_params) :
          sizeof(struct dzn_indirect_triangle_fan_rewrite_index_exec_params);
-      nir_ssa_def *triangle_fan_exec_stride =
+      nir_def *triangle_fan_exec_stride =
          nir_imm_int(&b, rewrite_index_exec_params);
-      nir_ssa_def *triangle_fan_exec_offset =
+      nir_def *triangle_fan_exec_offset =
          nir_imul(&b, triangle_fan_exec_stride, index);
 
       for (uint32_t i = 0; i < triangle_fan_exec_param_count; i += 4) {
@@ -273,7 +273,7 @@ dzn_nir_indirect_draw_shader(enum dzn_indirect_draw_type type)
                         .write_mask = mask, .access = ACCESS_NON_READABLE, .align_mul = 4);
       }
 
-      nir_ssa_def *ibview_vals[] = {
+      nir_def *ibview_vals[] = {
          triangle_fan_index_buf_addr_lo,
          triangle_fan_index_buf_addr_hi,
          triangle_fan_index_buf_stride,
@@ -317,33 +317,33 @@ dzn_nir_triangle_fan_prim_restart_rewrite_index_shader(uint8_t old_index_size)
                                      old_index_size);
    b.shader->info.internal = true;
 
-   nir_ssa_def *params_desc =
+   nir_def *params_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ubo, 0, 0, "params", 0);
-   nir_ssa_def *new_index_buf_desc =
+   nir_def *new_index_buf_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ssbo, 0, 1,
                              "new_index_buf", ACCESS_NON_READABLE);
-   nir_ssa_def *old_index_buf_desc =
+   nir_def *old_index_buf_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ssbo, 0, 2,
                              "old_index_buf", ACCESS_NON_WRITEABLE);
-   nir_ssa_def *new_index_count_ptr_desc =
+   nir_def *new_index_count_ptr_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ssbo, 0, 3,
                              "new_index_count_ptr", ACCESS_NON_READABLE);
 
-   nir_ssa_def *params =
+   nir_def *params =
       nir_load_ubo(&b, sizeof(struct dzn_triangle_fan_prim_restart_rewrite_index_params) / 4, 32,
                    params_desc, nir_imm_int(&b, 0),
                    .align_mul = 4, .align_offset = 0, .range_base = 0, .range = ~0);
 
-   nir_ssa_def *prim_restart_val =
+   nir_def *prim_restart_val =
       nir_imm_int(&b, old_index_size == 2 ? 0xffff : 0xffffffff);
    nir_variable *old_index_ptr_var =
       nir_local_variable_create(b.impl, glsl_uint_type(), "old_index_ptr_var");
-   nir_ssa_def *old_index_ptr = nir_channel(&b, params, 0);
+   nir_def *old_index_ptr = nir_channel(&b, params, 0);
    nir_store_var(&b, old_index_ptr_var, old_index_ptr, 1);
    nir_variable *new_index_ptr_var =
       nir_local_variable_create(b.impl, glsl_uint_type(), "new_index_ptr_var");
    nir_store_var(&b, new_index_ptr_var, nir_imm_int(&b, 0), 1);
-   nir_ssa_def *old_index_count = nir_channel(&b, params, 1);
+   nir_def *old_index_count = nir_channel(&b, params, 1);
    nir_variable *index0_var =
       nir_local_variable_create(b.impl, glsl_uint_type(), "index0_var");
    nir_store_var(&b, index0_var, prim_restart_val, 1);
@@ -395,20 +395,20 @@ dzn_nir_triangle_fan_prim_restart_rewrite_index_shader(uint8_t old_index_size)
    nir_push_loop(&b);
 
    old_index_ptr = nir_load_var(&b, old_index_ptr_var);
-   nir_ssa_def *index0 = nir_load_var(&b, index0_var);
+   nir_def *index0 = nir_load_var(&b, index0_var);
 
-   nir_ssa_def *read_index_count =
+   nir_def *read_index_count =
       nir_bcsel(&b, nir_ieq(&b, index0, prim_restart_val),
                 nir_imm_int(&b, 3), nir_imm_int(&b, 2));
    nir_push_if(&b, nir_ult(&b, old_index_count, nir_iadd(&b, old_index_ptr, read_index_count)));
    nir_jump(&b, nir_jump_break);
    nir_pop_if(&b, NULL);
 
-   nir_ssa_def *old_index_offset =
+   nir_def *old_index_offset =
       nir_imul_imm(&b, old_index_ptr, old_index_size);
 
    nir_push_if(&b, nir_ieq(&b, index0, prim_restart_val));
-   nir_ssa_def *index_val =
+   nir_def *index_val =
       nir_load_ssbo(&b, 1, 32, old_index_buf_desc,
                     old_index_size == 2 ? nir_iand_imm(&b, old_index_offset, ~3ULL) : old_index_offset,
                     .align_mul = 4);
@@ -423,12 +423,12 @@ dzn_nir_triangle_fan_prim_restart_rewrite_index_shader(uint8_t old_index_size)
    nir_jump(&b, nir_jump_continue);
    nir_pop_if(&b, NULL);
 
-   nir_ssa_def *index12 =
+   nir_def *index12 =
       nir_load_ssbo(&b, 2, 32, old_index_buf_desc,
                     old_index_size == 2 ? nir_iand_imm(&b, old_index_offset, ~3ULL) : old_index_offset,
                     .align_mul = 4);
    if (old_index_size == 2) {
-      nir_ssa_def *indices[] = {
+      nir_def *indices[] = {
          nir_iand_imm(&b, nir_channel(&b, index12, 0), 0xffff),
          nir_ushr_imm(&b, nir_channel(&b, index12, 0), 16),
          nir_iand_imm(&b, nir_channel(&b, index12, 1), 0xffff),
@@ -449,10 +449,10 @@ dzn_nir_triangle_fan_prim_restart_rewrite_index_shader(uint8_t old_index_size)
    nir_store_var(&b, index0_var, prim_restart_val, 1);
    nir_jump(&b, nir_jump_continue);
    nir_push_else(&b, NULL);
-   nir_ssa_def *new_indices =
+   nir_def *new_indices =
       nir_vec3(&b, nir_channel(&b, index12, 0), nir_channel(&b, index12, 1), index0);
-   nir_ssa_def *new_index_ptr = nir_load_var(&b, new_index_ptr_var);
-   nir_ssa_def *new_index_offset = nir_imul_imm(&b, new_index_ptr, sizeof(uint32_t));
+   nir_def *new_index_ptr = nir_load_var(&b, new_index_ptr_var);
+   nir_def *new_index_offset = nir_imul_imm(&b, new_index_ptr, sizeof(uint32_t));
    nir_store_ssbo(&b, new_indices, new_index_buf_desc,
                   new_index_offset,
                   .write_mask = 7, .access = ACCESS_NON_READABLE, .align_mul = 4);
@@ -480,36 +480,36 @@ dzn_nir_triangle_fan_rewrite_index_shader(uint8_t old_index_size)
                                      old_index_size);
    b.shader->info.internal = true;
 
-   nir_ssa_def *params_desc =
+   nir_def *params_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ubo, 0, 0, "params", 0);
-   nir_ssa_def *new_index_buf_desc =
+   nir_def *new_index_buf_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ssbo, 0, 1,
                              "new_index_buf", ACCESS_NON_READABLE);
 
-   nir_ssa_def *old_index_buf_desc = NULL;
+   nir_def *old_index_buf_desc = NULL;
    if (old_index_size > 0) {
       old_index_buf_desc =
          dzn_nir_create_bo_desc(&b, nir_var_mem_ssbo, 0, 2,
                                 "old_index_buf", ACCESS_NON_WRITEABLE);
    }
 
-   nir_ssa_def *params =
+   nir_def *params =
       nir_load_ubo(&b, sizeof(struct dzn_triangle_fan_rewrite_index_params) / 4, 32,
                    params_desc, nir_imm_int(&b, 0),
                    .align_mul = 4, .align_offset = 0, .range_base = 0, .range = ~0);
 
-   nir_ssa_def *triangle = nir_channel(&b, nir_load_global_invocation_id(&b, 32), 0);
-   nir_ssa_def *new_indices;
+   nir_def *triangle = nir_channel(&b, nir_load_global_invocation_id(&b, 32), 0);
+   nir_def *new_indices;
 
    if (old_index_size > 0) {
-      nir_ssa_def *old_first_index = nir_channel(&b, params, 0);
-      nir_ssa_def *old_index0_offset =
+      nir_def *old_first_index = nir_channel(&b, params, 0);
+      nir_def *old_index0_offset =
          nir_imul_imm(&b, old_first_index, old_index_size);
-      nir_ssa_def *old_index1_offset =
+      nir_def *old_index1_offset =
          nir_imul_imm(&b, nir_iadd(&b, nir_iadd_imm(&b, triangle, 1), old_first_index),
                       old_index_size);
 
-      nir_ssa_def *old_index0 =
+      nir_def *old_index0 =
          nir_load_ssbo(&b, 1, 32, old_index_buf_desc,
                        old_index_size == 2 ? nir_iand_imm(&b, old_index0_offset, ~3ULL) : old_index0_offset,
                        .align_mul = 4);
@@ -520,12 +520,12 @@ dzn_nir_triangle_fan_rewrite_index_shader(uint8_t old_index_size)
                                nir_iand_imm(&b, old_index0, 0xffff));
       }
 
-      nir_ssa_def *old_index12 =
+      nir_def *old_index12 =
          nir_load_ssbo(&b, 2, 32, old_index_buf_desc,
                        old_index_size == 2 ? nir_iand_imm(&b, old_index1_offset, ~3ULL) : old_index1_offset,
                        .align_mul = 4);
       if (old_index_size == 2) {
-         nir_ssa_def *indices[] = {
+         nir_def *indices[] = {
             nir_iand_imm(&b, nir_channel(&b, old_index12, 0), 0xffff),
             nir_ushr_imm(&b, nir_channel(&b, old_index12, 0), 16),
             nir_iand_imm(&b, nir_channel(&b, old_index12, 1), 0xffff),
@@ -548,7 +548,7 @@ dzn_nir_triangle_fan_rewrite_index_shader(uint8_t old_index_size)
                   nir_imm_int(&b, 0));
    }
 
-   nir_ssa_def *new_index_offset =
+   nir_def *new_index_offset =
       nir_imul_imm(&b, triangle, 4 * 3);
 
    nir_store_ssbo(&b, new_indices, new_index_buf_desc,
@@ -567,7 +567,7 @@ dzn_nir_blit_vs(void)
                                      "dzn_meta_blit_vs()");
    b.shader->info.internal = true;
 
-   nir_ssa_def *params_desc =
+   nir_def *params_desc =
       dzn_nir_create_bo_desc(&b, nir_var_mem_ubo, 0, 0, "params", 0);
 
    nir_variable *out_pos =
@@ -582,8 +582,8 @@ dzn_nir_blit_vs(void)
    out_coords->data.location = VARYING_SLOT_TEX0;
    out_coords->data.driver_location = 1;
 
-   nir_ssa_def *vertex = nir_load_vertex_id(&b);
-   nir_ssa_def *coords_arr[4] = {
+   nir_def *vertex = nir_load_vertex_id(&b);
+   nir_def *coords_arr[4] = {
       nir_load_ubo(&b, 4, 32, params_desc, nir_imm_int(&b, 0),
                    .align_mul = 16, .align_offset = 0, .range_base = 0, .range = ~0),
       nir_load_ubo(&b, 4, 32, params_desc, nir_imm_int(&b, 16),
@@ -593,14 +593,14 @@ dzn_nir_blit_vs(void)
       nir_load_ubo(&b, 4, 32, params_desc, nir_imm_int(&b, 48),
                    .align_mul = 16, .align_offset = 0, .range_base = 0, .range = ~0),
    };
-   nir_ssa_def *coords =
+   nir_def *coords =
       nir_bcsel(&b, nir_ieq_imm(&b, vertex, 0), coords_arr[0],
                 nir_bcsel(&b, nir_ieq_imm(&b, vertex, 1), coords_arr[1],
                           nir_bcsel(&b, nir_ieq_imm(&b, vertex, 2), coords_arr[2], coords_arr[3])));
-   nir_ssa_def *pos =
+   nir_def *pos =
       nir_vec4(&b, nir_channel(&b, coords, 0), nir_channel(&b, coords, 1),
                nir_imm_float(&b, 0.0), nir_imm_float(&b, 1.0));
-   nir_ssa_def *z_coord =
+   nir_def *z_coord =
       nir_load_ubo(&b, 1, 32, params_desc, nir_imm_int(&b, 4 * 4 * sizeof(float)),
                    .align_mul = 64, .align_offset = 0, .range_base = 0, .range = ~0);
    coords = nir_vec3(&b, nir_channel(&b, coords, 2), nir_channel(&b, coords, 3), z_coord);
@@ -645,7 +645,7 @@ dzn_nir_blit_fs(const struct dzn_nir_blit_info *info)
                           "coord");
    coord_var->data.location = VARYING_SLOT_TEX0;
    coord_var->data.driver_location = 1;
-   nir_ssa_def *coord =
+   nir_def *coord =
       nir_trim_vector(&b, nir_load_var(&b, coord_var), coord_comps);
 
    uint32_t out_comps =
@@ -656,7 +656,7 @@ dzn_nir_blit_fs(const struct dzn_nir_blit_info *info)
                           "out");
    out->data.location = info->loc;
 
-   nir_ssa_def *res = NULL;
+   nir_def *res = NULL;
 
    if (info->resolve_mode != dzn_blit_resolve_none) {
       enum dzn_blit_resolve_mode resolve_mode = info->resolve_mode;
@@ -776,17 +776,17 @@ dzn_nir_blit_fs(const struct dzn_nir_blit_info *info)
    return b.shader;
 }
 
-static nir_ssa_def *
+static nir_def *
 cull_face(nir_builder *b, nir_variable *vertices, bool ccw)
 {
-   nir_ssa_def *v0 =
+   nir_def *v0 =
       nir_load_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, vertices), nir_imm_int(b, 0)));
-   nir_ssa_def *v1 =
+   nir_def *v1 =
       nir_load_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, vertices), nir_imm_int(b, 1)));
-   nir_ssa_def *v2 =
+   nir_def *v2 =
       nir_load_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, vertices), nir_imm_int(b, 2)));
 
-   nir_ssa_def *dir = nir_fdot(b, nir_cross4(b, nir_fsub(b, v1, v0),
+   nir_def *dir = nir_fdot(b, nir_cross4(b, nir_fsub(b, v1, v0),
                                                 nir_fsub(b, v2, v0)),
                                nir_imm_vec4(b, 0.0, 0.0, -1.0, 0.0));
    if (ccw)
@@ -810,13 +810,13 @@ copy_vars(nir_builder *b, nir_deref_instr *dst, nir_deref_instr *src)
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 load_dynamic_depth_bias(nir_builder *b, struct dzn_nir_point_gs_info *info)
 {
    nir_address_format ubo_format = nir_address_format_32bit_index_offset;
    unsigned offset = offsetof(struct dxil_spirv_vertex_runtime_data, depth_bias);
 
-   nir_ssa_def *index = nir_vulkan_resource_index(
+   nir_def *index = nir_vulkan_resource_index(
       b, nir_address_format_num_components(ubo_format),
       nir_address_format_bit_size(ubo_format),
       nir_imm_int(b, 0),
@@ -824,7 +824,7 @@ load_dynamic_depth_bias(nir_builder *b, struct dzn_nir_point_gs_info *info)
       .binding = info->runtime_data_cbv.base_shader_register,
       .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
 
-   nir_ssa_def *load_desc = nir_load_vulkan_descriptor(
+   nir_def *load_desc = nir_load_vulkan_descriptor(
       b, nir_address_format_num_components(ubo_format),
       nir_address_format_bit_size(ubo_format),
       index, .desc_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
@@ -891,7 +891,7 @@ dzn_nir_polygon_point_mode_gs(const nir_shader *previous_shader, struct dzn_nir_
    front_facing_var->data.driver_location = num_vars;
    front_facing_var->data.interpolation = INTERP_MODE_FLAT;
 
-   nir_ssa_def *depth_bias_scale = NULL;
+   nir_def *depth_bias_scale = NULL;
    if (info->depth_bias) {
       switch (info->ds_fmt) {
       case DXGI_FORMAT_D16_UNORM:
@@ -903,13 +903,13 @@ dzn_nir_polygon_point_mode_gs(const nir_shader *previous_shader, struct dzn_nir_
       case DXGI_FORMAT_D32_FLOAT:
       case DXGI_FORMAT_D32_FLOAT_S8X24_UINT: {
          nir_deref_instr *deref_pos = nir_build_deref_var(b, pos_var);
-         nir_ssa_def *max_z = NULL;
+         nir_def *max_z = NULL;
          for (uint32_t i = 0; i < 3; ++i) {
-            nir_ssa_def *pos = nir_load_deref(b, nir_build_deref_array_imm(b, deref_pos, i));
-            nir_ssa_def *z = nir_iand_imm(b, nir_channel(b, pos, 2), 0x7fffffff);
+            nir_def *pos = nir_load_deref(b, nir_build_deref_array_imm(b, deref_pos, i));
+            nir_def *z = nir_iand_imm(b, nir_channel(b, pos, 2), 0x7fffffff);
             max_z = i == 0 ? z : nir_imax(b, z, max_z);
          }
-         nir_ssa_def *exponent = nir_ishr_imm(b, nir_iand_imm(b, max_z, 0x7f800000), 23);
+         nir_def *exponent = nir_ishr_imm(b, nir_iand_imm(b, max_z, 0x7f800000), 23);
          depth_bias_scale = nir_fexp2(b, nir_i2f32(b, nir_iadd_imm(b, exponent, -23)));
          break;
       }
@@ -925,8 +925,8 @@ dzn_nir_polygon_point_mode_gs(const nir_shader *previous_shader, struct dzn_nir_
    nir_deref_instr *loop_index_deref = nir_build_deref_var(b, loop_index_var);
    nir_store_deref(b, loop_index_deref, nir_imm_int(b, 0), 1);
 
-   nir_ssa_def *cull_pass = nir_imm_true(b);
-   nir_ssa_def *front_facing;
+   nir_def *cull_pass = nir_imm_true(b);
+   nir_def *front_facing;
    assert(info->cull_mode != VK_CULL_MODE_FRONT_AND_BACK);
    if (info->cull_mode == VK_CULL_MODE_FRONT_BIT) {
       cull_pass = cull_face(b, pos_var, info->front_ccw);
@@ -946,8 +946,8 @@ dzn_nir_polygon_point_mode_gs(const nir_shader *previous_shader, struct dzn_nir_
    nir_if *cull_check = nir_push_if(b, cull_pass);
    nir_loop *loop = nir_push_loop(b);
 
-   nir_ssa_def *loop_index = nir_load_deref(b, loop_index_deref);
-   nir_ssa_def *cmp = nir_ige(b, loop_index,
+   nir_def *loop_index = nir_load_deref(b, loop_index_deref);
+   nir_def *cmp = nir_ige(b, loop_index,
                               nir_imm_int(b, 3));
    nir_if *loop_check = nir_push_if(b, cmp);
    nir_jump(b, nir_jump_break);
@@ -958,10 +958,10 @@ dzn_nir_polygon_point_mode_gs(const nir_shader *previous_shader, struct dzn_nir_
     *        EmitVertex();
     */
    for (unsigned i = 0; i < num_vars; ++i) {
-      nir_ssa_def *index = loop_index;
+      nir_def *index = loop_index;
       nir_deref_instr *in_value = nir_build_deref_array(b, nir_build_deref_var(b, in[i]), index);
       if (in[i] == pos_var && info->depth_bias) {
-         nir_ssa_def *bias_val;
+         nir_def *bias_val;
          if (info->depth_bias_dynamic) {
             bias_val = load_dynamic_depth_bias(b, info);
          } else {
@@ -969,8 +969,8 @@ dzn_nir_polygon_point_mode_gs(const nir_shader *previous_shader, struct dzn_nir_
             bias_val = nir_imm_float(b, info->constant_depth_bias);
          }
          bias_val = nir_fmul(b, bias_val, depth_bias_scale);
-         nir_ssa_def *old_val = nir_load_deref(b, in_value);
-         nir_ssa_def *new_val = nir_vector_insert_imm(b, old_val,
+         nir_def *old_val = nir_load_deref(b, in_value);
+         nir_def *new_val = nir_vector_insert_imm(b, old_val,
                                                       nir_fadd(b, nir_channel(b, old_val, 2), bias_val),
                                                       2);
          nir_store_var(b, out[i], new_val, 0xf);
index 2e442b7..29f13f3 100644
@@ -84,11 +84,11 @@ private:
    LValues& convert(nir_dest *);
    SVSemantic convert(nir_intrinsic_op);
    Value* convert(nir_load_const_instr*, uint8_t);
-   LValues& convert(nir_ssa_def *);
+   LValues& convert(nir_def *);
 
    Value* getSrc(nir_alu_src *, uint8_t component = 0);
    Value* getSrc(nir_src *, uint8_t, bool indirect = false);
-   Value* getSrc(nir_ssa_def *, uint8_t);
+   Value* getSrc(nir_def *, uint8_t);
 
    // returned value is the constant part of the given source (either the
    // nir_src or the selected source component of an intrinsic). Even though
@@ -156,7 +156,7 @@ private:
    bool visit(nir_jump_instr *);
    bool visit(nir_load_const_instr*);
    bool visit(nir_loop *);
-   bool visit(nir_ssa_undef_instr *);
+   bool visit(nir_undef_instr *);
    bool visit(nir_tex_instr *);
 
    static unsigned lowerBitSizeCB(const nir_instr *, void *);
@@ -694,7 +694,7 @@ Converter::convert(nir_dest *dest)
 }
 
 Converter::LValues&
-Converter::convert(nir_ssa_def *def)
+Converter::convert(nir_def *def)
 {
    NirDefMap::iterator it = ssaDefs.find(def->index);
    if (it != ssaDefs.end())
@@ -719,7 +719,7 @@ Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
 }
 
 Value*
-Converter::getSrc(nir_ssa_def *src, uint8_t idx)
+Converter::getSrc(nir_def *src, uint8_t idx)
 {
    ImmediateMap::iterator iit = immediates.find(src->index);
    if (iit != immediates.end())
@@ -2887,7 +2887,7 @@ Converter::visit(nir_alu_instr *insn)
 #undef DEFAULT_CHECKS
 
 bool
-Converter::visit(nir_ssa_undef_instr *insn)
+Converter::visit(nir_undef_instr *insn)
 {
    LValues &newDefs = convert(&insn->def);
    for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
index 7ba83f2..586e395 100644
@@ -14,7 +14,7 @@ struct lower_descriptors_ctx {
    nir_address_format ssbo_addr_format;
 };
 
-static nir_ssa_def *
+static nir_def *
 load_descriptor_set_addr(nir_builder *b, uint32_t set,
                          UNUSED const struct lower_descriptors_ctx *ctx)
 {
@@ -40,9 +40,9 @@ get_binding_layout(uint32_t set, uint32_t binding,
    return &set_layout->binding[binding];
 }
 
-static nir_ssa_def *
+static nir_def *
 load_descriptor(nir_builder *b, unsigned num_components, unsigned bit_size,
-                uint32_t set, uint32_t binding, nir_ssa_def *index,
+                uint32_t set, uint32_t binding, nir_def *index,
                 unsigned offset_B, const struct lower_descriptors_ctx *ctx)
 {
    const struct nvk_descriptor_set_binding_layout *binding_layout =
@@ -62,7 +62,7 @@ load_descriptor(nir_builder *b, unsigned num_components, unsigned bit_size,
                            dynamic_buffer_start +
                            binding_layout->dynamic_buffer_index);
 
-      nir_ssa_def *root_desc_offset =
+      nir_def *root_desc_offset =
          nir_iadd_imm(b, nir_imul_imm(b, index, sizeof(struct nvk_buffer_address)),
                       nvk_root_descriptor_offset(dynamic_buffers));
 
@@ -72,7 +72,7 @@ load_descriptor(nir_builder *b, unsigned num_components, unsigned bit_size,
    }
 
    case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK: {
-      nir_ssa_def *base_addr =
+      nir_def *base_addr =
          nir_iadd_imm(b, load_descriptor_set_addr(b, set, ctx),
                           binding_layout->offset);
 
@@ -89,14 +89,14 @@ load_descriptor(nir_builder *b, unsigned num_components, unsigned bit_size,
 
    default: {
       assert(binding_layout->stride > 0);
-      nir_ssa_def *desc_ubo_offset =
+      nir_def *desc_ubo_offset =
          nir_iadd_imm(b, nir_imul_imm(b, index, binding_layout->stride),
                          binding_layout->offset + offset_B);
 
       unsigned desc_align = (1 << (ffs(binding_layout->stride) - 1));
       desc_align = MIN2(desc_align, 16);
 
-      nir_ssa_def *set_addr = load_descriptor_set_addr(b, set, ctx);
+      nir_def *set_addr = load_descriptor_set_addr(b, set, ctx);
       return nir_load_global_constant_offset(b, num_components, bit_size,
                                              set_addr, desc_ubo_offset,
                                              .align_mul = desc_align,
@@ -117,11 +117,11 @@ is_idx_intrin(nir_intrinsic_instr *intrin)
    return intrin->intrinsic == nir_intrinsic_vulkan_resource_index;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_descriptor_for_idx_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
                                const struct lower_descriptors_ctx *ctx)
 {
-   nir_ssa_def *index = nir_imm_int(b, 0);
+   nir_def *index = nir_imm_int(b, 0);
 
    while (intrin->intrinsic == nir_intrinsic_vulkan_resource_reindex) {
       index = nir_iadd(b, index, nir_ssa_for_src(b, intrin->src[1], 1));
@@ -150,9 +150,9 @@ try_lower_load_vulkan_descriptor(nir_builder *b, nir_intrinsic_instr *intrin,
       return false;
    }
 
-   nir_ssa_def *desc = load_descriptor_for_idx_intrin(b, idx_intrin, ctx);
+   nir_def *desc = load_descriptor_for_idx_intrin(b, idx_intrin, ctx);
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
+   nir_def_rewrite_uses(&intrin->dest.ssa, desc);
 
    return true;
 }
@@ -166,14 +166,14 @@ lower_num_workgroups(nir_builder *b, nir_intrinsic_instr *load,
 
    b->cursor = nir_instr_remove(&load->instr);
 
-   nir_ssa_def *val = nir_load_ubo(b, 3, 32,
+   nir_def *val = nir_load_ubo(b, 3, 32,
                                    nir_imm_int(b, 0), /* Root table */
                                    nir_imm_int(b, root_table_offset),
                                    .align_mul = 4,
                                    .align_offset = 0,
                                    .range = root_table_offset + 3 * 4);
 
-   nir_ssa_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->dest.ssa, val);
 
    return true;
 }
@@ -187,14 +187,14 @@ lower_load_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *load,
 
    b->cursor = nir_instr_remove(&load->instr);
 
-   nir_ssa_def *val = nir_load_ubo(b, 3, 32,
+   nir_def *val = nir_load_ubo(b, 3, 32,
                                    nir_imm_int(b, 0),
                                    nir_imm_int(b, root_table_offset),
                                    .align_mul = 4,
                                    .align_offset = 0,
                                    .range = root_table_offset + 3 * 4);
 
-   nir_ssa_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->dest.ssa, val);
 
    return true;
 }
@@ -209,10 +209,10 @@ lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *load,
 
    b->cursor = nir_before_instr(&load->instr);
 
-   nir_ssa_def *offset = nir_iadd_imm(b, load->src[0].ssa,
+   nir_def *offset = nir_iadd_imm(b, load->src[0].ssa,
                                          push_region_offset + base);
 
-   nir_ssa_def *val =
+   nir_def *val =
       nir_load_ubo(b, load->dest.ssa.num_components, load->dest.ssa.bit_size,
                    nir_imm_int(b, 0), offset,
                    .align_mul = load->dest.ssa.bit_size / 8,
@@ -220,7 +220,7 @@ lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *load,
                    .range = push_region_offset + base +
                             nir_intrinsic_range(load));
 
-   nir_ssa_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->dest.ssa, val);
 
    return true;
 }
@@ -234,14 +234,14 @@ lower_load_view_index(nir_builder *b, nir_intrinsic_instr *load,
 
    b->cursor = nir_instr_remove(&load->instr);
 
-   nir_ssa_def *val = nir_load_ubo(b, 1, 32,
+   nir_def *val = nir_load_ubo(b, 1, 32,
                                    nir_imm_int(b, 0),
                                    nir_imm_int(b, root_table_offset),
                                    .align_mul = 4,
                                    .align_offset = 0,
                                    .range = root_table_offset + 4);
 
-   nir_ssa_def_rewrite_uses(&load->dest.ssa, val);
+   nir_def_rewrite_uses(&load->dest.ssa, val);
 
    return true;
 }
@@ -249,7 +249,7 @@ lower_load_view_index(nir_builder *b, nir_intrinsic_instr *load,
 static void
 get_resource_deref_binding(nir_builder *b, nir_deref_instr *deref,
                            uint32_t *set, uint32_t *binding,
-                           nir_ssa_def **index)
+                           nir_def **index)
 {
    if (deref->deref_type == nir_deref_type_array) {
       *index = deref->arr.index.ssa;
@@ -265,14 +265,14 @@ get_resource_deref_binding(nir_builder *b, nir_deref_instr *deref,
    *binding = var->data.binding;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_resource_deref_desc(nir_builder *b, 
                          unsigned num_components, unsigned bit_size,
                          nir_deref_instr *deref, unsigned offset_B,
                          const struct lower_descriptors_ctx *ctx)
 {
    uint32_t set, binding;
-   nir_ssa_def *index;
+   nir_def *index;
    get_resource_deref_binding(b, deref, &set, &binding, &index);
    return load_descriptor(b, num_components, bit_size,
                           set, binding, index, offset_B, ctx);
@@ -284,7 +284,7 @@ lower_image_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
 {
    b->cursor = nir_before_instr(&intrin->instr);
    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
-   nir_ssa_def *desc = load_resource_deref_desc(b, 1, 32, deref, 0, ctx);
+   nir_def *desc = load_resource_deref_desc(b, 1, 32, deref, 0, ctx);
    nir_rewrite_image_intrinsic(intrin, desc, true);
 
    /* We treat 3D images as 2D arrays */
@@ -360,24 +360,24 @@ lower_tex(nir_builder *b, nir_tex_instr *tex,
                               nir_src_as_deref(tex->src[sampler_src_idx].src);
    assert(texture);
 
-   nir_ssa_def *plane_ssa = nir_steal_tex_src(tex, nir_tex_src_plane);
+   nir_def *plane_ssa = nir_steal_tex_src(tex, nir_tex_src_plane);
    const uint32_t plane =
       plane_ssa ? nir_src_as_uint(nir_src_for_ssa(plane_ssa)) : 0;
    const uint64_t plane_offset_B = plane * sizeof(struct nvk_image_descriptor);
 
-   nir_ssa_def *combined_handle;
+   nir_def *combined_handle;
    if (texture == sampler) {
       combined_handle = load_resource_deref_desc(b, 1, 32, texture, plane_offset_B, ctx);
    } else {
-      nir_ssa_def *texture_desc =
+      nir_def *texture_desc =
          load_resource_deref_desc(b, 1, 32, texture, plane_offset_B, ctx);
       combined_handle = nir_iand_imm(b, texture_desc,
                                      NVK_IMAGE_DESCRIPTOR_IMAGE_INDEX_MASK);
 
       if (sampler != NULL) {
-         nir_ssa_def *sampler_desc =
+         nir_def *sampler_desc =
             load_resource_deref_desc(b, 1, 32, sampler, plane_offset_B, ctx);
-         nir_ssa_def *sampler_index =
+         nir_def *sampler_index =
             nir_iand_imm(b, sampler_desc,
                          NVK_IMAGE_DESCRIPTOR_SAMPLER_INDEX_MASK);
          combined_handle = nir_ior(b, combined_handle, sampler_index);
@@ -437,17 +437,17 @@ lower_ssbo_resource_index(nir_builder *b, nir_intrinsic_instr *intrin,
 
    uint32_t set = nir_intrinsic_desc_set(intrin);
    uint32_t binding = nir_intrinsic_binding(intrin);
-   nir_ssa_def *index = intrin->src[0].ssa;
+   nir_def *index = intrin->src[0].ssa;
 
    const struct nvk_descriptor_set_binding_layout *binding_layout =
       get_binding_layout(set, binding, ctx);
 
-   nir_ssa_def *binding_addr;
+   nir_def *binding_addr;
    uint8_t binding_stride;
    switch (binding_layout->type) {
    case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
-      nir_ssa_def *set_addr = load_descriptor_set_addr(b, set, ctx);
+      nir_def *set_addr = load_descriptor_set_addr(b, set, ctx);
       binding_addr = nir_iadd_imm(b, set_addr, binding_layout->offset);
       binding_stride = binding_layout->stride;
       break;
@@ -457,7 +457,7 @@ lower_ssbo_resource_index(nir_builder *b, nir_intrinsic_instr *intrin,
       const uint32_t root_desc_addr_offset =
          nvk_root_descriptor_offset(root_desc_addr);
 
-      nir_ssa_def *root_desc_addr =
+      nir_def *root_desc_addr =
          nir_load_ubo(b, 1, 64, nir_imm_int(b, 0),
                       nir_imm_int(b, root_desc_addr_offset),
                       .align_mul = 8, .align_offset = 0, .range = ~0);
@@ -483,9 +483,9 @@ lower_ssbo_resource_index(nir_builder *b, nir_intrinsic_instr *intrin,
    binding_addr = nir_ior_imm(b, binding_addr, (uint64_t)binding_stride << 56);
 
    const uint32_t binding_size = binding_layout->array_size * binding_stride;
-   nir_ssa_def *offset_in_binding = nir_imul_imm(b, index, binding_stride);
+   nir_def *offset_in_binding = nir_imul_imm(b, index, binding_stride);
 
-   nir_ssa_def *addr;
+   nir_def *addr;
    switch (ctx->ssbo_addr_format) {
    case nir_address_format_64bit_global:
       addr = nir_iadd(b, binding_addr, nir_u2u64(b, offset_in_binding));
@@ -503,7 +503,7 @@ lower_ssbo_resource_index(nir_builder *b, nir_intrinsic_instr *intrin,
       unreachable("Unknown address mode");
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, addr);
+   nir_def_rewrite_uses(&intrin->dest.ssa, addr);
 
    return true;
 }
@@ -519,10 +519,10 @@ lower_ssbo_resource_reindex(nir_builder *b, nir_intrinsic_instr *intrin,
 
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def *addr = intrin->src[0].ssa;
-   nir_ssa_def *index = intrin->src[1].ssa;
+   nir_def *addr = intrin->src[0].ssa;
+   nir_def *index = intrin->src[1].ssa;
 
-   nir_ssa_def *addr_high32;
+   nir_def *addr_high32;
    switch (ctx->ssbo_addr_format) {
    case nir_address_format_64bit_global:
       addr_high32 = nir_unpack_64_2x32_split_y(b, addr);
@@ -537,12 +537,12 @@ lower_ssbo_resource_reindex(nir_builder *b, nir_intrinsic_instr *intrin,
       unreachable("Unknown address mode");
    }
 
-   nir_ssa_def *stride = nir_ushr_imm(b, addr_high32, 24);
-   nir_ssa_def *offset = nir_imul(b, index, stride);
+   nir_def *stride = nir_ushr_imm(b, addr_high32, 24);
+   nir_def *offset = nir_imul(b, index, stride);
 
    addr = nir_build_addr_iadd(b, addr, ctx->ssbo_addr_format,
                               nir_var_mem_ssbo, offset);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, addr);
+   nir_def_rewrite_uses(&intrin->dest.ssa, addr);
 
    return true;
 }
@@ -558,9 +558,9 @@ lower_load_ssbo_descriptor(nir_builder *b, nir_intrinsic_instr *intrin,
 
    b->cursor = nir_instr_remove(&intrin->instr);
 
-   nir_ssa_def *addr = intrin->src[0].ssa;
+   nir_def *addr = intrin->src[0].ssa;
 
-   nir_ssa_def *desc;
+   nir_def *desc;
    switch (ctx->ssbo_addr_format) {
    case nir_address_format_64bit_global:
       /* Mask off the binding stride */
@@ -571,8 +571,8 @@ lower_load_ssbo_descriptor(nir_builder *b, nir_intrinsic_instr *intrin,
       break;
 
    case nir_address_format_64bit_global_32bit_offset: {
-      nir_ssa_def *base = nir_pack_64_2x32(b, nir_trim_vector(b, addr, 2));
-      nir_ssa_def *offset = nir_channel(b, addr, 3);
+      nir_def *base = nir_pack_64_2x32(b, nir_trim_vector(b, addr, 2));
+      nir_def *offset = nir_channel(b, addr, 3);
       /* Mask off the binding stride */
       base = nir_iand_imm(b, base, BITFIELD64_MASK(56));
       desc = nir_load_global_constant_offset(b, 4, 32, base, offset,
@@ -582,9 +582,9 @@ lower_load_ssbo_descriptor(nir_builder *b, nir_intrinsic_instr *intrin,
    }
 
    case nir_address_format_64bit_bounded_global: {
-      nir_ssa_def *base = nir_pack_64_2x32(b, nir_trim_vector(b, addr, 2));
-      nir_ssa_def *size = nir_channel(b, addr, 2);
-      nir_ssa_def *offset = nir_channel(b, addr, 3);
+      nir_def *base = nir_pack_64_2x32(b, nir_trim_vector(b, addr, 2));
+      nir_def *size = nir_channel(b, addr, 2);
+      nir_def *offset = nir_channel(b, addr, 3);
       /* Mask off the binding stride */
       base = nir_iand_imm(b, base, BITFIELD64_MASK(56));
       desc = nir_load_global_constant_bounded(b, 4, 32, base, offset, size,
@@ -597,7 +597,7 @@ lower_load_ssbo_descriptor(nir_builder *b, nir_intrinsic_instr *intrin,
       unreachable("Unknown address mode");
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
+   nir_def_rewrite_uses(&intrin->dest.ssa, desc);
 
    return true;
 }
index ff5e34e..5674e8f 100644
@@ -111,11 +111,11 @@ nvk_query_available_addr(struct nvk_query_pool *pool, uint32_t query)
    return pool->bo->offset + query * sizeof(uint32_t);
 }
 
-static nir_ssa_def *
-nvk_nir_available_addr(nir_builder *b, nir_ssa_def *pool_addr,
-                       nir_ssa_def *query)
+static nir_def *
+nvk_nir_available_addr(nir_builder *b, nir_def *pool_addr,
+                       nir_def *query)
 {
-   nir_ssa_def *offset = nir_imul_imm(b, query, sizeof(uint32_t));
+   nir_def *offset = nir_imul_imm(b, query, sizeof(uint32_t));
    return nir_iadd(b, pool_addr, nir_u2u64(b, offset));
 }
 
@@ -139,12 +139,12 @@ nvk_query_report_addr(struct nvk_query_pool *pool, uint32_t query)
    return pool->bo->offset + nvk_query_offset(pool, query);
 }
 
-static nir_ssa_def *
-nvk_nir_query_report_addr(nir_builder *b, nir_ssa_def *pool_addr,
-                          nir_ssa_def *query_start, nir_ssa_def *query_stride,
-                          nir_ssa_def *query)
+static nir_def *
+nvk_nir_query_report_addr(nir_builder *b, nir_def *pool_addr,
+                          nir_def *query_start, nir_def *query_stride,
+                          nir_def *query)
 {
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_iadd(b, query_start, nir_umul_2x32_64(b, query, query_stride));
    return nir_iadd(b, pool_addr, offset);
 }
@@ -681,7 +681,7 @@ struct nvk_copy_query_push {
    uint32_t flags;
 };
 
-static nir_ssa_def *
+static nir_def *
 load_struct_var(nir_builder *b, nir_variable *var, uint32_t field)
 {
    nir_deref_instr *deref =
@@ -690,79 +690,79 @@ load_struct_var(nir_builder *b, nir_variable *var, uint32_t field)
 }
 
 static void
-nir_write_query_result(nir_builder *b, nir_ssa_def *dst_addr,
-                       nir_ssa_def *idx, nir_ssa_def *flags,
-                       nir_ssa_def *result)
+nir_write_query_result(nir_builder *b, nir_def *dst_addr,
+                       nir_def *idx, nir_def *flags,
+                       nir_def *result)
 {
    assert(result->num_components == 1);
    assert(result->bit_size == 64);
 
    nir_push_if(b, nir_test_mask(b, flags, VK_QUERY_RESULT_64_BIT));
    {
-      nir_ssa_def *offset = nir_i2i64(b, nir_imul_imm(b, idx, 8));
+      nir_def *offset = nir_i2i64(b, nir_imul_imm(b, idx, 8));
       nir_store_global(b, nir_iadd(b, dst_addr, offset), 8, result, 0x1);
    }
    nir_push_else(b, NULL);
    {
-      nir_ssa_def *result32 = nir_u2u32(b, result);
-      nir_ssa_def *offset = nir_i2i64(b, nir_imul_imm(b, idx, 4));
+      nir_def *result32 = nir_u2u32(b, result);
+      nir_def *offset = nir_i2i64(b, nir_imul_imm(b, idx, 4));
       nir_store_global(b, nir_iadd(b, dst_addr, offset), 4, result32, 0x1);
    }
    nir_pop_if(b, NULL);
 }
 
 static void
-nir_get_query_delta(nir_builder *b, nir_ssa_def *dst_addr,
-                    nir_ssa_def *report_addr, nir_ssa_def *idx,
-                    nir_ssa_def *flags)
+nir_get_query_delta(nir_builder *b, nir_def *dst_addr,
+                    nir_def *report_addr, nir_def *idx,
+                    nir_def *flags)
 {
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_imul_imm(b, idx, 2 * sizeof(struct nvk_query_report));
-   nir_ssa_def *begin_addr =
+   nir_def *begin_addr =
       nir_iadd(b, report_addr, nir_i2i64(b, offset));
-   nir_ssa_def *end_addr =
+   nir_def *end_addr =
       nir_iadd_imm(b, begin_addr, sizeof(struct nvk_query_report));
 
    /* nvk_query_report::timestamp is the first uint64_t */
-   nir_ssa_def *begin = nir_load_global(b, begin_addr, 16, 1, 64);
-   nir_ssa_def *end = nir_load_global(b, end_addr, 16, 1, 64);
+   nir_def *begin = nir_load_global(b, begin_addr, 16, 1, 64);
+   nir_def *end = nir_load_global(b, end_addr, 16, 1, 64);
 
-   nir_ssa_def *delta = nir_isub(b, end, begin);
+   nir_def *delta = nir_isub(b, end, begin);
 
    nir_write_query_result(b, dst_addr, idx, flags, delta);
 }
 
 static void
-nvk_nir_copy_query(nir_builder *b, nir_variable *push, nir_ssa_def *i)
+nvk_nir_copy_query(nir_builder *b, nir_variable *push, nir_def *i)
 {
-   nir_ssa_def *pool_addr = load_struct_var(b, push, 0);
-   nir_ssa_def *query_start = nir_u2u64(b, load_struct_var(b, push, 1));
-   nir_ssa_def *query_stride = load_struct_var(b, push, 2);
-   nir_ssa_def *first_query = load_struct_var(b, push, 3);
-   nir_ssa_def *dst_addr = load_struct_var(b, push, 5);
-   nir_ssa_def *dst_stride = load_struct_var(b, push, 6);
-   nir_ssa_def *flags = load_struct_var(b, push, 7);
-
-   nir_ssa_def *query = nir_iadd(b, first_query, i);
-
-   nir_ssa_def *avail_addr = nvk_nir_available_addr(b, pool_addr, query);
-   nir_ssa_def *available =
+   nir_def *pool_addr = load_struct_var(b, push, 0);
+   nir_def *query_start = nir_u2u64(b, load_struct_var(b, push, 1));
+   nir_def *query_stride = load_struct_var(b, push, 2);
+   nir_def *first_query = load_struct_var(b, push, 3);
+   nir_def *dst_addr = load_struct_var(b, push, 5);
+   nir_def *dst_stride = load_struct_var(b, push, 6);
+   nir_def *flags = load_struct_var(b, push, 7);
+
+   nir_def *query = nir_iadd(b, first_query, i);
+
+   nir_def *avail_addr = nvk_nir_available_addr(b, pool_addr, query);
+   nir_def *available =
       nir_i2b(b, nir_load_global(b, avail_addr, 4, 1, 32));
 
-   nir_ssa_def *partial = nir_test_mask(b, flags, VK_QUERY_RESULT_PARTIAL_BIT);
-   nir_ssa_def *write_results = nir_ior(b, available, partial);
+   nir_def *partial = nir_test_mask(b, flags, VK_QUERY_RESULT_PARTIAL_BIT);
+   nir_def *write_results = nir_ior(b, available, partial);
 
-   nir_ssa_def *report_addr =
+   nir_def *report_addr =
       nvk_nir_query_report_addr(b, pool_addr, query_start, query_stride,
                                 query);
-   nir_ssa_def *dst_offset = nir_imul(b, nir_u2u64(b, i), dst_stride);
+   nir_def *dst_offset = nir_imul(b, nir_u2u64(b, i), dst_stride);
 
   /* Timestamp queries are the only ones that use a single report */
-   nir_ssa_def *is_timestamp =
+   nir_def *is_timestamp =
       nir_ieq_imm(b, query_stride, sizeof(struct nvk_query_report));
 
-   nir_ssa_def *one = nir_imm_int(b, 1);
-   nir_ssa_def *num_reports;
+   nir_def *one = nir_imm_int(b, 1);
+   nir_def *num_reports;
    nir_push_if(b, is_timestamp);
    {
       nir_push_if(b, write_results);
@@ -770,7 +770,7 @@ nvk_nir_copy_query(nir_builder *b, nir_variable *push, nir_ssa_def *i)
          /* This is the timestamp case.  We add 8 because we're loading
           * nvk_query_report::timestamp.
           */
-         nir_ssa_def *timestamp =
+         nir_def *timestamp =
             nir_load_global(b, nir_iadd_imm(b, report_addr, 8), 8, 1, 64);
 
          nir_write_query_result(b, nir_iadd(b, dst_addr, dst_offset),
@@ -847,7 +847,7 @@ build_copy_queries_shader(void)
    nir_variable *push = nir_variable_create(b->shader, nir_var_mem_push_const,
                                             push_iface_type, "push");
 
-   nir_ssa_def *query_count = load_struct_var(b, push, 4);
+   nir_def *query_count = load_struct_var(b, push, 4);
 
    nir_variable *i = nir_local_variable_create(b->impl, glsl_uint_type(), "i");
    nir_store_var(b, i, nir_imm_int(b, 0), 0x1);
index f53d243..da2d7d0 100644 (file)
@@ -113,9 +113,9 @@ lower_image_size_to_txs(nir_builder *b, nir_instr *instr, UNUSED void *_data)
    b->cursor = nir_instr_remove(&intrin->instr);
 
    nir_deref_instr *img = nir_src_as_deref(intrin->src[0]);
-   nir_ssa_def *lod = nir_tex_type_has_lod(img->type) ?
+   nir_def *lod = nir_tex_type_has_lod(img->type) ?
                       intrin->src[1].ssa : NULL;
-   nir_ssa_def *size = nir_txs_deref(b, img, lod);
+   nir_def *size = nir_txs_deref(b, img, lod);
 
    if (glsl_get_sampler_dim(img->type) == GLSL_SAMPLER_DIM_CUBE) {
       /* Cube image descriptors are set up as simple arrays but SPIR-V wants
@@ -132,7 +132,7 @@ lower_image_size_to_txs(nir_builder *b, nir_instr *instr, UNUSED void *_data)
       }
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, size);
+   nir_def_rewrite_uses(&intrin->dest.ssa, size);
 
    return true;
 }
@@ -151,12 +151,12 @@ lower_load_global_constant_offset_instr(nir_builder *b, nir_instr *instr,
 
    b->cursor = nir_before_instr(&intrin->instr);
 
-   nir_ssa_def *base_addr = intrin->src[0].ssa;
-   nir_ssa_def *offset = intrin->src[1].ssa;
+   nir_def *base_addr = intrin->src[0].ssa;
+   nir_def *offset = intrin->src[1].ssa;
 
-   nir_ssa_def *zero = NULL;
+   nir_def *zero = NULL;
    if (intrin->intrinsic == nir_intrinsic_load_global_constant_bounded) {
-      nir_ssa_def *bound = intrin->src[2].ssa;
+      nir_def *bound = intrin->src[2].ssa;
 
       unsigned bit_size = intrin->dest.ssa.bit_size;
       assert(bit_size >= 8 && bit_size % 8 == 0);
@@ -166,15 +166,15 @@ lower_load_global_constant_offset_instr(nir_builder *b, nir_instr *instr,
 
       unsigned load_size = byte_size * intrin->num_components;
 
-      nir_ssa_def *sat_offset =
+      nir_def *sat_offset =
          nir_umin(b, offset, nir_imm_int(b, UINT32_MAX - (load_size - 1)));
-      nir_ssa_def *in_bounds =
+      nir_def *in_bounds =
          nir_ilt(b, nir_iadd_imm(b, sat_offset, load_size - 1), bound);
 
       nir_push_if(b, in_bounds);
    }
 
-   nir_ssa_def *val =
+   nir_def *val =
       nir_build_load_global(b, intrin->dest.ssa.num_components,
                             intrin->dest.ssa.bit_size,
                             nir_iadd(b, base_addr, nir_u2u64(b, offset)),
@@ -187,7 +187,7 @@ lower_load_global_constant_offset_instr(nir_builder *b, nir_instr *instr,
       val = nir_if_phi(b, val, zero);
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, val);
+   nir_def_rewrite_uses(&intrin->dest.ssa, val);
 
    return true;
 }
@@ -221,7 +221,7 @@ lower_fragcoord_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
    b->cursor = nir_before_instr(&intrin->instr);
 
-   nir_ssa_def *val;
+   nir_def *val;
    switch (intrin->intrinsic) {
    case nir_intrinsic_load_frag_coord:
       var = find_or_create_input(b, glsl_vec4_type(),
@@ -251,7 +251,7 @@ lower_fragcoord_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
       return false;
    }
 
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, val);
+   nir_def_rewrite_uses(&intrin->dest.ssa, val);
 
    return true;
 }
@@ -270,8 +270,8 @@ lower_system_value_first_vertex(nir_builder *b, nir_instr *instr, UNUSED void *_
       return false;
 
    b->cursor = nir_before_instr(&intrin->instr);
-   nir_ssa_def *base_vertex = nir_load_base_vertex(b);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, base_vertex);
+   nir_def *base_vertex = nir_load_base_vertex(b);
+   nir_def_rewrite_uses(&intrin->dest.ssa, base_vertex);
 
    return true;
 }
index 3c50fb2..4cee4f6 100644 (file)
@@ -79,15 +79,15 @@ bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
    /* This indirect does need it */
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *lane = nir_load_subgroup_invocation(b);
+   nir_def *lane = nir_load_subgroup_invocation(b);
    unsigned *lanes = data;
 
    /* Write zero in a funny way to bypass lower_load_const_to_scalar */
    bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
    unsigned size = has_dest ? nir_dest_bit_size(intr->dest) : 32;
-   nir_ssa_def *zero = has_dest ? nir_imm_zero(b, 1, size) : NULL;
-   nir_ssa_def *zeroes[4] = {zero, zero, zero, zero};
-   nir_ssa_def *res =
+   nir_def *zero = has_dest ? nir_imm_zero(b, 1, size) : NULL;
+   nir_def *zeroes[4] = {zero, zero, zero, zero};
+   nir_def *res =
       has_dest ? nir_vec(b, zeroes, nir_dest_num_components(intr->dest)) : NULL;
 
    for (unsigned i = 0; i < (*lanes); ++i) {
@@ -99,13 +99,13 @@ bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
       nir_pop_if(b, NULL);
 
       if (has_dest) {
-         nir_ssa_def *c_ssa = &c_intr->dest.ssa;
+         nir_def *c_ssa = &c_intr->dest.ssa;
          res = nir_if_phi(b, c_ssa, res);
       }
    }
 
    if (has_dest)
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, res);
+      nir_def_rewrite_uses(&intr->dest.ssa, res);
 
    nir_instr_remove(instr);
    return true;
index fbf06bb..dc73e5a 100644 (file)
@@ -3564,7 +3564,7 @@ bi_emit_tex_valhall(bi_builder *b, nir_tex_instr *instr)
    image_src = bi_lshift_or_i32(b, texture, image_src, bi_imm_u8(16));
 
    /* Only write the components that we actually read */
-   unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
+   unsigned mask = nir_def_components_read(&instr->dest.ssa);
    unsigned comps_per_reg = nir_dest_bit_size(instr->dest) == 16 ? 2 : 1;
    unsigned res_size = DIV_ROUND_UP(util_bitcount(mask), comps_per_reg);
 
@@ -4259,8 +4259,8 @@ bi_scalarize_filter(const nir_instr *instr, const void *data)
 }
 
 /* Ensure we write exactly 4 components */
-static nir_ssa_def *
-bifrost_nir_valid_channel(nir_builder *b, nir_ssa_def *in, unsigned channel,
+static nir_def *
+bifrost_nir_valid_channel(nir_builder *b, nir_def *in, unsigned channel,
                           unsigned first, unsigned mask)
 {
    if (!(mask & BITFIELD_BIT(channel)))
@@ -4286,7 +4286,7 @@ bifrost_nir_lower_blend_components(struct nir_builder *b, nir_instr *instr,
    if (intr->intrinsic != nir_intrinsic_store_output)
       return false;
 
-   nir_ssa_def *in = intr->src[0].ssa;
+   nir_def *in = intr->src[0].ssa;
    unsigned first = nir_intrinsic_component(intr);
    unsigned mask = nir_intrinsic_write_mask(intr);
 
@@ -4299,7 +4299,7 @@ bifrost_nir_lower_blend_components(struct nir_builder *b, nir_instr *instr,
    b->cursor = nir_before_instr(&intr->instr);
 
    /* Replicate the first valid component instead */
-   nir_ssa_def *replicated =
+   nir_def *replicated =
       nir_vec4(b, bifrost_nir_valid_channel(b, in, 0, first, mask),
                bifrost_nir_valid_channel(b, in, 1, first, mask),
                bifrost_nir_valid_channel(b, in, 2, first, mask),
@@ -4542,8 +4542,8 @@ bi_gather_texcoords(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    nir_src src = tex->src[coord_idx].src;
-   nir_ssa_scalar x = nir_ssa_scalar_resolved(src.ssa, 0);
-   nir_ssa_scalar y = nir_ssa_scalar_resolved(src.ssa, 1);
+   nir_scalar x = nir_scalar_resolved(src.ssa, 0);
+   nir_scalar y = nir_scalar_resolved(src.ssa, 1);
 
    if (x.def != y.def)
       return false;
@@ -4597,7 +4597,7 @@ bi_lower_sample_mask_writes(nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *orig = nir_load_sample_mask(b);
+   nir_def *orig = nir_load_sample_mask(b);
 
    nir_instr_rewrite_src_ssa(
       instr, &intr->src[0],
@@ -4623,15 +4623,15 @@ bi_lower_load_output(nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *conversion = nir_load_rt_conversion_pan(
+   nir_def *conversion = nir_load_rt_conversion_pan(
       b, .base = rt, .src_type = nir_intrinsic_dest_type(intr));
 
-   nir_ssa_def *lowered = nir_load_converted_output_pan(
+   nir_def *lowered = nir_load_converted_output_pan(
       b, nir_dest_num_components(intr->dest), nir_dest_bit_size(intr->dest),
       conversion, .dest_type = nir_intrinsic_dest_type(intr),
       .io_semantics = nir_intrinsic_io_semantics(intr));
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, lowered);
+   nir_def_rewrite_uses(&intr->dest.ssa, lowered);
    return true;
 }
 
index de9e65e..4b858ca 100644 (file)
@@ -620,8 +620,8 @@ pan_inline_blend_constants(nir_builder *b, nir_instr *instr, void *data)
       nir_const_value_for_float(floats[3], 32)};
 
    b->cursor = nir_after_instr(instr);
-   nir_ssa_def *constant = nir_build_imm(b, 4, 32, constants);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, constant);
+   nir_def *constant = nir_build_imm(b, 4, 32, constants);
+   nir_def_rewrite_uses(&intr->dest.ssa, constant);
    nir_instr_remove(instr);
    return true;
 }
@@ -683,8 +683,8 @@ GENX(pan_blend_create_shader)(const struct panfrost_device *dev,
       options.rt[rt].alpha.dst_factor = rt_state->equation.alpha_dst_factor;
    }
 
-   nir_ssa_def *pixel = nir_load_barycentric_pixel(&b, 32, .interp_mode = 1);
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
+   nir_def *pixel = nir_load_barycentric_pixel(&b, 32, .interp_mode = 1);
+   nir_def *zero = nir_imm_int(&b, 0);
 
    for (unsigned i = 0; i < 2; ++i) {
       nir_alu_type src_type =
@@ -694,7 +694,7 @@ GENX(pan_blend_create_shader)(const struct panfrost_device *dev,
       src_type = nir_alu_type_get_base_type(nir_type) |
                  nir_alu_type_get_type_size(src_type);
 
-      nir_ssa_def *src = nir_load_interpolated_input(
+      nir_def *src = nir_load_interpolated_input(
          &b, 4, nir_alu_type_get_type_size(src_type), pixel, zero,
          .io_semantics.location = i ? VARYING_SLOT_VAR0 : VARYING_SLOT_COL0,
          .io_semantics.num_slots = 1, .base = i, .dest_type = src_type);
@@ -806,7 +806,7 @@ inline_rt_conversion(nir_builder *b, nir_instr *instr, void *data)
       inputs->dev, inputs->formats[rt], rt, size, false);
 
    b->cursor = nir_after_instr(instr);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_imm_int(b, conversion >> 32));
+   nir_def_rewrite_uses(&intr->dest.ssa, nir_imm_int(b, conversion >> 32));
    return true;
 }
 
index 91c0f00..abbe1b9 100644 (file)
@@ -402,8 +402,7 @@ lower_sampler_parameters(nir_builder *b, nir_instr *instr, UNUSED void *data)
    };
 
    b->cursor = nir_after_instr(instr);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                            nir_build_imm(b, 3, 32, constants));
+   nir_def_rewrite_uses(&intr->dest.ssa, nir_build_imm(b, 3, 32, constants));
    return true;
 }
 
@@ -482,7 +481,7 @@ pan_blitter_get_blit_shader(struct panfrost_device *dev,
       glsl_vector_type(GLSL_TYPE_FLOAT, coord_comps), "coord");
    coord_var->data.location = VARYING_SLOT_VAR0;
 
-   nir_ssa_def *coord = nir_load_var(&b, coord_var);
+   nir_def *coord = nir_load_var(&b, coord_var);
 
    unsigned active_count = 0;
    for (unsigned i = 0; i < ARRAY_SIZE(key->surfaces); i++) {
@@ -526,7 +525,7 @@ pan_blitter_get_blit_shader(struct panfrost_device *dev,
          break;
       }
 
-      nir_ssa_def *res = NULL;
+      nir_def *res = NULL;
 
       if (resolve) {
          /* When resolving a float type, we need to calculate
index 5d065ab..2a0ed90 100644 (file)
@@ -58,41 +58,40 @@ pan_indirect_dispatch_init(struct panfrost_device *dev)
    nir_builder b = nir_builder_init_simple_shader(
       MESA_SHADER_COMPUTE, GENX(pan_shader_get_compiler_options)(), "%s",
       "indirect_dispatch");
-   nir_ssa_def *zero = nir_imm_int(&b, 0);
-   nir_ssa_def *one = nir_imm_int(&b, 1);
-   nir_ssa_def *num_wg =
+   nir_def *zero = nir_imm_int(&b, 0);
+   nir_def *one = nir_imm_int(&b, 1);
+   nir_def *num_wg =
       nir_load_global(&b, get_input_field(&b, indirect_dim), 4, 3, 32);
-   nir_ssa_def *num_wg_x = nir_channel(&b, num_wg, 0);
-   nir_ssa_def *num_wg_y = nir_channel(&b, num_wg, 1);
-   nir_ssa_def *num_wg_z = nir_channel(&b, num_wg, 2);
+   nir_def *num_wg_x = nir_channel(&b, num_wg, 0);
+   nir_def *num_wg_y = nir_channel(&b, num_wg, 1);
+   nir_def *num_wg_z = nir_channel(&b, num_wg, 2);
 
-   nir_ssa_def *job_hdr_ptr = get_input_field(&b, job);
-   nir_ssa_def *num_wg_flat =
+   nir_def *job_hdr_ptr = get_input_field(&b, job);
+   nir_def *num_wg_flat =
       nir_imul(&b, num_wg_x, nir_imul(&b, num_wg_y, num_wg_z));
 
    nir_push_if(&b, nir_ieq(&b, num_wg_flat, zero));
    {
-      nir_ssa_def *type_ptr =
-         nir_iadd(&b, job_hdr_ptr, nir_imm_int64(&b, 4 * 4));
-      nir_ssa_def *ntype = nir_imm_intN_t(&b, (MALI_JOB_TYPE_NULL << 1) | 1, 8);
+      nir_def *type_ptr = nir_iadd(&b, job_hdr_ptr, nir_imm_int64(&b, 4 * 4));
+      nir_def *ntype = nir_imm_intN_t(&b, (MALI_JOB_TYPE_NULL << 1) | 1, 8);
       nir_store_global(&b, type_ptr, 1, ntype, 1);
    }
    nir_push_else(&b, NULL);
    {
-      nir_ssa_def *job_dim_ptr = nir_iadd(
+      nir_def *job_dim_ptr = nir_iadd(
          &b, job_hdr_ptr,
          nir_imm_int64(&b, pan_section_offset(COMPUTE_JOB, INVOCATION)));
-      nir_ssa_def *num_wg_x_m1 = nir_isub(&b, num_wg_x, one);
-      nir_ssa_def *num_wg_y_m1 = nir_isub(&b, num_wg_y, one);
-      nir_ssa_def *num_wg_z_m1 = nir_isub(&b, num_wg_z, one);
-      nir_ssa_def *job_dim = nir_load_global(&b, job_dim_ptr, 8, 2, 32);
-      nir_ssa_def *dims = nir_channel(&b, job_dim, 0);
-      nir_ssa_def *split = nir_channel(&b, job_dim, 1);
-      nir_ssa_def *num_wg_x_split =
+      nir_def *num_wg_x_m1 = nir_isub(&b, num_wg_x, one);
+      nir_def *num_wg_y_m1 = nir_isub(&b, num_wg_y, one);
+      nir_def *num_wg_z_m1 = nir_isub(&b, num_wg_z, one);
+      nir_def *job_dim = nir_load_global(&b, job_dim_ptr, 8, 2, 32);
+      nir_def *dims = nir_channel(&b, job_dim, 0);
+      nir_def *split = nir_channel(&b, job_dim, 1);
+      nir_def *num_wg_x_split =
          nir_iand_imm(&b, nir_ushr_imm(&b, split, 10), 0x3f);
-      nir_ssa_def *num_wg_y_split = nir_iadd(
+      nir_def *num_wg_y_split = nir_iadd(
          &b, num_wg_x_split, nir_isub_imm(&b, 32, nir_uclz(&b, num_wg_x_m1)));
-      nir_ssa_def *num_wg_z_split = nir_iadd(
+      nir_def *num_wg_z_split = nir_iadd(
          &b, num_wg_y_split, nir_isub_imm(&b, 32, nir_uclz(&b, num_wg_y_m1)));
       split =
          nir_ior(&b, split,
@@ -106,7 +105,7 @@ pan_indirect_dispatch_init(struct panfrost_device *dev)
 
       nir_store_global(&b, job_dim_ptr, 8, nir_vec2(&b, dims, split), 3);
 
-      nir_ssa_def *num_wg_x_ptr = get_input_field(&b, num_wg_sysval[0]);
+      nir_def *num_wg_x_ptr = get_input_field(&b, num_wg_sysval[0]);
 
       nir_push_if(&b, nir_ine_imm(&b, num_wg_x_ptr, 0));
       {
index fbb52e5..b973df9 100644 (file)
@@ -466,13 +466,13 @@ mir_is_ssa(unsigned index)
 }
 
 static inline unsigned
-nir_ssa_index(nir_ssa_def *ssa)
+nir_ssa_index(nir_def *ssa)
 {
    return (ssa->index << 1) | 0;
 }
 
 static inline unsigned
-nir_reg_index(nir_ssa_def *handle)
+nir_reg_index(nir_def *handle)
 {
    return (handle->index << 1) | PAN_IS_REG;
 }
index 6a24d57..fc36cb7 100644 (file)
@@ -38,8 +38,8 @@
  */
 
 struct mir_address {
-   nir_ssa_scalar A;
-   nir_ssa_scalar B;
+   nir_scalar A;
+   nir_scalar B;
 
    midgard_index_address_format type;
    unsigned shift;
@@ -47,7 +47,7 @@ struct mir_address {
 };
 
 static bool
-mir_args_ssa(nir_ssa_scalar s, unsigned count)
+mir_args_ssa(nir_scalar s, unsigned count)
 {
    nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
 
@@ -62,13 +62,13 @@ mir_args_ssa(nir_ssa_scalar s, unsigned count)
 static void
 mir_match_constant(struct mir_address *address)
 {
-   if (address->A.def && nir_ssa_scalar_is_const(address->A)) {
-      address->bias += nir_ssa_scalar_as_uint(address->A);
+   if (address->A.def && nir_scalar_is_const(address->A)) {
+      address->bias += nir_scalar_as_uint(address->A);
       address->A.def = NULL;
    }
 
-   if (address->B.def && nir_ssa_scalar_is_const(address->B)) {
-      address->bias += nir_ssa_scalar_as_uint(address->B);
+   if (address->B.def && nir_scalar_is_const(address->B)) {
+      address->bias += nir_scalar_as_uint(address->B);
       address->B.def = NULL;
    }
 }
@@ -81,29 +81,29 @@ mir_match_constant(struct mir_address *address)
 static void
 mir_match_iadd(struct mir_address *address, bool first_free)
 {
-   if (!address->B.def || !nir_ssa_scalar_is_alu(address->B))
+   if (!address->B.def || !nir_scalar_is_alu(address->B))
       return;
 
    if (!mir_args_ssa(address->B, 2))
       return;
 
-   nir_op op = nir_ssa_scalar_alu_op(address->B);
+   nir_op op = nir_scalar_alu_op(address->B);
 
    if (op != nir_op_iadd)
       return;
 
-   nir_ssa_scalar op1 = nir_ssa_scalar_chase_alu_src(address->B, 0);
-   nir_ssa_scalar op2 = nir_ssa_scalar_chase_alu_src(address->B, 1);
+   nir_scalar op1 = nir_scalar_chase_alu_src(address->B, 0);
+   nir_scalar op2 = nir_scalar_chase_alu_src(address->B, 1);
 
-   if (nir_ssa_scalar_is_const(op1) &&
-       nir_ssa_scalar_as_uint(op1) <= MAX_POSITIVE_OFFSET) {
-      address->bias += nir_ssa_scalar_as_uint(op1);
+   if (nir_scalar_is_const(op1) &&
+       nir_scalar_as_uint(op1) <= MAX_POSITIVE_OFFSET) {
+      address->bias += nir_scalar_as_uint(op1);
       address->B = op2;
-   } else if (nir_ssa_scalar_is_const(op2) &&
-              nir_ssa_scalar_as_uint(op2) <= MAX_POSITIVE_OFFSET) {
-      address->bias += nir_ssa_scalar_as_uint(op2);
+   } else if (nir_scalar_is_const(op2) &&
+              nir_scalar_as_uint(op2) <= MAX_POSITIVE_OFFSET) {
+      address->bias += nir_scalar_as_uint(op2);
       address->B = op1;
-   } else if (!nir_ssa_scalar_is_const(op1) && !nir_ssa_scalar_is_const(op2) &&
+   } else if (!nir_scalar_is_const(op1) && !nir_scalar_is_const(op2) &&
               first_free && !address->A.def) {
       address->A = op1;
       address->B = op2;
@@ -115,16 +115,16 @@ mir_match_iadd(struct mir_address *address, bool first_free)
 static void
 mir_match_u2u64(struct mir_address *address)
 {
-   if (!address->B.def || !nir_ssa_scalar_is_alu(address->B))
+   if (!address->B.def || !nir_scalar_is_alu(address->B))
       return;
 
    if (!mir_args_ssa(address->B, 1))
       return;
 
-   nir_op op = nir_ssa_scalar_alu_op(address->B);
+   nir_op op = nir_scalar_alu_op(address->B);
    if (op != nir_op_u2u64)
       return;
-   nir_ssa_scalar arg = nir_ssa_scalar_chase_alu_src(address->B, 0);
+   nir_scalar arg = nir_scalar_chase_alu_src(address->B, 0);
 
    address->B = arg;
    address->type = midgard_index_address_u32;
@@ -135,16 +135,16 @@ mir_match_u2u64(struct mir_address *address)
 static void
 mir_match_i2i64(struct mir_address *address)
 {
-   if (!address->B.def || !nir_ssa_scalar_is_alu(address->B))
+   if (!address->B.def || !nir_scalar_is_alu(address->B))
       return;
 
    if (!mir_args_ssa(address->B, 1))
       return;
 
-   nir_op op = nir_ssa_scalar_alu_op(address->B);
+   nir_op op = nir_scalar_alu_op(address->B);
    if (op != nir_op_i2i64)
       return;
-   nir_ssa_scalar arg = nir_ssa_scalar_chase_alu_src(address->B, 0);
+   nir_scalar arg = nir_scalar_chase_alu_src(address->B, 0);
 
    address->B = arg;
    address->type = midgard_index_address_s32;
@@ -155,22 +155,22 @@ mir_match_i2i64(struct mir_address *address)
 static void
 mir_match_ishl(struct mir_address *address)
 {
-   if (!address->B.def || !nir_ssa_scalar_is_alu(address->B))
+   if (!address->B.def || !nir_scalar_is_alu(address->B))
       return;
 
    if (!mir_args_ssa(address->B, 2))
       return;
 
-   nir_op op = nir_ssa_scalar_alu_op(address->B);
+   nir_op op = nir_scalar_alu_op(address->B);
    if (op != nir_op_ishl)
       return;
-   nir_ssa_scalar op1 = nir_ssa_scalar_chase_alu_src(address->B, 0);
-   nir_ssa_scalar op2 = nir_ssa_scalar_chase_alu_src(address->B, 1);
+   nir_scalar op1 = nir_scalar_chase_alu_src(address->B, 0);
+   nir_scalar op2 = nir_scalar_chase_alu_src(address->B, 1);
 
-   if (!nir_ssa_scalar_is_const(op2))
+   if (!nir_scalar_is_const(op2))
       return;
 
-   unsigned shift = nir_ssa_scalar_as_uint(op2);
+   unsigned shift = nir_scalar_as_uint(op2);
    if (shift > 0x7)
       return;
 
@@ -183,25 +183,25 @@ mir_match_ishl(struct mir_address *address)
 static void
 mir_match_mov(struct mir_address *address)
 {
-   if (address->A.def && nir_ssa_scalar_is_alu(address->A)) {
-      nir_op op = nir_ssa_scalar_alu_op(address->A);
+   if (address->A.def && nir_scalar_is_alu(address->A)) {
+      nir_op op = nir_scalar_alu_op(address->A);
 
       if (op == nir_op_mov && mir_args_ssa(address->A, 1))
-         address->A = nir_ssa_scalar_chase_alu_src(address->A, 0);
+         address->A = nir_scalar_chase_alu_src(address->A, 0);
    }
 
-   if (address->B.def && nir_ssa_scalar_is_alu(address->B)) {
-      nir_op op = nir_ssa_scalar_alu_op(address->B);
+   if (address->B.def && nir_scalar_is_alu(address->B)) {
+      nir_op op = nir_scalar_alu_op(address->B);
 
       if (op == nir_op_mov && mir_args_ssa(address->B, 1))
-         address->B = nir_ssa_scalar_chase_alu_src(address->B, 0);
+         address->B = nir_scalar_chase_alu_src(address->B, 0);
    }
 }
 
 /* Tries to pattern match into mir_address */
 
 static struct mir_address
-mir_match_offset(nir_ssa_def *offset, bool first_free, bool extend)
+mir_match_offset(nir_def *offset, bool first_free, bool extend)
 {
    struct mir_address address = {
       .B = {.def = offset},
index 1208c9d..955a8d1 100644 (file)
@@ -231,16 +231,16 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr,
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *addr = intr->src[0].ssa;
+   nir_def *addr = intr->src[0].ssa;
 
-   nir_ssa_def *comps[MIR_VEC_COMPONENTS];
+   nir_def *comps[MIR_VEC_COMPONENTS];
    unsigned ncomps = 0;
 
    while (totalsz) {
       unsigned loadsz = MIN2(1 << (util_last_bit(totalsz) - 1), 128);
       unsigned loadncomps = loadsz / compsz;
 
-      nir_ssa_def *load;
+      nir_def *load;
       if (intr->intrinsic == nir_intrinsic_load_global) {
          load = nir_load_global(b, addr, compsz / 8, loadncomps, compsz);
       } else {
@@ -265,7 +265,7 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr,
    }
 
    assert(ncomps == nir_dest_num_components(intr->dest));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, comps, ncomps));
+   nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, comps, ncomps));
 
    return true;
 }
@@ -494,7 +494,7 @@ optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
 static void
 emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
 {
-   nir_ssa_def def = instr->def;
+   nir_def def = instr->def;
 
    midgard_constants *consts = rzalloc(ctx, midgard_constants);
 
@@ -1547,12 +1547,12 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 
    case nir_intrinsic_load_reg: {
       /* NIR guarantees that, for typical isel, this will always be fully
-       * consumed. However, we also do our own nir_ssa_scalar chasing for
+       * consumed. However, we also do our own nir_scalar chasing for
        * address arithmetic, bypassing the source chasing helpers. So we can end
        * up with unconsumed load_register instructions. Translate them here. 99%
        * of the time, these moves will be DCE'd away.
        */
-      nir_ssa_def *handle = instr->src[0].ssa;
+      nir_def *handle = instr->src[0].ssa;
 
       midgard_instruction ins =
          v_mov(nir_reg_index(handle), nir_dest_index(&instr->dest));
index 5bf6d01..c2cb769 100644 (file)
@@ -56,22 +56,22 @@ nir_lod_errata_instr(nir_builder *b, nir_instr *instr, void *data)
    nir_src_copy(&l->src[0], &idx, &l->instr);
 
    nir_builder_instr_insert(b, &l->instr);
-   nir_ssa_def *params = &l->dest.ssa;
+   nir_def *params = &l->dest.ssa;
 
    /* Extract the individual components */
-   nir_ssa_def *min_lod = nir_channel(b, params, 0);
-   nir_ssa_def *max_lod = nir_channel(b, params, 1);
-   nir_ssa_def *lod_bias = nir_channel(b, params, 2);
+   nir_def *min_lod = nir_channel(b, params, 0);
+   nir_def *max_lod = nir_channel(b, params, 1);
+   nir_def *lod_bias = nir_channel(b, params, 2);
 
    /* Rewrite the LOD with bias/clamps. Order sensitive. */
    for (unsigned i = 0; i < tex->num_srcs; i++) {
       if (tex->src[i].src_type != nir_tex_src_lod)
          continue;
 
-      nir_ssa_def *lod = nir_ssa_for_src(b, tex->src[i].src, 1);
+      nir_def *lod = nir_ssa_for_src(b, tex->src[i].src, 1);
 
-      nir_ssa_def *biased = nir_fadd(b, lod, lod_bias);
-      nir_ssa_def *clamped = nir_fmin(b, nir_fmax(b, biased, min_lod), max_lod);
+      nir_def *biased = nir_fadd(b, lod, lod_bias);
+      nir_def *clamped = nir_fmin(b, nir_fmax(b, biased, min_lod), max_lod);
 
       nir_instr_rewrite_src(&tex->instr, &tex->src[i].src,
                             nir_src_for_ssa(clamped));
index 7241565..0a4dbc1 100644 (file)
@@ -50,10 +50,10 @@ nir_lower_image_bitsize(nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *coord =
+   nir_def *coord =
       nir_ssa_for_src(b, intr->src[1], nir_src_num_components(intr->src[1]));
 
-   nir_ssa_def *coord16 = nir_u2u16(b, coord);
+   nir_def *coord16 = nir_u2u16(b, coord);
 
    nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(coord16));
 
index e9bca61..e2cd8c6 100644 (file)
@@ -35,7 +35,7 @@ midgard_nir_type_csel(nir_shader *shader)
 
    BITSET_WORD *float_types =
       calloc(BITSET_WORDS(impl->ssa_alloc), sizeof(BITSET_WORD));
-   nir_gather_ssa_types(impl, float_types, NULL);
+   nir_gather_types(impl, float_types, NULL);
 
    nir_shader_instructions_pass(
       shader, pass, nir_metadata_block_index | nir_metadata_dominance,
index 883c6b5..4ac35f4 100644 (file)
@@ -81,9 +81,9 @@ nir_fuse_io_16(nir_shader *shader)
             nir_builder b = nir_builder_at(nir_after_instr(instr));
 
             /* The f2f32(f2fmp(x)) will cancel by opt_algebraic */
-            nir_ssa_def *conv = nir_f2f32(&b, &intr->dest.ssa);
-            nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, conv,
-                                           conv->parent_instr);
+            nir_def *conv = nir_f2f32(&b, &intr->dest.ssa);
+            nir_def_rewrite_uses_after(&intr->dest.ssa, conv,
+                                       conv->parent_instr);
 
             progress |= true;
          }
index 989e4ab..5785d77 100644 (file)
@@ -59,9 +59,9 @@ nir_lower_64bit_intrin_instr(nir_builder *b, nir_instr *instr, void *data)
 
    intr->dest.ssa.bit_size = 32;
 
-   nir_ssa_def *conv = nir_u2u64(b, &intr->dest.ssa);
+   nir_def *conv = nir_u2u64(b, &intr->dest.ssa);
 
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, conv, conv->parent_instr);
+   nir_def_rewrite_uses_after(&intr->dest.ssa, conv, conv->parent_instr);
 
    return true;
 }
index f2154cd..cd387e6 100644 (file)
@@ -114,10 +114,10 @@ pan_is_format_native(const struct util_format_description *desc,
  * suitable for storing (with components replicated to fill). Unpacks do the
  * reverse but cannot rely on replication. */
 
-static nir_ssa_def *
-pan_replicate(nir_builder *b, nir_ssa_def *v, unsigned num_components)
+static nir_def *
+pan_replicate(nir_builder *b, nir_def *v, unsigned num_components)
 {
-   nir_ssa_def *replicated[4];
+   nir_def *replicated[4];
 
    for (unsigned i = 0; i < 4; ++i)
       replicated[i] = nir_channel(b, v, i % num_components);
@@ -128,26 +128,26 @@ pan_replicate(nir_builder *b, nir_ssa_def *v, unsigned num_components)
 /* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
  * upper/lower halves of course */
 
-static nir_ssa_def *
-pan_pack_pure_16(nir_builder *b, nir_ssa_def *v, unsigned num_components)
+static nir_def *
+pan_pack_pure_16(nir_builder *b, nir_def *v, unsigned num_components)
 {
-   nir_ssa_def *v4 = pan_replicate(b, v, num_components);
+   nir_def *v4 = pan_replicate(b, v, num_components);
 
-   nir_ssa_def *lo = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 0));
-   nir_ssa_def *hi = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 2));
+   nir_def *lo = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 0));
+   nir_def *hi = nir_pack_32_2x16(b, nir_channels(b, v4, 0x3 << 2));
 
    return nir_vec4(b, lo, hi, lo, hi);
 }
 
-static nir_ssa_def *
-pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
+static nir_def *
+pan_unpack_pure_16(nir_builder *b, nir_def *pack, unsigned num_components)
 {
-   nir_ssa_def *unpacked[4];
+   nir_def *unpacked[4];
 
    assert(num_components <= 4);
 
    for (unsigned i = 0; i < num_components; i += 2) {
-      nir_ssa_def *halves = nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));
+      nir_def *halves = nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));
 
       unpacked[i + 0] = nir_channel(b, halves, 0);
       unpacked[i + 1] = nir_channel(b, halves, 1);
@@ -156,9 +156,9 @@ pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
    return nir_pad_vec4(b, nir_vec(b, unpacked, num_components));
 }
 
-static nir_ssa_def *
+static nir_def *
 pan_pack_reorder(nir_builder *b, const struct util_format_description *desc,
-                 nir_ssa_def *v)
+                 nir_def *v)
 {
    unsigned swizzle[4] = {0, 1, 2, 3};
 
@@ -170,9 +170,9 @@ pan_pack_reorder(nir_builder *b, const struct util_format_description *desc,
    return nir_swizzle(b, v, swizzle, v->num_components);
 }
 
-static nir_ssa_def *
+static nir_def *
 pan_unpack_reorder(nir_builder *b, const struct util_format_description *desc,
-                   nir_ssa_def *v)
+                   nir_def *v)
 {
    unsigned swizzle[4] = {0, 1, 2, 3};
 
@@ -184,22 +184,22 @@ pan_unpack_reorder(nir_builder *b, const struct util_format_description *desc,
    return nir_swizzle(b, v, swizzle, v->num_components);
 }
 
-static nir_ssa_def *
-pan_pack_pure_8(nir_builder *b, nir_ssa_def *v, unsigned num_components)
+static nir_def *
+pan_pack_pure_8(nir_builder *b, nir_def *v, unsigned num_components)
 {
    return nir_replicate(
       b, nir_pack_32_4x8(b, pan_replicate(b, v, num_components)), 4);
 }
 
-static nir_ssa_def *
-pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
+static nir_def *
+pan_unpack_pure_8(nir_builder *b, nir_def *pack, unsigned num_components)
 {
-   nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
+   nir_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
    return nir_trim_vector(b, unpacked, num_components);
 }
 
-static nir_ssa_def *
-pan_fsat(nir_builder *b, nir_ssa_def *v, bool is_signed)
+static nir_def *
+pan_fsat(nir_builder *b, nir_def *v, bool is_signed)
 {
    if (is_signed)
       return nir_fsat_signed_mali(b, v);
@@ -219,33 +219,33 @@ norm_scale(bool snorm, unsigned bits)
 /* For <= 8-bits per channel, [U,S]NORM formats are packed like [U,S]NORM 8,
  * with zeroes spacing out each component as needed */
 
-static nir_ssa_def *
-pan_pack_norm(nir_builder *b, nir_ssa_def *v, unsigned x, unsigned y,
-              unsigned z, unsigned w, bool is_signed)
+static nir_def *
+pan_pack_norm(nir_builder *b, nir_def *v, unsigned x, unsigned y, unsigned z,
+              unsigned w, bool is_signed)
 {
    /* If a channel has N bits, 1.0 is encoded as 2^N - 1 for UNORMs and
     * 2^(N-1) - 1 for SNORMs */
-   nir_ssa_def *scales =
+   nir_def *scales =
       is_signed ? nir_imm_vec4_16(b, (1 << (x - 1)) - 1, (1 << (y - 1)) - 1,
                                   (1 << (z - 1)) - 1, (1 << (w - 1)) - 1)
                 : nir_imm_vec4_16(b, (1 << x) - 1, (1 << y) - 1, (1 << z) - 1,
                                   (1 << w) - 1);
 
    /* If a channel has N bits, we pad out to the byte by (8 - N) bits */
-   nir_ssa_def *shifts = nir_imm_ivec4(b, 8 - x, 8 - y, 8 - z, 8 - w);
-   nir_ssa_def *clamped = pan_fsat(b, nir_pad_vec4(b, v), is_signed);
+   nir_def *shifts = nir_imm_ivec4(b, 8 - x, 8 - y, 8 - z, 8 - w);
+   nir_def *clamped = pan_fsat(b, nir_pad_vec4(b, v), is_signed);
 
-   nir_ssa_def *f = nir_fmul(b, clamped, scales);
-   nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
-   nir_ssa_def *s = nir_ishl(b, u8, shifts);
-   nir_ssa_def *repl = nir_pack_32_4x8(b, s);
+   nir_def *f = nir_fmul(b, clamped, scales);
+   nir_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
+   nir_def *s = nir_ishl(b, u8, shifts);
+   nir_def *repl = nir_pack_32_4x8(b, s);
 
    return nir_replicate(b, repl, 4);
 }
 
-static nir_ssa_def *
-pan_pack_unorm(nir_builder *b, nir_ssa_def *v, unsigned x, unsigned y,
-               unsigned z, unsigned w)
+static nir_def *
+pan_pack_unorm(nir_builder *b, nir_def *v, unsigned x, unsigned y, unsigned z,
+               unsigned w)
 {
    return pan_pack_norm(b, v, x, y, z, w, false);
 }
@@ -254,33 +254,33 @@ pan_pack_unorm(nir_builder *b, nir_ssa_def *v, unsigned x, unsigned y,
  * 8-bits of RGB and the top byte being RGBA as 2-bits packed. As imirkin
  * pointed out, this means free conversion to RGBX8 */
 
-static nir_ssa_def *
-pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
+static nir_def *
+pan_pack_unorm_1010102(nir_builder *b, nir_def *v)
 {
-   nir_ssa_def *scale = nir_imm_vec4(b, 1023.0, 1023.0, 1023.0, 3.0);
-   nir_ssa_def *s =
+   nir_def *scale = nir_imm_vec4(b, 1023.0, 1023.0, 1023.0, 3.0);
+   nir_def *s =
       nir_f2u32(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b, v), scale)));
 
-   nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
-   nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));
+   nir_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
+   nir_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));
 
-   nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));
+   nir_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));
 
-   nir_ssa_def *top =
+   nir_def *top =
       nir_ior(b,
               nir_ior(b, nir_ishl_imm(b, nir_channel(b, bottom2, 0), 24 + 0),
                       nir_ishl_imm(b, nir_channel(b, bottom2, 1), 24 + 2)),
               nir_ior(b, nir_ishl_imm(b, nir_channel(b, bottom2, 2), 24 + 4),
                       nir_ishl_imm(b, nir_channel(b, bottom2, 3), 24 + 6)));
 
-   nir_ssa_def *p = nir_ior(b, top, top8_rgb);
+   nir_def *p = nir_ior(b, top, top8_rgb);
    return nir_replicate(b, p, 4);
 }
 
 /* On the other hand, the pure int RGB10_A2 is identical to the spec */
 
-static nir_ssa_def *
-pan_pack_int_1010102(nir_builder *b, nir_ssa_def *v, bool is_signed)
+static nir_def *
+pan_pack_int_1010102(nir_builder *b, nir_def *v, bool is_signed)
 {
    v = nir_u2u32(b, v);
 
@@ -299,10 +299,10 @@ pan_pack_int_1010102(nir_builder *b, nir_ssa_def *v, bool is_signed)
    return nir_replicate(b, v, 4);
 }
 
-static nir_ssa_def *
-pan_unpack_int_1010102(nir_builder *b, nir_ssa_def *packed, bool is_signed)
+static nir_def *
+pan_unpack_int_1010102(nir_builder *b, nir_def *packed, bool is_signed)
 {
-   nir_ssa_def *v = nir_replicate(b, nir_channel(b, packed, 0), 4);
+   nir_def *v = nir_replicate(b, nir_channel(b, packed, 0), 4);
 
    /* Left shift all components so the sign bit is on the MSB, and
     * can be extended by ishr(). The ishl()+[u,i]shr() combination
@@ -320,38 +320,37 @@ pan_unpack_int_1010102(nir_builder *b, nir_ssa_def *packed, bool is_signed)
 
 /* NIR means we can *finally* catch a break */
 
-static nir_ssa_def *
-pan_pack_r11g11b10(nir_builder *b, nir_ssa_def *v)
+static nir_def *
+pan_pack_r11g11b10(nir_builder *b, nir_def *v)
 {
    return nir_replicate(b, nir_format_pack_11f11f10f(b, nir_f2f32(b, v)), 4);
 }
 
-static nir_ssa_def *
-pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v)
+static nir_def *
+pan_unpack_r11g11b10(nir_builder *b, nir_def *v)
 {
-   nir_ssa_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
-   nir_ssa_def *f16 = nir_f2fmp(b, f32);
+   nir_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
+   nir_def *f16 = nir_f2fmp(b, f32);
 
    /* Extend to vec4 with alpha */
-   nir_ssa_def *components[4] = {nir_channel(b, f16, 0), nir_channel(b, f16, 1),
-                                 nir_channel(b, f16, 2),
-                                 nir_imm_float16(b, 1.0)};
+   nir_def *components[4] = {nir_channel(b, f16, 0), nir_channel(b, f16, 1),
+                             nir_channel(b, f16, 2), nir_imm_float16(b, 1.0)};
 
    return nir_vec(b, components, 4);
 }
 
 /* Wrapper around sRGB conversion */
 
-static nir_ssa_def *
-pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
+static nir_def *
+pan_linear_to_srgb(nir_builder *b, nir_def *linear)
 {
-   nir_ssa_def *rgb = nir_trim_vector(b, linear, 3);
+   nir_def *rgb = nir_trim_vector(b, linear, 3);
 
    /* TODO: fp16 native conversion */
-   nir_ssa_def *srgb =
+   nir_def *srgb =
       nir_f2fmp(b, nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));
 
-   nir_ssa_def *comp[4] = {
+   nir_def *comp[4] = {
       nir_channel(b, srgb, 0),
       nir_channel(b, srgb, 1),
       nir_channel(b, srgb, 2),
@@ -361,8 +360,8 @@ pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
    return nir_vec(b, comp, 4);
 }
 
-static nir_ssa_def *
-pan_unpack_pure(nir_builder *b, nir_ssa_def *packed, unsigned size, unsigned nr)
+static nir_def *
+pan_unpack_pure(nir_builder *b, nir_def *packed, unsigned size, unsigned nr)
 {
    switch (size) {
    case 32:
@@ -378,16 +377,15 @@ pan_unpack_pure(nir_builder *b, nir_ssa_def *packed, unsigned size, unsigned nr)
 
 /* Generic dispatches for un/pack regardless of format */
 
-static nir_ssa_def *
+static nir_def *
 pan_unpack(nir_builder *b, const struct util_format_description *desc,
-           nir_ssa_def *packed)
+           nir_def *packed)
 {
    if (desc->is_array) {
       int c = util_format_get_first_non_void_channel(desc->format);
       assert(c >= 0);
       struct util_format_channel_description d = desc->channel[c];
-      nir_ssa_def *unpacked =
-         pan_unpack_pure(b, packed, d.size, desc->nr_channels);
+      nir_def *unpacked = pan_unpack_pure(b, packed, d.size, desc->nr_channels);
 
       /* Normalized formats are unpacked as integers. We need to
        * convert to float for the final result.
@@ -397,8 +395,8 @@ pan_unpack(nir_builder *b, const struct util_format_description *desc,
          unsigned float_sz = (d.size <= 8 ? 16 : 32);
          float multiplier = norm_scale(snorm, d.size);
 
-         nir_ssa_def *as_float = snorm ? nir_i2fN(b, unpacked, float_sz)
-                                       : nir_u2fN(b, unpacked, float_sz);
+         nir_def *as_float = snorm ? nir_i2fN(b, unpacked, float_sz)
+                                   : nir_u2fN(b, unpacked, float_sz);
 
          return nir_fmul_imm(b, as_float, 1.0 / multiplier);
       } else {
@@ -423,9 +421,9 @@ pan_unpack(nir_builder *b, const struct util_format_description *desc,
    unreachable("Unknown format");
 }
 
-static nir_ssa_def *pan_pack(nir_builder *b,
-                             const struct util_format_description *desc,
-                             nir_ssa_def * unpacked)
+static nir_def *pan_pack(nir_builder *b,
+                         const struct util_format_description *desc,
+                         nir_def * unpacked)
 {
    if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
       unpacked = pan_linear_to_srgb(b, unpacked);
@@ -436,14 +434,14 @@ static nir_ssa_def *pan_pack(nir_builder *b,
       struct util_format_channel_description d = desc->channel[c];
 
       /* Pure formats are packed as-is */
-      nir_ssa_def *raw = unpacked;
+      nir_def *raw = unpacked;
 
       /* Normalized formats get normalized first */
       if (d.normalized) {
          bool snorm = desc->is_snorm;
          float multiplier = norm_scale(snorm, d.size);
-         nir_ssa_def *clamped = pan_fsat(b, unpacked, snorm);
-         nir_ssa_def *normed = nir_fmul_imm(b, clamped, multiplier);
+         nir_def *clamped = pan_fsat(b, unpacked, snorm);
+         nir_def *normed = nir_fmul_imm(b, clamped, multiplier);
 
          raw = nir_f2uN(b, normed, d.size);
       }
@@ -500,15 +498,14 @@ pan_lower_fb_store(nir_builder *b, nir_intrinsic_instr *intr,
                    bool reorder_comps, unsigned nr_samples)
 {
    /* For stores, add conversion before */
-   nir_ssa_def *unpacked =
-      nir_ssa_for_src(b, intr->src[0], intr->num_components);
+   nir_def *unpacked = nir_ssa_for_src(b, intr->src[0], intr->num_components);
    unpacked = nir_pad_vec4(b, unpacked);
 
    /* Re-order the components */
    if (reorder_comps)
       unpacked = pan_pack_reorder(b, desc, unpacked);
 
-   nir_ssa_def *packed = pan_pack(b, desc, unpacked);
+   nir_def *packed = pan_pack(b, desc, unpacked);
 
    /* We have to split writeout in 128 bit chunks */
    unsigned iterations = DIV_ROUND_UP(desc->block.bits * nr_samples, 128);
@@ -520,7 +517,7 @@ pan_lower_fb_store(nir_builder *b, nir_intrinsic_instr *intr,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 pan_sample_id(nir_builder *b, int sample)
 {
    return (sample >= 0) ? nir_imm_int(b, sample) : nir_load_sample_id(b);
@@ -531,12 +528,12 @@ pan_lower_fb_load(nir_builder *b, nir_intrinsic_instr *intr,
                   const struct util_format_description *desc,
                   bool reorder_comps, int sample)
 {
-   nir_ssa_def *packed =
+   nir_def *packed =
       nir_load_raw_output_pan(b, 4, 32, pan_sample_id(b, sample),
                               .io_semantics = nir_intrinsic_io_semantics(intr));
 
    /* Convert the raw value */
-   nir_ssa_def *unpacked = pan_unpack(b, desc, packed);
+   nir_def *unpacked = pan_unpack(b, desc, packed);
 
    /* Convert to the size of the load intrinsic.
     *
@@ -562,7 +559,7 @@ pan_lower_fb_load(nir_builder *b, nir_intrinsic_instr *intr,
    if (reorder_comps)
       unpacked = pan_unpack_reorder(b, desc, unpacked);
 
-   nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, unpacked, &intr->instr);
+   nir_def_rewrite_uses_after(&intr->dest.ssa, unpacked, &intr->instr);
 }
 
 struct inputs {
index 71bb4a8..6cae500 100644 (file)
@@ -41,9 +41,9 @@ pan_lower_helper_invocation_instr(nir_builder *b, nir_instr *instr, void *data)
 
    b->cursor = nir_before_instr(instr);
 
-   nir_ssa_def *mask = nir_load_sample_mask_in(b);
-   nir_ssa_def *eq = nir_ieq_imm(b, mask, 0);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, eq);
+   nir_def *mask = nir_load_sample_mask_in(b);
+   nir_def *eq = nir_ieq_imm(b, mask, 0);
+   nir_def_rewrite_uses(&intr->dest.ssa, eq);
 
    return true;
 }
index f51b9c9..c7c68dc 100644 (file)
@@ -46,19 +46,19 @@ pan_lower_sample_pos_impl(struct nir_builder *b, nir_instr *instr,
    b->cursor = nir_before_instr(instr);
 
    /* Elements are 4 bytes */
-   nir_ssa_def *addr =
+   nir_def *addr =
       nir_iadd(b, nir_load_sample_positions_pan(b),
                nir_u2u64(b, nir_imul_imm(b, nir_load_sample_id(b), 4)));
 
    /* Decode 8:8 fixed-point */
-   nir_ssa_def *raw = nir_load_global(b, addr, 2, 2, 16);
-   nir_ssa_def *decoded = nir_fmul_imm(b, nir_i2f16(b, raw), 1.0 / 256.0);
+   nir_def *raw = nir_load_global(b, addr, 2, 2, 16);
+   nir_def *decoded = nir_fmul_imm(b, nir_i2f16(b, raw), 1.0 / 256.0);
 
    /* Make NIR validator happy */
    if (decoded->bit_size != nir_dest_bit_size(intr->dest))
       decoded = nir_f2fN(b, decoded, nir_dest_bit_size(intr->dest));
 
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, decoded);
+   nir_def_rewrite_uses(&intr->dest.ssa, decoded);
    return true;
 }
 
index aa00e61..6393fc1 100644 (file)
@@ -52,16 +52,16 @@ lower_store_component(nir_builder *b, nir_instr *instr, void *data)
    nir_intrinsic_instr *prev = _mesa_hash_table_u64_search(slots, slot);
    unsigned mask = (prev ? nir_intrinsic_write_mask(prev) : 0);
 
-   nir_ssa_def *value = intr->src[0].ssa;
+   nir_def *value = intr->src[0].ssa;
    b->cursor = nir_before_instr(&intr->instr);
 
-   nir_ssa_def *undef = nir_ssa_undef(b, 1, value->bit_size);
-   nir_ssa_def *channels[4] = {undef, undef, undef, undef};
+   nir_def *undef = nir_undef(b, 1, value->bit_size);
+   nir_def *channels[4] = {undef, undef, undef, undef};
 
    /* Copy old */
    u_foreach_bit(i, mask) {
       assert(prev != NULL);
-      nir_ssa_def *prev_ssa = prev->src[0].ssa;
+      nir_def *prev_ssa = prev->src[0].ssa;
       channels[i] = nir_channel(b, prev_ssa, i);
    }
 
index 843442a..56b33a4 100644 (file)
@@ -61,10 +61,10 @@ pan_nir_emit_combined_store(nir_builder *b, nir_intrinsic_instr *rt0_store,
    nir_intrinsic_set_dest_type(intr, pan_nir_rt_store_type(stores[2]));
    nir_intrinsic_set_component(intr, writeout);
 
-   nir_ssa_def *zero = nir_imm_int(b, 0);
-   nir_ssa_def *zero4 = nir_imm_ivec4(b, 0, 0, 0, 0);
+   nir_def *zero = nir_imm_int(b, 0);
+   nir_def *zero4 = nir_imm_ivec4(b, 0, 0, 0, 0);
 
-   nir_ssa_def *src[] = {
+   nir_def *src[] = {
       rt0_store ? rt0_store->src[0].ssa : zero4,
       rt0_store ? rt0_store->src[1].ssa : zero,
       stores[0] ? stores[0]->src[0].ssa : zero,
index 5e4a491..5bb00b3 100644 (file)
@@ -38,7 +38,7 @@ lower_xfb_output(nir_builder *b, nir_intrinsic_instr *intr,
 
    uint16_t offset = offset_words * 4;
 
-   nir_ssa_def *index = nir_iadd(
+   nir_def *index = nir_iadd(
       b, nir_imul(b, nir_load_instance_id(b), nir_load_num_vertices(b)),
       nir_load_vertex_id_zero_base(b));
 
@@ -46,13 +46,13 @@ lower_xfb_output(nir_builder *b, nir_intrinsic_instr *intr,
               SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
    BITSET_SET(b->shader->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);
 
-   nir_ssa_def *buf = nir_load_xfb_address(b, 64, .base = buffer);
-   nir_ssa_def *addr = nir_iadd(
+   nir_def *buf = nir_load_xfb_address(b, 64, .base = buffer);
+   nir_def *addr = nir_iadd(
       b, buf,
       nir_u2u64(b, nir_iadd_imm(b, nir_imul_imm(b, index, stride), offset)));
 
-   nir_ssa_def *src = intr->src[0].ssa;
-   nir_ssa_def *value =
+   nir_def *src = intr->src[0].ssa;
+   nir_def *value =
       nir_channels(b, src, BITFIELD_MASK(num_components) << start_component);
    nir_store_global(b, addr, 4, value, BITFIELD_MASK(num_components));
 }
@@ -71,10 +71,10 @@ lower_xfb(nir_builder *b, nir_instr *instr, UNUSED void *data)
    if (intr->intrinsic == nir_intrinsic_load_vertex_id) {
       b->cursor = nir_instr_remove(instr);
 
-      nir_ssa_def *repl =
+      nir_def *repl =
          nir_iadd(b, nir_load_vertex_id_zero_base(b), nir_load_first_vertex(b));
 
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, repl);
+      nir_def_rewrite_uses(&intr->dest.ssa, repl);
       return true;
    }
 
index d477322..b3a92a7 100644 (file)
@@ -46,7 +46,7 @@ panvk_meta_clear_color_attachment_shader(struct panfrost_device *pdev,
       nir_variable_create(b.shader, nir_var_shader_out, out_type, "out");
    out->data.location = FRAG_RESULT_DATA0;
 
-   nir_ssa_def *clear_values =
+   nir_def *clear_values =
       nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = ~0);
    nir_store_var(&b, out, clear_values, 0xff);
 
index 45faf7c..670cfbd 100644 (file)
@@ -303,7 +303,7 @@ panvk_meta_copy_img2img_shader(struct panfrost_device *pdev,
       b.shader, nir_var_shader_in,
       glsl_vector_type(GLSL_TYPE_FLOAT, texdim + texisarray), "coord");
    coord_var->data.location = VARYING_SLOT_VAR0;
-   nir_ssa_def *coord = nir_f2u32(&b, nir_load_var(&b, coord_var));
+   nir_def *coord = nir_f2u32(&b, nir_load_var(&b, coord_var));
 
    nir_tex_instr *tex = nir_tex_instr_create(b.shader, is_ms ? 2 : 1);
    tex->op = is_ms ? nir_texop_txf_ms : nir_texop_txf;
@@ -340,7 +340,7 @@ panvk_meta_copy_img2img_shader(struct panfrost_device *pdev,
                      nir_alu_type_get_type_size(tex->dest_type));
    nir_builder_instr_insert(&b, &tex->instr);
 
-   nir_ssa_def *texel = &tex->dest.ssa;
+   nir_def *texel = &tex->dest.ssa;
 
    unsigned dstcompsz =
       util_format_get_component_bits(dstfmt, UTIL_FORMAT_COLORSPACE_RGB, 0);
@@ -348,11 +348,11 @@ panvk_meta_copy_img2img_shader(struct panfrost_device *pdev,
    const struct glsl_type *outtype = NULL;
 
    if (srcfmt == PIPE_FORMAT_R5G6B5_UNORM && dstfmt == PIPE_FORMAT_R8G8_UNORM) {
-      nir_ssa_def *rgb = nir_f2u32(
+      nir_def *rgb = nir_f2u32(
          &b, nir_fmul(&b, texel,
                       nir_vec3(&b, nir_imm_float(&b, 31), nir_imm_float(&b, 63),
                                nir_imm_float(&b, 31))));
-      nir_ssa_def *rg = nir_vec2(
+      nir_def *rg = nir_vec2(
          &b,
          nir_ior(&b, nir_channel(&b, rgb, 0),
                  nir_ishl(&b, nir_channel(&b, rgb, 1), nir_imm_int(&b, 5))),
@@ -363,8 +363,8 @@ panvk_meta_copy_img2img_shader(struct panfrost_device *pdev,
       outtype = glsl_vector_type(GLSL_TYPE_FLOAT, 2);
    } else if (srcfmt == PIPE_FORMAT_R8G8_UNORM &&
               dstfmt == PIPE_FORMAT_R5G6B5_UNORM) {
-      nir_ssa_def *rg = nir_f2u32(&b, nir_fmul_imm(&b, texel, 255));
-      nir_ssa_def *rgb = nir_vec3(
+      nir_def *rg = nir_f2u32(&b, nir_fmul_imm(&b, texel, 255));
+      nir_def *rgb = nir_vec3(
          &b, nir_channel(&b, rg, 0),
          nir_ior(&b, nir_ushr_imm(&b, nir_channel(&b, rg, 0), 5),
                  nir_ishl(&b, nir_channel(&b, rg, 1), nir_imm_int(&b, 3))),
@@ -402,8 +402,8 @@ panvk_meta_copy_img2img_shader(struct panfrost_device *pdev,
 
    unsigned fullmask = (1 << ndstcomps) - 1;
    if (dstcompsz > 8 && dstmask != fullmask) {
-      nir_ssa_def *oldtexel = nir_load_var(&b, out);
-      nir_ssa_def *dstcomps[4];
+      nir_def *oldtexel = nir_load_var(&b, out);
+      nir_def *dstcomps[4];
 
       for (unsigned i = 0; i < ndstcomps; i++) {
          if (dstmask & BITFIELD_BIT(i))
@@ -867,21 +867,21 @@ panvk_meta_copy_buf2img_shader(struct panfrost_device *pdev,
       nir_variable_create(b.shader, nir_var_shader_in,
                           glsl_vector_type(GLSL_TYPE_FLOAT, 3), "coord");
    coord_var->data.location = VARYING_SLOT_VAR0;
-   nir_ssa_def *coord = nir_load_var(&b, coord_var);
+   nir_def *coord = nir_load_var(&b, coord_var);
 
    coord = nir_f2u32(&b, coord);
 
-   nir_ssa_def *bufptr = panvk_meta_copy_buf2img_get_info_field(&b, buf.ptr);
-   nir_ssa_def *buflinestride =
+   nir_def *bufptr = panvk_meta_copy_buf2img_get_info_field(&b, buf.ptr);
+   nir_def *buflinestride =
       panvk_meta_copy_buf2img_get_info_field(&b, buf.stride.line);
-   nir_ssa_def *bufsurfstride =
+   nir_def *bufsurfstride =
       panvk_meta_copy_buf2img_get_info_field(&b, buf.stride.surf);
 
    unsigned imgtexelsz = util_format_get_blocksize(key.imgfmt);
    unsigned buftexelsz = panvk_meta_copy_buf_texelsize(key.imgfmt, key.mask);
    unsigned writemask = key.mask;
 
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_imul(&b, nir_channel(&b, coord, 0), nir_imm_int(&b, buftexelsz));
    offset = nir_iadd(&b, offset,
                      nir_imul(&b, nir_channel(&b, coord, 1), buflinestride));
@@ -901,7 +901,7 @@ panvk_meta_copy_buf2img_shader(struct panfrost_device *pdev,
    assert(bufcompsz == 1 || bufcompsz == 2 || bufcompsz == 4);
    assert(nbufcomps <= 4 && nimgcomps <= 4);
 
-   nir_ssa_def *texel =
+   nir_def *texel =
       nir_load_global(&b, bufptr, bufcompsz, nbufcomps, bufcompsz * 8);
 
    enum glsl_base_type basetype;
@@ -940,11 +940,11 @@ panvk_meta_copy_buf2img_shader(struct panfrost_device *pdev,
 
    if (fullmask != writemask) {
       unsigned first_written_comp = ffs(writemask) - 1;
-      nir_ssa_def *oldtexel = NULL;
+      nir_def *oldtexel = NULL;
       if (imgcompsz > 1)
          oldtexel = nir_load_var(&b, out);
 
-      nir_ssa_def *texel_comps[4];
+      nir_def *texel_comps[4];
       for (unsigned i = 0; i < nimgcomps; i++) {
          if (writemask & BITFIELD_BIT(i))
             texel_comps[i] = nir_channel(&b, texel, i - first_written_comp);
@@ -1256,23 +1256,23 @@ panvk_meta_copy_img2buf_shader(struct panfrost_device *pdev,
       "panvk_meta_copy_img2buf(dim=%dD%s,imgfmt=%s,mask=%x)", texdim,
       texisarray ? "[]" : "", util_format_name(key.imgfmt), key.mask);
 
-   nir_ssa_def *coord = nir_load_global_invocation_id(&b, 32);
-   nir_ssa_def *bufptr = panvk_meta_copy_img2buf_get_info_field(&b, buf.ptr);
-   nir_ssa_def *buflinestride =
+   nir_def *coord = nir_load_global_invocation_id(&b, 32);
+   nir_def *bufptr = panvk_meta_copy_img2buf_get_info_field(&b, buf.ptr);
+   nir_def *buflinestride =
       panvk_meta_copy_img2buf_get_info_field(&b, buf.stride.line);
-   nir_ssa_def *bufsurfstride =
+   nir_def *bufsurfstride =
       panvk_meta_copy_img2buf_get_info_field(&b, buf.stride.surf);
 
-   nir_ssa_def *imgminx =
+   nir_def *imgminx =
       panvk_meta_copy_img2buf_get_info_field(&b, img.extent.minx);
-   nir_ssa_def *imgminy =
+   nir_def *imgminy =
       panvk_meta_copy_img2buf_get_info_field(&b, img.extent.miny);
-   nir_ssa_def *imgmaxx =
+   nir_def *imgmaxx =
       panvk_meta_copy_img2buf_get_info_field(&b, img.extent.maxx);
-   nir_ssa_def *imgmaxy =
+   nir_def *imgmaxy =
       panvk_meta_copy_img2buf_get_info_field(&b, img.extent.maxy);
 
-   nir_ssa_def *imgcoords, *inbounds;
+   nir_def *imgcoords, *inbounds;
 
    switch (texdim + texisarray) {
    case 1:
@@ -1325,7 +1325,7 @@ panvk_meta_copy_img2buf_shader(struct panfrost_device *pdev,
     * This being said, compressed textures are not compatible with AFBC, so we
     * could use a compute shader arranging the blocks properly.
     */
-   nir_ssa_def *offset =
+   nir_def *offset =
       nir_imul(&b, nir_channel(&b, coord, 0), nir_imm_int(&b, buftexelsz));
    offset = nir_iadd(&b, offset,
                      nir_imul(&b, nir_channel(&b, coord, 1), buflinestride));
@@ -1365,12 +1365,12 @@ panvk_meta_copy_img2buf_shader(struct panfrost_device *pdev,
                      nir_alu_type_get_type_size(tex->dest_type));
    nir_builder_instr_insert(&b, &tex->instr);
 
-   nir_ssa_def *texel = &tex->dest.ssa;
+   nir_def *texel = &tex->dest.ssa;
 
    unsigned fullmask = (1 << util_format_get_nr_components(key.imgfmt)) - 1;
    unsigned nbufcomps = util_bitcount(fullmask);
    if (key.mask != fullmask) {
-      nir_ssa_def *bufcomps[4];
+      nir_def *bufcomps[4];
       nbufcomps = 0;
       for (unsigned i = 0; i < nimgcomps; i++) {
          if (key.mask & BITFIELD_BIT(i))
@@ -1397,7 +1397,7 @@ panvk_meta_copy_img2buf_shader(struct panfrost_device *pdev,
       nbufcomps = 1;
       nimgcomps = 1;
    } else if (imgcompsz == 1) {
-      nir_ssa_def *packed = nir_channel(&b, texel, 0);
+      nir_def *packed = nir_channel(&b, texel, 0);
       for (unsigned i = 1; i < nbufcomps; i++) {
          packed = nir_ior(
             &b, packed,
@@ -1634,13 +1634,13 @@ panvk_meta_copy_buf2buf_shader(struct panfrost_device *pdev,
       MESA_SHADER_COMPUTE, GENX(pan_shader_get_compiler_options)(),
       "panvk_meta_copy_buf2buf(blksz=%d)", blksz);
 
-   nir_ssa_def *coord = nir_load_global_invocation_id(&b, 32);
+   nir_def *coord = nir_load_global_invocation_id(&b, 32);
 
-   nir_ssa_def *offset = nir_u2u64(
+   nir_def *offset = nir_u2u64(
       &b, nir_imul(&b, nir_channel(&b, coord, 0), nir_imm_int(&b, blksz)));
-   nir_ssa_def *srcptr =
+   nir_def *srcptr =
       nir_iadd(&b, panvk_meta_copy_buf2buf_get_info_field(&b, src), offset);
-   nir_ssa_def *dstptr =
+   nir_def *dstptr =
       nir_iadd(&b, panvk_meta_copy_buf2buf_get_info_field(&b, dst), offset);
 
    unsigned compsz = blksz < 4 ? blksz : 4;
@@ -1765,14 +1765,13 @@ panvk_meta_fill_buf_shader(struct panfrost_device *pdev,
       MESA_SHADER_COMPUTE, GENX(pan_shader_get_compiler_options)(),
       "panvk_meta_fill_buf()");
 
-   nir_ssa_def *coord = nir_load_global_invocation_id(&b, 32);
+   nir_def *coord = nir_load_global_invocation_id(&b, 32);
 
-   nir_ssa_def *offset =
-      nir_u2u64(&b, nir_imul(&b, nir_channel(&b, coord, 0),
-                             nir_imm_int(&b, sizeof(uint32_t))));
-   nir_ssa_def *ptr =
+   nir_def *offset = nir_u2u64(&b, nir_imul(&b, nir_channel(&b, coord, 0),
+                                            nir_imm_int(&b, sizeof(uint32_t))));
+   nir_def *ptr =
       nir_iadd(&b, panvk_meta_fill_buf_get_info_field(&b, start), offset);
-   nir_ssa_def *val = panvk_meta_fill_buf_get_info_field(&b, val);
+   nir_def *val = panvk_meta_fill_buf_get_info_field(&b, val);
 
    nir_store_global(&b, ptr, sizeof(uint32_t), val, 1);
 
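The fill-buffer shader above is representative of how the renamed type reads in builder code. As a self-contained illustration (not part of this patch; the function name and the hardcoded 4-byte stride are ours), the same pattern with the new spelling:

   #include "nir_builder.h"

   /* Store one 32-bit word per invocation at ptr_base + 4 * id.x,
    * mirroring panvk_meta_fill_buf_shader with the renamed nir_def. */
   static void
   build_fill_word(nir_builder *b, nir_def *ptr_base, nir_def *fill_val)
   {
      nir_def *id = nir_load_global_invocation_id(b, 32);
      nir_def *offset =
         nir_u2u64(b, nir_imul_imm(b, nir_channel(b, id, 0), 4));
      nir_store_global(b, nir_iadd(b, ptr_base, offset), 4, fill_val, 1);
   }
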
index 6d9c68b..ff59938 100644
@@ -92,9 +92,9 @@ get_binding_layout(uint32_t set, uint32_t binding,
  * The load_vulkan_descriptor intrinsic exists to provide a transition point
  * between these two forms of derefs: descriptor and memory.
  */
-static nir_ssa_def *
+static nir_def *
 build_res_index(nir_builder *b, uint32_t set, uint32_t binding,
-                nir_ssa_def *array_index, nir_address_format addr_format,
+                nir_def *array_index, nir_address_format addr_format,
                 const struct apply_descriptors_ctx *ctx)
 {
    const struct panvk_descriptor_set_layout *set_layout =
@@ -164,8 +164,8 @@ build_res_index(nir_builder *b, uint32_t set, uint32_t binding,
  * vulkan_resource_index intrinsic and we have to do it based on nothing but
  * the address format.
  */
-static nir_ssa_def *
-build_res_reindex(nir_builder *b, nir_ssa_def *orig, nir_ssa_def *delta,
+static nir_def *
+build_res_reindex(nir_builder *b, nir_def *orig, nir_def *delta,
                   nir_address_format addr_format)
 {
    switch (addr_format) {
@@ -191,18 +191,17 @@ build_res_reindex(nir_builder *b, nir_ssa_def *orig, nir_ssa_def *delta,
  *
  * See build_res_index for details about each resource index format.
  */
-static nir_ssa_def *
-build_buffer_addr_for_res_index(nir_builder *b, nir_ssa_def *res_index,
+static nir_def *
+build_buffer_addr_for_res_index(nir_builder *b, nir_def *res_index,
                                 nir_address_format addr_format,
                                 const struct apply_descriptors_ctx *ctx)
 {
    switch (addr_format) {
    case nir_address_format_32bit_index_offset: {
-      nir_ssa_def *packed = nir_channel(b, res_index, 0);
-      nir_ssa_def *array_index = nir_channel(b, res_index, 1);
-      nir_ssa_def *surface_index =
-         nir_extract_u16(b, packed, nir_imm_int(b, 0));
-      nir_ssa_def *array_max = nir_extract_u16(b, packed, nir_imm_int(b, 1));
+      nir_def *packed = nir_channel(b, res_index, 0);
+      nir_def *array_index = nir_channel(b, res_index, 1);
+      nir_def *surface_index = nir_extract_u16(b, packed, nir_imm_int(b, 0));
+      nir_def *array_max = nir_extract_u16(b, packed, nir_imm_int(b, 1));
 
       if (ctx->add_bounds_checks)
          array_index = nir_umin(b, array_index, array_max);
@@ -213,14 +212,13 @@ build_buffer_addr_for_res_index(nir_builder *b, nir_ssa_def *res_index,
 
    case nir_address_format_64bit_bounded_global:
    case nir_address_format_64bit_global_32bit_offset: {
-      nir_ssa_def *packed = nir_channel(b, res_index, 0);
-      nir_ssa_def *desc_ubo_offset = nir_channel(b, res_index, 1);
-      nir_ssa_def *array_max = nir_channel(b, res_index, 2);
-      nir_ssa_def *array_index = nir_channel(b, res_index, 3);
+      nir_def *packed = nir_channel(b, res_index, 0);
+      nir_def *desc_ubo_offset = nir_channel(b, res_index, 1);
+      nir_def *array_max = nir_channel(b, res_index, 2);
+      nir_def *array_index = nir_channel(b, res_index, 3);
 
-      nir_ssa_def *desc_ubo_idx = nir_extract_u16(b, packed, nir_imm_int(b, 0));
-      nir_ssa_def *desc_ubo_stride =
-         nir_extract_u16(b, packed, nir_imm_int(b, 1));
+      nir_def *desc_ubo_idx = nir_extract_u16(b, packed, nir_imm_int(b, 0));
+      nir_def *desc_ubo_stride = nir_extract_u16(b, packed, nir_imm_int(b, 1));
 
       if (ctx->add_bounds_checks)
          array_index = nir_umin(b, array_index, array_max);
@@ -228,8 +226,8 @@ build_buffer_addr_for_res_index(nir_builder *b, nir_ssa_def *res_index,
       desc_ubo_offset = nir_iadd(b, desc_ubo_offset,
                                  nir_imul(b, array_index, desc_ubo_stride));
 
-      nir_ssa_def *desc = nir_load_ubo(b, 4, 32, desc_ubo_idx, desc_ubo_offset,
-                                       .align_mul = 16, .range = ~0);
+      nir_def *desc = nir_load_ubo(b, 4, 32, desc_ubo_idx, desc_ubo_offset,
+                                   .align_mul = 16, .range = ~0);
 
       /* The offset in the descriptor is guaranteed to be zero when it's
        * written into the descriptor set.  This lets us avoid some unnecessary
@@ -253,7 +251,7 @@ lower_res_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
    nir_address_format addr_format = addr_format_for_desc_type(desc_type, ctx);
 
-   nir_ssa_def *res;
+   nir_def *res;
    switch (intrin->intrinsic) {
    case nir_intrinsic_vulkan_resource_index:
       res = build_res_index(b, nir_intrinsic_desc_set(intrin),
@@ -277,7 +275,7 @@ lower_res_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 
    assert(intrin->dest.ssa.bit_size == res->bit_size);
    assert(intrin->dest.ssa.num_components == res->num_components);
-   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, res);
+   nir_def_rewrite_uses(&intrin->dest.ssa, res);
    nir_instr_remove(&intrin->instr);
 
    return true;
@@ -286,7 +284,7 @@ lower_res_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 static void
 get_resource_deref_binding(nir_deref_instr *deref, uint32_t *set,
                            uint32_t *binding, uint32_t *index_imm,
-                           nir_ssa_def **index_ssa)
+                           nir_def **index_ssa)
 {
    *index_imm = 0;
    *index_ssa = NULL;
@@ -307,14 +305,14 @@ get_resource_deref_binding(nir_deref_instr *deref, uint32_t *set,
    *binding = var->data.binding;
 }
 
-static nir_ssa_def *
+static nir_def *
 load_resource_deref_desc(nir_builder *b, nir_deref_instr *deref,
                          unsigned desc_offset, unsigned num_components,
                          unsigned bit_size,
                          const struct apply_descriptors_ctx *ctx)
 {
    uint32_t set, binding, index_imm;
-   nir_ssa_def *index_ssa;
+   nir_def *index_ssa;
    get_resource_deref_binding(deref, &set, &binding, &index_imm, &index_ssa);
 
    const struct panvk_descriptor_set_layout *set_layout =
@@ -330,7 +328,7 @@ load_resource_deref_desc(nir_builder *b, nir_deref_instr *deref,
       panvk_pipeline_layout_ubo_start(ctx->layout, set, false) +
       set_layout->desc_ubo_index;
 
-   nir_ssa_def *desc_ubo_offset =
+   nir_def *desc_ubo_offset =
       nir_iadd_imm(b, nir_imul_imm(b, index_ssa, bind_layout->desc_ubo_stride),
                    bind_layout->desc_ubo_offset + desc_offset);
 
@@ -343,7 +341,7 @@ load_resource_deref_desc(nir_builder *b, nir_deref_instr *deref,
                        .align_offset = (desc_offset % desc_align), .range = ~0);
 }
 
-static nir_ssa_def *
+static nir_def *
 load_tex_img_size(nir_builder *b, nir_deref_instr *deref,
                   enum glsl_sampler_dim dim,
                   const struct apply_descriptors_ctx *ctx)
@@ -351,7 +349,7 @@ load_tex_img_size(nir_builder *b, nir_deref_instr *deref,
    if (dim == GLSL_SAMPLER_DIM_BUF) {
       return load_resource_deref_desc(b, deref, 0, 1, 32, ctx);
    } else {
-      nir_ssa_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
+      nir_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
 
       /* The sizes are provided as 16-bit values with 1 subtracted so
        * convert to 32-bit and add 1.
@@ -360,23 +358,23 @@ load_tex_img_size(nir_builder *b, nir_deref_instr *deref,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 load_tex_img_levels(nir_builder *b, nir_deref_instr *deref,
                     enum glsl_sampler_dim dim,
                     const struct apply_descriptors_ctx *ctx)
 {
    assert(dim != GLSL_SAMPLER_DIM_BUF);
-   nir_ssa_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
+   nir_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
    return nir_u2u32(b, nir_iand_imm(b, nir_channel(b, desc, 3), 0xff));
 }
 
-static nir_ssa_def *
+static nir_def *
 load_tex_img_samples(nir_builder *b, nir_deref_instr *deref,
                      enum glsl_sampler_dim dim,
                      const struct apply_descriptors_ctx *ctx)
 {
    assert(dim != GLSL_SAMPLER_DIM_BUF);
-   nir_ssa_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
+   nir_def *desc = load_resource_deref_desc(b, deref, 0, 4, 16, ctx);
    return nir_u2u32(b, nir_ushr_imm(b, nir_channel(b, desc, 3), 8));
 }
 
@@ -396,7 +394,7 @@ lower_tex(nir_builder *b, nir_tex_instr *tex,
 
       const enum glsl_sampler_dim dim = tex->sampler_dim;
 
-      nir_ssa_def *res;
+      nir_def *res;
       switch (tex->op) {
       case nir_texop_txs:
          res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx),
@@ -414,7 +412,7 @@ lower_tex(nir_builder *b, nir_tex_instr *tex,
          unreachable("Unsupported texture query op");
       }
 
-      nir_ssa_def_rewrite_uses(&tex->dest.ssa, res);
+      nir_def_rewrite_uses(&tex->dest.ssa, res);
       nir_instr_remove(&tex->instr);
       return true;
    }
@@ -426,7 +424,7 @@ lower_tex(nir_builder *b, nir_tex_instr *tex,
       nir_tex_instr_remove_src(tex, sampler_src_idx);
 
       uint32_t set, binding, index_imm;
-      nir_ssa_def *index_ssa;
+      nir_def *index_ssa;
       get_resource_deref_binding(deref, &set, &binding, &index_imm, &index_ssa);
 
       const struct panvk_descriptor_set_binding_layout *bind_layout =
@@ -448,7 +446,7 @@ lower_tex(nir_builder *b, nir_tex_instr *tex,
       nir_tex_instr_remove_src(tex, tex_src_idx);
 
       uint32_t set, binding, index_imm;
-      nir_ssa_def *index_ssa;
+      nir_def *index_ssa;
       get_resource_deref_binding(deref, &set, &binding, &index_imm, &index_ssa);
 
       const struct panvk_descriptor_set_binding_layout *bind_layout =
@@ -467,12 +465,12 @@ lower_tex(nir_builder *b, nir_tex_instr *tex,
    return progress;
 }
 
-static nir_ssa_def *
+static nir_def *
 get_img_index(nir_builder *b, nir_deref_instr *deref,
               const struct apply_descriptors_ctx *ctx)
 {
    uint32_t set, binding, index_imm;
-   nir_ssa_def *index_ssa;
+   nir_def *index_ssa;
    get_resource_deref_binding(deref, &set, &binding, &index_imm, &index_ssa);
 
    const struct panvk_descriptor_set_binding_layout *bind_layout =
@@ -503,7 +501,7 @@ lower_img_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
        intr->intrinsic == nir_intrinsic_image_deref_samples) {
       const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(intr);
 
-      nir_ssa_def *res;
+      nir_def *res;
       switch (intr->intrinsic) {
       case nir_intrinsic_image_deref_size:
          res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx),
@@ -516,7 +514,7 @@ lower_img_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
          unreachable("Unsupported image query op");
       }
 
-      nir_ssa_def_rewrite_uses(&intr->dest.ssa, res);
+      nir_def_rewrite_uses(&intr->dest.ssa, res);
       nir_instr_remove(&intr->instr);
    } else {
       nir_rewrite_image_intrinsic(intr, get_img_index(b, deref, ctx), false);
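
lower_res_intrinsic and lower_img_intrinsic above both end with the same replace-and-remove idiom. A minimal sketch of that idiom after the rename (the nir_imm_int stand-in is illustrative; a real pass computes the replacement value):

   #include "nir_builder.h"

   static bool
   lower_example(nir_builder *b, nir_intrinsic_instr *intrin)
   {
      b->cursor = nir_before_instr(&intrin->instr);
      /* Build the replacement value with the renamed type. */
      nir_def *res = nir_imm_int(b, 0);
      assert(intrin->dest.ssa.bit_size == res->bit_size);
      /* Point all users at the new def, then drop the old instruction. */
      nir_def_rewrite_uses(&intrin->dest.ssa, res);
      nir_instr_remove(&intrin->instr);
      return true;
   }
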
index 0fdfd6d..5ba814c 100644
@@ -45,7 +45,7 @@
 
 #include "vk_util.h"
 
-static nir_ssa_def *
+static nir_def *
 load_sysval_from_ubo(nir_builder *b, nir_intrinsic_instr *intr, unsigned offset)
 {
    return nir_load_ubo(
@@ -70,7 +70,7 @@ panvk_lower_sysvals(nir_builder *b, nir_instr *instr, void *data)
 
    struct sysval_options *opts = data;
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-   nir_ssa_def *val = NULL;
+   nir_def *val = NULL;
    b->cursor = nir_before_instr(instr);
 
 #define SYSVAL(name) offsetof(struct panvk_sysvals, name)
@@ -116,7 +116,7 @@ panvk_lower_sysvals(nir_builder *b, nir_instr *instr, void *data)
 #undef SYSVAL
 
    b->cursor = nir_after_instr(instr);
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, val);
+   nir_def_rewrite_uses(&intr->dest.ssa, val);
    return true;
 }
 
@@ -189,13 +189,13 @@ panvk_lower_load_push_constant(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    b->cursor = nir_before_instr(instr);
-   nir_ssa_def *ubo_load = nir_load_ubo(
+   nir_def *ubo_load = nir_load_ubo(
       b, nir_dest_num_components(intr->dest), nir_dest_bit_size(intr->dest),
       nir_imm_int(b, PANVK_PUSH_CONST_UBO_INDEX), intr->src[0].ssa,
       .align_mul = nir_dest_bit_size(intr->dest) / 8, .align_offset = 0,
       .range_base = nir_intrinsic_base(intr),
       .range = nir_intrinsic_range(intr));
-   nir_ssa_def_rewrite_uses(&intr->dest.ssa, ubo_load);
+   nir_def_rewrite_uses(&intr->dest.ssa, ubo_load);
    nir_instr_remove(instr);
    return true;
 }
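
Both panvk_lower_sysvals and panvk_lower_load_push_constant above funnel through nir_load_ubo. A hedged sketch of such a sysval load with the renamed type (UBO index 0 is a placeholder, not panvk's actual binding):

   #include "nir_builder.h"

   static nir_def *
   load_sysval_word(nir_builder *b, unsigned offset)
   {
      /* 1 x 32-bit load from a driver-owned UBO; alignment follows from
       * the 4-byte offset granularity. */
      return nir_load_ubo(b, 1, 32, nir_imm_int(b, 0), nir_imm_int(b, offset),
                          .align_mul = 4, .align_offset = 0,
                          .range_base = 0, .range = ~0);
   }
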
index 20074c2..cc12371 100644
@@ -117,7 +117,7 @@ compute_off_scale(uint32_t src_level_size,
    *scale_out = dst_scale;
 }
 
-static inline nir_ssa_def *
+static inline nir_def *
 load_struct_var(nir_builder *b, nir_variable *var, uint32_t field)
 {
    nir_deref_instr *deref =
@@ -125,13 +125,13 @@ load_struct_var(nir_builder *b, nir_variable *var, uint32_t field)
    return nir_load_deref(b, deref);
 }
 
-static nir_ssa_def *
+static nir_def *
 build_tex_resolve(nir_builder *b, nir_deref_instr *t,
-                  nir_ssa_def *coord,
+                  nir_def *coord,
                   VkSampleCountFlagBits samples,
                   VkResolveModeFlagBits resolve_mode)
 {
-   nir_ssa_def *accum = nir_txf_ms_deref(b, t, coord, nir_imm_int(b, 0));
+   nir_def *accum = nir_txf_ms_deref(b, t, coord, nir_imm_int(b, 0));
    if (resolve_mode == VK_RESOLVE_MODE_SAMPLE_ZERO_BIT)
       return accum;
 
@@ -139,7 +139,7 @@ build_tex_resolve(nir_builder *b, nir_deref_instr *t,
       glsl_get_sampler_result_type(t->type);
 
    for (unsigned i = 1; i < samples; i++) {
-      nir_ssa_def *val = nir_txf_ms_deref(b, t, coord, nir_imm_int(b, i));
+      nir_def *val = nir_txf_ms_deref(b, t, coord, nir_imm_int(b, i));
       switch (resolve_mode) {
       case VK_RESOLVE_MODE_AVERAGE_BIT:
          assert(base_type == GLSL_TYPE_FLOAT);
@@ -213,28 +213,28 @@ build_blit_shader(const struct vk_meta_blit_key *key)
    nir_variable *push = nir_variable_create(b->shader, nir_var_mem_push_const,
                                             push_iface_type, "push");
 
-   nir_ssa_def *xy_xform = load_struct_var(b, push, 0);
-   nir_ssa_def *xy_off = nir_channels(b, xy_xform, 3 << 0);
-   nir_ssa_def *xy_scale = nir_channels(b, xy_xform, 3 << 2);
+   nir_def *xy_xform = load_struct_var(b, push, 0);
+   nir_def *xy_off = nir_channels(b, xy_xform, 3 << 0);
+   nir_def *xy_scale = nir_channels(b, xy_xform, 3 << 2);
 
-   nir_ssa_def *out_coord_xy = nir_load_frag_coord(b);
+   nir_def *out_coord_xy = nir_load_frag_coord(b);
    out_coord_xy = nir_trim_vector(b, out_coord_xy, 2);
-   nir_ssa_def *src_coord_xy = nir_ffma(b, out_coord_xy, xy_scale, xy_off);
+   nir_def *src_coord_xy = nir_ffma(b, out_coord_xy, xy_scale, xy_off);
 
-   nir_ssa_def *z_xform = load_struct_var(b, push, 1);
-   nir_ssa_def *out_layer = nir_load_layer_id(b);
-   nir_ssa_def *src_coord;
+   nir_def *z_xform = load_struct_var(b, push, 1);
+   nir_def *out_layer = nir_load_layer_id(b);
+   nir_def *src_coord;
    if (key->dim == GLSL_SAMPLER_DIM_3D) {
-      nir_ssa_def *z_off = nir_channel(b, z_xform, 0);
-      nir_ssa_def *z_scale = nir_channel(b, z_xform, 1);
-      nir_ssa_def *out_coord_z = nir_fadd_imm(b, nir_u2f32(b, out_layer), 0.5);
-      nir_ssa_def *src_coord_z = nir_ffma(b, out_coord_z, z_scale, z_off);
+      nir_def *z_off = nir_channel(b, z_xform, 0);
+      nir_def *z_scale = nir_channel(b, z_xform, 1);
+      nir_def *out_coord_z = nir_fadd_imm(b, nir_u2f32(b, out_layer), 0.5);
+      nir_def *src_coord_z = nir_ffma(b, out_coord_z, z_scale, z_off);
       src_coord = nir_vec3(b, nir_channel(b, src_coord_xy, 0),
                               nir_channel(b, src_coord_xy, 1),
                               src_coord_z);
    } else {
-      nir_ssa_def *arr_delta = nir_channel(b, z_xform, 2);
-      nir_ssa_def *in_layer = nir_iadd(b, out_layer, arr_delta);
+      nir_def *arr_delta = nir_channel(b, z_xform, 2);
+      nir_def *in_layer = nir_iadd(b, out_layer, arr_delta);
       if (key->dim == GLSL_SAMPLER_DIM_1D) {
          src_coord = nir_vec2(b, nir_channel(b, src_coord_xy, 0),
                                  nir_u2f32(b, in_layer));
@@ -303,7 +303,7 @@ build_blit_shader(const struct vk_meta_blit_key *key)
       texture->data.binding = aspect_to_tex_binding(aspect);
       nir_deref_instr *t = nir_build_deref_var(b, texture);
 
-      nir_ssa_def *val;
+      nir_def *val;
       if (resolve_mode == VK_RESOLVE_MODE_NONE) {
          val = nir_txl_deref(b, t, s, src_coord, nir_imm_float(b, 0));
       } else {
@@ -314,7 +314,7 @@ build_blit_shader(const struct vk_meta_blit_key *key)
 
       if (key->stencil_as_discard) {
          assert(key->aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
-         nir_ssa_def *stencil_bit = nir_channel(b, z_xform, 3);
+         nir_def *stencil_bit = nir_channel(b, z_xform, 3);
          nir_discard_if(b, nir_ieq(b, nir_iand(b, val, stencil_bit),
                                       nir_imm_int(b, 0)));
       } else {
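
The earlier hunks in this file cut off inside build_tex_resolve's switch. For context, a sketch of what an averaging resolve body does (assuming float samples, per the assert in the context; this is our reconstruction, not the file's verbatim code):

   static nir_def *
   resolve_average(nir_builder *b, nir_deref_instr *t, nir_def *coord,
                   unsigned samples)
   {
      /* Sum every sample, then scale by 1/samples. */
      nir_def *accum = nir_txf_ms_deref(b, t, coord, nir_imm_int(b, 0));
      for (unsigned i = 1; i < samples; i++)
         accum = nir_fadd(b, accum,
                          nir_txf_ms_deref(b, t, coord, nir_imm_int(b, i)));
      return nir_fmul_imm(b, accum, 1.0 / samples);
   }
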
index 950d9ff..0a64816 100644
@@ -67,7 +67,7 @@ build_clear_shader(const struct vk_meta_clear_key *key)
       nir_build_deref_struct(b, nir_build_deref_var(b, push), 0);
 
    u_foreach_bit(a, key->color_attachments_cleared) {
-      nir_ssa_def *color_value =
+      nir_def *color_value =
          nir_load_deref(b, nir_build_deref_array_imm(b, push_arr, a));
 
       const struct glsl_type *out_type;
index d0fd7fc..fd76e58 100644
@@ -79,7 +79,7 @@ vk_meta_draw_rects_vs_nir(struct vk_meta_device *device, bool use_gs)
                           use_gs ? "layer_out" : "gl_Layer");
    layer->data.location = use_gs ? VARYING_SLOT_VAR1 : VARYING_SLOT_LAYER;
 
-   nir_ssa_def *vtx = nir_load_var(b, in);
+   nir_def *vtx = nir_load_var(b, in);
    nir_store_var(b, pos, nir_vec4(b, nir_channel(b, vtx, 0),
                                      nir_channel(b, vtx, 1),
                                      nir_channel(b, vtx, 2),
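
The vertex shader above rebuilds the position channel-wise. A generic post-rename sketch of that nir_channel/nir_vec4 recombination (the 1.0 w component here is illustrative; the hunk is cut before the fourth argument):

   static nir_def *
   vec3_to_position(nir_builder *b, nir_def *v)
   {
      return nir_vec4(b, nir_channel(b, v, 0), nir_channel(b, v, 1),
                      nir_channel(b, v, 2), nir_imm_float(b, 1.0f));
   }
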
index 27d971d..487203e 100644
@@ -28,9 +28,9 @@
 
 #include <math.h>
 
-static nir_ssa_def *
+static nir_def *
 y_range(nir_builder *b,
-        nir_ssa_def *y_channel,
+        nir_def *y_channel,
         int bpc,
         VkSamplerYcbcrRange range)
 {
@@ -51,9 +51,9 @@ y_range(nir_builder *b,
    }
 }
 
-static nir_ssa_def *
+static nir_def *
 chroma_range(nir_builder *b,
-             nir_ssa_def *chroma_channel,
+             nir_def *chroma_channel,
              int bpc,
              VkSamplerYcbcrRange range)
 {
@@ -115,14 +115,14 @@ ycbcr_model_to_rgb_matrix(VkSamplerYcbcrModelConversion model)
    }
 }
 
-nir_ssa_def *
+nir_def *
 nir_convert_ycbcr_to_rgb(nir_builder *b,
                          VkSamplerYcbcrModelConversion model,
                          VkSamplerYcbcrRange range,
-                         nir_ssa_def *raw_channels,
+                         nir_def *raw_channels,
                          uint32_t *bpcs)
 {
-   nir_ssa_def *expanded_channels =
+   nir_def *expanded_channels =
       nir_vec4(b,
                chroma_range(b, nir_channel(b, raw_channels, 0), bpcs[0], range),
                y_range(b, nir_channel(b, raw_channels, 1), bpcs[1], range),
@@ -135,7 +135,7 @@ nir_convert_ycbcr_to_rgb(nir_builder *b,
    const nir_const_value_3_4 *conversion_matrix =
       ycbcr_model_to_rgb_matrix(model);
 
-   nir_ssa_def *converted_channels[] = {
+   nir_def *converted_channels[] = {
       nir_fdot(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[0])),
       nir_fdot(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[1])),
       nir_fdot(b, expanded_channels, nir_build_imm(b, 4, 32, conversion_matrix->v[2]))
@@ -148,7 +148,7 @@ nir_convert_ycbcr_to_rgb(nir_builder *b,
 
 struct ycbcr_state {
    nir_builder *builder;
-   nir_ssa_def *image_size;
+   nir_def *image_size;
    nir_tex_instr *origin_tex;
    nir_deref_instr *tex_deref;
    const struct vk_ycbcr_conversion_state *conversion;
@@ -156,7 +156,7 @@ struct ycbcr_state {
 };
 
 /* TODO: we should probably replace this with a push constant/uniform. */
-static nir_ssa_def *
+static nir_def *
 get_texture_size(struct ycbcr_state *state, nir_deref_instr *texture)
 {
    if (state->image_size)
@@ -184,10 +184,10 @@ get_texture_size(struct ycbcr_state *state, nir_deref_instr *texture)
    return state->image_size;
 }
 
-static nir_ssa_def *
+static nir_def *
 implicit_downsampled_coord(nir_builder *b,
-                           nir_ssa_def *value,
-                           nir_ssa_def *max_value,
+                           nir_def *value,
+                           nir_def *max_value,
                            int div_scale)
 {
    return nir_fadd(b,
@@ -198,15 +198,15 @@ implicit_downsampled_coord(nir_builder *b,
                                      max_value)));
 }
 
-static nir_ssa_def *
+static nir_def *
 implicit_downsampled_coords(struct ycbcr_state *state,
-                            nir_ssa_def *old_coords,
+                            nir_def *old_coords,
                             const struct vk_format_ycbcr_plane *format_plane)
 {
    nir_builder *b = state->builder;
    const struct vk_ycbcr_conversion_state *conversion = state->conversion;
-   nir_ssa_def *image_size = get_texture_size(state, state->tex_deref);
-   nir_ssa_def *comp[4] = { NULL, };
+   nir_def *image_size = get_texture_size(state, state->tex_deref);
+   nir_def *comp[4] = { NULL, };
    int c;
 
    for (c = 0; c < ARRAY_SIZE(conversion->chroma_offsets); c++) {
@@ -228,7 +228,7 @@ implicit_downsampled_coords(struct ycbcr_state *state,
    return nir_vec(b, comp, old_coords->num_components);
 }
 
-static nir_ssa_def *
+static nir_def *
 create_plane_tex_instr_implicit(struct ycbcr_state *state,
                                 uint32_t plane)
 {
@@ -365,10 +365,10 @@ lower_ycbcr_tex_instr(nir_builder *b, nir_instr *instr, void *_state)
    uint8_t y_bpc = y_format_desc->channel[0].size;
 
    /* |ycbcr_comp| holds components in the order : Cr-Y-Cb */
-   nir_ssa_def *zero = nir_imm_float(b, 0.0f);
-   nir_ssa_def *one = nir_imm_float(b, 1.0f);
+   nir_def *zero = nir_imm_float(b, 0.0f);
+   nir_def *one = nir_imm_float(b, 1.0f);
    /* Use extra 2 channels for following swizzle */
-   nir_ssa_def *ycbcr_comp[5] = { zero, zero, zero, one, zero };
+   nir_def *ycbcr_comp[5] = { zero, zero, zero, one, zero };
 
    uint8_t ycbcr_bpcs[5];
    memset(ycbcr_bpcs, y_bpc, sizeof(ycbcr_bpcs));
@@ -389,7 +389,7 @@ lower_ycbcr_tex_instr(nir_builder *b, nir_instr *instr, void *_state)
          .conversion = conversion,
          .format_ycbcr_info = format_ycbcr_info,
       };
-      nir_ssa_def *plane_sample = create_plane_tex_instr_implicit(&tex_state, p);
+      nir_def *plane_sample = create_plane_tex_instr_implicit(&tex_state, p);
 
       for (uint32_t pc = 0; pc < 4; pc++) {
          VkComponentSwizzle ycbcr_swizzle = format_plane->ycbcr_swizzle[pc];
@@ -407,7 +407,7 @@ lower_ycbcr_tex_instr(nir_builder *b, nir_instr *instr, void *_state)
    }
 
    /* Now remaps components to the order specified by the conversion. */
-   nir_ssa_def *swizzled_comp[4] = { NULL, };
+   nir_def *swizzled_comp[4] = { NULL, };
    uint32_t swizzled_bpcs[4] = { 0, };
 
    for (uint32_t i = 0; i < ARRAY_SIZE(conversion->mapping); i++) {
@@ -431,7 +431,7 @@ lower_ycbcr_tex_instr(nir_builder *b, nir_instr *instr, void *_state)
       }
    }
 
-   nir_ssa_def *result = nir_vec(b, swizzled_comp, 4);
+   nir_def *result = nir_vec(b, swizzled_comp, 4);
    if (conversion->ycbcr_model != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY) {
       result = nir_convert_ycbcr_to_rgb(b, conversion->ycbcr_model,
                                            conversion->ycbcr_range,
@@ -439,7 +439,7 @@ lower_ycbcr_tex_instr(nir_builder *b, nir_instr *instr, void *_state)
                                            swizzled_bpcs);
    }
 
-   nir_ssa_def_rewrite_uses(&tex->dest.ssa, result);
+   nir_def_rewrite_uses(&tex->dest.ssa, result);
    nir_instr_remove(&tex->instr);
 
    return true;
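
y_range and chroma_range, whose bodies the hunks above elide, expand narrow-range samples before the model matrix is applied. A sketch of the textbook ITU narrow-range luma expansion for an n-bit channel (our reconstruction of the arithmetic, not the file's verbatim code):

   static nir_def *
   expand_y_narrow(nir_builder *b, nir_def *y, int bpc)
   {
      /* Map code values [16, 235] << (bpc - 8), normalized by 2^bpc - 1,
       * onto [0.0, 1.0]. */
      const float max  = (float)((1u << bpc) - 1);
      const float lo   = 16.0f * (float)(1u << (bpc - 8));
      const float span = 219.0f * (float)(1u << (bpc - 8));
      return nir_fmul_imm(b, nir_fadd_imm(b, nir_fmul_imm(b, y, max), -lo),
                          1.0f / span);
   }
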
index 2a8442a..b17a8cb 100644
 extern "C" {
 #endif
 
-nir_ssa_def *
+nir_def *
 nir_convert_ycbcr_to_rgb(nir_builder *b,
                          VkSamplerYcbcrModelConversion model,
                          VkSamplerYcbcrRange range,
-                         nir_ssa_def *raw_channels,
+                         nir_def *raw_channels,
                          uint32_t *bpcs);
 
 struct vk_ycbcr_conversion;