/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
#ifndef NIR_H
#define NIR_H

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "util/list.h"
#include "util/log.h"
#include "util/ralloc.h"
#include "util/set.h"
#include "util/bitscan.h"
#include "util/bitset.h"
#include "util/compiler.h"
#include "util/enum_operators.h"
#include "util/macros.h"
#include "util/u_printf.h"
#include "util/format/u_format.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"
#include "compiler/shader_info.h"
#define XXH_INLINE_ALL
#include "util/xxhash.h"
#include <stdio.h>

#ifndef NDEBUG
#include "util/u_debug.h"
#endif /* NDEBUG */

#include "nir_opcodes.h"

#ifdef __cplusplus
extern "C" {
#endif
extern uint32_t nir_debug;
extern bool nir_debug_print_shader[MESA_SHADER_KERNEL + 1];

#ifndef NDEBUG
#define NIR_DEBUG(flag) unlikely(nir_debug & (NIR_DEBUG_ ## flag))
#else
#define NIR_DEBUG(flag) false
#endif
#define NIR_DEBUG_CLONE                  (1u << 0)
#define NIR_DEBUG_SERIALIZE              (1u << 1)
#define NIR_DEBUG_NOVALIDATE             (1u << 2)
#define NIR_DEBUG_VALIDATE_SSA_DOMINANCE (1u << 3)
#define NIR_DEBUG_TGSI                   (1u << 4)
#define NIR_DEBUG_PRINT_VS               (1u << 5)
#define NIR_DEBUG_PRINT_TCS              (1u << 6)
#define NIR_DEBUG_PRINT_TES              (1u << 7)
#define NIR_DEBUG_PRINT_GS               (1u << 8)
#define NIR_DEBUG_PRINT_FS               (1u << 9)
#define NIR_DEBUG_PRINT_CS               (1u << 10)
#define NIR_DEBUG_PRINT_TS               (1u << 11)
#define NIR_DEBUG_PRINT_MS               (1u << 12)
#define NIR_DEBUG_PRINT_RGS              (1u << 13)
#define NIR_DEBUG_PRINT_AHS              (1u << 14)
#define NIR_DEBUG_PRINT_CHS              (1u << 15)
#define NIR_DEBUG_PRINT_MHS              (1u << 16)
#define NIR_DEBUG_PRINT_IS               (1u << 17)
#define NIR_DEBUG_PRINT_CBS              (1u << 18)
#define NIR_DEBUG_PRINT_KS               (1u << 19)
#define NIR_DEBUG_PRINT_CONSTS           (1u << 20)
#define NIR_DEBUG_PRINT_INTERNAL         (1u << 21)
#define NIR_DEBUG_PRINT (NIR_DEBUG_PRINT_VS  | \
                         NIR_DEBUG_PRINT_TCS | \
                         NIR_DEBUG_PRINT_TES | \
                         NIR_DEBUG_PRINT_GS  | \
                         NIR_DEBUG_PRINT_FS  | \
                         NIR_DEBUG_PRINT_CS  | \
                         NIR_DEBUG_PRINT_TS  | \
                         NIR_DEBUG_PRINT_MS  | \
                         NIR_DEBUG_PRINT_RGS | \
                         NIR_DEBUG_PRINT_AHS | \
                         NIR_DEBUG_PRINT_CHS | \
                         NIR_DEBUG_PRINT_MHS | \
                         NIR_DEBUG_PRINT_IS  | \
                         NIR_DEBUG_PRINT_CBS | \
                         NIR_DEBUG_PRINT_KS)
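/* Illustrative usage (not part of this header): a pass can cheaply guard
 * debug output on one of the flags above, e.g.
 *
 *    if (NIR_DEBUG(PRINT_VS) && shader->info.stage == MESA_SHADER_VERTEX)
 *       nir_print_shader(shader, stderr);
 *
 * In NDEBUG builds NIR_DEBUG() expands to false, so the branch is compiled
 * away entirely.
 */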
#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
#define NIR_MAX_VEC_COMPONENTS 16
#define NIR_MAX_MATRIX_COLUMNS 4
#define NIR_STREAM_PACKED (1 << 8)

typedef uint16_t nir_component_mask_t;
static inline bool
nir_num_components_valid(unsigned num_components)
{
   return (num_components >= 1 &&
           num_components <= 5) ||
          num_components == 8 ||
          num_components == 16;
}

static inline nir_component_mask_t
nir_component_mask(unsigned num_components)
{
   assert(nir_num_components_valid(num_components));
   return (1u << num_components) - 1;
}
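/* For example, nir_component_mask(3) == 0x7: a mask selecting components
 * x, y, and z of a vec3.
 */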
void
nir_process_debug_variable(void);

bool nir_component_mask_can_reinterpret(nir_component_mask_t mask,
                                        unsigned old_bit_size,
                                        unsigned new_bit_size);
nir_component_mask_t
nir_component_mask_reinterpret(nir_component_mask_t mask,
                               unsigned old_bit_size,
                               unsigned new_bit_size);
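/* A sketch of what reinterpretation means: a mask of 0b11 over two 32-bit
 * components corresponds to a mask of 0b1111 over four 16-bit components,
 * since each 32-bit component maps to two 16-bit ones. Going the other
 * direction only works when the mask pairs up evenly, which is what
 * nir_component_mask_can_reinterpret() checks.
 */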
/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field, \
                        type_field, type_value)         \
static inline out_type *                                \
name(const in_type *parent)                             \
{                                                       \
   assert(parent && parent->type_field == type_value);  \
   return exec_node_data(out_type, parent, field);      \
}
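/* Example: the nir_instr_as_* downcast helpers are generated with this
 * macro, e.g.
 *
 *    NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
 *                    type, nir_instr_type_alu)
 *
 * so that nir_instr_as_alu(instr) asserts instr->type ==
 * nir_instr_type_alu and returns the enclosing nir_alu_instr.
 */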
/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   gl_state_index16 tokens[STATE_LENGTH];
} nir_state_slot;
typedef enum {
   nir_var_system_value = (1 << 0),
   nir_var_uniform = (1 << 1),
   nir_var_shader_in = (1 << 2),
   nir_var_shader_out = (1 << 3),
   nir_var_image = (1 << 4),
   /** Incoming call or ray payload data for ray-tracing shaders */
   nir_var_shader_call_data = (1 << 5),
   /** Ray hit attributes */
   nir_var_ray_hit_attrib = (1 << 6),

   /* Modes named nir_var_mem_* have explicit data layout */
   nir_var_mem_ubo = (1 << 7),
   nir_var_mem_push_const = (1 << 8),
   nir_var_mem_ssbo = (1 << 9),
   nir_var_mem_constant = (1 << 10),
   nir_var_mem_task_payload = (1 << 11),

   /* Generic modes intentionally come last. See encode_deref_modes() in
    * nir_serialize.c for more details.
    */
   nir_var_shader_temp = (1 << 12),
   nir_var_function_temp = (1 << 13),
   nir_var_mem_shared = (1 << 14),
   nir_var_mem_global = (1 << 15),

   nir_var_mem_generic = (nir_var_shader_temp |
                          nir_var_function_temp |
                          nir_var_mem_shared |
                          nir_var_mem_global),

   nir_var_read_only_modes = nir_var_shader_in | nir_var_uniform |
                             nir_var_system_value | nir_var_mem_constant |
                             nir_var_mem_ubo,

   /* Modes where vector derefs can be indexed as arrays. nir_var_shader_out
    * is only for mesh stages.
    */
   nir_var_vec_indexable_modes = nir_var_mem_ubo | nir_var_mem_ssbo |
                                 nir_var_mem_shared | nir_var_mem_global |
                                 nir_var_mem_push_const | nir_var_mem_task_payload |
                                 nir_var_shader_out,

   nir_num_variable_modes = 16,
   nir_var_all = (1 << nir_num_variable_modes) - 1,
} nir_variable_mode;
MESA_DEFINE_CPP_ENUM_BITFIELD_OPERATORS(nir_variable_mode)
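/* Since every mode is a single bit, modes compose into bitmasks. For
 * example (illustrative), a check for "any I/O variable" can be written as:
 *
 *    if (var->data.mode & (nir_var_shader_in | nir_var_shader_out))
 *       ...
 */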
typedef enum {
   nir_rounding_mode_undef = 0,
   nir_rounding_mode_rtne = 1,  /* round to nearest even */
   nir_rounding_mode_ru = 2,    /* round up */
   nir_rounding_mode_rd = 3,    /* round down */
   nir_rounding_mode_rtz = 4,   /* round towards zero */
} nir_rounding_mode;
/**
 * Ray query values that can be read from a RayQueryKHR object.
 */
typedef enum {
   nir_ray_query_value_intersection_type,
   nir_ray_query_value_intersection_t,
   nir_ray_query_value_intersection_instance_custom_index,
   nir_ray_query_value_intersection_instance_id,
   nir_ray_query_value_intersection_instance_sbt_index,
   nir_ray_query_value_intersection_geometry_index,
   nir_ray_query_value_intersection_primitive_index,
   nir_ray_query_value_intersection_barycentrics,
   nir_ray_query_value_intersection_front_face,
   nir_ray_query_value_intersection_object_ray_direction,
   nir_ray_query_value_intersection_object_ray_origin,
   nir_ray_query_value_intersection_object_to_world,
   nir_ray_query_value_intersection_world_to_object,
   nir_ray_query_value_intersection_candidate_aabb_opaque,
   nir_ray_query_value_tmin,
   nir_ray_query_value_flags,
   nir_ray_query_value_world_ray_direction,
   nir_ray_query_value_world_ray_origin,
} nir_ray_query_value;
typedef union {
   bool b;
   float f32;
   double f64;
   int8_t i8;
   uint8_t u8;
   int16_t i16;
   uint16_t u16;
   int32_t i32;
   uint32_t u32;
   int64_t i64;
   uint64_t u64;
} nir_const_value;

#define nir_const_value_to_array(arr, c, components, m) \
   {                                                    \
      for (unsigned i = 0; i < components; ++i)         \
         arr[i] = c[i].m;                               \
   }
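/* Illustrative usage: given a 4-component load_const instruction `lc`, the
 * f32 payloads can be extracted into a plain array with:
 *
 *    float f[4];
 *    nir_const_value_to_array(f, lc->value, 4, f32);
 */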
static inline nir_const_value
nir_const_value_for_raw_uint(uint64_t x, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   switch (bit_size) {
   case 1:  v.b   = x; break;
   case 8:  v.u8  = x; break;
   case 16: v.u16 = x; break;
   case 32: v.u32 = x; break;
   case 64: v.u64 = x; break;
   default:
      unreachable("Invalid bit size");
   }

   return v;
}
static inline nir_const_value
nir_const_value_for_int(int64_t i, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   assert(bit_size <= 64);
   if (bit_size < 64) {
      assert(i >= (-(1ll << (bit_size - 1))));
      assert(i < (1ll << (bit_size - 1)));
   }

   return nir_const_value_for_raw_uint(i, bit_size);
}
static inline nir_const_value
nir_const_value_for_uint(uint64_t u, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   assert(bit_size <= 64);
   if (bit_size < 64)
      assert(u < (1ull << bit_size));

   return nir_const_value_for_raw_uint(u, bit_size);
}
static inline nir_const_value
nir_const_value_for_bool(bool b, unsigned bit_size)
{
   /* Booleans use a 0/-1 convention */
   return nir_const_value_for_int(-(int)b, bit_size);
}
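/* For example, nir_const_value_for_bool(true, 32).u32 == ~0u (NIR_TRUE)
 * and nir_const_value_for_bool(false, 32).u32 == 0u (NIR_FALSE).
 */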
/* This one isn't inline because it requires half-float conversion */
nir_const_value nir_const_value_for_float(double b, unsigned bit_size);

static inline int64_t
nir_const_value_as_int(nir_const_value value, unsigned bit_size)
{
   switch (bit_size) {
   /* int1_t uses 0/-1 convention */
   case 1:  return -(int)value.b;
   case 8:  return value.i8;
   case 16: return value.i16;
   case 32: return value.i32;
   case 64: return value.i64;
   default:
      unreachable("Invalid bit size");
   }
}
static inline uint64_t
nir_const_value_as_uint(nir_const_value value, unsigned bit_size)
{
   switch (bit_size) {
   case 1:  return value.b;
   case 8:  return value.u8;
   case 16: return value.u16;
   case 32: return value.u32;
   case 64: return value.u64;
   default:
      unreachable("Invalid bit size");
   }
}
static inline bool
nir_const_value_as_bool(nir_const_value value, unsigned bit_size)
{
   int64_t i = nir_const_value_as_int(value, bit_size);

   /* Booleans of any size use 0/-1 convention */
   assert(i == 0 || i == -1);

   return i;
}

/* This one isn't inline because it requires half-float conversion */
double nir_const_value_as_float(nir_const_value value, unsigned bit_size);
typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable. Constants may be
    * scalars, vectors, or matrices.
    */
   nir_const_value values[NIR_MAX_VEC_COMPONENTS];

   /* We could get this from the var->type, but it makes clone *much* easier
    * to not have to care about the type.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;
/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be
 * redeclared with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;
/**
 * Enum keeping track of how a variable was declared.
 */
typedef enum {
   /**
    * Normal declaration.
    */
   nir_var_declared_normally = 0,

   /**
    * Variable is implicitly generated by the compiler and should not be
    * visible via the API.
    */
   nir_var_hidden,
} nir_var_declaration_type;
/**
 * Either a uniform, global variable, shader input, or shader output. Based on
 * ir_variable - it should be easy to translate between the two.
 */
typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      unsigned mode:16;

      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned centroid:1;
      unsigned sample:1;
      unsigned patch:1;
      unsigned invariant:1;

      /**
       * Is the variable a ray query?
       */
      unsigned ray_query:1;

      /**
       * Precision qualifier.
       *
       * In desktop GLSL we do not care about precision qualifiers at all, in
       * fact, the spec says that precision qualifiers are ignored.
       *
       * To make things easy, we make it so that this field is always
       * GLSL_PRECISION_NONE on desktop shaders. This way all the variables
       * have the same precision value and the checks we add in the compiler
       * for this field will never break a desktop shader compile.
       */
      unsigned precision:2;

      /**
       * Has this variable been statically assigned?
       *
       * This answers whether the variable was assigned in any path of
       * the shader during ast_to_hir. This doesn't answer whether it is
       * still written after dead code removal, nor is it maintained in
       * non-ast_to_hir.cpp (GLSL parsing) paths.
       */
      unsigned assigned:1;

      /**
       * Can this variable be coalesced with another?
       *
       * This is set by nir_lower_io_to_temporaries to say that any
       * copies involving this variable should stay put. Propagating it can
       * duplicate the resulting load/store, which is not wanted, and may
       * result in a load/store of the variable with an indirect offset which
       * the backend may not be able to handle.
       */
      unsigned cannot_coalesce:1;

      /**
       * When separate shader programs are enabled, only input/outputs between
       * the stages of a multi-stage separate program can be safely removed
       * from the shader interface. Other input/outputs must remain active.
       *
       * This is also used to make sure xfb varyings that are unused by the
       * fragment shader are not removed.
       */
      unsigned always_active_io:1;

      /**
       * Interpolation mode for shader inputs / outputs
       *
       * \sa glsl_interp_mode
       */
      unsigned interpolation:3;

      /**
       * If non-zero, then this variable may be packed along with other variables
       * into a single varying slot, so this offset should be applied when
       * accessing components. For example, an offset of 1 means that the x
       * component of this variable is actually stored in component y of the
       * location specified by \c location.
       */
      unsigned location_frac:2;

      /**
       * If true, this variable represents an array of scalars that should
       * be tightly packed. In other words, consecutive array elements
       * should be stored one component apart, rather than one slot apart.
       */
      unsigned compact:1;

      /**
       * Whether this is a fragment shader output implicitly initialized with
       * the previous contents of the specified render target at the
       * framebuffer location corresponding to this shader invocation.
       */
      unsigned fb_fetch_output:1;

      /**
       * Non-zero if this variable is considered bindless as defined by
       * ARB_bindless_texture.
       */
      unsigned bindless:1;

      /**
       * Was an explicit binding set in the shader?
       */
      unsigned explicit_binding:1;

      /**
       * Was the location explicitly set in the shader?
       *
       * If the location is explicitly set in the shader, it \b cannot be changed
       * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
       * no effect).
       */
      unsigned explicit_location:1;

      /**
       * Is this varying used by transform feedback?
       *
       * This is used by the linker to decide if it's safe to pack the varying.
       */
      unsigned is_xfb:1;

      /**
       * Is this varying used only by transform feedback?
       *
       * This is used by the linker to decide if it's safe to pack the varying.
       */
      unsigned is_xfb_only:1;

      /**
       * Was a transform feedback buffer set in the shader?
       */
      unsigned explicit_xfb_buffer:1;

      /**
       * Was a transform feedback stride set in the shader?
       */
      unsigned explicit_xfb_stride:1;

      /**
       * Was an explicit offset set in the shader?
       */
      unsigned explicit_offset:1;

      /**
       * Layout of the matrix. Uses glsl_matrix_layout values.
       */
      unsigned matrix_layout:2;

      /**
       * Non-zero if this variable was created by lowering a named interface
       * block.
       */
      unsigned from_named_ifc_block:1;

      /**
       * Non-zero if the variable must be a shader input. This is useful for
       * constraints on function parameters.
       */
      unsigned must_be_shader_input:1;

      /**
       * How the variable was declared. See nir_var_declaration_type.
       *
       * This is used to detect variables generated by the compiler, so should
       * not be visible via the API.
       */
      unsigned how_declared:2;
      /**
       * Is this variable per-view? If so, we know it must be an array with
       * size corresponding to the number of views.
       */
      unsigned per_view:1;

      /**
       * Whether the variable is per-primitive.
       * Can be used by Mesh Shader outputs and corresponding Fragment Shader inputs.
       */
      unsigned per_primitive:1;

      /**
       * \brief Layout qualifier for gl_FragDepth. See nir_depth_layout.
       *
       * This is not equal to \c ir_depth_layout_none if and only if this
       * variable is \c gl_FragDepth and a layout qualifier is specified.
       */
      unsigned depth_layout:3;

      /**
       * Vertex stream output identifier.
       *
       * For packed outputs, NIR_STREAM_PACKED is set and bits [2*i+1,2*i]
       * indicate the stream of the i-th component.
       */
      unsigned stream:9;

      /**
       * See gl_access_qualifier.
       *
       * Access flags for memory variables (SSBO/global), image uniforms, and
       * bindless images in uniforms/inputs/outputs.
       */
      unsigned access:9;

      /**
       * Descriptor set binding for sampler or UBO.
       */
      unsigned descriptor_set:5;

      /**
       * Output index for dual source blending.
       */
      unsigned index;

      /**
       * Initial binding point for a sampler or UBO.
       *
       * For array types, this represents the binding point for the first element.
       */
      unsigned binding;

      /**
       * Storage location of the base of this variable
       *
       * The precise meaning of this field depends on the nature of the variable.
       *
       *   - Vertex shader input: one of the values from \c gl_vert_attrib.
       *   - Vertex shader output: one of the values from \c gl_varying_slot.
       *   - Geometry shader input: one of the values from \c gl_varying_slot.
       *   - Geometry shader output: one of the values from \c gl_varying_slot.
       *   - Fragment shader input: one of the values from \c gl_varying_slot.
       *   - Fragment shader output: one of the values from \c gl_frag_result.
       *   - Task shader output: one of the values from \c gl_varying_slot.
       *   - Mesh shader input: one of the values from \c gl_varying_slot.
       *   - Mesh shader output: one of the values from \c gl_varying_slot.
       *   - Uniforms: Per-stage uniform slot number for default uniform block.
       *   - Uniforms: Index within the uniform block definition for UBO members.
       *   - Non-UBO Uniforms: uniform slot number.
       *   - Other: This field is not currently used.
       *
       * If the variable is a uniform, shader input, or shader output, and the
       * slot has not been assigned, the value will be -1.
       */
      int location;

      /**
       * The actual location of the variable in the IR. Only valid for inputs,
       * outputs, uniforms (including samplers and images), and for UBO and SSBO
       * variables in GLSL.
       */
      unsigned driver_location;

      /**
       * Location an atomic counter or transform feedback is stored at.
       */
      unsigned offset;

      /** Image internal format if specified explicitly, otherwise PIPE_FORMAT_NONE. */
      enum pipe_format format;

      union {
         struct {
            /**
             * For OpenCL inline samplers. See cl_sampler_addressing_mode and cl_sampler_filter_mode
             */
            unsigned is_inline_sampler : 1;
            unsigned addressing_mode : 3;
            unsigned normalized_coordinates : 1;
            unsigned filter_mode : 1;
         } sampler;

         struct {
            /**
             * Transform feedback buffer.
             */
            uint16_t buffer:2;

            /**
             * Transform feedback stride.
             */
            uint16_t stride;
         } xfb;
      };
   } data;
   /**
    * Identifier for this variable generated by nir_index_vars() that is unique
    * among other variables in the same exec_list.
    */
   unsigned index;

   /* Number of nir_variable_data members */
   uint16_t num_members;

   /**
    * Built-in state that backs this uniform
    *
    * Once set at variable creation, \c state_slots must remain invariant.
    * This is because, ideally, this array would be shared by all clones of
    * this variable in the IR tree. In other words, we'd really like for it
    * to be a fly-weight.
    *
    * If the variable is not a uniform, \c num_state_slots will be zero and
    * \c state_slots will be \c NULL.
    */
   uint16_t num_state_slots;    /**< Number of state slots used */
   nir_state_slot *state_slots; /**< State descriptors. */

   /**
    * Constant expression assigned in the initializer of the variable
    *
    * This field should only be used temporarily by creators of NIR shaders
    * and then nir_lower_variable_initializers can be used to get rid of them.
    * Most of the rest of NIR ignores this field or asserts that it's NULL.
    */
   nir_constant *constant_initializer;

   /**
    * Global variable assigned in the initializer of the variable
    *
    * This field should only be used temporarily by creators of NIR shaders
    * and then nir_lower_variable_initializers can be used to get rid of them.
    * Most of the rest of NIR ignores this field or asserts that it's NULL.
    */
   struct nir_variable *pointer_initializer;

   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;

   /**
    * Description of per-member data for per-member struct variables
    *
    * This is used for variables which are actually an amalgamation of
    * multiple entities such as a struct of built-in values or a struct of
    * inputs each with their own layout specifier. This is only allowed on
    * variables with a struct or array of array of struct type.
    */
   struct nir_variable_data *members;
} nir_variable;
static inline bool
_nir_shader_variable_has_mode(nir_variable *var, unsigned modes)
{
   /* This isn't a shader variable */
   assert(!(modes & nir_var_function_temp));
   return var->data.mode & modes;
}
#define nir_foreach_variable_in_list(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_in_list_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)

#define nir_foreach_variable_in_shader(var, shader) \
   nir_foreach_variable_in_list(var, &(shader)->variables)

#define nir_foreach_variable_in_shader_safe(var, shader) \
   nir_foreach_variable_in_list_safe(var, &(shader)->variables)

#define nir_foreach_variable_with_modes(var, shader, modes) \
   nir_foreach_variable_in_shader(var, shader)              \
      if (_nir_shader_variable_has_mode(var, modes))

#define nir_foreach_variable_with_modes_safe(var, shader, modes) \
   nir_foreach_variable_in_shader_safe(var, shader)              \
      if (_nir_shader_variable_has_mode(var, modes))

#define nir_foreach_shader_in_variable(var, shader) \
   nir_foreach_variable_with_modes(var, shader, nir_var_shader_in)

#define nir_foreach_shader_in_variable_safe(var, shader) \
   nir_foreach_variable_with_modes_safe(var, shader, nir_var_shader_in)

#define nir_foreach_shader_out_variable(var, shader) \
   nir_foreach_variable_with_modes(var, shader, nir_var_shader_out)

#define nir_foreach_shader_out_variable_safe(var, shader) \
   nir_foreach_variable_with_modes_safe(var, shader, nir_var_shader_out)

#define nir_foreach_uniform_variable(var, shader) \
   nir_foreach_variable_with_modes(var, shader, nir_var_uniform)

#define nir_foreach_uniform_variable_safe(var, shader) \
   nir_foreach_variable_with_modes_safe(var, shader, nir_var_uniform)

#define nir_foreach_image_variable(var, shader) \
   nir_foreach_variable_with_modes(var, shader, nir_var_image)

#define nir_foreach_image_variable_safe(var, shader) \
   nir_foreach_variable_with_modes_safe(var, shader, nir_var_image)
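/* Illustrative usage of these iterators: counting the attribute slots
 * consumed by a shader's inputs:
 *
 *    unsigned slots = 0;
 *    nir_foreach_shader_in_variable(var, shader)
 *       slots += glsl_count_attribute_slots(var->type, false);
 */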
static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_function_temp;
}
typedef struct nir_register {
   struct exec_node node;

   unsigned num_components;  /**< number of vector components */
   unsigned num_array_elems; /**< size of array (0 for no array) */

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /**
    * True if this register may have different values in different SIMD
    * invocations of the shader.
    */
   bool divergent;

   /** generic register index. */
   unsigned index;

   /** set of nir_srcs where this register is used (read from) */
   struct list_head uses;

   /** set of nir_dests where this register is defined (written to) */
   struct list_head defs;
} nir_register;

#define nir_foreach_register(reg, reg_list) \
   foreach_list_typed(nir_register, reg, node, reg_list)
#define nir_foreach_register_safe(reg, reg_list) \
   foreach_list_typed_safe(nir_register, reg, node, reg_list)
typedef enum PACKED {
   nir_instr_type_alu,
   nir_instr_type_deref,
   nir_instr_type_call,
   nir_instr_type_tex,
   nir_instr_type_intrinsic,
   nir_instr_type_load_const,
   nir_instr_type_jump,
   nir_instr_type_ssa_undef,
   nir_instr_type_phi,
   nir_instr_type_parallel_copy,
} nir_instr_type;
typedef struct nir_instr {
   struct exec_node node;
   struct nir_block *block;
   nir_instr_type type;

   /* A temporary for optimization and analysis passes to use for storing
    * flags. For instance, DCE uses this to store the "dead/live" info.
    */
   uint8_t pass_flags;

   /** generic instruction index. */
   uint32_t index;
} nir_instr;
static inline nir_instr *
nir_instr_next(nir_instr *instr)
{
   struct exec_node *next = exec_node_get_next(&instr->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_instr, next, node);
}

static inline nir_instr *
nir_instr_prev(nir_instr *instr)
{
   struct exec_node *prev = exec_node_get_prev(&instr->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_instr, prev, node);
}
static inline bool
nir_instr_is_first(const nir_instr *instr)
{
   return exec_node_is_head_sentinel(exec_node_get_prev_const(&instr->node));
}

static inline bool
nir_instr_is_last(const nir_instr *instr)
{
   return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node));
}
typedef struct nir_ssa_def {
   /** Instruction which produces this SSA value. */
   nir_instr *parent_instr;

   /** set of nir_srcs where this SSA value is used (read from) */
   struct list_head uses;

   /** generic SSA definition index. */
   unsigned index;

   uint8_t num_components;

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /**
    * True if this SSA value may have different values in different SIMD
    * invocations of the shader. This is set by nir_divergence_analysis.
    */
   bool divergent;
} nir_ssa_def;
typedef struct nir_reg_src {
   nir_register *reg;
   struct nir_src *indirect; /**< NULL for no indirect offset */
   unsigned base_offset;

   /* TODO use-def chain goes here */
} nir_reg_src;

typedef struct nir_reg_dest {
   nir_register *reg;
   nir_instr *parent_instr;
   struct list_head def_link;

   struct nir_src *indirect; /**< NULL for no indirect offset */
   unsigned base_offset;

   /* TODO def-use chain goes here */
} nir_reg_dest;
typedef struct nir_src {
   union {
      /** Instruction that consumes this value as a source. */
      nir_instr *parent_instr;
      struct nir_if *parent_if;
   };

   struct list_head use_link;

   union {
      nir_reg_src reg;
      nir_ssa_def *ssa;
   };

   bool is_ssa;
   bool is_if;
} nir_src;
static inline void
nir_src_set_parent_instr(nir_src *src, nir_instr *parent_instr)
{
   src->is_if = false;
   src->parent_instr = parent_instr;
}

static inline void
nir_src_set_parent_if(nir_src *src, struct nir_if *parent_if)
{
   src->is_if = true;
   src->parent_if = parent_if;
}

static inline nir_src
nir_src_init(void)
{
   nir_src src = { { NULL } };
   return src;
}

#define NIR_SRC_INIT nir_src_init()
#define nir_foreach_use_including_if(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use_including_if_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use(src, reg_or_ssa_def)            \
   nir_foreach_use_including_if(src, reg_or_ssa_def)    \
      if (!src->is_if)

#define nir_foreach_use_safe(src, reg_or_ssa_def)            \
   nir_foreach_use_including_if_safe(src, reg_or_ssa_def)    \
      if (!src->is_if)

#define nir_foreach_if_use(src, reg_or_ssa_def)         \
   nir_foreach_use_including_if(src, reg_or_ssa_def)    \
      if (src->is_if)

#define nir_foreach_if_use_safe(src, reg_or_ssa_def)         \
   nir_foreach_use_including_if_safe(src, reg_or_ssa_def)    \
      if (src->is_if)
static inline bool
nir_ssa_def_used_by_if(const nir_ssa_def *def)
{
   nir_foreach_if_use(_, def)
      return true;

   return false;
}
typedef struct nir_dest {
   union {
      nir_reg_dest reg;
      nir_ssa_def ssa;
   };

   bool is_ssa;
} nir_dest;

static inline nir_dest
nir_dest_init(void)
{
   nir_dest dest = { { { NULL } } };
   return dest;
}

#define NIR_DEST_INIT nir_dest_init()

#define nir_foreach_def(dest, reg) \
   list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)

#define nir_foreach_def_safe(dest, reg) \
   list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
static inline nir_src
nir_src_for_ssa(nir_ssa_def *def)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = true;
   src.ssa = def;

   return src;
}

static inline nir_src
nir_src_for_reg(nir_register *reg)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = false;
   src.reg.reg = reg;
   src.reg.indirect = NULL;
   src.reg.base_offset = 0;

   return src;
}

static inline nir_dest
nir_dest_for_reg(nir_register *reg)
{
   nir_dest dest = NIR_DEST_INIT;

   dest.reg.reg = reg;

   return dest;
}
static inline unsigned
nir_src_bit_size(nir_src src)
{
   return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}

static inline unsigned
nir_src_num_components(nir_src src)
{
   return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
}

static inline bool
nir_src_is_const(nir_src src)
{
   return src.is_ssa &&
          src.ssa->parent_instr->type == nir_instr_type_load_const;
}

static inline bool
nir_src_is_undef(nir_src src)
{
   return src.is_ssa &&
          src.ssa->parent_instr->type == nir_instr_type_ssa_undef;
}

static inline bool
nir_src_is_divergent(nir_src src)
{
   return src.is_ssa ? src.ssa->divergent : src.reg.reg->divergent;
}
static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}

static inline unsigned
nir_dest_num_components(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.num_components : dest.reg.reg->num_components;
}

static inline bool
nir_dest_is_divergent(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.divergent : dest.reg.reg->divergent;
}
/* Are all components the same, ie. .xxxx */
static inline bool
nir_is_same_comp_swizzle(uint8_t *swiz, unsigned nr_comp)
{
   for (unsigned i = 1; i < nr_comp; i++)
      if (swiz[i] != swiz[0])
         return false;
   return true;
}

/* Are all components sequential, ie. .yzw */
static inline bool
nir_is_sequential_comp_swizzle(uint8_t *swiz, unsigned nr_comp)
{
   for (unsigned i = 1; i < nr_comp; i++)
      if (swiz[i] != (swiz[0] + i))
         return false;
   return true;
}
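/* For example, with nr_comp == 3, swiz = {1, 1, 1} (.yyy) passes
 * nir_is_same_comp_swizzle() while swiz = {1, 2, 3} (.yzw) passes
 * nir_is_sequential_comp_swizzle().
 */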
void nir_src_copy(nir_src *dest, const nir_src *src, nir_instr *instr);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
typedef struct nir_alu_src {
   /** Base source */
   nir_src src;

   /**
    * \name input modifiers
    */
   /*@{*/
   /**
    * For inputs interpreted as floating point, flips the sign bit. For
    * inputs interpreted as integers, performs the two's complement negation.
    */
   bool negate;

   /**
    * Clears the sign bit for floating point values, and computes the integer
    * absolute value for integers. Note that the negate modifier acts after
    * the absolute value modifier, therefore if both are set then all inputs
    * will become negative.
    */
   bool abs;
   /*@}*/

   /**
    * For each input component, says which component of the register it is
    * actually reading from.
    *
    * Note that which elements of the swizzle are used and which are ignored
    * are based on the write mask for most opcodes - for example, a statement
    * like "foo.xzw = bar.zyx" would have a writemask of 1101b and a swizzle
    * of {2, 1, x, 0} where x means "don't care."
    */
   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS];
} nir_alu_src;
typedef struct nir_alu_dest {
   /** Base destination */
   nir_dest dest;

   /**
    * Saturate output modifier
    *
    * Only valid for opcodes that output floating-point numbers. Clamps the
    * output to between 0.0 and 1.0 inclusive.
    */
   bool saturate;

   /**
    * Write-mask
    *
    * Ignored if dest.is_ssa is true
    */
   nir_component_mask_t write_mask;
} nir_alu_dest;
/** NIR sized and unsized types
 *
 * The values in this enum are carefully chosen so that the sized type is
 * just the unsized type OR the number of bits.
 */
typedef enum PACKED {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_int = 2,
   nir_type_uint = 4,
   nir_type_bool = 6,
   nir_type_float = 128,
   nir_type_bool1 = 1 | nir_type_bool,
   nir_type_bool8 = 8 | nir_type_bool,
   nir_type_bool16 = 16 | nir_type_bool,
   nir_type_bool32 = 32 | nir_type_bool,
   nir_type_int1 = 1 | nir_type_int,
   nir_type_int8 = 8 | nir_type_int,
   nir_type_int16 = 16 | nir_type_int,
   nir_type_int32 = 32 | nir_type_int,
   nir_type_int64 = 64 | nir_type_int,
   nir_type_uint1 = 1 | nir_type_uint,
   nir_type_uint8 = 8 | nir_type_uint,
   nir_type_uint16 = 16 | nir_type_uint,
   nir_type_uint32 = 32 | nir_type_uint,
   nir_type_uint64 = 64 | nir_type_uint,
   nir_type_float16 = 16 | nir_type_float,
   nir_type_float32 = 32 | nir_type_float,
   nir_type_float64 = 64 | nir_type_float,
} nir_alu_type;
#define NIR_ALU_TYPE_SIZE_MASK 0x79
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x86

static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline nir_alu_type
nir_alu_type_get_base_type(nir_alu_type type)
{
   return (nir_alu_type)(type & NIR_ALU_TYPE_BASE_TYPE_MASK);
}
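/* The bit encoding is what makes these masks work. For example,
 * nir_type_float16 == (16 | nir_type_float) == 0x90, so
 * nir_alu_type_get_type_size(nir_type_float16) == 16 and
 * nir_alu_type_get_base_type(nir_type_float16) == nir_type_float.
 */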
nir_alu_type
nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type);

static inline nir_alu_type
nir_get_nir_type_for_glsl_type(const struct glsl_type *type)
{
   return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type));
}

enum glsl_base_type
nir_get_glsl_base_type_for_nir_type(nir_alu_type base_type);

nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
                              nir_rounding_mode rnd);

nir_op
nir_op_vec(unsigned components);

bool
nir_op_is_vec(nir_op op);
static inline bool
nir_is_float_control_signed_zero_inf_nan_preserve(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64);
}

static inline bool
nir_is_denorm_flush_to_zero(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64);
}

static inline bool
nir_is_denorm_preserve(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP64);
}

static inline bool
nir_is_rounding_mode_rtne(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
}

static inline bool
nir_is_rounding_mode_rtz(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
}

static inline bool
nir_has_any_rounding_mode_rtz(unsigned execution_mode)
{
   return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
}

static inline bool
nir_has_any_rounding_mode_rtne(unsigned execution_mode)
{
   return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
}
static inline nir_rounding_mode
nir_get_rounding_mode_from_float_controls(unsigned execution_mode,
                                          nir_alu_type type)
{
   if (nir_alu_type_get_base_type(type) != nir_type_float)
      return nir_rounding_mode_undef;

   unsigned bit_size = nir_alu_type_get_type_size(type);

   if (nir_is_rounding_mode_rtz(execution_mode, bit_size))
      return nir_rounding_mode_rtz;
   if (nir_is_rounding_mode_rtne(execution_mode, bit_size))
      return nir_rounding_mode_rtne;
   return nir_rounding_mode_undef;
}

static inline bool
nir_has_any_rounding_mode_enabled(unsigned execution_mode)
{
   bool result =
      nir_has_any_rounding_mode_rtne(execution_mode) ||
      nir_has_any_rounding_mode_rtz(execution_mode);
   return result;
}
typedef enum {
   /**
    * Operation where the first two sources are commutative.
    *
    * For 2-source operations, this is just mathematical commutativity. Some
    * 3-source operations, like ffma, are only commutative in the first two
    * sources.
    */
   NIR_OP_IS_2SRC_COMMUTATIVE = (1 << 0),

   /**
    * Operation is associative
    */
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),

   /**
    * Operation where src[0] is used to select src[1] on true or src[2] false.
    * src[0] may be Boolean, or it may be another type used in an implicit
    * comparison.
    */
   NIR_OP_IS_SELECTION = (1 << 2),
} nir_op_algebraic_property;
/* vec16 is the widest ALU op in NIR, so the maximum number of inputs of an
 * ALU instruction is the same as NIR_MAX_VEC_COMPONENTS.
 */
#define NIR_ALU_MAX_INPUTS NIR_MAX_VEC_COMPONENTS
typedef struct nir_op_info {
   /** Name of the NIR ALU opcode */
   const char *name;

   /** Number of inputs (sources) */
   uint8_t num_inputs;

   /**
    * The number of components in the output
    *
    * If non-zero, this is the size of the output and input sizes are
    * explicitly given; swizzle and writemask are still in effect, but if
    * the output component is masked out, then the input component may
    * still be in use.
    *
    * If zero, the opcode acts in the standard, per-component manner; the
    * operation is performed on each component (except the ones that are
    * masked out) with the input being taken from the input swizzle for
    * that component.
    *
    * The size of some of the inputs may be given (i.e. non-zero) even
    * though output_size is zero; in that case, the inputs with a zero
    * size act per-component, while the inputs with non-zero size don't.
    */
   uint8_t output_size;

   /**
    * The type of vector that the instruction outputs. Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */
   nir_alu_type output_type;

   /**
    * The number of components in each input
    *
    * See nir_op_infos::output_size for more detail about the relationship
    * between input and output sizes.
    */
   uint8_t input_sizes[NIR_ALU_MAX_INPUTS];

   /**
    * The type of vector that each input takes. Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
   nir_alu_type input_types[NIR_ALU_MAX_INPUTS];

   /** Algebraic properties of this opcode */
   nir_op_algebraic_property algebraic_properties;

   /** Whether this represents a numeric conversion opcode */
   bool is_conversion;
} nir_op_info;

/** Metadata for each nir_op, indexed by opcode */
extern const nir_op_info nir_op_infos[nir_num_opcodes];
static inline bool
nir_op_is_selection(nir_op op)
{
   return (nir_op_infos[op].algebraic_properties & NIR_OP_IS_SELECTION) != 0;
}
typedef struct nir_alu_instr {
   /** Base instruction */
   nir_instr instr;

   /** Opcode */
   nir_op op;

   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either. This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact:1;

   /**
    * Indicates that this instruction does not cause signed integer wrapping
    * to occur, in the form of overflow or underflow.
    */
   bool no_signed_wrap:1;

   /**
    * Indicates that this instruction does not cause unsigned integer wrapping
    * to occur, in the form of overflow or underflow.
    */
   bool no_unsigned_wrap:1;

   /** Destination */
   nir_alu_dest dest;

   /** Sources
    *
    * The size of the array is given by nir_op_info::num_inputs.
    */
   nir_alu_src src[];
} nir_alu_instr;
void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                      nir_alu_instr *instr);
void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                       nir_alu_instr *instr);

bool nir_alu_instr_is_copy(nir_alu_instr *instr);

/* is this source channel used? */
bool
nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
                           unsigned channel);
nir_component_mask_t
nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src);

/**
 * Get the number of channels used for a source
 */
unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src);

bool
nir_alu_instr_is_comparison(const nir_alu_instr *instr);

bool nir_const_value_negative_equal(nir_const_value c1, nir_const_value c2,
                                    nir_alu_type full_type);

bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                        unsigned src1, unsigned src2);

bool nir_alu_srcs_negative_equal(const nir_alu_instr *alu1,
                                 const nir_alu_instr *alu2,
                                 unsigned src1, unsigned src2);

bool nir_alu_src_is_trivial_ssa(const nir_alu_instr *alu, unsigned srcn);
typedef enum {
   nir_deref_type_var,
   nir_deref_type_array,
   nir_deref_type_array_wildcard,
   nir_deref_type_ptr_as_array,
   nir_deref_type_struct,
   nir_deref_type_cast,
} nir_deref_type;
typedef struct nir_deref_instr {
   nir_instr instr;

   /** The type of this deref instruction */
   nir_deref_type deref_type;

   /** Bitmask of what modes the underlying variable might be
    *
    * For OpenCL-style generic pointers, we may not know exactly what mode it
    * is at any given point in time in the compile process. This bitfield
    * contains the set of modes which it MAY be.
    *
    * Generally, this field should not be accessed directly. Use one of the
    * nir_deref_mode_ helpers instead.
    */
   nir_variable_mode modes;

   /** The dereferenced type of the resulting pointer value */
   const struct glsl_type *type;

   union {
      /** Variable being dereferenced if deref_type is a deref_var */
      nir_variable *var;

      /** Parent deref if deref_type is not deref_var */
      nir_src parent;
   };

   /** Additional deref parameters */
   union {
      struct {
         nir_src index;
         bool in_bounds;
      } arr;

      struct {
         unsigned index;
      } strct;

      struct {
         unsigned ptr_stride;
         unsigned align_mul;
         unsigned align_offset;
      } cast;
   };

   /** Destination to store the resulting "pointer" */
   nir_dest dest;
} nir_deref_instr;
/** Returns true if deref might have one of the given modes
 *
 * For multi-mode derefs, this returns true if any of the deref's possible
 * modes is one of the specified modes. This function returning true does
 * NOT mean that the deref definitely has one of those modes. It simply
 * means that, with the best information we have at the time, it might.
 */
static inline bool
nir_deref_mode_may_be(const nir_deref_instr *deref, nir_variable_mode modes)
{
   assert(!(modes & ~nir_var_all));
   assert(deref->modes != 0);
   return deref->modes & modes;
}
/** Returns true if deref must have one of the given modes
 *
 * For multi-mode derefs, this returns true if NIR can prove that the given
 * deref has one of the specified modes. This function returning false does
 * NOT mean that the deref doesn't have one of the given modes. It very well
 * may have one of those modes, we just don't have enough information to
 * prove that it does for sure.
 */
static inline bool
nir_deref_mode_must_be(const nir_deref_instr *deref, nir_variable_mode modes)
{
   assert(!(modes & ~nir_var_all));
   assert(deref->modes != 0);
   return !(deref->modes & ~modes);
}
/** Returns true if deref has the given mode
 *
 * This returns true if the deref has exactly the mode specified. If the
 * deref may have that mode but may also have a different mode (i.e. modes has
 * multiple bits set), this will assert-fail.
 *
 * If you're confused about which nir_deref_mode_ helper to use, use this one
 * or nir_deref_mode_is_one_of below.
 */
static inline bool
nir_deref_mode_is(const nir_deref_instr *deref, nir_variable_mode mode)
{
   assert(util_bitcount(mode) == 1 && (mode & nir_var_all));
   assert(deref->modes != 0);

   /* This is only for "simple" cases so, if modes might interact with this
    * deref then the deref has to have a single mode.
    */
   if (nir_deref_mode_may_be(deref, mode)) {
      assert(util_bitcount(deref->modes) == 1);
      assert(deref->modes == mode);
   }

   return deref->modes == mode;
}
/** Returns true if deref has one of the given modes
 *
 * This returns true if the deref has exactly one possible mode and that mode
 * is one of the modes specified. If the deref may have one of those modes
 * but may also have a different mode (i.e. modes has multiple bits set), this
 * will assert-fail.
 */
static inline bool
nir_deref_mode_is_one_of(const nir_deref_instr *deref, nir_variable_mode modes)
{
   /* This is only for "simple" cases so, if modes might interact with this
    * deref then the deref has to have a single mode.
    */
   if (nir_deref_mode_may_be(deref, modes)) {
      assert(util_bitcount(deref->modes) == 1);
      assert(nir_deref_mode_must_be(deref, modes));
   }

   return nir_deref_mode_may_be(deref, modes);
}
/** Returns true if deref's possible modes lie in the given set of modes
 *
 * This returns true if the deref's modes lie in the given set of modes. If
 * the deref's modes overlap with the specified modes but aren't entirely
 * contained in the specified set of modes, this will assert-fail. In
 * particular, if this is used in a generic pointers scenario, the specified
 * modes have to contain all or none of the possible generic pointer modes.
 *
 * This is intended mostly for mass-lowering of derefs which might have
 * generic pointers.
 */
static inline bool
nir_deref_mode_is_in_set(const nir_deref_instr *deref, nir_variable_mode modes)
{
   if (nir_deref_mode_may_be(deref, modes))
      assert(nir_deref_mode_must_be(deref, modes));

   return nir_deref_mode_may_be(deref, modes);
}
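/* Putting the helpers together for a deref whose modes are, say,
 * nir_var_mem_shared | nir_var_mem_global (an OpenCL-style generic
 * pointer):
 *
 *    nir_deref_mode_may_be(deref, nir_var_mem_shared)     -> true
 *    nir_deref_mode_must_be(deref, nir_var_mem_shared)    -> false
 *    nir_deref_mode_is_in_set(deref, nir_var_mem_shared |
 *                                    nir_var_mem_global)  -> true
 */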
static inline nir_deref_instr *nir_src_as_deref(nir_src src);

static inline nir_deref_instr *
nir_deref_instr_parent(const nir_deref_instr *instr)
{
   if (instr->deref_type == nir_deref_type_var)
      return NULL;
   else
      return nir_src_as_deref(instr->parent);
}

static inline nir_variable *
nir_deref_instr_get_variable(const nir_deref_instr *instr)
{
   while (instr->deref_type != nir_deref_type_var) {
      if (instr->deref_type == nir_deref_type_cast)
         return NULL;

      instr = nir_deref_instr_parent(instr);
   }

   return instr->var;
}
bool nir_deref_instr_has_indirect(nir_deref_instr *instr);
bool nir_deref_instr_is_known_out_of_bounds(nir_deref_instr *instr);

typedef enum {
   nir_deref_instr_has_complex_use_allow_memcpy_src = (1 << 0),
   nir_deref_instr_has_complex_use_allow_memcpy_dst = (1 << 1),
} nir_deref_instr_has_complex_use_options;

bool nir_deref_instr_has_complex_use(nir_deref_instr *instr,
                                     nir_deref_instr_has_complex_use_options opts);

bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);

unsigned nir_deref_instr_array_stride(nir_deref_instr *instr);
typedef struct nir_call_instr {
   nir_instr instr;

   struct nir_function *callee;

   unsigned num_params;
   nir_src params[];
} nir_call_instr;

#include "nir_intrinsics.h"
#define NIR_INTRINSIC_MAX_CONST_INDEX 7

/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do. Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like. Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them. Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated. That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources. What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h. Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
typedef struct nir_intrinsic_instr {
   nir_instr instr;

   nir_intrinsic_op intrinsic;

   nir_dest dest;

   /** number of components if this is a vectorized intrinsic
    *
    * Similarly to ALU operations, some intrinsics are vectorized.
    * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
    * For vectorized intrinsics, the num_components field specifies the
    * number of destination components and the number of source components
    * for all sources with nir_intrinsic_infos.src_components[i] == 0.
    */
   uint8_t num_components;

   int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];

   nir_src src[];
} nir_intrinsic_instr;
static inline nir_variable *
nir_intrinsic_get_var(nir_intrinsic_instr *intrin, unsigned i)
{
   return nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[i]));
}
typedef enum {
   /* Memory ordering. */
   NIR_MEMORY_ACQUIRE        = 1 << 0,
   NIR_MEMORY_RELEASE        = 1 << 1,
   NIR_MEMORY_ACQ_REL        = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE,

   /* Memory visibility operations. */
   NIR_MEMORY_MAKE_AVAILABLE = 1 << 2,
   NIR_MEMORY_MAKE_VISIBLE   = 1 << 3,
} nir_memory_semantics;
typedef enum {
   NIR_SCOPE_NONE,
   NIR_SCOPE_INVOCATION,
   NIR_SCOPE_SUBGROUP,
   NIR_SCOPE_SHADER_CALL,
   NIR_SCOPE_WORKGROUP,
   NIR_SCOPE_QUEUE_FAMILY,
   NIR_SCOPE_DEVICE,
} nir_scope;
/**
 * \name NIR intrinsics semantic flags
 *
 * information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * Whether the intrinsic can be safely eliminated if none of its output
    * values are used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),

   /**
    * Whether the intrinsic can be reordered with respect to any other
    * intrinsic, i.e. whether the only reordering dependencies of the
    * intrinsic are due to the register reads/writes.
    */
   NIR_INTRINSIC_CAN_REORDER = (1 << 1),
} nir_intrinsic_semantic_flag;
/**
 * Maximum valid value for a nir align_mul value (in intrinsics or derefs).
 *
 * Offsets can be signed, so this is the largest power of two in int32_t.
 */
#define NIR_ALIGN_MUL_MAX 0x40000000
typedef struct nir_io_semantics {
   unsigned location:7;  /* gl_vert_attrib, gl_varying_slot, or gl_frag_result */
   unsigned num_slots:6; /* max 32, may be pessimistic with const indexing */
   unsigned dual_source_blend_index:1;
   unsigned fb_fetch_output:1;  /* for GL_KHR_blend_equation_advanced */
   unsigned gs_streams:8;       /* xxyyzzww: 2-bit stream index for each component */
   unsigned medium_precision:1; /* GLSL mediump qualifier */
   unsigned per_view:1;
   unsigned high_16bits:1; /* whether accessing low or high half of the slot */
   unsigned invariant:1;   /* The variable has the invariant flag set */
   /* CLIP_DISTn, LAYER, VIEWPORT, and TESS_LEVEL_* have up to 3 uses:
    * - an output consumed by the next stage
    * - a system value output affecting fixed-func hardware, e.g. the clipper
    * - a transform feedback output written to memory
    * The following fields disable the first two. Transform feedback is
    * disabled by transform feedback info.
    */
   unsigned no_varying:1;       /* whether this output isn't consumed by the next stage */
   unsigned no_sysval_output:1; /* whether this system value output has no
                                   effect due to current pipeline states */
} nir_io_semantics;
/* Transform feedback info for 2 outputs. nir_intrinsic_store_output contains
 * this structure twice to support up to 4 outputs. The structure is limited
 * to 32 bits because it's stored in nir_intrinsic_instr::const_index[].
 */
typedef struct nir_io_xfb {
   struct {
      /* start_component is equal to the index of out[]; add 2 for io_xfb2 */
      /* start_component is not relative to nir_intrinsic_component */
      /* get the stream index from nir_io_semantics */
      uint8_t num_components:4; /* max 4; if this is 0, xfb is disabled */
      uint8_t buffer:4;         /* buffer index, max 3 */
      uint8_t offset;           /* transform feedback buffer offset in dwords,
                                   max (1K - 4) bytes */
   } out[2];
} nir_io_xfb;
unsigned
nir_instr_xfb_write_mask(nir_intrinsic_instr *instr);
#define NIR_INTRINSIC_MAX_INPUTS 11

typedef struct {
   const char *name;

   uint8_t num_srcs; /**< number of register/SSA inputs */

   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr. If this value is -1, the
    * intrinsic consumes however many components are provided and it is not
    * validated at all.
    */
   int8_t src_components[NIR_INTRINSIC_MAX_INPUTS];

   bool has_dest;

   /** number of components of the output register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   uint8_t dest_components;

   /** bitfield of legal bit sizes */
   uint8_t dest_bit_sizes;

   /** source which the destination bit size must match
    *
    * Some intrinsics, such as subgroup intrinsics, are data manipulation
    * intrinsics and they have similar bit-size rules to ALU ops. This enables
    * validation to validate a bit more and enables auto-generated builder code
    * to properly determine destination bit sizes automatically.
    */
   int8_t bit_size_src;

   /** the number of constant indices used by the intrinsic */
   uint8_t num_indices;

   /** list of indices */
   uint8_t indices[NIR_INTRINSIC_MAX_CONST_INDEX];

   /** indicates the usage of intr->const_index[n] */
   uint8_t index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];

   /** semantic flags for calls to this intrinsic */
   nir_intrinsic_semantic_flag flags;
} nir_intrinsic_info;

extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
unsigned
nir_intrinsic_src_components(const nir_intrinsic_instr *intr, unsigned srcn);

unsigned
nir_intrinsic_dest_components(nir_intrinsic_instr *intr);

/**
 * Helper to copy const_index[] from src to dst, without assuming they
 * match in order.
 */
void nir_intrinsic_copy_const_indices(nir_intrinsic_instr *dst, nir_intrinsic_instr *src);

#include "nir_intrinsics_indices.h"
static inline void
nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
                        unsigned align_mul, unsigned align_offset)
{
   assert(util_is_power_of_two_nonzero(align_mul));
   assert(align_offset < align_mul);
   nir_intrinsic_set_align_mul(intrin, align_mul);
   nir_intrinsic_set_align_offset(intrin, align_offset);
}
/** Returns a simple alignment for an align_mul/offset pair
 *
 * This helper converts from the full mul+offset alignment scheme used by
 * most NIR intrinsics to a simple alignment. The returned value is the
 * largest power of two which divides both align_mul and align_offset.
 * For any offset X which satisfies the complex alignment described by
 * align_mul/offset, X % align == 0.
 */
static inline uint32_t
nir_combined_align(uint32_t align_mul, uint32_t align_offset)
{
   assert(util_is_power_of_two_nonzero(align_mul));
   assert(align_offset < align_mul);
   return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
}
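/* Worked example: align_mul == 16 and align_offset == 4 describe offsets of
 * the form 16k + 4. All such offsets are divisible by 4 but not necessarily
 * by 8, so nir_combined_align(16, 4) == 4. When align_offset is 0 the
 * result is simply align_mul.
 */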
/** Returns a simple alignment for a load/store intrinsic offset
 *
 * Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL
 * and ALIGN_OFFSET parameters, this helper takes both into account and
 * provides a single simple alignment parameter. The offset X is guaranteed
 * to satisfy X % align == 0.
 */
static inline unsigned
nir_intrinsic_align(const nir_intrinsic_instr *intrin)
{
   return nir_combined_align(nir_intrinsic_align_mul(intrin),
                             nir_intrinsic_align_offset(intrin));
}

static inline bool
nir_intrinsic_has_align(const nir_intrinsic_instr *intrin)
{
   return nir_intrinsic_has_align_mul(intrin) &&
          nir_intrinsic_has_align_offset(intrin);
}
unsigned
nir_image_intrinsic_coord_components(const nir_intrinsic_instr *instr);

/* Converts an image_deref_* intrinsic into an image_* one */
void nir_rewrite_image_intrinsic(nir_intrinsic_instr *instr,
                                 nir_ssa_def *handle, bool bindless);
/* Determine if an intrinsic can be arbitrarily reordered and eliminated. */
static inline bool
nir_intrinsic_can_reorder(nir_intrinsic_instr *instr)
{
   if (nir_intrinsic_has_access(instr) &&
       nir_intrinsic_access(instr) & ACCESS_VOLATILE)
      return false;

   if (instr->intrinsic == nir_intrinsic_load_deref) {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      return nir_deref_mode_is_in_set(deref, nir_var_read_only_modes) ||
             (nir_intrinsic_access(instr) & ACCESS_CAN_REORDER);
   } else if (instr->intrinsic == nir_intrinsic_load_ssbo ||
              instr->intrinsic == nir_intrinsic_bindless_image_load ||
              instr->intrinsic == nir_intrinsic_image_deref_load ||
              instr->intrinsic == nir_intrinsic_image_load) {
      return nir_intrinsic_access(instr) & ACCESS_CAN_REORDER;
   } else {
      const nir_intrinsic_info *info =
         &nir_intrinsic_infos[instr->intrinsic];
      return (info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
             (info->flags & NIR_INTRINSIC_CAN_REORDER);
   }
}
bool nir_intrinsic_writes_external_memory(const nir_intrinsic_instr *instr);
static inline bool
nir_intrinsic_is_ray_query(nir_intrinsic_op intrinsic)
{
   switch (intrinsic) {
   case nir_intrinsic_rq_confirm_intersection:
   case nir_intrinsic_rq_generate_intersection:
   case nir_intrinsic_rq_initialize:
   case nir_intrinsic_rq_load:
   case nir_intrinsic_rq_proceed:
   case nir_intrinsic_rq_terminate:
      return true;
   default:
      return false;
   }
}
/** Texture instruction source type */
typedef enum {
   /** Texture coordinate
    *
    * Must have nir_tex_instr::coord_components components.
    */
   nir_tex_src_coord,

   /** Projector
    *
    * The texture coordinate (except for the array component, if any) is
    * divided by this value before LOD computation and sampling.
    *
    * Must be a float scalar.
    */
   nir_tex_src_projector,

   /** Shadow comparator
    *
    * For shadow sampling, the fetched texel values are compared against the
    * shadow comparator using the compare op specified by the sampler object
    * and converted to 1.0 if the comparison succeeds and 0.0 if it fails.
    * Interpolation happens after this conversion so the actual result may be
    * anywhere in the range [0.0, 1.0].
    *
    * Only valid if nir_tex_instr::is_shadow and must be a float scalar.
    */
   nir_tex_src_comparator,
2106 /** Coordinate offset
2108 * An integer value that is added to the texel address before sampling.
2109 * This is only allowed with operations that take an explicit LOD as it is
2110 * applied in integer texel space after LOD selection and not normalized
2117 * This value is added to the computed LOD before mip-mapping.
2126 * The computed LOD is clamped to be at least as large as min_lod before
2127 * sampling.
2129 nir_tex_src_min_lod,
2131 /** MSAA sample index */
2132 nir_tex_src_ms_index,
2134 /** Intel-specific MSAA compression data */
2135 nir_tex_src_ms_mcs_intel,
2137 /** Explicit horizontal (X-major) coordinate derivative */
2140 /** Explicit vertical (Y-major) coordinate derivative */
2143 /** Texture variable dereference */
2144 nir_tex_src_texture_deref,
2146 /** Sampler variable dereference */
2147 nir_tex_src_sampler_deref,
2149 /** Texture index offset
2151 * This is added to nir_tex_instr::texture_index. Unless
2152 * nir_tex_instr::texture_non_uniform is set, this is guaranteed to be
2153 * dynamically uniform.
2155 nir_tex_src_texture_offset,
2157 /** Dynamically uniform sampler index offset
2159 * This is added to nir_tex_instr::sampler_index. Unless
2160 * nir_tex_instr::sampler_non_uniform is set, this is guaranteed to be
2161 * dynamically uniform. This should not be present until GLSL ES 3.20, GLSL
2162 * 4.00, or ARB_gpu_shader5, because ES 3.10 and GL 3.30 said of samplers:
2163 * "When aggregated into arrays within a shader, samplers can only be indexed
2164 * with a constant integral expression."
2166 nir_tex_src_sampler_offset,
2168 /** Bindless texture handle
2170 * This is, unfortunately, a bit overloaded at the moment. There are
2171 * generally two types of bindless handles:
2173 * 1. For GL_ARB_bindless bindless handles. These are part of the
2174 * GL/Gallium-level API and are always a 64-bit integer.
2176 * 2. HW-specific handles. GL_ARB_bindless handles may be lowered to
2177 * these. Also, these are used by many Vulkan drivers to implement
2178 * descriptor sets, especially for UPDATE_AFTER_BIND descriptors.
2179 * The details of hardware handles (bit size, format, etc.) are
2180 * entirely driver-specific.
2182 * Because of this overloading and the resulting ambiguity, we currently
2183 * don't validate anything for these.
2185 nir_tex_src_texture_handle,
2187 /** Bindless sampler handle
2189 * See nir_tex_src_texture_handle.
2191 nir_tex_src_sampler_handle,
2193 /** Plane index for multi-plane YCbCr textures */
2197 * Backend-specific vec4 tex src argument.
2199 * Can be used to have NIR optimization (copy propagation, lower_vec_to_movs)
2200 * apply to the packing of the tex srcs. This lowering must only happen
2201 * after nir_lower_tex().
2203 * The nir_tex_instr_src_type() of this argument is float, so no lowering
2204 * will happen if nir_lower_int_to_float is used.
2206 nir_tex_src_backend1,
2208 /** Second backend-specific vec4 tex src argument, see nir_tex_src_backend1. */
2209 nir_tex_src_backend2,
2211 nir_num_tex_src_types
2214 /** A texture instruction source */
2219 /** Type of this source */
2220 nir_tex_src_type src_type;
2223 /** Texture instruction opcode */
2225 nir_texop_tex, /**< Regular texture look-up */
2226 nir_texop_txb, /**< Texture look-up with LOD bias */
2227 nir_texop_txl, /**< Texture look-up with explicit LOD */
2228 nir_texop_txd, /**< Texture look-up with partial derivatives */
2229 nir_texop_txf, /**< Texel fetch with explicit LOD */
2230 nir_texop_txf_ms, /**< Multisample texture fetch */
2231 nir_texop_txf_ms_fb, /**< Multisample texture fetch from framebuffer */
2232 nir_texop_txf_ms_mcs_intel, /**< Multisample compression value fetch */
2233 nir_texop_txs, /**< Texture size */
2234 nir_texop_lod, /**< Texture lod query */
2235 nir_texop_tg4, /**< Texture gather */
2236 nir_texop_query_levels, /**< Texture levels query */
2237 nir_texop_texture_samples, /**< Texture samples query */
2238 nir_texop_samples_identical, /**< Query whether all samples are definitely
2239 identical. */
2241 nir_texop_tex_prefetch, /**< Regular texture look-up, eligible for pre-dispatch */
2242 nir_texop_fragment_fetch_amd, /**< Multisample fragment color texture fetch */
2243 nir_texop_fragment_mask_fetch_amd, /**< Multisample fragment mask texture fetch */
2244 nir_texop_descriptor_amd, /**< Returns a buffer or image descriptor. */
2245 nir_texop_sampler_descriptor_amd, /**< Returns a sampler descriptor. */
2246 nir_texop_lod_bias_agx, /**< Returns the sampler's LOD bias */
2249 /** Represents a texture instruction */
2251 /** Base instruction */
2254 /** Dimensionality of the texture operation
2256 * This will typically match the dimensionality of the texture deref type
2257 * if a nir_tex_src_texture_deref is present. However, it may not if
2258 * texture lowering has occurred.
2260 enum glsl_sampler_dim sampler_dim;
2262 /** ALU type of the destination
2264 * This is the canonical sampled type for this texture operation and may
2265 * not exactly match the sampled type of the deref type when a
2266 * nir_tex_src_texture_deref is present. For OpenCL, the sampled type of
2267 * the texture deref will be GLSL_TYPE_VOID and this is allowed to be
2268 * anything. With SPIR-V, the signedness of integer types is allowed to
2269 * differ. For all APIs, the bit size may differ if the driver has done
2270 * any sort of mediump or similar lowering since texture types always have
2271 * 32-bit sampled types.
2273 nir_alu_type dest_type;
2275 /** Texture opcode */
2281 /** Array of sources
2283 * This array has nir_tex_instr::num_srcs elements
2287 /** Number of sources */
2290 /** Number of components in the coordinate, if any */
2291 unsigned coord_components;
2293 /** True if the texture instruction acts on an array texture */
2296 /** True if the texture instruction performs a shadow comparison
2298 * If this is true, the texture instruction must have a
2299 * nir_tex_src_comparator.
2304 * If is_shadow is true, whether this is the old-style shadow that outputs
2305 * 4 components or the new-style shadow that outputs 1 component.
2307 bool is_new_style_shadow;
2310 * True if this texture instruction should return a sparse residency code.
2311 * The code is in the last component of the result.
2315 /** nir_texop_tg4 component selector
2317 * This determines which RGBA component is gathered.
2319 unsigned component : 2;
2321 /** Validation needs to know this for gradient component count */
2322 unsigned array_is_lowered_cube : 1;
2324 /** True if this tg4 instruction has an implicit LOD or LOD bias, instead of using level 0 */
2325 unsigned is_gather_implicit_lod : 1;
2327 /** Gather offsets */
2328 int8_t tg4_offsets[4][2];
2330 /** True if the texture index or handle is not dynamically uniform */
2331 bool texture_non_uniform;
2333 /** True if the sampler index or handle is not dynamically uniform.
2335 * This may be set when VK_EXT_descriptor_indexing is supported and the
2336 * appropriate capability is enabled.
2338 * This should always be false in GLSL (GLSL ES 3.20 says "When aggregated
2339 * into arrays within a shader, opaque types can only be indexed with a
2340 * dynamically uniform integral expression", and GLSL 4.60 says "When
2341 * aggregated into arrays within a shader, [texture, sampler, and
2342 * samplerShadow] types can only be indexed with a dynamically uniform
2343 * expression, or texture lookup will result in undefined values.").
2345 bool sampler_non_uniform;
2347 /** The texture index
2349 * If this texture instruction has a nir_tex_src_texture_offset source,
2350 * then the texture index is given by texture_index + texture_offset.
2352 unsigned texture_index;
2354 /** The sampler index
2356 * The following operations do not require a sampler and, as such, this
2357 * field should be ignored:
2359 * - nir_texop_txf_ms
2361 * - nir_texop_query_levels
2362 * - nir_texop_texture_samples
2363 * - nir_texop_samples_identical
2365 * If this texture instruction has a nir_tex_src_sampler_offset source,
2366 * then the sampler index is given by sampler_index + sampler_offset.
2368 unsigned sampler_index;
2372 * Returns true if the texture operation requires a sampler as a general rule
2374 * Note that the specific hw/driver backend could still require a sampler
2375 * object/configuration packet in any case, for some other reason.
2377 * @see nir_tex_instr::sampler_index.
2379 bool nir_tex_instr_need_sampler(const nir_tex_instr *instr);
2381 /** Returns the number of components returned by this nir_tex_instr
2383 * Useful for code building texture instructions when you don't want to think
2384 * about how many components a particular texture op returns. This does not
2385 * include the sparse residency code.
2388 nir_tex_instr_result_size(const nir_tex_instr *instr);
2391 * Returns the destination size of this nir_tex_instr including the sparse
2392 * residency code, if any.
2394 static inline unsigned
2395 nir_tex_instr_dest_size(const nir_tex_instr *instr)
2397 /* One more component is needed for the residency code. */
2398 return nir_tex_instr_result_size(instr) + instr->is_sparse;
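/* For example, a nir_texop_tg4 gathers 4 components, so
 * nir_tex_instr_result_size() returns 4; if is_sparse is also set, the
 * residency code takes one extra component and nir_tex_instr_dest_size()
 * returns 5.
 */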
2402 * Returns true if this texture operation queries something about the texture
2403 * rather than actually sampling it.
2406 nir_tex_instr_is_query(const nir_tex_instr *instr);
2408 /** Returns true if this texture instruction does implicit derivatives
2410 * This is important as there are extra control-flow rules around derivatives
2411 * and texture instructions which perform them implicitly.
2414 nir_tex_instr_has_implicit_derivative(const nir_tex_instr *instr);
2416 /** Returns the ALU type of the given texture instruction source */
2418 nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src);
2421 * Returns the number of components required by the given texture instruction
2425 nir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src);
2428 * Returns the index of the texture instruction source with the given
2429 * nir_tex_src_type or -1 if no such source exists.
2432 nir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type)
2434 for (unsigned i = 0; i < instr->num_srcs; i++)
2435 if (instr->src[i].src_type == type)
2436 return i;
2438 return -1;
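/* Editorial usage sketch, assuming "tex" is a nir_tex_instr under
 * inspection:
 *
 *    int coord_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 *    if (coord_idx >= 0) {
 *       nir_ssa_def *coord = tex->src[coord_idx].src.ssa;
 *       assert(coord->num_components == tex->coord_components);
 *    }
 */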
2441 /** Adds a source to a texture instruction */
2442 void nir_tex_instr_add_src(nir_tex_instr *tex,
2443 nir_tex_src_type src_type,
2446 /** Removes a source from a texture instruction */
2447 void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
2449 bool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex);
2456 nir_const_value value[];
2457 } nir_load_const_instr;
2460 /** Return from a function
2462 * This instruction is a classic function return. It jumps to
2463 * nir_function_impl::end_block. No return value is provided in this
2464 * instruction. Instead, the function is expected to write any return
2465 * data to a deref passed in from the caller.
2469 /** Immediately exit the current shader
2471 * This instruction is roughly the equivalent of C's "exit()" in that it
2472 * immediately terminates the current shader invocation. From a CFG
2473 * perspective, it looks like a jump to nir_function_impl::end_block but
2474 * it actually jumps to the end block of the shader entrypoint. A halt
2475 * instruction in the shader entrypoint itself is semantically identical
2478 * For shaders with built-in I/O, any outputs written prior to a halt
2479 * instruction remain written and any outputs not written prior to the
2480 * halt have undefined values. It does NOT cause an implicit discard of
2481 * written results. If one wants to discard results in a fragment shader,
2482 * for instance, a discard or demote intrinsic is required.
2486 /** Break out of the inner-most loop
2488 * This has the same semantics as C's "break" statement.
2492 /** Jump back to the top of the inner-most loop
2494 * This has the same semantics as C's "continue" statement assuming that a
2495 * NIR loop is implemented as "while (1) { body }".
2499 /** Jumps for unstructured CFG.
2501 * As within an unstructured CFG we can't rely on block ordering, we need to
2502 * place explicit jumps at the end of every block.
2512 struct nir_block *target;
2513 struct nir_block *else_target;
2516 /* creates a new SSA variable in an undefined state */
2521 } nir_ssa_undef_instr;
2524 struct exec_node node;
2526 /* The predecessor block corresponding to this source */
2527 struct nir_block *pred;
2532 #define nir_foreach_phi_src(phi_src, phi) \
2533 foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
2534 #define nir_foreach_phi_src_safe(phi_src, phi) \
2535 foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)
2540 struct exec_list srcs; /** < list of nir_phi_src */
2545 static inline nir_phi_src *
2546 nir_phi_get_src_from_block(nir_phi_instr *phi, struct nir_block *block)
2548 nir_foreach_phi_src(src, phi) {
2549 if (src->pred == block)
2553 assert(!"Block is not a predecessor of phi.");
2554 return NULL;
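/* Editorial usage sketch: rewriting the value a phi receives along one CFG
 * edge, assuming "phi", "pred", and "new_def" are in scope and using the
 * source-rewrite helpers declared elsewhere in this header:
 *
 *    nir_phi_src *psrc = nir_phi_get_src_from_block(phi, pred);
 *    nir_instr_rewrite_src(&phi->instr, &psrc->src,
 *                          nir_src_for_ssa(new_def));
 */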
2558 struct exec_node node;
2561 } nir_parallel_copy_entry;
2563 #define nir_foreach_parallel_copy_entry(entry, pcopy) \
2564 foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)
2569 /* A list of nir_parallel_copy_entrys. The sources of all of the
2570 * entries are copied to the corresponding destinations "in parallel".
2571 * In other words, if we have two entries: a -> b and b -> a, the values
2572 * of a and b are swapped.
2574 struct exec_list entries;
2575 } nir_parallel_copy_instr;
2577 NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
2578 type, nir_instr_type_alu)
2579 NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,
2580 type, nir_instr_type_deref)
2581 NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
2582 type, nir_instr_type_call)
2583 NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
2584 type, nir_instr_type_jump)
2585 NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
2586 type, nir_instr_type_tex)
2587 NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
2588 type, nir_instr_type_intrinsic)
2589 NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
2590 type, nir_instr_type_load_const)
2591 NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
2592 type, nir_instr_type_ssa_undef)
2593 NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
2594 type, nir_instr_type_phi)
2595 NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
2596 nir_parallel_copy_instr, instr,
2597 type, nir_instr_type_parallel_copy)
2600 #define NIR_DEFINE_SRC_AS_CONST(type, suffix) \
2601 static inline type \
2602 nir_src_comp_as_##suffix(nir_src src, unsigned comp) \
2604 assert(nir_src_is_const(src)); \
2605 nir_load_const_instr *load = \
2606 nir_instr_as_load_const(src.ssa->parent_instr); \
2607 assert(comp < load->def.num_components); \
2608 return nir_const_value_as_##suffix(load->value[comp], \
2609 load->def.bit_size); \
2612 static inline type \
2613 nir_src_as_##suffix(nir_src src) \
2615 assert(nir_src_num_components(src) == 1); \
2616 return nir_src_comp_as_##suffix(src, 0); \
2619 NIR_DEFINE_SRC_AS_CONST(int64_t, int)
2620 NIR_DEFINE_SRC_AS_CONST(uint64_t, uint)
2621 NIR_DEFINE_SRC_AS_CONST(bool, bool)
2622 NIR_DEFINE_SRC_AS_CONST(double, float)
2624 #undef NIR_DEFINE_SRC_AS_CONST
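/* Editorial usage sketch: reading a constant offset operand, assuming
 * "intrin" is a load_ssbo intrinsic (whose second source is the offset) and
 * "base" is an address accumulator in the surrounding code:
 *
 *    if (nir_src_is_const(intrin->src[1])) {
 *       uint64_t offset = nir_src_as_uint(intrin->src[1]);
 *       base += offset;
 *    }
 */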
2633 nir_ssa_scalar_is_const(nir_ssa_scalar s)
2635 return s.def->parent_instr->type == nir_instr_type_load_const;
2639 nir_ssa_scalar_is_undef(nir_ssa_scalar s)
2641 return s.def->parent_instr->type == nir_instr_type_ssa_undef;
2644 static inline nir_const_value
2645 nir_ssa_scalar_as_const_value(nir_ssa_scalar s)
2647 assert(s.comp < s.def->num_components);
2648 nir_load_const_instr *load = nir_instr_as_load_const(s.def->parent_instr);
2649 return load->value[s.comp];
2652 #define NIR_DEFINE_SCALAR_AS_CONST(type, suffix) \
2653 static inline type \
2654 nir_ssa_scalar_as_##suffix(nir_ssa_scalar s) \
2656 return nir_const_value_as_##suffix( \
2657 nir_ssa_scalar_as_const_value(s), s.def->bit_size); \
2660 NIR_DEFINE_SCALAR_AS_CONST(int64_t, int)
2661 NIR_DEFINE_SCALAR_AS_CONST(uint64_t, uint)
2662 NIR_DEFINE_SCALAR_AS_CONST(bool, bool)
2663 NIR_DEFINE_SCALAR_AS_CONST(double, float)
2665 #undef NIR_DEFINE_SCALAR_AS_CONST
2668 nir_ssa_scalar_is_alu(nir_ssa_scalar s)
2670 return s.def->parent_instr->type == nir_instr_type_alu;
2673 static inline nir_op
2674 nir_ssa_scalar_alu_op(nir_ssa_scalar s)
2676 return nir_instr_as_alu(s.def->parent_instr)->op;
2679 static inline nir_ssa_scalar
2680 nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
2682 nir_ssa_scalar out = { NULL, 0 };
2684 nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
2685 assert(alu_src_idx < nir_op_infos[alu->op].num_inputs);
2687 /* Our component must be written */
2688 assert(s.comp < s.def->num_components);
2689 assert(alu->dest.write_mask & (1u << s.comp));
2691 assert(alu->src[alu_src_idx].src.is_ssa);
2692 out.def = alu->src[alu_src_idx].src.ssa;
2694 if (nir_op_infos[alu->op].input_sizes[alu_src_idx] == 0) {
2695 /* The ALU src is unsized so the source component follows the
2696 * destination component.
2698 out.comp = alu->src[alu_src_idx].swizzle[s.comp];
2700 /* This is a sized source so all source components work together to
2701 * produce all the destination components. Since we need to return a
2702 * scalar, this only works if the source is a scalar.
2704 assert(nir_op_infos[alu->op].input_sizes[alu_src_idx] == 1);
2705 out.comp = alu->src[alu_src_idx].swizzle[0];
2707 assert(out.comp < out.def->num_components);
2712 nir_ssa_scalar nir_ssa_scalar_chase_movs(nir_ssa_scalar s);
2714 static inline nir_ssa_scalar
2715 nir_get_ssa_scalar(nir_ssa_def *def, unsigned channel)
2717 nir_ssa_scalar s = { def, channel };
2721 /** Returns a nir_ssa_scalar where we've followed the bit-exact mov/vec use chain to the original definition */
2722 static inline nir_ssa_scalar
2723 nir_ssa_scalar_resolved(nir_ssa_def *def, unsigned channel)
2725 return nir_ssa_scalar_chase_movs(nir_get_ssa_scalar(def, channel));
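/* Editorial usage sketch: testing whether one channel of a value is the
 * constant zero while looking through mov/vec chains ("def" assumed in
 * scope):
 *
 *    nir_ssa_scalar s = nir_ssa_scalar_resolved(def, 0);
 *    bool known_zero = nir_ssa_scalar_is_const(s) &&
 *                      nir_ssa_scalar_as_uint(s) == 0;
 */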
2735 unsigned num_indices;
2737 bool read_first_invocation;
2740 nir_binding nir_chase_binding(nir_src rsrc);
2741 nir_variable *nir_get_binding_variable(struct nir_shader *shader, nir_binding binding);
2747 * Control flow consists of a tree of control flow nodes, which include
2748 * if-statements and loops. The leaves of the tree are basic blocks, lists of
2749 * instructions that always run start-to-finish. Each basic block also keeps
2750 * track of its successors (blocks which may run immediately after the current
2751 * block) and predecessors (blocks which could have run immediately before the
2752 * current block). Each function also has a start block and an end block which
2753 * all return statements point to (which is always empty). Together, all the
2754 * blocks with their predecessors and successors make up the control flow
2755 * graph (CFG) of the function. There are helpers that modify the tree of
2756 * control flow nodes while modifying the CFG appropriately; these should be
2757 * used instead of modifying the tree directly.
2764 nir_cf_node_function
2767 typedef struct nir_cf_node {
2768 struct exec_node node;
2769 nir_cf_node_type type;
2770 struct nir_cf_node *parent;
2773 typedef struct nir_block {
2774 nir_cf_node cf_node;
2776 struct exec_list instr_list; /** < list of nir_instr */
2778 /** generic block index; generated by nir_index_blocks */
2782 * Each block can only have up to 2 successors, so we put them in a simple
2783 * array - no need for anything more complicated.
2785 struct nir_block *successors[2];
2787 /* Set of nir_block predecessors in the CFG */
2788 struct set *predecessors;
2791 * this node's immediate dominator in the dominance tree - set to NULL for
2794 struct nir_block *imm_dom;
2796 /* This node's children in the dominance tree */
2797 unsigned num_dom_children;
2798 struct nir_block **dom_children;
2800 /* Set of nir_blocks on the dominance frontier of this block */
2801 struct set *dom_frontier;
2804 * These two indices have the property that dom_{pre,post}_index for each
2805 * child of this block in the dominance tree will always be between
2806 * dom_pre_index and dom_post_index for this block, which makes testing if
2807 * a given block is dominated by another block an O(1) operation.
2809 uint32_t dom_pre_index, dom_post_index;
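/* A minimal sketch of the O(1) test these indices enable, assuming
 * dominance metadata is valid (nir_block_dominates() is the real helper):
 *
 *    bool a_dominates_b = a->dom_pre_index <= b->dom_pre_index &&
 *                         a->dom_post_index >= b->dom_post_index;
 */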
2812 * Value just before the first nir_instr->index in the block, but after
2813 * the end_ip of any predecessor block.
2817 * Value just after the last nir_instr->index in the block, but before the
2818 * start_ip of any successor block.
2822 /* SSA def live in and out for this block; used for liveness analysis.
2823 * Indexed by ssa_def->index
2825 BITSET_WORD *live_in;
2826 BITSET_WORD *live_out;
2830 nir_block_is_reachable(nir_block *b)
2832 /* See also nir_block_dominates */
2833 return b->dom_post_index != 0;
2836 static inline nir_instr *
2837 nir_block_first_instr(nir_block *block)
2839 struct exec_node *head = exec_list_get_head(&block->instr_list);
2840 return exec_node_data(nir_instr, head, node);
2843 static inline nir_instr *
2844 nir_block_last_instr(nir_block *block)
2846 struct exec_node *tail = exec_list_get_tail(&block->instr_list);
2847 return exec_node_data(nir_instr, tail, node);
2851 nir_block_ends_in_jump(nir_block *block)
2853 return !exec_list_is_empty(&block->instr_list) &&
2854 nir_block_last_instr(block)->type == nir_instr_type_jump;
2858 nir_block_ends_in_return_or_halt(nir_block *block)
2860 if (exec_list_is_empty(&block->instr_list))
2863 nir_instr *instr = nir_block_last_instr(block);
2864 if (instr->type != nir_instr_type_jump)
2867 nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
2868 return jump_instr->type == nir_jump_return ||
2869 jump_instr->type == nir_jump_halt;
2873 nir_block_ends_in_break(nir_block *block)
2875 if (exec_list_is_empty(&block->instr_list))
2878 nir_instr *instr = nir_block_last_instr(block);
2879 return instr->type == nir_instr_type_jump &&
2880 nir_instr_as_jump(instr)->type == nir_jump_break;
2883 #define nir_foreach_instr(instr, block) \
2884 foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
2885 #define nir_foreach_instr_reverse(instr, block) \
2886 foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
2887 #define nir_foreach_instr_safe(instr, block) \
2888 foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
2889 #define nir_foreach_instr_reverse_safe(instr, block) \
2890 foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
2892 static inline nir_phi_instr *
2893 nir_block_last_phi_instr(nir_block *block)
2895 nir_phi_instr *last_phi = NULL;
2896 nir_foreach_instr(instr, block) {
2897 if (instr->type == nir_instr_type_phi)
2898 last_phi = nir_instr_as_phi(instr);
2899 else
2900 break;
2903 return last_phi;
2906 nir_selection_control_none = 0x0,
2909 * Defined by SPIR-V spec 3.22 "Selection Control".
2910 * The application prefers to remove control flow.
2912 nir_selection_control_flatten = 0x1,
2915 * Defined by SPIR-V spec 3.22 "Selection Control".
2916 * The application prefers to keep control flow.
2918 nir_selection_control_dont_flatten = 0x2,
2921 * May be applied by the compiler stack when it knows
2922 * that a branch is divergent, and:
2923 * - either both the if and else are always taken
2924 * - the if or else is empty and the other is always taken
2926 nir_selection_control_divergent_always_taken = 0x3,
2927 } nir_selection_control;
2929 typedef struct nir_if {
2930 nir_cf_node cf_node;
2932 nir_selection_control control;
2934 struct exec_list then_list; /** < list of nir_cf_node */
2935 struct exec_list else_list; /** < list of nir_cf_node */
2941 /** Instruction that generates nif::condition. */
2942 nir_instr *conditional_instr;
2944 /** Block within ::nif that has the break instruction. */
2945 nir_block *break_block;
2947 /** Last block for the then- or else-path that does not contain the break. */
2948 nir_block *continue_from_block;
2950 /** True when ::break_block is in the else-path of ::nif. */
2951 bool continue_from_then;
2954 /* This is true if the terminator's exact trip count is unknown. For
2957 * for (int i = 0; i < imin(x, 4); i++)
2960 * Here loop analysis would have set a max_trip_count of 4; however, we don't
2961 * know for sure that this is the exact trip count.
2963 bool exact_trip_count_unknown;
2965 struct list_head loop_terminator_link;
2966 } nir_loop_terminator;
2969 /* Induction variable. */
2972 /* Init statement with only uniform. */
2975 /* Update statement with only uniform. */
2976 nir_alu_src *update_src;
2977 } nir_loop_induction_variable;
2980 /* Estimated cost (in number of instructions) of the loop */
2981 unsigned instr_cost;
2983 /* Contains fp64 ops that will be lowered */
2986 /* Guessed trip count based on array indexing */
2987 unsigned guessed_trip_count;
2989 /* Maximum number of times the loop is run (if known) */
2990 unsigned max_trip_count;
2992 /* Do we know the exact number of times the loop will be run */
2993 bool exact_trip_count_known;
2995 /* Unroll the loop regardless of its size */
2998 /* Does the loop contain complex loop terminators, continues or other
2999 * complex behaviours? If this is true we can't rely on
3000 * loop_terminator_list to be complete or accurate.
3004 nir_loop_terminator *limiting_terminator;
3006 /* A list of loop_terminators terminating this loop. */
3007 struct list_head loop_terminator_list;
3009 /* array of induction variables for this loop */
3010 nir_loop_induction_variable *induction_vars;
3011 unsigned num_induction_vars;
3015 nir_loop_control_none = 0x0,
3016 nir_loop_control_unroll = 0x1,
3017 nir_loop_control_dont_unroll = 0x2,
3021 nir_cf_node cf_node;
3023 struct exec_list body; /** < list of nir_cf_node */
3024 struct exec_list continue_list; /** < (optional) list of nir_cf_node */
3026 nir_loop_info *info;
3027 nir_loop_control control;
3028 bool partially_unrolled;
3033 * Various bits of metadata that may be created or required by
3034 * optimization and analysis passes
3037 nir_metadata_none = 0x0,
3039 /** Indicates that nir_block::index values are valid.
3041 * The start block has index 0 and they increase through a natural walk of
3042 * the CFG. nir_function_impl::num_blocks is the number of blocks and
3043 * every block index is in the range [0, nir_function_impl::num_blocks].
3045 * A pass can preserve this metadata type if it doesn't touch the CFG.
3047 nir_metadata_block_index = 0x1,
3049 /** Indicates that block dominance information is valid
3053 * - nir_block::num_dom_children
3054 * - nir_block::dom_children
3055 * - nir_block::dom_frontier
3056 * - nir_block::dom_pre_index
3057 * - nir_block::dom_post_index
3059 * A pass can preserve this metadata type if it doesn't touch the CFG.
3061 nir_metadata_dominance = 0x2,
3063 /** Indicates that SSA def data-flow liveness information is valid
3067 * - nir_block::live_in
3068 * - nir_block::live_out
3070 * A pass can preserve this metadata type if it never adds or removes any
3071 * SSA defs or uses of SSA defs (most passes shouldn't preserve this
3072 * metadata type).
3074 nir_metadata_live_ssa_defs = 0x4,
3076 /** A dummy metadata value to track when a pass forgot to call
3077 * nir_metadata_preserve.
3079 * A pass should always clear this value even if it doesn't make any
3080 * progress to indicate that it thought about preserving metadata.
3082 nir_metadata_not_properly_reset = 0x8,
3084 /** Indicates that loop analysis information is valid.
3086 * This includes everything pointed to by nir_loop::info.
3088 * A pass can preserve this metadata type if it is guaranteed to not affect
3089 * any loop metadata. However, since loop metadata includes things like
3090 * loop counts which depend on arithmetic in the loop, this is very hard to
3091 * determine. Most passes shouldn't preserve this metadata type.
3093 nir_metadata_loop_analysis = 0x10,
3095 /** Indicates that nir_instr::index values are valid.
3097 * The start instruction has index 0 and they increase through a natural
3098 * walk of instructions in blocks in the CFG. The indices may have holes
3099 * after passes such as DCE.
3101 * A pass can preserve this metadata type if it never adds or moves any
3102 * instructions (most passes shouldn't preserve this metadata type), but
3103 * can preserve it if it only removes instructions.
3105 nir_metadata_instr_index = 0x20,
3109 * This includes all nir_metadata flags except not_properly_reset. Passes
3110 * which do not change the shader in any way should call
3112 * nir_metadata_preserve(impl, nir_metadata_all);
3114 nir_metadata_all = ~nir_metadata_not_properly_reset,
3116 MESA_DEFINE_CPP_ENUM_BITFIELD_OPERATORS(nir_metadata)
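/* Editorial sketch of the usual pass idiom: a pass that only rewrites
 * instructions without touching the CFG preserves block indices and
 * dominance, and invalidates everything else ("progress" assumed to be the
 * pass's made-a-change flag):
 *
 *    if (progress) {
 *       nir_metadata_preserve(impl, nir_metadata_block_index |
 *                                   nir_metadata_dominance);
 *    } else {
 *       nir_metadata_preserve(impl, nir_metadata_all);
 *    }
 */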
3119 nir_cf_node cf_node;
3121 /** pointer to the function of which this is an implementation */
3122 struct nir_function *function;
3125 * For entrypoints, a pointer to a nir_function_impl which runs before
3126 * it, once per draw or dispatch, communicating via store_preamble and
3127 * load_preamble intrinsics. If NULL then there is no preamble.
3129 struct nir_function *preamble;
3131 struct exec_list body; /** < list of nir_cf_node */
3133 nir_block *end_block;
3135 /** list for all local variables in the function */
3136 struct exec_list locals;
3138 /** list of local registers in the function */
3139 struct exec_list registers;
3141 /** next available local register index */
3144 /** next available SSA value index */
3147 /* total number of basic blocks, only valid when block_index_dirty = false */
3148 unsigned num_blocks;
3150 /** True if this nir_function_impl uses structured control-flow
3152 * Structured nir_function_impls have different validation rules.
3156 nir_metadata valid_metadata;
3157 } nir_function_impl;
3159 #define nir_foreach_function_temp_variable(var, impl) \
3160 foreach_list_typed(nir_variable, var, node, &(impl)->locals)
3162 #define nir_foreach_function_temp_variable_safe(var, impl) \
3163 foreach_list_typed_safe(nir_variable, var, node, &(impl)->locals)
3165 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
3166 nir_start_block(nir_function_impl *impl)
3168 return (nir_block *) impl->body.head_sentinel.next;
3171 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
3172 nir_impl_last_block(nir_function_impl *impl)
3174 return (nir_block *) impl->body.tail_sentinel.prev;
3177 static inline nir_cf_node *
3178 nir_cf_node_next(nir_cf_node *node)
3180 struct exec_node *next = exec_node_get_next(&node->node);
3181 if (exec_node_is_tail_sentinel(next))
3184 return exec_node_data(nir_cf_node, next, node);
3187 static inline nir_cf_node *
3188 nir_cf_node_prev(nir_cf_node *node)
3190 struct exec_node *prev = exec_node_get_prev(&node->node);
3191 if (exec_node_is_head_sentinel(prev))
3194 return exec_node_data(nir_cf_node, prev, node);
3198 nir_cf_node_is_first(const nir_cf_node *node)
3200 return exec_node_is_head_sentinel(node->node.prev);
3204 nir_cf_node_is_last(const nir_cf_node *node)
3206 return exec_node_is_tail_sentinel(node->node.next);
3209 NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
3210 type, nir_cf_node_block)
3211 NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
3212 type, nir_cf_node_if)
3213 NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
3214 type, nir_cf_node_loop)
3215 NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
3216 nir_function_impl, cf_node, type, nir_cf_node_function)
3218 static inline nir_block *
3219 nir_if_first_then_block(nir_if *if_stmt)
3221 struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
3222 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
3225 static inline nir_block *
3226 nir_if_last_then_block(nir_if *if_stmt)
3228 struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
3229 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
3232 static inline nir_block *
3233 nir_if_first_else_block(nir_if *if_stmt)
3235 struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
3236 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
3239 static inline nir_block *
3240 nir_if_last_else_block(nir_if *if_stmt)
3242 struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
3243 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
3246 static inline nir_block *
3247 nir_loop_first_block(nir_loop *loop)
3249 struct exec_node *head = exec_list_get_head(&loop->body);
3250 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
3253 static inline nir_block *
3254 nir_loop_last_block(nir_loop *loop)
3256 struct exec_node *tail = exec_list_get_tail(&loop->body);
3257 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
3261 nir_loop_has_continue_construct(const nir_loop *loop)
3263 return !exec_list_is_empty(&loop->continue_list);
3266 static inline nir_block *
3267 nir_loop_first_continue_block(nir_loop *loop)
3269 assert(nir_loop_has_continue_construct(loop));
3270 struct exec_node *head = exec_list_get_head(&loop->continue_list);
3271 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
3274 static inline nir_block *
3275 nir_loop_last_continue_block(nir_loop *loop)
3277 assert(nir_loop_has_continue_construct(loop));
3278 struct exec_node *tail = exec_list_get_tail(&loop->continue_list);
3279 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
3283 * Return the target block of a nir_jump_continue statement
3285 static inline nir_block *
3286 nir_loop_continue_target(nir_loop *loop)
3288 if (nir_loop_has_continue_construct(loop))
3289 return nir_loop_first_continue_block(loop);
3291 return nir_loop_first_block(loop);
3295 * Return true if this list of cf_nodes contains a single empty block.
3298 nir_cf_list_is_empty_block(struct exec_list *cf_list)
3300 if (exec_list_is_singular(cf_list)) {
3301 struct exec_node *head = exec_list_get_head(cf_list);
3303 nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
3304 return exec_list_is_empty(&block->instr_list);
3310 uint8_t num_components;
3314 typedef struct nir_function {
3315 struct exec_node node;
3318 struct nir_shader *shader;
3320 unsigned num_params;
3321 nir_parameter *params;
3323 /** The implementation of this function.
3325 * If the function is only declared and not implemented, this is NULL.
3327 nir_function_impl *impl;
3334 nir_lower_imul64 = (1 << 0),
3335 nir_lower_isign64 = (1 << 1),
3336 /** Lower all int64 modulus and division opcodes */
3337 nir_lower_divmod64 = (1 << 2),
3338 /** Lower all 64-bit umul_high and imul_high opcodes */
3339 nir_lower_imul_high64 = (1 << 3),
3340 nir_lower_mov64 = (1 << 4),
3341 nir_lower_icmp64 = (1 << 5),
3342 nir_lower_iadd64 = (1 << 6),
3343 nir_lower_iabs64 = (1 << 7),
3344 nir_lower_ineg64 = (1 << 8),
3345 nir_lower_logic64 = (1 << 9),
3346 nir_lower_minmax64 = (1 << 10),
3347 nir_lower_shift64 = (1 << 11),
3348 nir_lower_imul_2x32_64 = (1 << 12),
3349 nir_lower_extract64 = (1 << 13),
3350 nir_lower_ufind_msb64 = (1 << 14),
3351 nir_lower_bit_count64 = (1 << 15),
3352 nir_lower_subgroup_shuffle64 = (1 << 16),
3353 nir_lower_scan_reduce_bitwise64 = (1 << 17),
3354 nir_lower_scan_reduce_iadd64 = (1 << 18),
3355 nir_lower_vote_ieq64 = (1 << 19),
3356 nir_lower_usub_sat64 = (1 << 20),
3357 nir_lower_iadd_sat64 = (1 << 21),
3358 } nir_lower_int64_options;
3361 nir_lower_drcp = (1 << 0),
3362 nir_lower_dsqrt = (1 << 1),
3363 nir_lower_drsq = (1 << 2),
3364 nir_lower_dtrunc = (1 << 3),
3365 nir_lower_dfloor = (1 << 4),
3366 nir_lower_dceil = (1 << 5),
3367 nir_lower_dfract = (1 << 6),
3368 nir_lower_dround_even = (1 << 7),
3369 nir_lower_dmod = (1 << 8),
3370 nir_lower_dsub = (1 << 9),
3371 nir_lower_ddiv = (1 << 10),
3372 nir_lower_fp64_full_software = (1 << 11),
3373 } nir_lower_doubles_options;
3376 nir_divergence_single_prim_per_subgroup = (1 << 0),
3377 nir_divergence_single_patch_per_tcs_subgroup = (1 << 1),
3378 nir_divergence_single_patch_per_tes_subgroup = (1 << 2),
3379 nir_divergence_view_index_uniform = (1 << 3),
3380 nir_divergence_single_frag_shading_rate_per_subgroup = (1 << 4),
3381 nir_divergence_multiple_workgroup_per_compute_subgroup = (1 << 5),
3382 nir_divergence_shader_record_ptr_uniform = (1 << 6),
3383 } nir_divergence_options;
3386 nir_pack_varying_interp_mode_none = (1 << 0),
3387 nir_pack_varying_interp_mode_smooth = (1 << 1),
3388 nir_pack_varying_interp_mode_flat = (1 << 2),
3389 nir_pack_varying_interp_mode_noperspective = (1 << 3),
3390 nir_pack_varying_interp_loc_sample = (1 << 16),
3391 nir_pack_varying_interp_loc_centroid = (1 << 17),
3392 nir_pack_varying_interp_loc_center = (1 << 18),
3393 } nir_pack_varying_options;
3395 /** An instruction filtering callback
3397 * Returns true if the instruction should be processed and false otherwise.
3399 typedef bool (*nir_instr_filter_cb)(const nir_instr *, const void *);
3401 /** A vectorization width callback
3403 * Returns the maximum vectorization width per instruction.
3404 * 0, if the instruction must not be modified.
3406 * The vectorization width must be a power of 2.
3408 typedef uint8_t (*nir_vectorize_cb)(const nir_instr *, const void *);
3410 typedef struct nir_shader_compiler_options {
3420 /** Lowers flrp when it does not support doubles */
3427 /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
3428 bool lower_bitfield_extract;
3429 /** Lowers ibitfield_extract/ubitfield_extract to compares, shifts. */
3430 bool lower_bitfield_extract_to_shifts;
3431 /** Lowers bitfield_insert to bfi/bfm */
3432 bool lower_bitfield_insert;
3433 /** Lowers bitfield_insert to compares, and shifts. */
3434 bool lower_bitfield_insert_to_shifts;
3435 /** Lowers bitfield_insert to bfm/bitfield_select. */
3436 bool lower_bitfield_insert_to_bitfield_select;
3437 /** Lowers bitfield_reverse to shifts. */
3438 bool lower_bitfield_reverse;
3439 /** Lowers bit_count to shifts. */
3440 bool lower_bit_count;
3441 /** Lowers ifind_msb to compare and ufind_msb */
3442 bool lower_ifind_msb;
3443 /** Lowers ifind_msb and ufind_msb to reverse variants */
3444 bool lower_find_msb_to_reverse;
3445 /** Lowers ifind_msb to uclz and logic ops */
3446 bool lower_ifind_msb_to_uclz;
3447 /** Lowers ufind_msb to 31-uclz */
3448 bool lower_ufind_msb_to_uclz;
3449 /** Lowers find_lsb to ufind_msb and logic ops */
3450 bool lower_find_lsb;
3451 bool lower_uadd_carry;
3452 bool lower_usub_borrow;
3453 /** Lowers imul_high/umul_high to 16-bit multiplies and carry operations. */
3454 bool lower_mul_high;
3455 /** lowers fneg to fmul(x, -1.0). Driver must call nir_opt_algebraic_late() */
3457 /** lowers ineg to isub. Driver must call nir_opt_algebraic_late(). */
3459 /** lowers fisnormal to alu ops. */
3460 bool lower_fisnormal;
3462 /* lower {slt,sge,seq,sne} to {flt,fge,feq,fneu} + b2f: */
3465 /* lower b/fall_equalN/b/fany_nequalN (ex:fany_nequal4 to sne+fdot4+fsat) */
3466 bool lower_vector_cmp;
3468 /** enable rules to avoid bit ops */
3471 /** enables rules to lower isign to imin+imax */
3474 /** enables rules to lower fsign to fsub and flt */
3477 /** enables rules to lower iabs to ineg+imax */
3480 /** enable rules that avoid generating umax from signed integer ops */
3483 /** enable rules that avoid generating umin from signed integer ops */
3486 /* lower fdph to fdot4 */
3489 /** lower fdot to fmul and fsum/fadd. */
3492 /* Does the native fdot instruction replicate its result for four
3493 * components? If so, then opt_algebraic_late will turn all fdotN
3494 * instructions into fdotN_replicated instructions.
3496 bool fdot_replicates;
3498 /** lowers ffloor to fsub+ffract: */
3501 /** lowers ffract to fsub+ffloor: */
3504 /** lowers fceil to fneg+ffloor+fneg: */
3509 /** Lowers fround_even to ffract+feq+csel.
3511 * Not correct in that it doesn't correctly handle the "_even" part of the
3512 * rounding, but good enough for DX9 array indexing handling on DX9-class
3513 * hardware.
3515 bool lower_fround_even;
3519 bool lower_pack_half_2x16;
3520 bool lower_pack_unorm_2x16;
3521 bool lower_pack_snorm_2x16;
3522 bool lower_pack_unorm_4x8;
3523 bool lower_pack_snorm_4x8;
3524 bool lower_pack_64_2x32;
3525 bool lower_pack_64_4x16;
3526 bool lower_pack_32_2x16;
3527 bool lower_pack_64_2x32_split;
3528 bool lower_pack_32_2x16_split;
3529 bool lower_unpack_half_2x16;
3530 bool lower_unpack_unorm_2x16;
3531 bool lower_unpack_snorm_2x16;
3532 bool lower_unpack_unorm_4x8;
3533 bool lower_unpack_snorm_4x8;
3534 bool lower_unpack_64_2x32_split;
3535 bool lower_unpack_32_2x16_split;
3537 bool lower_pack_split;
3539 bool lower_extract_byte;
3540 bool lower_extract_word;
3541 bool lower_insert_byte;
3542 bool lower_insert_word;
3544 bool lower_all_io_to_temps;
3545 bool lower_all_io_to_elements;
3547 /* Indicates that the driver only has zero-based vertex id */
3548 bool vertex_id_zero_based;
3551 * If enabled, gl_BaseVertex will be lowered as:
3552 * is_indexed_draw (~0/0) & firstvertex
3554 bool lower_base_vertex;
3557 * If enabled, gl_HelperInvocation will be lowered as:
3559 * !((1 << sample_id) & sample_mask_in)
3561 * This depends on some possibly hw implementation details, which may
3562 * not be true for all hw. In particular that the FS is only executed
3563 * for covered samples or for helper invocations. So, do not blindly
3564 * enable this option.
3566 * Note: See also issue #22 in ARB_shader_image_load_store
3568 bool lower_helper_invocation;
3571 * Convert gl_SampleMaskIn to gl_HelperInvocation as follows:
3573 * gl_SampleMaskIn == 0 ---> gl_HelperInvocation
3574 * gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
3576 bool optimize_sample_mask_in;
3578 bool lower_cs_local_index_to_id;
3579 bool lower_cs_local_id_to_index;
3581 /* Prevents lowering global_invocation_id to be in terms of workgroup_id */
3582 bool has_cs_global_id;
3584 bool lower_device_index_to_zero;
3586 /* Set if nir_lower_pntc_ytransform() should invert gl_PointCoord.
3587 * Either when the frame buffer is flipped or GL_POINT_SPRITE_COORD_ORIGIN
3588 * is GL_LOWER_LEFT.
3590 bool lower_wpos_pntc;
3593 * Set if nir_op_[iu]hadd and nir_op_[iu]rhadd instructions should be
3594 * lowered to simple arithmetic.
3596 * If this flag is set, the lowering will be applied to all bit-sizes of
3597 * these instructions.
3599 * \sa ::lower_hadd64
3604 * Set if only 64-bit nir_op_[iu]hadd and nir_op_[iu]rhadd instructions
3605 * should be lowered to simple arithmetic.
3607 * If this flag is set, the lowering will be applied to only 64-bit
3608 * versions of these instructions.
3615 * Set if nir_op_uadd_sat should be lowered to simple arithmetic.
3617 * If this flag is set, the lowering will be applied to all bit-sizes of
3618 * these instructions.
3620 bool lower_uadd_sat;
3623 * Set if nir_op_usub_sat should be lowered to simple arithmetic.
3625 * If this flag is set, the lowering will be applied to all bit-sizes of
3626 * these instructions.
3628 bool lower_usub_sat;
3631 * Set if nir_op_iadd_sat and nir_op_isub_sat should be lowered to simple
3634 * If this flag is set, the lowering will be applied to all bit-sizes of
3635 * these instructions.
3637 bool lower_iadd_sat;
3640 * Set if imul_32x16 and umul_32x16 should be lowered to simple
3641 * arithmetic.
3643 bool lower_mul_32x16;
3646 * Set if uclz should be lowered to find_msb_rev.
3651 * Should IO be re-vectorized? Some scalar ISAs still operate on vec4's
3652 * for IO purposes and would prefer loads/stores be vectorized.
3655 bool vectorize_tess_levels;
3656 bool lower_to_scalar;
3657 nir_instr_filter_cb lower_to_scalar_filter;
3660 * Disables potentially harmful algebraic transformations for architectures
3661 * with SIMD-within-a-register semantics.
3663 * Note, to actually vectorize 16bit instructions, use nir_opt_vectorize()
3664 * with a suitable callback function.
3666 bool vectorize_vec2_16bit;
3669 * Should the linker unify inputs_read/outputs_written between adjacent
3670 * shader stages which are linked into a single program?
3672 bool unify_interfaces;
3675 * Should nir_lower_io() create load_interpolated_input intrinsics?
3677 * If not, it generates regular load_input intrinsics and interpolation
3678 * information must be inferred from the list of input nir_variables.
3680 bool use_interpolated_input_intrinsics;
3684 * Whether nir_lower_io() will lower interpolateAt functions to
3685 * load_interpolated_input intrinsics.
3687 * Unlike use_interpolated_input_intrinsics this will only lower these
3688 * functions and leave input load intrinsics untouched.
3690 bool lower_interpolate_at;
3692 /* Lowers when 32x32->64 bit multiplication is not supported */
3693 bool lower_mul_2x32_64;
3695 /* Lowers when rotate instruction is not supported */
3698 /** Backend supports ternary addition */
3702 * Backend supports imul24, and would like to use it (when possible)
3703 * for address/offset calculation. If true, driver should call
3704 * nir_lower_amul(). (If not set, amul will automatically be lowered
3705 * to imul.)
3709 /** Backend supports umul24, if not set umul24 will automatically be lowered
3710 * to imul with masked inputs */
3713 /** Backend supports umad24, if not set umad24 will automatically be lowered
3714 * to imul with masked inputs and iadd */
3717 /* Backend supports fused compare against zero and csel */
3718 bool has_fused_comp_and_csel;
3720 /** Backend supports fsub, if not set fsub will automatically be lowered to
3721 * fadd(x, fneg(y)). If true, driver should call nir_opt_algebraic_late(). */
3724 /** Backend supports isub, if not set isub will automatically be lowered to
3725 * iadd(x, ineg(y)). If true, driver should call nir_opt_algebraic_late(). */
3728 /** Backend supports pack_32_4x8 or pack_32_4x8_split. */
3729 bool has_pack_32_4x8;
3731 /** Backend supports txs, if not nir_lower_tex(..) uses txs-free variants
3732 * for rect texture lowering. */
3735 /** Backend supports sdot_4x8 opcodes. */
3738 /** Backend supports udot_4x8 opcodes. */
3741 /** Backend supports sudot_4x8 opcodes. */
3744 /** Backend supports sdot_2x16 and udot_2x16 opcodes. */
3747 /* Whether to generate only scoped_barrier intrinsics instead of the set of
3748 * memory and control barrier intrinsics based on GLSL.
3750 bool use_scoped_barrier;
3752 /** Backend supports fmulz (and ffmaz if lower_ffma32=false) */
3755 /** Backend supports 32bit ufind_msb_rev and ifind_msb_rev. */
3756 bool has_find_msb_rev;
3758 /** Backend supports pack_half_2x16_rtz_split. */
3759 bool has_pack_half_2x16_rtz;
3762 * Is this the Intel vec4 backend?
3764 * Used to inhibit algebraic optimizations that are known to be harmful on
3765 * the Intel vec4 backend. This is generally applicable to any
3766 * optimization that might cause more immediate values to be used in
3767 * 3-source (e.g., ffma and flrp) instructions.
3772 * For most Intel GPUs, all ternary operations such as FMA and BFE cannot
3773 * have immediates, so two to three instructions may eventually be needed.
3775 bool avoid_ternary_with_two_constants;
3777 /** Whether 8-bit ALU is supported. */
3778 bool support_8bit_alu;
3780 /** Whether 16-bit ALU is supported. */
3781 bool support_16bit_alu;
3783 unsigned max_unroll_iterations;
3784 unsigned max_unroll_iterations_aggressive;
3785 unsigned max_unroll_iterations_fp64;
3787 bool lower_uniforms_to_ubo;
3789 /* If the precision is ignored, backends that don't handle
3790 * different precisions when passing data between stages and use
3791 * vectorized IO can pack more varyings when linking. */
3792 bool linker_ignore_precision;
3794 /* Specifies if indirect sampler array access will trigger forced loop
3797 bool force_indirect_unrolling_sampler;
3799 /* Some older drivers don't support GLSL versions with the concept of flat
3800 * varyings and also don't support integers. This setting helps us avoid
3801 * marking varyings as flat and potentially having them changed to ints via
3802 * varying packing.
3807 * Specifies which type of indirectly accessed variables should force
3810 nir_variable_mode force_indirect_unrolling;
3812 nir_lower_int64_options lower_int64_options;
3813 nir_lower_doubles_options lower_doubles_options;
3814 nir_divergence_options divergence_analysis_options;
3817 * Support pack varyings with different interpolation location
3818 * (center, centroid, sample) and mode (flat, noperspective, smooth)
3821 nir_pack_varying_options pack_varying_options;
3824 * Lower load_deref/store_deref of inputs and outputs into
3825 * load_input/store_input intrinsics. This is used by nir_lower_io_passes.
3827 bool lower_io_variables;
3830 * The masks of shader stages that support indirect indexing with
3831 * load_input and store_output intrinsics. It's used when
3832 * lower_io_variables is true. This is used by nir_lower_io_passes.
3834 uint8_t support_indirect_inputs;
3835 uint8_t support_indirect_outputs;
3838 * Remove varyings that are loaded from a uniform and let the fragment
3839 * shader load the uniform directly. GPUs that pass varyings through
3840 * memory can certainly benefit from this; GPUs that pass varyings through
3841 * on-chip resources may not, since it saves on-chip resources but can
3842 * increase memory pressure when fragment work far exceeds vertex work, so it is better left disabled.
3844 bool lower_varying_from_uniform;
3846 /** store the variable offset into the intrinsic range_base instead
3847 * of adding it to the image index.
3849 bool lower_image_offset_to_range_base;
3851 /** store the variable offset into the intrinsic range_base instead
3852 * of adding it to the atomic source
3854 bool lower_atomic_offset_to_range_base;
3855 } nir_shader_compiler_options;
3857 typedef struct nir_shader {
3860 /** list of uniforms (nir_variable) */
3861 struct exec_list variables;
3863 /** Set of driver-specific options for the shader.
3865 * The memory for the options is expected to be kept in a single static
3866 * copy by the driver.
3868 const struct nir_shader_compiler_options *options;
3870 /** Various bits of compile-time information about a given shader */
3871 struct shader_info info;
3873 struct exec_list functions; /** < list of nir_function */
3876 * The size of the variable space for load_input_*, load_uniform_*, etc.
3877 * intrinsics. This is in back-end specific units which is likely one of
3878 * bytes, dwords, or vec4s depending on context and back-end.
3880 unsigned num_inputs, num_uniforms, num_outputs;
3882 /** Size in bytes of required implicitly bound global memory */
3883 unsigned global_mem_size;
3885 /** Size in bytes of required scratch space */
3886 unsigned scratch_size;
3888 /** Constant data associated with this shader.
3890 * Constant data is loaded through load_constant intrinsics (as compared to
3891 * the NIR load_const instructions which have the constant value inlined
3892 * into them). This is usually generated by nir_opt_large_constants (so
3893 * shaders don't have to load_const into a temporary array when they want
3894 * to indirect on a const array).
3896 void *constant_data;
3897 /** Size of the constant data associated with the shader, in bytes */
3898 unsigned constant_data_size;
3900 struct nir_xfb_info *xfb_info;
3902 unsigned printf_info_count;
3903 u_printf_info *printf_info;
3906 #define nir_foreach_function(func, shader) \
3907 foreach_list_typed(nir_function, func, node, &(shader)->functions)
3909 static inline nir_function_impl *
3910 nir_shader_get_entrypoint(const nir_shader *shader)
3912 nir_function *func = NULL;
3914 nir_foreach_function(function, shader) {
3915 assert(func == NULL);
3916 if (function->is_entrypoint) {
3927 assert(func->num_params == 0);
3932 static inline nir_function *
3933 nir_shader_get_function_for_name(const nir_shader *shader, const char *name)
3935 nir_foreach_function(func, shader) {
3936 if (strcmp(func->name, name) == 0)
3937 return func;
3940 return NULL;
3943 void nir_remove_non_entrypoints(nir_shader *shader);
3945 nir_shader *nir_shader_create(void *mem_ctx,
3946 gl_shader_stage stage,
3947 const nir_shader_compiler_options *options,
3950 nir_register *nir_local_reg_create(nir_function_impl *impl);
3952 void nir_reg_remove(nir_register *reg);
3954 /** Adds a variable to the appropriate list in nir_shader */
3955 void nir_shader_add_variable(nir_shader *shader, nir_variable *var);
3958 nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
3960 assert(var->data.mode == nir_var_function_temp);
3961 exec_list_push_tail(&impl->locals, &var->node);
3964 /** creates a variable, sets a few defaults, and adds it to the list */
3965 nir_variable *nir_variable_create(nir_shader *shader,
3966 nir_variable_mode mode,
3967 const struct glsl_type *type,
3969 /** creates a local variable and adds it to the list */
3970 nir_variable *nir_local_variable_create(nir_function_impl *impl,
3971 const struct glsl_type *type,
3974 nir_variable *nir_find_variable_with_location(nir_shader *shader,
3975 nir_variable_mode mode,
3978 nir_variable *nir_find_variable_with_driver_location(nir_shader *shader,
3979 nir_variable_mode mode,
3982 void nir_sort_variables_with_modes(nir_shader *shader,
3983 int (*compar)(const nir_variable *,
3984 const nir_variable *),
3985 nir_variable_mode modes);
3987 /** creates a function and adds it to the shader's list of functions */
3988 nir_function *nir_function_create(nir_shader *shader, const char *name);
3990 nir_function_impl *nir_function_impl_create(nir_function *func);
3991 /** creates a function_impl that isn't tied to any particular function */
3992 nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);
3994 nir_block *nir_block_create(nir_shader *shader);
3995 nir_if *nir_if_create(nir_shader *shader);
3996 nir_loop *nir_loop_create(nir_shader *shader);
3998 nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);
4000 /** requests that the given pieces of metadata be generated */
4001 void nir_metadata_require(nir_function_impl *impl, nir_metadata required, ...);
4002 /** dirties all but the preserved metadata */
4003 void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
4004 /** Preserves all metadata for the given shader */
4005 void nir_shader_preserve_all_metadata(nir_shader *shader);
4007 /** creates an instruction with default swizzle/writemask/etc. with NULL registers */
4008 nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);
4010 nir_deref_instr *nir_deref_instr_create(nir_shader *shader,
4011 nir_deref_type deref_type);
4013 nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);
4015 nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
4016 unsigned num_components,
4019 nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
4020 nir_intrinsic_op op);
4022 nir_call_instr *nir_call_instr_create(nir_shader *shader,
4023 nir_function *callee);
4025 /** Creates a NIR texture instruction */
4026 nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);
4028 nir_phi_instr *nir_phi_instr_create(nir_shader *shader);
4029 nir_phi_src *nir_phi_instr_add_src(nir_phi_instr *instr, nir_block *pred, nir_src src);
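/* Editorial sketch: merging a value that differs along the two paths of an
 * if, assuming "shader", "then_block"/"then_def", "else_block"/"else_def",
 * and "merge_block" are in scope, and that phi->dest has already been
 * initialized as an SSA destination with the proper component count and
 * bit size:
 *
 *    nir_phi_instr *phi = nir_phi_instr_create(shader);
 *    nir_phi_instr_add_src(phi, then_block, nir_src_for_ssa(then_def));
 *    nir_phi_instr_add_src(phi, else_block, nir_src_for_ssa(else_def));
 *    nir_instr_insert(nir_before_block(merge_block), &phi->instr);
 */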
4031 nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);
4033 nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
4034 unsigned num_components,
4037 nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
4040 * NIR Cursors and Instruction Insertion API
4043 * A tiny struct representing a point to insert/extract instructions or
4044 * control flow nodes. Helps reduce the combinatorial explosion of possible
4045 * points to insert/extract.
4047 * \sa nir_control_flow.h
4050 nir_cursor_before_block,
4051 nir_cursor_after_block,
4052 nir_cursor_before_instr,
4053 nir_cursor_after_instr,
4054 } nir_cursor_option;
4057 nir_cursor_option option;
4064 static inline nir_block *
4065 nir_cursor_current_block(nir_cursor cursor)
4067 if (cursor.option == nir_cursor_before_instr ||
4068 cursor.option == nir_cursor_after_instr) {
4069 return cursor.instr->block;
4071 return cursor.block;
4075 bool nir_cursors_equal(nir_cursor a, nir_cursor b);
4077 static inline nir_cursor
4078 nir_before_block(nir_block *block)
4081 cursor.option = nir_cursor_before_block;
4082 cursor.block = block;
4086 static inline nir_cursor
4087 nir_after_block(nir_block *block)
4090 cursor.option = nir_cursor_after_block;
4091 cursor.block = block;
4095 static inline nir_cursor
4096 nir_before_instr(nir_instr *instr)
4099 cursor.option = nir_cursor_before_instr;
4100 cursor.instr = instr;
4104 static inline nir_cursor
4105 nir_after_instr(nir_instr *instr)
4108 cursor.option = nir_cursor_after_instr;
4109 cursor.instr = instr;
4113 static inline nir_cursor
4114 nir_before_block_after_phis(nir_block *block)
4116 nir_phi_instr *last_phi = nir_block_last_phi_instr(block);
4117 if (last_phi)
4118 return nir_after_instr(&last_phi->instr);
4120 return nir_before_block(block);
4123 static inline nir_cursor
4124 nir_after_block_before_jump(nir_block *block)
4126 nir_instr *last_instr = nir_block_last_instr(block);
4127 if (last_instr && last_instr->type == nir_instr_type_jump) {
4128 return nir_before_instr(last_instr);
4130 return nir_after_block(block);
static inline nir_cursor
nir_before_src(nir_src *src)
{
   if (src->is_if) {
      nir_block *prev_block =
         nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
      assert(!nir_block_ends_in_jump(prev_block));
      return nir_after_block(prev_block);
   } else if (src->parent_instr->type == nir_instr_type_phi) {
#ifndef NDEBUG
      /* Sanity-check that the given src actually belongs to this phi. */
      nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
      bool found = false;
      nir_foreach_phi_src(phi_src, cond_phi) {
         if (phi_src->src.ssa == src->ssa) {
            found = true;
            break;
         }
      }
      assert(found);
#endif
      /* The list_entry() macro is a generic container-of macro, it just
       * happens to have a more specific name.
       */
      nir_phi_src *phi_src = list_entry(src, nir_phi_src, src);
      return nir_after_block_before_jump(phi_src->pred);
   } else {
      return nir_before_instr(src->parent_instr);
   }
}
static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_before_block(nir_cf_node_as_block(node));

   return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
}

static inline nir_cursor
nir_after_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
}

static inline nir_cursor
nir_after_phis(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         return nir_before_instr(instr);
   }
   return nir_after_block(block);
}

static inline nir_cursor
nir_after_instr_and_phis(nir_instr *instr)
{
   if (instr->type == nir_instr_type_phi)
      return nir_after_phis(instr->block);
   else
      return nir_after_instr(instr);
}

static inline nir_cursor
nir_after_cf_node_and_phis(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));

   return nir_after_phis(block);
}

static inline nir_cursor
nir_before_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *first_node = exec_node_data(nir_cf_node,
                                            exec_list_get_head(cf_list), node);
   return nir_before_cf_node(first_node);
}

static inline nir_cursor
nir_after_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *last_node = exec_node_data(nir_cf_node,
                                           exec_list_get_tail(cf_list), node);
   return nir_after_cf_node(last_node);
}
/**
 * Insert a NIR instruction at the given cursor.
 *
 * Note: This does not update the cursor.
 */
void nir_instr_insert(nir_cursor cursor, nir_instr *instr);

bool nir_instr_move(nir_cursor cursor, nir_instr *instr);

static inline void
nir_instr_insert_before(nir_instr *instr, nir_instr *before)
{
   nir_instr_insert(nir_before_instr(instr), before);
}

static inline void
nir_instr_insert_after(nir_instr *instr, nir_instr *after)
{
   nir_instr_insert(nir_after_instr(instr), after);
}

static inline void
nir_instr_insert_before_block(nir_block *block, nir_instr *before)
{
   nir_instr_insert(nir_before_block(block), before);
}

static inline void
nir_instr_insert_after_block(nir_block *block, nir_instr *after)
{
   nir_instr_insert(nir_after_block(block), after);
}

static inline void
nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_node(node), before);
}

static inline void
nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_node(node), after);
}

static inline void
nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_list(list), before);
}

static inline void
nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_list(list), after);
}
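/* Example (illustrative): the cursor constructors above compose with
 * nir_instr_insert().  Assuming "instr" is a freshly created instruction and
 * "loop" is a valid nir_loop:
 *
 *    nir_instr_insert(nir_before_cf_node(&loop->cf_node), instr);
 *
 * is equivalent to nir_instr_insert_before_cf(&loop->cf_node, instr).
 */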
void nir_instr_remove_v(nir_instr *instr);
void nir_instr_free(nir_instr *instr);
void nir_instr_free_list(struct exec_list *list);

/** Removes the instruction and returns a cursor at its former position */
static inline nir_cursor
nir_instr_remove(nir_instr *instr)
{
   nir_cursor cursor;
   nir_instr *prev = nir_instr_prev(instr);
   if (prev) {
      cursor = nir_after_instr(prev);
   } else {
      cursor = nir_before_block(instr->block);
   }
   nir_instr_remove_v(instr);
   return cursor;
}

nir_cursor nir_instr_free_and_dce(nir_instr *instr);
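/* Example (illustrative): removing instructions while iterating requires the
 * _safe iteration variants so the walk survives the removal:
 *
 *    nir_foreach_instr_safe(instr, block) {
 *       if (instr->type == nir_instr_type_jump)
 *          nir_instr_remove(instr);
 *    }
 */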
nir_ssa_def *nir_instr_ssa_def(nir_instr *instr);
bool nir_instr_def_is_register(nir_instr *instr);

typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
                         void *state);
static inline bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
static inline bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
bool nir_foreach_phi_src_leaving_block(nir_block *block,
                                       nir_foreach_src_cb cb,
                                       void *state);
nir_const_value *nir_src_as_const_value(nir_src src);

#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro)          \
static inline c_type *                                            \
nir_src_as_ ## name (nir_src src)                                 \
{                                                                 \
   return src.is_ssa && src.ssa->parent_instr->type == type_enum  \
          ? cast_macro(src.ssa->parent_instr) : NULL;             \
}

NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
NIR_SRC_AS_(intrinsic, nir_intrinsic_instr,
            nir_instr_type_intrinsic, nir_instr_as_intrinsic)
NIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref)

bool nir_src_is_always_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
bool nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2);
static inline void
nir_src_rewrite_ssa(nir_src *src, nir_ssa_def *new_ssa)
{
   assert(src->is_ssa && src->ssa);
   assert(src->is_if ? (src->parent_if != NULL) : (src->parent_instr != NULL));
   list_del(&src->use_link);
   src->ssa = new_ssa;
   list_addtail(&src->use_link, &new_ssa->uses);
}

static inline void
nir_instr_rewrite_src_ssa(ASSERTED nir_instr *instr,
                          nir_src *src, nir_ssa_def *new_ssa)
{
   assert(!src->is_if);
   assert(src->parent_instr == instr);
   nir_src_rewrite_ssa(src, new_ssa);
}

void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                            nir_dest new_dest);

void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                       unsigned num_components, unsigned bit_size,
                       const char *name);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                      unsigned num_components, unsigned bit_size);

static inline void
nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
                           const struct glsl_type *type,
                           const char *name)
{
   assert(glsl_type_is_vector_or_scalar(type));
   nir_ssa_dest_init(instr, dest, glsl_get_components(type),
                     glsl_get_bit_size(type), name);
}
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_ssa_def *new_ssa);
void nir_ssa_def_rewrite_uses_src(nir_ssa_def *def, nir_src new_src);
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_ssa_def *new_ssa,
                                    nir_instr *after_me);

nir_component_mask_t nir_src_components_read(const nir_src *src);
nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);

static inline bool
nir_ssa_def_is_unused(nir_ssa_def *ssa)
{
   return list_is_empty(&ssa->uses);
}
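/* Example (illustrative): a trivial dead-code check built from the helpers
 * above.  An instruction whose only SSA def has no uses (and which has no
 * side effects) is a candidate for removal:
 *
 *    nir_ssa_def *def = nir_instr_ssa_def(instr);
 *    if (def && nir_ssa_def_is_unused(def))
 *       nir_instr_remove(instr);
 */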
/** Returns the next block, disregarding structure
 *
 * The ordering is deterministic but has no guarantees beyond that.  In
 * particular, it is not guaranteed to be dominance-preserving.
 */
nir_block *nir_block_unstructured_next(nir_block *block);
nir_block *nir_unstructured_start_block(nir_function_impl *impl);

#define nir_foreach_block_unstructured(block, impl) \
   for (nir_block *block = nir_unstructured_start_block(impl); block != NULL; \
        block = nir_block_unstructured_next(block))

#define nir_foreach_block_unstructured_safe(block, impl) \
   for (nir_block *block = nir_unstructured_start_block(impl), \
        *next = nir_block_unstructured_next(block); \
        block != NULL; \
        block = next, next = nir_block_unstructured_next(block))
/*
 * Finds the next basic block in source-code order; returns NULL if there is
 * none.
 */
nir_block *nir_block_cf_tree_next(nir_block *block);

/* Performs the opposite of nir_block_cf_tree_next() */
nir_block *nir_block_cf_tree_prev(nir_block *block);

/* Gets the first block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);

/* Gets the last block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);

/* Gets the next block after a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
/* Macros for loops that visit blocks in source-code order */

#define nir_foreach_block(block, impl) \
   for (nir_block *block = nir_start_block(impl); block != NULL; \
        block = nir_block_cf_tree_next(block))

#define nir_foreach_block_safe(block, impl) \
   for (nir_block *block = nir_start_block(impl), \
        *next = nir_block_cf_tree_next(block); \
        block != NULL; \
        block = next, next = nir_block_cf_tree_next(block))

#define nir_foreach_block_reverse(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
        block = nir_block_cf_tree_prev(block))

#define nir_foreach_block_reverse_safe(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl), \
        *prev = nir_block_cf_tree_prev(block); \
        block != NULL; \
        block = prev, prev = nir_block_cf_tree_prev(block))

#define nir_foreach_block_in_cf_node(block, node) \
   for (nir_block *block = nir_cf_node_cf_tree_first(node); \
        block != nir_cf_node_cf_tree_next(node); \
        block = nir_block_cf_tree_next(block))
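/* Example (illustrative): counting ALU instructions with the iteration
 * macros above.  "impl" is assumed to be a valid nir_function_impl:
 *
 *    unsigned num_alu = 0;
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr(instr, block) {
 *          if (instr->type == nir_instr_type_alu)
 *             num_alu++;
 *       }
 *    }
 */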
/* If the following CF node is an if, this function returns that if.
 * Otherwise, it returns NULL.
 */
nir_if *nir_block_get_following_if(nir_block *block);

nir_loop *nir_block_get_following_loop(nir_block *block);

nir_block **nir_block_get_predecessors_sorted(const nir_block *block, void *mem_ctx);

void nir_index_local_regs(nir_function_impl *impl);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);

void nir_index_blocks(nir_function_impl *impl);

unsigned nir_shader_index_vars(nir_shader *shader, nir_variable_mode modes);
unsigned nir_function_impl_index_vars(nir_function_impl *impl);
void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);
void nir_print_deref(const nir_deref_instr *deref, FILE *fp);
void nir_log_shader_annotated_tagged(enum mesa_log_level level, const char *tag, nir_shader *shader, struct hash_table *annotations);
#define nir_log_shadere(s) nir_log_shader_annotated_tagged(MESA_LOG_ERROR, (MESA_LOG_TAG), (s), NULL)
#define nir_log_shaderw(s) nir_log_shader_annotated_tagged(MESA_LOG_WARN, (MESA_LOG_TAG), (s), NULL)
#define nir_log_shaderi(s) nir_log_shader_annotated_tagged(MESA_LOG_INFO, (MESA_LOG_TAG), (s), NULL)
#define nir_log_shader_annotated(s, annotations) nir_log_shader_annotated_tagged(MESA_LOG_ERROR, (MESA_LOG_TAG), (s), annotations)

char *nir_shader_as_str(nir_shader *nir, void *mem_ctx);
char *nir_shader_as_str_annotated(nir_shader *nir, struct hash_table *annotations, void *mem_ctx);
char *nir_instr_as_str(const nir_instr *instr, void *mem_ctx);
/** Shallow clone of a single instruction. */
nir_instr *nir_instr_clone(nir_shader *s, const nir_instr *orig);

/** Clone a single instruction, including a remap table to rewrite sources. */
nir_instr *nir_instr_clone_deep(nir_shader *s, const nir_instr *orig,
                                struct hash_table *remap_table);

/** Shallow clone of a single ALU instruction. */
nir_alu_instr *nir_alu_instr_clone(nir_shader *s, const nir_alu_instr *orig);

nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(nir_shader *shader,
                                           const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);

void nir_shader_replace(nir_shader *dest, nir_shader *src);

void nir_shader_serialize_deserialize(nir_shader *s);
#ifndef NDEBUG
void nir_validate_shader(nir_shader *shader, const char *when);
void nir_validate_ssa_dominance(nir_shader *shader, const char *when);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);

static inline bool
should_skip_nir(const char *name)
{
   static const char *list = NULL;
   if (!list) {
      /* Comma separated list of names to skip. */
      list = getenv("NIR_SKIP");
      if (!list)
         list = "";
   }

   if (!list[0])
      return false;

   return comma_separated_list_contains(list, name);
}

static inline bool
should_print_nir(nir_shader *shader)
{
   if ((shader->info.internal && !NIR_DEBUG(PRINT_INTERNAL)) ||
       shader->info.stage < 0 ||
       shader->info.stage > MESA_SHADER_KERNEL)
      return false;

   return unlikely(nir_debug_print_shader[shader->info.stage]);
}
#else
static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
static inline void nir_validate_ssa_dominance(nir_shader *shader, const char *when) { (void) shader; (void)when; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_skip_nir(UNUSED const char *pass_name) { return false; }
static inline bool should_print_nir(UNUSED nir_shader *shader) { return false; }
#endif /* NDEBUG */
#define _PASS(pass, nir, do_pass) do {                               \
   if (should_skip_nir(#pass)) {                                     \
      printf("skipping %s\n", #pass);                                \
      break;                                                         \
   }                                                                 \
   do_pass                                                           \
   if (NIR_DEBUG(CLONE)) {                                           \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      nir_shader_replace(nir, clone);                                \
   }                                                                 \
   if (NIR_DEBUG(SERIALIZE)) {                                       \
      nir_shader_serialize_deserialize(nir);                         \
   }                                                                 \
} while (0)

#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir,          \
   nir_metadata_set_validation_flag(nir);                            \
   if (should_print_nir(nir))                                        \
      printf("%s\n", #pass);                                         \
   if (pass(nir, ##__VA_ARGS__)) {                                   \
      nir_validate_shader(nir, "after " #pass " in " __FILE__);      \
      progress = true;                                               \
      if (should_print_nir(nir))                                     \
         nir_print_shader(nir, stdout);                              \
      nir_metadata_check_validation_flag(nir);                       \
   }                                                                 \
)

#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir,                  \
   if (should_print_nir(nir))                                        \
      printf("%s\n", #pass);                                         \
   pass(nir, ##__VA_ARGS__);                                         \
   nir_validate_shader(nir, "after " #pass " in " __FILE__);         \
   if (should_print_nir(nir))                                        \
      nir_print_shader(nir, stdout);                                 \
)

#define NIR_SKIP(name) should_skip_nir(#name)
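/* Example (illustrative): a typical optimization loop built on NIR_PASS.
 * Each pass reports progress, and the loop repeats until a fixed point:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *       NIR_PASS(progress, nir, nir_opt_cse);
 *    } while (progress);
 *
 * NIR_PASS_V is the same but for passes whose return value is ignored:
 *
 *    NIR_PASS_V(nir, nir_lower_system_values);
 */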
/** An instruction filtering callback with writemask
 *
 * Returns true if the instruction should be processed with the associated
 * writemask and false otherwise.
 */
typedef bool (*nir_instr_writemask_filter_cb)(const nir_instr *,
                                              unsigned writemask, const void *);

/** A simple instruction lowering callback
 *
 * Many instruction lowering passes can be written as a simple function which
 * takes an instruction as its input and returns a sequence of instructions
 * that implement the consumed instruction.  This function type represents
 * such a lowering function.  When called, a function with this prototype
 * should either return NULL indicating that no lowering needs to be done or
 * emit a sequence of instructions using the provided builder (whose cursor
 * will already be placed after the instruction to be lowered) and return the
 * resulting nir_ssa_def.
 */
typedef nir_ssa_def *(*nir_lower_instr_cb)(struct nir_builder *,
                                           nir_instr *, void *);

/**
 * Special return value for nir_lower_instr_cb when some progress occurred
 * (like changing an input to the instr) that didn't result in a replacement
 * SSA def being generated.
 */
#define NIR_LOWER_INSTR_PROGRESS ((nir_ssa_def *)(uintptr_t)1)

/**
 * Special return value for nir_lower_instr_cb when some progress occurred
 * that should remove the current instruction that doesn't create an output
 * (like a store).
 */
#define NIR_LOWER_INSTR_PROGRESS_REPLACE ((nir_ssa_def *)(uintptr_t)2)
/** Iterate over all the instructions in a nir_function_impl and lower them
 * using the provided callbacks
 *
 * This function implements the guts of a standard lowering pass for you.  It
 * iterates over all of the instructions in a nir_function_impl and calls the
 * filter callback on each one.  If the filter callback returns true, it then
 * calls the lowering callback on the instruction.  (Splitting it this way
 * allows us to avoid some save/restore work for instructions we know won't be
 * lowered.)  If the instruction is dead after the lowering is complete, it
 * will be removed.  If new instructions are added, the lowering callback will
 * also be called on them in case multiple lowerings are required.
 *
 * If the callback indicates that the original instruction is replaced (either
 * through a new SSA def or NIR_LOWER_INSTR_PROGRESS_REPLACE), then the
 * instruction is removed along with any now-dead SSA defs it used.
 *
 * The metadata for the nir_function_impl will also be updated.  If any blocks
 * are added (they cannot be removed), dominance and block indices will be
 * invalidated.
 */
bool nir_function_impl_lower_instructions(nir_function_impl *impl,
                                          nir_instr_filter_cb filter,
                                          nir_lower_instr_cb lower,
                                          void *cb_data);
bool nir_shader_lower_instructions(nir_shader *shader,
                                   nir_instr_filter_cb filter,
                                   nir_lower_instr_cb lower,
                                   void *cb_data);
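/* Example (illustrative sketch): a filter/lower callback pair for a pass
 * built on nir_shader_lower_instructions().  The callback names and the
 * choice of lowering fsat to fmin/fmax are hypothetical; the pattern is
 * "filter cheaply, then rebuild with the builder" (whose cursor is already
 * placed after the instruction):
 *
 *    static bool
 *    is_fsat(const nir_instr *instr, UNUSED const void *data)
 *    {
 *       return instr->type == nir_instr_type_alu &&
 *              nir_instr_as_alu(instr)->op == nir_op_fsat;
 *    }
 *
 *    static nir_ssa_def *
 *    lower_fsat(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
 *    {
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       nir_ssa_def *x = nir_ssa_for_alu_src(b, alu, 0);
 *       return nir_fmin(b, nir_fmax(b, x, nir_imm_float(b, 0.0f)),
 *                       nir_imm_float(b, 1.0f));
 *    }
 *
 * Returning NULL from the lowering callback instead means "no lowering
 * needed" for that instruction.
 */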
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);

nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);
bool nir_block_is_unreachable(nir_block *block);

void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);

void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);

void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_cfg(nir_shader *shader, FILE *fp);

void nir_gs_count_vertices_and_primitives(const nir_shader *shader,
                                          int *out_vtxcnt,
                                          int *out_prmcnt,
                                          unsigned num_streams);

typedef enum {
   nir_group_all,
   nir_group_same_resource_only,
} nir_load_grouping;

void nir_group_loads(nir_shader *shader, nir_load_grouping grouping,
                     unsigned max_distance);
bool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_var_copies(nir_shader *shader);
bool nir_split_per_member_structs(nir_shader *shader);
bool nir_split_struct_vars(nir_shader *shader, nir_variable_mode modes);

bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);

void nir_inline_function_impl(struct nir_builder *b,
                              const nir_function_impl *impl,
                              nir_ssa_def **params,
                              struct hash_table *shader_var_remap);
bool nir_inline_functions(nir_shader *shader);

void nir_find_inlinable_uniforms(nir_shader *shader);
void nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
                         const uint32_t *uniform_values,
                         const uint16_t *uniform_dw_offsets);
bool nir_collect_src_uniforms(const nir_src *src, int component,
                              uint32_t *uni_offsets, uint8_t *num_offsets,
                              unsigned max_num_bo, unsigned max_offset);
void nir_add_inlinable_uniforms(const nir_src *cond, nir_loop_info *info,
                                uint32_t *uni_offsets, uint8_t *num_offsets,
                                unsigned max_num_bo, unsigned max_offset);

bool nir_propagate_invariant(nir_shader *shader, bool invariant_prim);

void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
void nir_lower_deref_copy_instr(struct nir_builder *b,
                                nir_intrinsic_instr *copy);
bool nir_lower_var_copies(nir_shader *shader);

bool nir_opt_memcpy(nir_shader *shader);
bool nir_lower_memcpy(nir_shader *shader);
void nir_fixup_deref_modes(nir_shader *shader);

bool nir_lower_global_vars_to_local(nir_shader *shader);

typedef enum {
   nir_lower_direct_array_deref_of_vec_load = (1 << 0),
   nir_lower_indirect_array_deref_of_vec_load = (1 << 1),
   nir_lower_direct_array_deref_of_vec_store = (1 << 2),
   nir_lower_indirect_array_deref_of_vec_store = (1 << 3),
} nir_lower_array_deref_of_vec_options;

bool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
                                  nir_lower_array_deref_of_vec_options options);

bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes,
                               uint32_t max_lower_array_len);

bool nir_lower_indirect_var_derefs(nir_shader *shader,
                                   const struct set *vars);

bool nir_lower_locals_to_regs(nir_shader *shader);

void nir_lower_io_to_temporaries(nir_shader *shader,
                                 nir_function_impl *entrypoint,
                                 bool outputs, bool inputs);

bool nir_lower_vars_to_scratch(nir_shader *shader,
                               nir_variable_mode modes,
                               int size_threshold,
                               glsl_type_size_align_func size_align);
void nir_lower_clip_halfz(nir_shader *shader);

void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);

void nir_gather_ssa_types(nir_function_impl *impl,
                          BITSET_WORD *float_types,
                          BITSET_WORD *int_types);

void nir_assign_var_locations(nir_shader *shader, nir_variable_mode mode,
                              unsigned *size,
                              int (*type_size)(const struct glsl_type *, bool));
/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_remove_unused_io_vars(nir_shader *shader, nir_variable_mode mode,
                               uint64_t *used_by_other_stage,
                               uint64_t *used_by_other_stage_patches);
void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                          bool default_to_smooth_interp);
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
void nir_link_varying_precision(nir_shader *producer, nir_shader *consumer);

bool nir_slot_is_sysval_output(gl_varying_slot slot,
                               gl_shader_stage next_shader);
bool nir_slot_is_varying(gl_varying_slot slot);
bool nir_slot_is_sysval_output_and_varying(gl_varying_slot slot,
                                           gl_shader_stage next_shader);
bool nir_remove_varying(nir_intrinsic_instr *intr, gl_shader_stage next_shader);
void nir_remove_sysval_output(nir_intrinsic_instr *intr);
bool nir_lower_amul(nir_shader *shader,
                    int (*type_size)(const struct glsl_type *, bool));

bool nir_lower_ubo_vec4(nir_shader *shader);

void nir_assign_io_var_locations(nir_shader *shader,
                                 nir_variable_mode mode,
                                 unsigned *size,
                                 gl_shader_stage stage);

typedef struct {
   uint8_t num_linked_io_vars;
   uint8_t num_linked_patch_io_vars;
} nir_linked_io_var_info;

nir_linked_io_var_info
nir_assign_linked_io_var_locations(nir_shader *producer,
                                   nir_shader *consumer);
typedef enum {
   /* If set, this causes all 64-bit IO operations to be lowered on-the-fly
    * to 32-bit operations.  This is only valid for nir_var_shader_in/out
    * modes.
    */
   nir_lower_io_lower_64bit_to_32 = (1 << 0),
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *, bool),
                  nir_lower_io_options);

bool nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode modes);
bool nir_lower_color_inputs(nir_shader *nir);
void nir_lower_io_passes(nir_shader *nir);
bool nir_io_add_intrinsic_xfb_info(nir_shader *nir);
bool
nir_lower_vars_to_explicit_types(nir_shader *shader,
                                 nir_variable_mode modes,
                                 glsl_type_size_align_func type_info);
bool
nir_gather_explicit_io_initializers(nir_shader *shader,
                                    void *dst, size_t dst_size,
                                    nir_variable_mode mode);
bool nir_lower_vec3_to_vec4(nir_shader *shader, nir_variable_mode modes);

typedef enum {
   /**
    * An address format which is a simple 32-bit global GPU address.
    */
   nir_address_format_32bit_global,

   /**
    * An address format which is a simple 64-bit global GPU address.
    */
   nir_address_format_64bit_global,

   /**
    * An address format which is a 64-bit global GPU address encoded as a
    * 2x32-bit vector.
    */
   nir_address_format_2x32bit_global,

   /**
    * An address format which is a 64-bit global base address and a 32-bit
    * offset.
    *
    * This is identical to 64bit_bounded_global except that bounds checking
    * is not applied when lowering to global access.  Even though the size is
    * never used for an actual bounds check, it needs to be valid so we can
    * lower deref_buffer_array_length properly.
    */
   nir_address_format_64bit_global_32bit_offset,

   /**
    * An address format which is a bounds-checked 64-bit global GPU address.
    *
    * The address is comprised as a 32-bit vec4 where .xy are a uint64_t base
    * address stored with the low bits in .x and high bits in .y, .z is a
    * size, and .w is an offset.  When the final I/O operation is lowered, .w
    * is checked against .z and the operation is predicated on the result.
    */
   nir_address_format_64bit_bounded_global,

   /**
    * An address format which is comprised of a vec2 where the first
    * component is a buffer index and the second is an offset.
    */
   nir_address_format_32bit_index_offset,

   /**
    * An address format which is a 64-bit value, where the high 32 bits
    * are a buffer index, and the low 32 bits are an offset.
    */
   nir_address_format_32bit_index_offset_pack64,

   /**
    * An address format which is comprised of a vec3 where the first two
    * components specify the buffer and the third is an offset.
    */
   nir_address_format_vec2_index_32bit_offset,

   /**
    * An address format which represents generic pointers with a 62-bit
    * pointer and a 2-bit enum in the top two bits.  The top two bits have
    * the following meanings:
    *
    *  - 0x0: Global memory
    *  - 0x1: Shared memory
    *  - 0x2: Scratch memory
    *  - 0x3: Global memory
    *
    * The redundancy between 0x0 and 0x3 is because of Intel sign-extension of
    * addresses.  Valid global memory addresses may naturally have either 0 or
    * ~0 as their high bits.
    *
    * Shared and scratch pointers are represented as 32-bit offsets with the
    * top 32 bits only being used for the enum.  This allows us to avoid
    * 64-bit address calculations in a bunch of cases.
    */
   nir_address_format_62bit_generic,

   /**
    * An address format which is a simple 32-bit offset.
    */
   nir_address_format_32bit_offset,

   /**
    * An address format which is a simple 32-bit offset cast to 64-bit.
    */
   nir_address_format_32bit_offset_as_64bit,

   /**
    * An address format representing a purely logical addressing model.  In
    * this model, all deref chains must be complete from the dereference
    * operation to the variable.  Cast derefs are not allowed.  These
    * addresses will be 32-bit scalars but the format is immaterial because
    * you can always chase the chain.
    */
   nir_address_format_logical,
} nir_address_format;
unsigned
nir_address_format_bit_size(nir_address_format addr_format);

unsigned
nir_address_format_num_components(nir_address_format addr_format);

static inline const struct glsl_type *
nir_address_format_to_glsl_type(nir_address_format addr_format)
{
   unsigned bit_size = nir_address_format_bit_size(addr_format);
   assert(bit_size == 32 || bit_size == 64);
   return glsl_vector_type(bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64,
                           nir_address_format_num_components(addr_format));
}
const nir_const_value *nir_address_format_null_value(nir_address_format addr_format);

nir_ssa_def *nir_build_addr_ieq(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                                nir_address_format addr_format);

nir_ssa_def *nir_build_addr_isub(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                                 nir_address_format addr_format);

nir_ssa_def *nir_explicit_io_address_from_deref(struct nir_builder *b,
                                                nir_deref_instr *deref,
                                                nir_ssa_def *base_addr,
                                                nir_address_format addr_format);

bool nir_get_explicit_deref_align(nir_deref_instr *deref,
                                  bool default_to_type_align,
                                  uint32_t *align_mul,
                                  uint32_t *align_offset);

void nir_lower_explicit_io_instr(struct nir_builder *b,
                                 nir_intrinsic_instr *io_instr,
                                 nir_ssa_def *addr,
                                 nir_address_format addr_format);

bool nir_lower_explicit_io(nir_shader *shader,
                           nir_variable_mode modes,
                           nir_address_format);
typedef struct {
   uint8_t num_components;
   uint8_t bit_size;
   uint16_t align;
} nir_mem_access_size_align;

typedef nir_mem_access_size_align
(*nir_lower_mem_access_bit_sizes_cb)(nir_intrinsic_op intrin,
                                     uint8_t bytes,
                                     uint8_t bit_size,
                                     uint32_t align_mul,
                                     uint32_t align_offset,
                                     bool offset_is_const,
                                     const void *cb_data);

bool nir_lower_mem_access_bit_sizes(nir_shader *shader,
                                    nir_variable_mode modes,
                                    nir_lower_mem_access_bit_sizes_cb cb,
                                    const void *cb_data);
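/* Example (illustrative sketch): a nir_lower_mem_access_bit_sizes_cb that
 * clamps every access to at most a 32-bit vec4.  This is a hypothetical
 * driver policy, shown only to make the callback contract concrete:
 *
 *    static nir_mem_access_size_align
 *    limit_access(nir_intrinsic_op intrin, uint8_t bytes, uint8_t bit_size,
 *                 uint32_t align_mul, uint32_t align_offset,
 *                 bool offset_is_const, const void *cb_data)
 *    {
 *       return (nir_mem_access_size_align){
 *          .num_components = CLAMP(bytes / 4u, 1u, 4u),
 *          .bit_size = 32,
 *          .align = 4,
 *       };
 *    }
 */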
typedef bool (*nir_should_vectorize_mem_func)(unsigned align_mul,
                                              unsigned align_offset,
                                              unsigned bit_size,
                                              unsigned num_components,
                                              nir_intrinsic_instr *low, nir_intrinsic_instr *high,
                                              void *data);

typedef struct {
   nir_should_vectorize_mem_func callback;
   nir_variable_mode modes;
   nir_variable_mode robust_modes;
   void *cb_data;
   bool has_shared2_amd;
} nir_load_store_vectorize_options;

bool nir_opt_load_store_vectorize(nir_shader *shader, const nir_load_store_vectorize_options *options);
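/* Example (illustrative sketch): a nir_should_vectorize_mem_func that only
 * allows 32-bit accesses that fit in a 4-byte-aligned vec4.  The policy is
 * hypothetical; it exists only to make the callback contract concrete:
 *
 *    static bool
 *    mem_vectorize_cb(unsigned align_mul, unsigned align_offset,
 *                     unsigned bit_size, unsigned num_components,
 *                     nir_intrinsic_instr *low, nir_intrinsic_instr *high,
 *                     void *data)
 *    {
 *       return align_mul % 4 == 0 && bit_size == 32 && num_components <= 4;
 *    }
 *
 * and then:
 *
 *    nir_load_store_vectorize_options opts = {
 *       .callback = mem_vectorize_cb,
 *       .modes = nir_var_mem_ubo | nir_var_mem_ssbo,
 *    };
 *    nir_opt_load_store_vectorize(shader, &opts);
 */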
typedef struct nir_lower_shader_calls_options {
   /* Address format used for load/store operations on the call stack. */
   nir_address_format address_format;

   /* Stack alignment */
   unsigned stack_alignment;

   /* Put loads from the stack as close as possible to where they're needed.
    * You might want to disable combined_loads for best effects.
    */
   bool localized_loads;

   /* If this function pointer is not NULL, lower_shader_calls will run
    * nir_opt_load_store_vectorize for stack load/store operations.  Otherwise
    * the optimization is not run.
    */
   nir_should_vectorize_mem_func vectorizer_callback;

   /* Data passed to vectorizer_callback */
   void *vectorizer_data;
} nir_lower_shader_calls_options;

bool
nir_lower_shader_calls(nir_shader *shader,
                       const nir_lower_shader_calls_options *options,
                       nir_shader ***resume_shaders_out,
                       uint32_t *num_resume_shaders_out,
                       void *mem_ctx);
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_arrayed_index_src(nir_intrinsic_instr *instr);
nir_src *nir_get_shader_call_payload_src(nir_intrinsic_instr *call);

bool nir_is_arrayed_io(const nir_variable *var, gl_shader_stage stage);

bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
bool nir_lower_vars_to_ssa(nir_shader *shader);

bool nir_remove_dead_derefs(nir_shader *shader);
bool nir_remove_dead_derefs_impl(nir_function_impl *impl);

typedef struct nir_remove_dead_variables_options {
   bool (*can_remove_var)(nir_variable *var, void *data);
   void *can_remove_var_data;
} nir_remove_dead_variables_options;

bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes,
                               const nir_remove_dead_variables_options *options);

bool nir_lower_variable_initializers(nir_shader *shader,
                                     nir_variable_mode modes);
bool nir_zero_initialize_shared_memory(nir_shader *shader,
                                       const unsigned shared_size,
                                       const unsigned chunk_size);
bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader, nir_instr_writemask_filter_cb cb,
                           const void *_data);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
                          bool alpha_to_one,
                          const gl_state_index16 *alpha_ref_state_tokens);
bool nir_lower_alu(nir_shader *shader);

bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
                    bool always_precise);

bool nir_scale_fdiv(nir_shader *shader);
bool nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *data);
bool nir_lower_alu_width(nir_shader *shader, nir_vectorize_cb cb, const void *data);
bool nir_lower_bool_to_bitsize(nir_shader *shader);
bool nir_lower_bool_to_float(nir_shader *shader, bool has_fcsel_ne);
bool nir_lower_bool_to_int32(nir_shader *shader);
bool nir_opt_simplify_convert_alu_types(nir_shader *shader);
bool nir_lower_const_arrays_to_uniforms(nir_shader *shader,
                                        unsigned max_uniform_components);
bool nir_lower_convert_alu_types(nir_shader *shader,
                                 bool (*should_lower)(nir_intrinsic_instr *));
bool nir_lower_constant_convert_alu_types(nir_shader *shader);
bool nir_lower_alu_conversion_to_intrinsic(nir_shader *shader);
bool nir_lower_int_to_float(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader, bool lower_all);
void nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer);
void nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
                                                  bool outputs_only);
bool nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
bool nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
bool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask);
bool nir_vectorize_tess_levels(nir_shader *shader);
nir_shader *nir_create_passthrough_tcs_impl(const nir_shader_compiler_options *options,
                                            unsigned *locations, unsigned num_locations,
                                            uint8_t patch_vertices);
nir_shader *nir_create_passthrough_tcs(const nir_shader_compiler_options *options,
                                       const nir_shader *vs, uint8_t patch_vertices);
nir_shader *nir_create_passthrough_gs(const nir_shader_compiler_options *options,
                                      const nir_shader *prev_stage,
                                      enum shader_prim primitive_type,
                                      int flat_interp_mask_offset,
                                      int last_pv_vert_offset,
                                      bool emulate_edgeflags,
                                      bool force_line_strip_out);
bool nir_lower_fragcolor(nir_shader *shader, unsigned max_cbufs);
bool nir_lower_fragcoord_wtrans(nir_shader *shader);
bool nir_lower_viewport_transform(nir_shader *shader);
bool nir_lower_uniforms_to_ubo(nir_shader *shader, bool dword_packed, bool load_vec4);

bool nir_lower_is_helper_invocation(nir_shader *shader);

bool nir_lower_single_sampled(nir_shader *shader);
typedef struct nir_lower_subgroups_options {
   uint8_t subgroup_size;
   uint8_t ballot_bit_size;
   uint8_t ballot_components;
   bool lower_to_scalar:1;
   bool lower_vote_trivial:1;
   bool lower_vote_eq:1;
   bool lower_subgroup_masks:1;
   bool lower_relative_shuffle:1;
   bool lower_shuffle_to_32bit:1;
   bool lower_shuffle_to_swizzle_amd:1;
   bool lower_shuffle:1;
   bool lower_quad:1;
   bool lower_quad_broadcast_dynamic:1;
   bool lower_quad_broadcast_dynamic_to_const:1;
   bool lower_elect:1;
   bool lower_read_invocation_to_cond:1;
   bool lower_rotate_to_shuffle:1;
} nir_lower_subgroups_options;

bool nir_lower_subgroups(nir_shader *shader,
                         const nir_lower_subgroups_options *options);
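/* Example (illustrative): lowering subgroup operations for a wave32 GPU that
 * wants scalar, 32-bit ballots.  The exact flag choice is hypothetical;
 * drivers pick whichever lowerings their hardware needs:
 *
 *    const nir_lower_subgroups_options opts = {
 *       .subgroup_size = 32,
 *       .ballot_bit_size = 32,
 *       .ballot_components = 1,
 *       .lower_to_scalar = true,
 *       .lower_vote_eq = true,
 *    };
 *    nir_lower_subgroups(shader, &opts);
 */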
bool nir_lower_system_values(nir_shader *shader);

nir_ssa_def *
nir_build_lowered_load_helper_invocation(struct nir_builder *b);

typedef struct nir_lower_compute_system_values_options {
   bool has_base_global_invocation_id:1;
   bool has_base_workgroup_id:1;
   bool shuffle_local_ids_for_quad_derivatives:1;
   bool lower_local_invocation_index:1;
   bool lower_cs_local_id_to_index:1;
   bool lower_workgroup_id_to_index:1;
} nir_lower_compute_system_values_options;

bool nir_lower_compute_system_values(nir_shader *shader,
                                     const nir_lower_compute_system_values_options *options);

struct nir_lower_sysvals_to_varyings_options {
   bool frag_coord:1;
   bool front_face:1;
   bool point_coord:1;
};

bool
nir_lower_sysvals_to_varyings(nir_shader *shader,
                              const struct nir_lower_sysvals_to_varyings_options *options);
enum PACKED nir_lower_tex_packing {
   /** No packing */
   nir_lower_tex_packing_none = 0,
   /**
    * The sampler returns up to 2 32-bit words of half floats or 16-bit signed
    * or unsigned ints based on the sampler type.
    */
   nir_lower_tex_packing_16,
   /** The sampler returns 1 32-bit word of 4x8 unorm */
   nir_lower_tex_packing_8,
};
typedef struct nir_lower_tex_options {
   /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
    * sampler types a texture projector is lowered.
    */
   unsigned lower_txp;

   /**
    * If true, lower texture projector for any array sampler dims
    */
   bool lower_txp_array;

   /**
    * If true, lower away nir_tex_src_offset for all texelfetch instructions.
    */
   bool lower_txf_offset;

   /**
    * If true, lower away nir_tex_src_offset for all rect textures.
    */
   bool lower_rect_offset;

   /**
    * If not NULL, this filter will return true for tex instructions that
    * should lower away nir_tex_src_offset.
    */
   nir_instr_filter_cb lower_offset_filter;

   /**
    * If true, lower rect textures to 2D, using txs to fetch the
    * texture dimensions and dividing the texture coords by the
    * texture dims to normalize.
    */
   bool lower_rect;

   /**
    * If true, convert yuv to rgb.
    */
   unsigned lower_y_uv_external;
   unsigned lower_y_u_v_external;
   unsigned lower_yx_xuxv_external;
   unsigned lower_xy_uxvx_external;
   unsigned lower_ayuv_external;
   unsigned lower_xyuv_external;
   unsigned lower_yuv_external;
   unsigned lower_yu_yv_external;
   unsigned lower_y41x_external;
   unsigned bt709_external;
   unsigned bt2020_external;
   unsigned yuv_full_range_external;

   /**
    * To emulate certain texture wrap modes, this can be used
    * to saturate the specified tex coord to [0.0, 1.0].  The
    * bits are according to sampler #, ie. if, for example:
    *
    *   (conf->saturate_s & (1 << n))
    *
    * is true, then the s coord for sampler n is saturated.
    *
    * Note that clamping must happen *after* projector lowering
    * so any projected texture sample instruction with a clamped
    * coordinate gets automatically lowered, regardless of the
    * 'lower_txp' setting.
    */
   unsigned saturate_s;
   unsigned saturate_t;
   unsigned saturate_r;

   /* Bitmask of textures that need swizzling.
    *
    * If (swizzle_result & (1 << texture_index)), then the swizzle in
    * swizzles[texture_index] is applied to the result of the texturing
    * operation.
    */
   unsigned swizzle_result;

   /* A swizzle for each texture.  Values 0-3 represent x, y, z, or w swizzles
    * while 4 and 5 represent 0 and 1 respectively.
    *
    * Indexed by texture-id.
    */
   uint8_t swizzles[32][4];

   /* Can be used to scale sampled values in range required by the
    * format.
    *
    * Indexed by texture-id.
    */
   float scale_factors[32];

   /**
    * Bitmap of textures that need srgb to linear conversion.  If
    * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
    * of the texture are lowered to linear.
    */
   unsigned lower_srgb;

   /**
    * If true, lower nir_texop_txd on cube maps with nir_texop_txl.
    */
   bool lower_txd_cube_map;

   /**
    * If true, lower nir_texop_txd on 3D surfaces with nir_texop_txl.
    */
   bool lower_txd_3d;

   /**
    * If true, lower nir_texop_txd on array surfaces with nir_texop_txl.
    */
   bool lower_txd_array;

   /**
    * If true, lower nir_texop_txd on shadow samplers (except cube maps)
    * with nir_texop_txl.  Notice that cube map shadow samplers are lowered
    * with lower_txd_cube_map.
    */
   bool lower_txd_shadow;

   /**
    * If true, lower nir_texop_txd on all samplers to a nir_texop_txl.
    * Implies lower_txd_cube_map and lower_txd_shadow.
    */
   bool lower_txd;

   /**
    * If true, lower nir_texop_txb that try to use shadow compare and min_lod
    * at the same time to a nir_texop_lod, some math, and nir_texop_tex.
    */
   bool lower_txb_shadow_clamp;

   /**
    * If true, lower nir_texop_txd on shadow samplers when it uses min_lod
    * with nir_texop_txl.  This includes cube maps.
    */
   bool lower_txd_shadow_clamp;

   /**
    * If true, lower nir_texop_txd when it uses both offset and min_lod
    * with nir_texop_txl.  This includes cube maps.
    */
   bool lower_txd_offset_clamp;

   /**
    * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
    * sampler is bindless.
    */
   bool lower_txd_clamp_bindless_sampler;

   /**
    * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
    * sampler index is not statically determinable to be less than 16.
    */
   bool lower_txd_clamp_if_sampler_index_not_lt_16;

   /**
    * If true, lower nir_texop_txs with a non-0-lod into nir_texop_txs with
    * 0-lod followed by a nir_ishr.
    */
   bool lower_txs_lod;

   /**
    * If true, lower nir_texop_txs for cube arrays to a nir_texop_txs with a
    * 2D array type followed by a nir_idiv by 6.
    */
   bool lower_txs_cube_array;

   /**
    * If true, apply a .bagr swizzle on tg4 results to handle Broadcom's
    * mixed-up tg4 locations.
    */
   bool lower_tg4_broadcom_swizzle;

   /**
    * If true, lowers tg4 with 4 constant offsets to 4 tg4 calls
    */
   bool lower_tg4_offsets;

   /**
    * Lower txf_ms to fragment_mask_fetch and fragment_fetch and
    * samples_identical to fragment_mask_fetch.
    */
   bool lower_to_fragment_fetch_amd;

   /**
    * To lower packed sampler return formats.
    *
    * Indexed by sampler-id.
    */
   enum nir_lower_tex_packing lower_tex_packing[32];

   /**
    * If true, lower nir_texop_lod to return -FLT_MAX if the sum of the
    * absolute values of derivatives is 0 for all coordinates.
    */
   bool lower_lod_zero_width;

   /* Turns nir_texop_tex and other ops with an implicit derivative, in stages
    * without implicit derivatives (like the vertex shader), into ops with an
    * explicit LOD of 0.
    */
   bool lower_invalid_implicit_lod;

   /* If true, round the layer component of the coordinates source to the
    * nearest integer for all array ops.
    */
   bool lower_array_layer_round_even;

   /* If true, texture_index (sampler_index) will be zero if a texture_offset
    * (sampler_offset) source is present.  This is convenient for backends
    * that support indirect indexing of textures (samplers) but not
    * offsetting it.
    */
   bool lower_index_to_offset;

   /**
    * Payload data to be sent to callback / filter functions.
    */
   void *callback_data;
} nir_lower_tex_options;
/** Lowers complex texture instructions to simpler ones */
bool nir_lower_tex(nir_shader *shader,
                   const nir_lower_tex_options *options);
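/* Example (illustrative): a driver without native texture projection or
 * rectangle textures might call nir_lower_tex() like this.  The option
 * choice is hypothetical:
 *
 *    const nir_lower_tex_options opts = {
 *       .lower_txp = ~0u,    // lower projectors for every sampler dim
 *       .lower_rect = true,  // normalize rect coords using txs
 *    };
 *    nir_lower_tex(shader, &opts);
 */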
typedef struct nir_lower_tex_shadow_swizzle {
   unsigned swizzle_r:3;
   unsigned swizzle_g:3;
   unsigned swizzle_b:3;
   unsigned swizzle_a:3;
} nir_lower_tex_shadow_swizzle;

bool
nir_lower_tex_shadow(nir_shader *s,
                     unsigned n_states,
                     enum compare_func *compare_func,
                     nir_lower_tex_shadow_swizzle *tex_swizzles);

typedef struct nir_lower_image_options {
   /**
    * If true, lower cube size operations.
    */
   bool lower_cube_size;

   /**
    * Lower multi sample image load and samples_identical to use
    * fragment_mask_load.
    */
   bool lower_to_fragment_mask_load_amd;

   /**
    * Lower image_samples to a constant in case the driver doesn't support
    * multisampled images.
    */
   bool lower_image_samples_to_one;
} nir_lower_image_options;

bool nir_lower_image(nir_shader *nir,
                     const nir_lower_image_options *options);

bool nir_lower_readonly_images_to_tex(nir_shader *shader, bool per_variable);
enum nir_lower_non_uniform_access_type {
   nir_lower_non_uniform_ubo_access = (1 << 0),
   nir_lower_non_uniform_ssbo_access = (1 << 1),
   nir_lower_non_uniform_texture_access = (1 << 2),
   nir_lower_non_uniform_image_access = (1 << 3),
};

/* Given the nir_src used for the resource, return the channels which might
 * be non-uniform.
 */
typedef nir_component_mask_t (*nir_lower_non_uniform_access_callback)(const nir_src *, void *);

typedef struct nir_lower_non_uniform_access_options {
   enum nir_lower_non_uniform_access_type types;
   nir_lower_non_uniform_access_callback callback;
   void *callback_data;
} nir_lower_non_uniform_access_options;

bool nir_has_non_uniform_access(nir_shader *shader, enum nir_lower_non_uniform_access_type types);
bool nir_opt_non_uniform_access(nir_shader *shader);
bool nir_lower_non_uniform_access(nir_shader *shader,
                                  const nir_lower_non_uniform_access_options *options);
typedef struct {
   /* Whether 16-bit floating point arithmetic should be allowed in 8-bit
    * division lowering
    */
   bool allow_fp16;
} nir_lower_idiv_options;

bool nir_lower_idiv(nir_shader *shader, const nir_lower_idiv_options *options);
typedef struct nir_input_attachment_options {
   bool use_fragcoord_sysval;
   bool use_layer_id_sysval;
   bool use_view_id_for_layer;
} nir_input_attachment_options;

bool nir_lower_input_attachments(nir_shader *shader,
                                 const nir_input_attachment_options *options);

bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables,
                       bool use_vars,
                       bool use_clipdist_array,
                       const gl_state_index16 clipplane_state_tokens[][STATE_LENGTH]);
bool nir_lower_clip_gs(nir_shader *shader, unsigned ucp_enables,
                       bool use_clipdist_array,
                       const gl_state_index16 clipplane_state_tokens[][STATE_LENGTH]);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables,
                       bool use_clipdist_array);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
bool nir_lower_clip_disable(nir_shader *shader, unsigned clip_plane_enable);

void nir_lower_point_size_mov(nir_shader *shader,
                              const gl_state_index16 *pointsize_state_tokens);
bool nir_lower_frexp(nir_shader *nir);

bool nir_lower_two_sided_color(nir_shader *shader, bool face_sysval);

bool nir_lower_clamp_color_outputs(nir_shader *shader);

bool nir_lower_flatshade(nir_shader *shader);

void nir_lower_passthrough_edgeflags(nir_shader *shader);
bool nir_lower_patch_vertices(nir_shader *nir, unsigned static_count,
                              const gl_state_index16 *uniform_state_tokens);

typedef struct nir_lower_wpos_ytransform_options {
   gl_state_index16 state_tokens[STATE_LENGTH];
   bool fs_coord_origin_upper_left :1;
   bool fs_coord_origin_lower_left :1;
   bool fs_coord_pixel_center_integer :1;
   bool fs_coord_pixel_center_half_integer :1;
} nir_lower_wpos_ytransform_options;

bool nir_lower_wpos_ytransform(nir_shader *shader,
                               const nir_lower_wpos_ytransform_options *options);
bool nir_lower_wpos_center(nir_shader *shader);

bool nir_lower_pntc_ytransform(nir_shader *shader,
                               const gl_state_index16 clipplane_state_tokens[][STATE_LENGTH]);

bool nir_lower_wrmasks(nir_shader *shader, nir_instr_filter_cb cb, const void *data);

bool nir_lower_fb_read(nir_shader *shader);

typedef struct nir_lower_drawpixels_options {
   gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
   gl_state_index16 scale_state_tokens[STATE_LENGTH];
   gl_state_index16 bias_state_tokens[STATE_LENGTH];
   unsigned drawpix_sampler;
   unsigned pixelmap_sampler;
   bool pixel_maps :1;
   bool scale_and_bias :1;
} nir_lower_drawpixels_options;

void nir_lower_drawpixels(nir_shader *shader,
                          const nir_lower_drawpixels_options *options);

typedef struct nir_lower_bitmap_options {
   unsigned sampler;
   bool swizzle_xxxx;
} nir_lower_bitmap_options;

void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned offset_align_state);

typedef enum {
   nir_lower_int_source_mods = 1 << 0,
   nir_lower_fabs_source_mods = 1 << 1,
   nir_lower_fneg_source_mods = 1 << 2,
   nir_lower_64bit_source_mods = 1 << 3,
   nir_lower_triop_abs = 1 << 4,
   nir_lower_all_source_mods = (1 << 5) - 1
} nir_lower_to_source_mods_flags;

#define nir_lower_float_source_mods (nir_lower_fabs_source_mods | nir_lower_fneg_source_mods)

bool nir_lower_to_source_mods(nir_shader *shader, nir_lower_to_source_mods_flags options);
typedef enum {
   nir_lower_gs_intrinsics_per_stream = 1 << 0,
   nir_lower_gs_intrinsics_count_primitives = 1 << 1,
   nir_lower_gs_intrinsics_count_vertices_per_primitive = 1 << 2,
   nir_lower_gs_intrinsics_overwrite_incomplete = 1 << 3,
} nir_lower_gs_intrinsics_flags;

bool nir_lower_gs_intrinsics(nir_shader *shader, nir_lower_gs_intrinsics_flags options);

typedef struct {
   bool payload_to_shared_for_atomics : 1;
   bool payload_to_shared_for_small_types : 1;
   uint32_t payload_offset_in_bytes;
} nir_lower_task_shader_options;

bool nir_lower_task_shader(nir_shader *shader, nir_lower_task_shader_options options);
typedef unsigned (*nir_lower_bit_size_callback)(const nir_instr *, void *);

bool nir_lower_bit_size(nir_shader *shader,
                        nir_lower_bit_size_callback callback,
                        void *callback_data);
bool nir_lower_64bit_phis(nir_shader *shader);

bool nir_split_64bit_vec3_and_vec4(nir_shader *shader);

nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
bool nir_lower_int64(nir_shader *shader);

nir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode);
bool nir_lower_doubles(nir_shader *shader, const nir_shader *softfp64,
                       nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
bool nir_recompute_io_bases(nir_shader *nir, nir_variable_mode modes);
bool nir_lower_mediump_vars(nir_shader *nir, nir_variable_mode modes);
bool nir_lower_mediump_io(nir_shader *nir, nir_variable_mode modes,
                          uint64_t varying_mask, bool use_16bit_slots);
bool nir_force_mediump_io(nir_shader *nir, nir_variable_mode modes,
                          nir_alu_type types);
bool nir_unpack_16bit_varying_slots(nir_shader *nir, nir_variable_mode modes);
struct nir_fold_tex_srcs_options {
   unsigned sampler_dims;
   unsigned src_types;
};

struct nir_fold_16bit_tex_image_options {
   nir_rounding_mode rounding_mode;
   nir_alu_type fold_tex_dest_types;
   nir_alu_type fold_image_dest_types;
   bool fold_image_store_data;
   bool fold_image_srcs;
   unsigned fold_srcs_options_count;
   struct nir_fold_tex_srcs_options *fold_srcs_options;
};

bool nir_fold_16bit_tex_image(nir_shader *nir,
                              struct nir_fold_16bit_tex_image_options *options);
typedef struct {
   bool legalize_type;         /* whether this src should be legalized */
   uint8_t bit_size;           /* bit_size to enforce */
   nir_tex_src_type match_src; /* if bit_size is 0, match bit size of this */
} nir_tex_src_type_constraint, nir_tex_src_type_constraints[nir_num_tex_src_types];

bool nir_legalize_16bit_sampler_srcs(nir_shader *nir,
                                     nir_tex_src_type_constraints constraints);
bool nir_lower_point_size(nir_shader *shader, float min, float max);

void nir_lower_texcoord_replace(nir_shader *s, unsigned coord_replace,
                                bool point_coord_is_sysval, bool yinvert);

void nir_lower_texcoord_replace_late(nir_shader *s, unsigned coord_replace,
                                     bool point_coord_is_sysval);
typedef enum {
   nir_lower_interpolation_at_sample = (1 << 1),
   nir_lower_interpolation_at_offset = (1 << 2),
   nir_lower_interpolation_centroid = (1 << 3),
   nir_lower_interpolation_pixel = (1 << 4),
   nir_lower_interpolation_sample = (1 << 5),
} nir_lower_interpolation_options;

bool nir_lower_interpolation(nir_shader *shader,
                             nir_lower_interpolation_options options);

typedef enum {
   nir_lower_discard_if_to_cf = (1 << 0),
   nir_lower_demote_if_to_cf = (1 << 1),
   nir_lower_terminate_if_to_cf = (1 << 2),
} nir_lower_discard_if_options;

bool nir_lower_discard_if(nir_shader *shader, nir_lower_discard_if_options options);

bool nir_lower_discard_or_demote(nir_shader *shader,
                                 bool force_correct_quad_ops_after_discard);
bool nir_lower_memory_model(nir_shader *shader);

bool nir_lower_goto_ifs(nir_shader *shader);
bool nir_lower_continue_constructs(nir_shader *shader);

bool nir_shader_uses_view_index(nir_shader *shader);
bool nir_can_lower_multiview(nir_shader *shader);
bool nir_lower_multiview(nir_shader *shader, uint32_t view_mask);

typedef enum {
   nir_lower_fp16_rtz = (1 << 0),
   nir_lower_fp16_rtne = (1 << 1),
   nir_lower_fp16_ru = (1 << 2),
   nir_lower_fp16_rd = (1 << 3),
   nir_lower_fp16_all = 0xf,
} nir_lower_fp16_cast_options;
bool nir_lower_fp16_casts(nir_shader *shader, nir_lower_fp16_cast_options options);
bool nir_normalize_cubemap_coords(nir_shader *shader);

bool nir_shader_supports_implicit_lod(nir_shader *shader);
void nir_live_ssa_defs_impl(nir_function_impl *impl);

const BITSET_WORD *nir_get_live_ssa_defs(nir_cursor cursor, void *mem_ctx);

void nir_loop_analyze_impl(nir_function_impl *impl,
                           nir_variable_mode indirect_mask,
                           bool force_unroll_sampler_indirect);

bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);

bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);

void nir_convert_loop_to_lcssa(nir_loop *loop);
bool nir_convert_to_lcssa(nir_shader *shader, bool skip_invariants, bool skip_bool_invariants);
void nir_divergence_analysis(nir_shader *shader);
bool nir_update_instr_divergence(nir_shader *shader, nir_instr *instr);
bool nir_has_divergent_loop(nir_shader *shader);

/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
 * registers.  If false, convert all values (even those not involved in a phi
 * node) to registers.
 */
bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);

bool nir_lower_phis_to_regs_block(nir_block *block);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
bool nir_lower_samplers(nir_shader *shader);
bool nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_sampler_derefs);
bool nir_dedup_inline_samplers(nir_shader *shader);
bool nir_lower_ssbo(nir_shader *shader);
bool nir_lower_helper_writes(nir_shader *shader, bool lower_plain_stores);

typedef struct nir_lower_printf_options {
   bool treat_doubles_as_floats : 1;
   unsigned max_buffer_size;
} nir_lower_printf_options;

bool nir_lower_printf(nir_shader *nir, const nir_lower_printf_options *options);

/* This is here for unit tests. */
bool nir_opt_comparison_pre_impl(nir_function_impl *impl);

bool nir_opt_comparison_pre(nir_shader *shader);

typedef struct nir_opt_access_options {
   bool is_vulkan;
} nir_opt_access_options;

bool nir_opt_access(nir_shader *shader, const nir_opt_access_options *options);
bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_before_ffma(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_algebraic_distribute_src_mods(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);

/* Try to combine a and b into a.  Return true if combination was possible,
 * which will result in b being removed by the pass.  Return false if
 * combination wasn't possible.
 */
typedef bool (*nir_combine_barrier_cb)(
   nir_intrinsic_instr *a, nir_intrinsic_instr *b, void *data);

bool nir_opt_combine_barriers(nir_shader *shader,
                              nir_combine_barrier_cb combine_cb,
                              void *data);

bool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes);
bool nir_copy_prop_impl(nir_function_impl *impl);
bool nir_copy_prop(nir_shader *shader);

bool nir_opt_copy_prop_vars(nir_shader *shader);

bool nir_opt_cse(nir_shader *shader);

bool nir_opt_dce(nir_shader *shader);

bool nir_opt_dead_cf(nir_shader *shader);

bool nir_opt_dead_write_vars(nir_shader *shader);

bool nir_opt_deref_impl(nir_function_impl *impl);
bool nir_opt_deref(nir_shader *shader);

bool nir_opt_find_array_copies(nir_shader *shader);

bool nir_opt_fragdepth(nir_shader *shader);

bool nir_opt_gcm(nir_shader *shader, bool value_number);

bool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size);

typedef enum {
   nir_opt_if_aggressive_last_continue = (1 << 0),
   nir_opt_if_optimize_phi_true_false = (1 << 1),
} nir_opt_if_options;

bool nir_opt_if(nir_shader *shader, nir_opt_if_options options);

bool nir_opt_intrinsics(nir_shader *shader);

bool nir_opt_large_constants(nir_shader *shader,
                             glsl_type_size_align_func size_align,
                             unsigned threshold);

bool nir_opt_loop_unroll(nir_shader *shader);
5800 nir_move_const_undef = (1 << 0),
5801 nir_move_load_ubo = (1 << 1),
5802 nir_move_load_input = (1 << 2),
5803 nir_move_comparisons = (1 << 3),
5804 nir_move_copies = (1 << 4),
5805 nir_move_load_ssbo = (1 << 5),
5806 nir_move_load_uniform = (1 << 6),
5809 bool nir_can_move_instr(nir_instr *instr, nir_move_options options);
5811 bool nir_opt_sink(nir_shader *shader, nir_move_options options);
5813 bool nir_opt_move(nir_shader *shader, nir_move_options options);
typedef struct {
   /** nir_load_uniform max base offset */
   uint32_t uniform_max;

   /** nir_load_ubo_vec4 max base offset */
   uint32_t ubo_vec4_max;

   /** nir_var_mem_shared max base offset */
   uint32_t shared_max;

   /** nir_load/store_buffer_amd max base offset */
   uint32_t buffer_max;
} nir_opt_offsets_options;

bool nir_opt_offsets(nir_shader *shader, const nir_opt_offsets_options *options);
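
/* Example (illustrative): the limits describe how large an immediate base
 * offset the target can fold into each access type. A hypothetical backend
 * with a 16-bit uniform offset field and a 13-bit shared-memory offset field
 * might use:
 *
 *    const nir_opt_offsets_options offset_opts = {
 *       .uniform_max = 0xffff,
 *       .shared_max = 0x1fff,
 *    };
 *    progress |= nir_opt_offsets(shader, &offset_opts);
 */
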
bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
                             bool indirect_load_ok, bool expensive_alu_ok);

bool nir_opt_rematerialize_compares(nir_shader *shader);

bool nir_opt_remove_phis(nir_shader *shader);
bool nir_opt_remove_phis_block(nir_block *block);

bool nir_opt_phi_precision(nir_shader *shader);

bool nir_opt_shrink_stores(nir_shader *shader, bool shrink_image_store);

bool nir_opt_shrink_vectors(nir_shader *shader);

bool nir_opt_trivial_continues(nir_shader *shader);

bool nir_opt_undef(nir_shader *shader);

bool nir_lower_undef_to_zero(nir_shader *shader);

bool nir_opt_uniform_atomics(nir_shader *shader);

bool nir_opt_vectorize(nir_shader *shader, nir_vectorize_cb filter,
                       void *data);

bool nir_opt_conditional_discard(nir_shader *shader);
bool nir_opt_move_discards_to_top(nir_shader *shader);

bool nir_opt_ray_queries(nir_shader *shader);

bool nir_opt_ray_query_ranges(nir_shader *shader);

void nir_sweep(nir_shader *shader);

void nir_remap_dual_slot_attributes(nir_shader *shader,
                                    uint64_t *dual_slot_inputs);
uint64_t nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot);

nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);

static inline bool
nir_variable_is_in_ubo(const nir_variable *var)
{
   return (var->data.mode == nir_var_mem_ubo &&
           var->interface_type != NULL);
}

static inline bool
nir_variable_is_in_ssbo(const nir_variable *var)
{
   return (var->data.mode == nir_var_mem_ssbo &&
           var->interface_type != NULL);
}

static inline bool
nir_variable_is_in_block(const nir_variable *var)
{
   return nir_variable_is_in_ubo(var) || nir_variable_is_in_ssbo(var);
}

/* See default_ub_config in nir_range_analysis.c for documentation. */
typedef struct nir_unsigned_upper_bound_config {
   unsigned min_subgroup_size;
   unsigned max_subgroup_size;
   unsigned max_workgroup_invocations;
   unsigned max_workgroup_count[3];
   unsigned max_workgroup_size[3];

   uint32_t vertex_attrib_max[32];
} nir_unsigned_upper_bound_config;

uint32_t
nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
                         nir_ssa_scalar scalar,
                         const nir_unsigned_upper_bound_config *config);

bool
nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
                            nir_ssa_scalar ssa, unsigned const_val,
                            const nir_unsigned_upper_bound_config *config);
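
/* Example (illustrative sketch): bounding an SSA value, e.g. to prove an
 * array index is in range. The hash table caches results across queries;
 * "index_def" is a hypothetical nir_ssa_def being analyzed and the config
 * values are made-up hardware limits.
 *
 *    struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);
 *    const nir_unsigned_upper_bound_config cfg = {
 *       .min_subgroup_size = 32,
 *       .max_subgroup_size = 64,
 *       .max_workgroup_invocations = 1024,
 *       .max_workgroup_count = { 0xffff, 0xffff, 0xffff },
 *       .max_workgroup_size = { 1024, 1024, 64 },
 *    };
 *    uint32_t bound =
 *       nir_unsigned_upper_bound(shader, range_ht,
 *                                nir_ssa_scalar_resolved(index_def, 0),
 *                                &cfg);
 *    _mesa_hash_table_destroy(range_ht, NULL);
 */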

typedef struct nir_opt_preamble_options {
   /* True if gl_DrawID is considered uniform, i.e. if the preamble is run
    * at least once per "internal" draw rather than per user-visible draw.
    */
   bool drawid_uniform;

   /* True if the subgroup size is uniform. */
   bool subgroup_size_uniform;

   /* size/align for load/store_preamble. */
   void (*def_size)(nir_ssa_def *def, unsigned *size, unsigned *align);

   /* Total available size for load/store_preamble storage, in units
    * determined by def_size.
    */
   unsigned preamble_storage_size;

   /* Give the cost for an instruction. nir_opt_preamble will prioritize
    * instructions with higher costs. Instructions with cost 0 may still be
    * lifted, but only when required to lift other instructions with non-0
    * cost (e.g. a load_const source of an expression).
    */
   float (*instr_cost_cb)(nir_instr *instr, const void *data);

   /* Give the cost of rewriting the instruction to use load_preamble. This
    * cost may come from inserting extra move instructions, etc. If the
    * benefit doesn't exceed this cost, the instruction isn't rewritten.
    */
   float (*rewrite_cost_cb)(nir_ssa_def *def, const void *data);

   /* Instructions whose definitions should not be rewritten. These could
    * still be moved to the preamble, but they shouldn't be the root of a
    * replacement expression. Instructions with cost 0 and derefs are
    * automatically included by the pass.
    */
   nir_instr_filter_cb avoid_instr_cb;

   const void *cb_data;
} nir_opt_preamble_options;

bool
nir_opt_preamble(nir_shader *shader,
                 const nir_opt_preamble_options *options,
                 unsigned *size);

nir_function_impl *nir_shader_get_preamble(nir_shader *shader);
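
/* Illustrative sketch (hypothetical callbacks and costs, not a real driver's
 * values): a backend that stores preamble results in vec4 slots might
 * configure the pass like this:
 *
 *    static void
 *    vec4_def_size(nir_ssa_def *def, unsigned *size, unsigned *align)
 *    {
 *       *size = 1;  // every value occupies one vec4 slot
 *       *align = 1;
 *    }
 *
 *    static float
 *    alu_cost(nir_instr *instr, const void *data)
 *    {
 *       return instr->type == nir_instr_type_alu ? 1.0f : 0.0f;
 *    }
 *
 *    static float
 *    free_rewrite(nir_ssa_def *def, const void *data)
 *    {
 *       return 0.0f;  // assume load_preamble is free on this hardware
 *    }
 *
 *    const nir_opt_preamble_options preamble_opts = {
 *       .drawid_uniform = true,
 *       .subgroup_size_uniform = true,
 *       .def_size = vec4_def_size,
 *       .preamble_storage_size = 64,
 *       .instr_cost_cb = alu_cost,
 *       .rewrite_cost_cb = free_rewrite,
 *    };
 *    unsigned preamble_size = 0;
 *    progress |= nir_opt_preamble(shader, &preamble_opts, &preamble_size);
 */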

bool nir_lower_point_smooth(nir_shader *shader);
bool nir_lower_poly_line_smooth(nir_shader *shader, unsigned num_smooth_aa_sample);

bool nir_mod_analysis(nir_ssa_scalar val, nir_alu_type val_type, unsigned div, unsigned *mod);

#include "nir_inline_helpers.h"