nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
nir_src *nir_get_shader_call_payload_src(nir_intrinsic_instr *call);
-bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
+bool nir_is_arrayed_io(const nir_variable *var, gl_shader_stage stage);
bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
*cross_invocation = false;
*indirect = false;
- const bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
+ const bool per_vertex = nir_is_arrayed_io(var, shader->info.stage);
nir_deref_path path;
nir_deref_path_init(&path, deref, NULL);
{
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, shader->info.stage)) {
+ if (nir_is_arrayed_io(var, shader->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
* on Intel), verify that "peeling" the type twice is correct. This
* assert ensures we remember it.
*/
- assert(!nir_is_per_vertex_io(var, shader->info.stage));
+ assert(!nir_is_arrayed_io(var, shader->info.stage));
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
nir_deref_instr *deref, bool is_output_read)
{
const struct glsl_type *type = var->type;
- bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
+ bool per_vertex = nir_is_arrayed_io(var, shader->info.stage);
if (per_vertex) {
assert(glsl_type_is_array(type));
assert(var->data.location >= 0);
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
+ if (nir_is_arrayed_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
+ if (nir_is_arrayed_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
+ if (nir_is_arrayed_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
continue;
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, producer->info.stage) || var->data.per_view) {
+ if (nir_is_arrayed_io(var, producer->info.stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
if (!vc_info->initialised) {
const struct glsl_type *type = in_var->type;
- if (nir_is_per_vertex_io(in_var, consumer->info.stage) ||
+ if (nir_is_arrayed_io(in_var, consumer->info.stage) ||
in_var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
if (!vc_info->initialised) {
const struct glsl_type *type = out_var->type;
- if (nir_is_per_vertex_io(out_var, producer->info.stage)) {
+ if (nir_is_arrayed_io(out_var, producer->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
bool last_partial = false;
nir_foreach_variable_in_list(var, &io_vars) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_arrayed_io(var, stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
{
const struct glsl_type *type = variable->type;
- if (nir_is_per_vertex_io(variable, stage)) {
+ if (nir_is_arrayed_io(variable, stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
* array length.
*/
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, nir->info.stage))
+ if (nir_is_arrayed_io(var, nir->info.stage))
type = glsl_get_array_element(type);
assert(glsl_type_is_array(type));
}
/**
- * Return true if the given variable is a per-vertex input/output array.
- * (such as geometry shader inputs).
+ * Some inputs and outputs are arrayed, meaning that there is an extra level
+ * of array indexing to handle mismatches between the shader interface and the
+ * dispatch pattern of the shader. For instance, geometry shaders are
+ * executed per-primitive while their inputs and outputs are specified
+ * per-vertex, so all inputs and outputs have to be additionally indexed with
+ * the vertex index within the primitive.
*/
bool
-nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
+nir_is_arrayed_io(const nir_variable *var, gl_shader_stage stage)
{
if (var->data.patch || !glsl_type_is_array(var->type))
return false;
{
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, state->builder.shader->info.stage)) {
+ if (nir_is_arrayed_io(var, state->builder.shader->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
b->cursor = nir_before_instr(instr);
- const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
+ const bool per_vertex = nir_is_arrayed_io(var, b->shader->info.stage);
nir_ssa_def *offset;
nir_ssa_def *vertex_index = NULL;
/* For per-vertex input arrays (i.e. geometry shader inputs), skip the
* outermost array index. Process the rest normally.
*/
- if (nir_is_per_vertex_io(var, b->shader->info.stage)) {
+ if (nir_is_arrayed_io(var, b->shader->info.stage)) {
*vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
p++;
}
struct hash_entry *entry = _mesa_hash_table_search(ht, var);
if (!entry) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_arrayed_io(var, stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
if (glsl_type_is_matrix(type))
type = glsl_get_column_type(type);
- if (nir_is_per_vertex_io(var, b->shader->info.stage)) {
+ if (nir_is_arrayed_io(var, b->shader->info.stage)) {
type = glsl_array_type(type, glsl_get_length(element->type),
glsl_get_explicit_stride(element->type));
}
nir_deref_instr *element_deref = nir_build_deref_var(b, element);
- if (nir_is_per_vertex_io(var, b->shader->info.stage)) {
+ if (nir_is_arrayed_io(var, b->shader->info.stage)) {
assert(vertex_index);
element_deref = nir_build_deref_array(b, element_deref, vertex_index);
}
assert(path->path[0]->deref_type == nir_deref_type_var);
nir_deref_instr **p = &path->path[1];
- if (nir_is_per_vertex_io(var, b->shader->info.stage)) {
+ if (nir_is_arrayed_io(var, b->shader->info.stage)) {
p++;
}
nir_variable_mode mode = var->data.mode;
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, b.shader->info.stage)) {
+ if (nir_is_arrayed_io(var, b.shader->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
get_per_vertex_type(const nir_shader *shader, const nir_variable *var,
unsigned *num_vertices)
{
- if (nir_is_per_vertex_io(var, shader->info.stage)) {
+ if (nir_is_arrayed_io(var, shader->info.stage)) {
assert(glsl_type_is_array(var->type));
if (num_vertices)
*num_vertices = glsl_get_length(var->type);
const struct glsl_type *a_type_tail = a->type;
const struct glsl_type *b_type_tail = b->type;
- if (nir_is_per_vertex_io(a, shader->info.stage) !=
- nir_is_per_vertex_io(b, shader->info.stage))
+ if (nir_is_arrayed_io(a, shader->info.stage) !=
+ nir_is_arrayed_io(b, shader->info.stage))
return false;
/* They must have the same array structure */
{
nir_deref_instr *deref = nir_build_deref_var(b, new_var);
- if (nir_is_per_vertex_io(new_var, shader->info.stage)) {
+ if (nir_is_arrayed_io(new_var, shader->info.stage)) {
assert(leader->deref_type == nir_deref_type_array);
nir_ssa_def *index = leader->arr.index.ssa;
leader = nir_deref_instr_parent(leader);
assert(glsl_type_is_array(var->type));
const struct glsl_type *type = glsl_get_array_element(var->type);
- if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
+ if (nir_is_arrayed_io(var, state->shader->info.stage)) {
assert(glsl_type_is_array(type));
assert(glsl_type_is_scalar(glsl_get_array_element(type)));
} else {
}
struct vtn_type *per_vertex_type = var->type;
- if (nir_is_per_vertex_io(var->var, b->shader->info.stage))
+ if (nir_is_arrayed_io(var->var, b->shader->info.stage))
per_vertex_type = var->type->array_element;
/* Figure out the interface block type. */
unsigned semantic_name, semantic_index;
const struct glsl_type *type = variable->type;
- if (nir_is_per_vertex_io(variable, nir->info.stage)) {
+ if (nir_is_arrayed_io(variable, nir->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
i = variable->data.driver_location;
const struct glsl_type *type = variable->type;
- if (nir_is_per_vertex_io(variable, nir->info.stage)) {
+ if (nir_is_arrayed_io(variable, nir->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}