case nir_intrinsic_load_output:
case nir_intrinsic_load_per_vertex_output:
+ case nir_intrinsic_load_per_primitive_output:
   if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
       instr->intrinsic == nir_intrinsic_load_output) {
      shader->info.patch_outputs_read |= slot_mask;

case nir_intrinsic_store_output:
case nir_intrinsic_store_per_vertex_output:
+ case nir_intrinsic_store_per_primitive_output:
   if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
       instr->intrinsic == nir_intrinsic_store_output) {
      shader->info.patch_outputs_written |= slot_mask;
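
slot_mask is not defined in these hunks. A minimal sketch (not part of the patch; the helper name is illustrative and nir.h is assumed) of how it can be derived from the intrinsic's IO_SEMANTICS index, assuming the same one-bit-per-slot scheme the surrounding gather-info code uses for other I/O intrinsics:

/* Illustrative: one bit per output slot touched by the intrinsic. */
static uint64_t
output_slot_mask(const nir_intrinsic_instr *instr)
{
   nir_io_semantics sem = nir_intrinsic_io_semantics(instr);
   return BITFIELD64_RANGE(sem.location, sem.num_slots);
}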
load("output", [1], [BASE, COMPONENT, DEST_TYPE, IO_SEMANTICS], flags=[CAN_ELIMINATE])
# src[] = { vertex, offset }.
load("per_vertex_output", [1, 1], [BASE, COMPONENT, DEST_TYPE, IO_SEMANTICS], [CAN_ELIMINATE])
+# src[] = { primitive, offset }.
+load("per_primitive_output", [1, 1], [BASE, COMPONENT, DEST_TYPE, IO_SEMANTICS], [CAN_ELIMINATE])
# src[] = { offset }.
load("shared", [1], [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { value, offset }.
store("output", [1], [BASE, WRITE_MASK, COMPONENT, SRC_TYPE, IO_SEMANTICS])
# src[] = { value, vertex, offset }.
store("per_vertex_output", [1, 1], [BASE, WRITE_MASK, COMPONENT, SRC_TYPE, IO_SEMANTICS])
+# src[] = { value, primitive, offset }.
+store("per_primitive_output", [1, 1], [BASE, WRITE_MASK, COMPONENT, SRC_TYPE, IO_SEMANTICS])
# src[] = { value, block_index, offset }
store("ssbo", [-1, 1], [WRITE_MASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, offset }.
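
The src[] comments above fix the operand order for the new intrinsics: the load takes { primitive, offset } and the store takes { value, primitive, offset }. A hedged sketch of building the store form by hand with the existing nir_intrinsic_instr_create/nir_src_for_ssa API, assuming BASE, WRITE_MASK, COMPONENT, SRC_TYPE and IO_SEMANTICS are set the same way as for store_per_vertex_output; the helper name and caller-supplied arguments are illustrative, and nir_builder.h is assumed:

static void
emit_store_per_primitive_output(nir_builder *b, nir_ssa_def *value,
                                nir_ssa_def *primitive, nir_ssa_def *offset,
                                unsigned base, nir_alu_type src_type,
                                nir_io_semantics semantics)
{
   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_store_per_primitive_output);

   /* Sources follow the declared { value, primitive, offset } order. */
   store->num_components = value->num_components;
   store->src[0] = nir_src_for_ssa(value);
   store->src[1] = nir_src_for_ssa(primitive);
   store->src[2] = nir_src_for_ssa(offset);

   /* Indices declared in the .py entry above. */
   nir_intrinsic_set_base(store, base);
   nir_intrinsic_set_write_mask(store, BITFIELD_MASK(value->num_components));
   nir_intrinsic_set_component(store, 0);
   nir_intrinsic_set_src_type(store, src_type);
   nir_intrinsic_set_io_semantics(store, semantics);

   nir_builder_instr_insert(b, &store->instr);
}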
   }
   break;
case nir_var_shader_out:
-  op = array_index ? nir_intrinsic_load_per_vertex_output :
-                     nir_intrinsic_load_output;
+  op = !array_index ? nir_intrinsic_load_output :
+      var->data.per_primitive ? nir_intrinsic_load_per_primitive_output :
+      nir_intrinsic_load_per_vertex_output;
   break;
case nir_var_uniform:
   op = nir_intrinsic_load_uniform;

assert(var->data.mode == nir_var_shader_out);
nir_intrinsic_op op =
-  array_index ? nir_intrinsic_store_per_vertex_output :
-                nir_intrinsic_store_output;
+  !array_index ? nir_intrinsic_store_output :
+  var->data.per_primitive ? nir_intrinsic_store_per_primitive_output :
+  nir_intrinsic_store_per_vertex_output;
nir_intrinsic_instr *store =
   nir_intrinsic_instr_create(state->builder.shader, op);
case nir_intrinsic_load_input_vertex:
case nir_intrinsic_load_per_vertex_input:
case nir_intrinsic_load_per_vertex_output:
+ case nir_intrinsic_load_per_primitive_output:
case nir_intrinsic_load_interpolated_input:
case nir_intrinsic_store_output:
case nir_intrinsic_store_shared:
   return &instr->src[1];
case nir_intrinsic_store_ssbo:
case nir_intrinsic_store_per_vertex_output:
+ case nir_intrinsic_store_per_primitive_output:
   return &instr->src[2];
default:
   return NULL;
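
Because both new intrinsics are routed through the table above, callers of nir_get_io_offset_src() pick up per-primitive outputs without caring whether the offset lives in src[1] or src[2]. A small illustrative helper (not from the patch; name and use are assumptions) that checks for a constant offset:

static bool
io_offset_is_const(nir_intrinsic_instr *intrin, unsigned *out_offset)
{
   nir_src *offset_src = nir_get_io_offset_src(intrin);

   if (offset_src == NULL || !nir_src_is_const(*offset_src))
      return false;

   /* Units depend on the type_size callback given to nir_lower_io. */
   *out_offset = nir_src_as_uint(*offset_src);
   return true;
}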
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
+         intrin->intrinsic == nir_intrinsic_load_per_primitive_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
-         intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
+         intrin->intrinsic == nir_intrinsic_store_per_vertex_output ||
+         intrin->intrinsic == nir_intrinsic_store_per_primitive_output;
}

static bool is_dual_slot(nir_intrinsic_instr *intrin)
   break;
case nir_intrinsic_store_output:
case nir_intrinsic_store_per_vertex_output:
+ case nir_intrinsic_store_per_primitive_output:
case nir_intrinsic_store_ssbo:
case nir_intrinsic_store_shared:
case nir_intrinsic_store_global:

case nir_intrinsic_load_interpolated_input:
case nir_intrinsic_load_output:
case nir_intrinsic_load_per_vertex_output:
+ case nir_intrinsic_load_per_primitive_output:
case nir_intrinsic_load_push_constant:
   /* All memory load operations must load at least a byte */
   validate_assert(state, nir_dest_bit_size(instr->dest) >= 8);
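
Since the validator treats the new opcodes exactly like their per-vertex counterparts, the operand counts generated from the .py definitions can also be sanity-checked against the nir_intrinsic_infos table. An assert-only sketch (not part of the patch; assumes nir.h and assert.h, and the { primitive, offset } / { value, primitive, offset } layouts declared earlier):

static void
check_per_primitive_src_counts(void)
{
   const nir_intrinsic_info *load_info =
      &nir_intrinsic_infos[nir_intrinsic_load_per_primitive_output];
   const nir_intrinsic_info *store_info =
      &nir_intrinsic_infos[nir_intrinsic_store_per_primitive_output];

   assert(load_info->num_srcs == 2);  /* primitive, offset */
   assert(store_info->num_srcs == 3); /* value, primitive, offset */
}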