lower_esgs_io_state *st,
nir_intrinsic_instr *instr)
{
- nir_src *vertex_src = nir_get_io_vertex_index_src(instr);
+ nir_src *vertex_src = nir_get_io_arrayed_index_src(instr);
nir_ssa_def *vertex_offset = st->chip_class >= GFX9
? gs_per_vertex_input_vertex_offset_gfx9(b, vertex_src)
: gs_per_vertex_input_vertex_offset_gfx6(b, vertex_src);
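(A minimal sketch of what the GFX9 path above deals with, assuming the merged-ESGS layout where two 16-bit ES vertex offsets are packed per input word; the helper name is hypothetical and the packed word is assumed already loaded:)

   /* Sketch: extract the 16-bit ES vertex offset for a constant vertex index
    * from a packed word (even index = low half, odd index = high half). */
   static nir_ssa_def *
   sketch_unpack_es_vertex_offset_gfx9(nir_builder *b, nir_ssa_def *packed,
                                       unsigned vertex)
   {
      return nir_ubfe(b, packed, nir_imm_int(b, (vertex & 1u) * 16u),
                      nir_imm_int(b, 16u));
   }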
* can use temporaries, no need to use shared memory.
*/
nir_src *off_src = nir_get_io_offset_src(intrin);
- nir_src *vertex_index_src = nir_get_io_vertex_index_src(intrin);
+ nir_src *vertex_index_src = nir_get_io_arrayed_index_src(intrin);
nir_instr *vertex_index_instr = vertex_index_src->ssa->parent_instr;
bool can_use_temps = nir_src_is_const(*off_src) &&
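(The hunk truncates the condition at the trailing "&&". Given the src_is_invocation_id checks later in this patch, the missing conjunct is presumably that the arrayed index comes from load_invocation_id; a sketch of the full condition under that assumption:)

   bool can_use_temps =
      nir_src_is_const(*off_src) &&
      vertex_index_instr->type == nir_instr_type_intrinsic &&
      nir_instr_as_intrinsic(vertex_index_instr)->intrinsic ==
         nir_intrinsic_load_invocation_id;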
nir_ssa_def *tcs_in_patch_stride = nir_imul_imm(b, tcs_in_vtxcnt, tcs_in_vertex_stride);
nir_ssa_def *tcs_in_current_patch_offset = nir_imul(b, rel_patch_id, tcs_in_patch_stride);
- nir_ssa_def *vertex_index = nir_get_io_vertex_index_src(instr)->ssa;
+ nir_ssa_def *vertex_index = nir_get_io_arrayed_index_src(instr)->ssa;
nir_ssa_def *vertex_index_off = nir_imul_imm(b, vertex_index, tcs_in_vertex_stride);
nir_ssa_def *io_offset = nir_build_calc_io_offset(b, instr, nir_imm_int(b, 16u), 4u);
nir_ssa_def *output_patch_offset = nir_iadd_nuw(b, patch_offset, output_patch0_offset);
if (per_vertex) {
- nir_ssa_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_vertex_index_src(intrin), 1);
+ nir_ssa_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
nir_ssa_def *vertex_index_off = nir_imul_imm(b, vertex_index, output_vertex_size);
off = nir_iadd_nuw(b, off, vertex_index_off);
nir_ssa_def *rel_patch_id = nir_build_load_tess_rel_patch_id_amd(b);
nir_ssa_def *patch_offset = nir_imul(b, rel_patch_id, nir_imul_imm(b, out_vertices_per_patch, 16u));
- nir_ssa_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_vertex_index_src(intrin), 1);
+ nir_ssa_def *vertex_index = nir_ssa_for_src(b, *nir_get_io_arrayed_index_src(intrin), 1);
nir_ssa_def *vertex_index_off = nir_imul_imm(b, vertex_index, 16u);
return nir_iadd_nuw(b, nir_iadd_nuw(b, patch_offset, vertex_index_off), io_offset);
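(For orientation, the per-vertex output address built by the two hunks above, restated as a scalar model; the helper name is hypothetical and the 16-byte slot stride is taken from the code itself:)

   /* Sketch: addr = patch base + vertex offset within patch + I/O slot offset,
    * mirroring the nir_imul / nir_iadd_nuw chain above. */
   static unsigned
   sketch_tcs_out_addr(unsigned rel_patch_id, unsigned out_vertices_per_patch,
                       unsigned vertex_index, unsigned io_offset)
   {
      unsigned patch_offset = rel_patch_id * (out_vertices_per_patch * 16u);
      return patch_offset + vertex_index * 16u + io_offset;
   }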
return false;
nir_src* off_src = nir_get_io_offset_src(instr);
- nir_src* vertex_index_src = nir_get_io_vertex_index_src(instr);
+ nir_src* vertex_index_src = nir_get_io_arrayed_index_src(instr);
nir_instr* vertex_index_instr = vertex_index_src->ssa->parent_instr;
bool can_use_temps =
nir_src_is_const(*off_src) && vertex_index_instr->type == nir_instr_type_intrinsic &&
writemask <<= component;
if (ctx->stage == MESA_SHADER_TESS_CTRL) {
- nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
+ nir_src *vertex_index_src = nir_get_io_arrayed_index_src(instr);
LLVMValueRef vertex_index = vertex_index_src ? get_src(ctx, *vertex_index_src) : NULL;
unsigned location = nir_intrinsic_io_semantics(instr).location;
unsigned base = nir_intrinsic_base(instr);
unsigned component = nir_intrinsic_component(instr);
unsigned count = instr->dest.ssa.num_components;
- nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
+ nir_src *vertex_index_src = nir_get_io_arrayed_index_src(instr);
LLVMValueRef vertex_index = vertex_index_src ? get_src(ctx, *vertex_index_src) : NULL;
nir_src offset = *nir_get_io_offset_src(instr);
LLVMValueRef indir_index = NULL;
void *mem_ctx);
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
-nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
+nir_src *nir_get_io_arrayed_index_src(nir_intrinsic_instr *instr);
nir_src *nir_get_shader_call_payload_src(nir_intrinsic_instr *call);
bool nir_is_arrayed_io(const nir_variable *var, gl_shader_stage stage);
if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
instr->intrinsic == nir_intrinsic_load_per_vertex_input &&
- !src_is_invocation_id(nir_get_io_vertex_index_src(instr)))
+ !src_is_invocation_id(nir_get_io_arrayed_index_src(instr)))
shader->info.tess.tcs_cross_invocation_inputs_read |= slot_mask;
break;
if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
instr->intrinsic == nir_intrinsic_load_per_vertex_output &&
- !src_is_invocation_id(nir_get_io_vertex_index_src(instr)))
+ !src_is_invocation_id(nir_get_io_arrayed_index_src(instr)))
shader->info.tess.tcs_cross_invocation_outputs_read |= slot_mask;
if (shader->info.stage == MESA_SHADER_FRAGMENT &&
- * Return the vertex index source for a load/store per_vertex intrinsic.
+ * Return the array index source for an arrayed load/store intrinsic.
*/
nir_src *
-nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
+nir_get_io_arrayed_index_src(nir_intrinsic_instr *instr)
{
switch (instr->intrinsic) {
case nir_intrinsic_load_per_vertex_input:
case nir_intrinsic_load_per_vertex_output:
+ case nir_intrinsic_load_per_primitive_output:
return &instr->src[0];
case nir_intrinsic_store_per_vertex_output:
+ case nir_intrinsic_store_per_primitive_output:
return &instr->src[1];
default:
return NULL;
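(A usage sketch for the renamed helper: it returns NULL for non-arrayed intrinsics, so callers can branch on that, as the brw_nir hunk below does. The wrapper is hypothetical, built only from calls shown in this patch:)

   /* Sketch: constant array index of an arrayed I/O intrinsic, or -1 when
    * the intrinsic is not arrayed or its index is dynamic. */
   static int
   sketch_const_arrayed_index(nir_intrinsic_instr *intrin)
   {
      nir_src *arr = nir_get_io_arrayed_index_src(intrin);
      if (arr == NULL || !nir_src_is_const(*arr))
         return -1;
      return (int)nir_src_as_uint(*arr);
   }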
assert(vue_slot != -1);
intrin->const_index[0] = vue_slot;
- nir_src *vertex = nir_get_io_vertex_index_src(intrin);
+ nir_src *vertex = nir_get_io_arrayed_index_src(intrin);
if (vertex) {
if (nir_src_is_const(*vertex)) {
intrin->const_index[0] += nir_src_as_uint(*vertex) *