      return nir_load_vector_arg_amd(b, num_components, .base = arg_index);
}
+void
+ac_nir_store_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
+                 nir_ssa_def *val)
+{
+   /* Only allow storing in the shader's top-level block, so that the new
+    * value dominates every later load of the argument.
+    */
+   assert(nir_cursor_current_block(b->cursor)->cf_node.parent->type == nir_cf_node_function);
+
+   if (ac_args->args[arg.arg_index].file == AC_ARG_SGPR)
+      nir_store_scalar_arg_amd(b, val, .base = arg.arg_index);
+   else
+      nir_store_vector_arg_amd(b, val, .base = arg.arg_index);
+}
+
nir_ssa_def *
ac_nir_unpack_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
                  unsigned rshift, unsigned bitwidth)
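The new helper mirrors ac_nir_load_arg: it picks the scalar or vector store intrinsic based on the argument's register file. A minimal sketch of a caller, assuming a hypothetical pass that zeroes the GS wave ID; the function name and the choice of args->gs_wave_id are illustrative, not part of this patch:

```c
#include "ac_nir.h"

/* Hypothetical caller: replace the GS wave ID argument with zero at the
 * very top of the shader, before any control flow, as the assert in
 * ac_nir_store_arg() requires. */
static void
example_zero_gs_wave_id(nir_shader *shader, const struct ac_shader_args *args)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   nir_builder b;
   nir_builder_init(&b, impl);

   /* Position the cursor in the top-level block, before all control flow. */
   b.cursor = nir_before_cf_list(&impl->body);
   ac_nir_store_arg(&b, args, args->gs_wave_id, nir_imm_int(&b, 0));
}
```

The matching declaration in ac_nir.h and the ACO instruction-selection handling follow.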
   return ac_nir_load_arg_at_offset(b, ac_args, arg, 0);
}
+void ac_nir_store_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
+                      nir_ssa_def *val);
+
nir_ssa_def *
ac_nir_unpack_arg(nir_builder *b, const struct ac_shader_args *ac_args, struct ac_arg arg,
                  unsigned rshift, unsigned bitwidth);
               Operand::c32(aco_symbol_lds_ngg_gs_out_vertex_base));
      break;
   }
+   case nir_intrinsic_store_scalar_arg_amd: {
+      ctx->arg_temps[nir_intrinsic_base(instr)] =
+         bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
+      break;
+   }
+   case nir_intrinsic_store_vector_arg_amd: {
+      ctx->arg_temps[nir_intrinsic_base(instr)] =
+         as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
+      break;
+   }
   default:
      isel_err(&instr->instr, "Unimplemented intrinsic instr");
      abort();
intrinsic("load_scalar_arg_amd", dest_comp=0, bit_sizes=[32], indices=[BASE, ARG_UPPER_BOUND_U32_AMD], flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("load_vector_arg_amd", dest_comp=0, bit_sizes=[32], indices=[BASE, ARG_UPPER_BOUND_U32_AMD], flags=[CAN_ELIMINATE, CAN_REORDER])
+store("scalar_arg_amd", [], [BASE])
+store("vector_arg_amd", [], [BASE])
# src[] = { 32/64-bit base address, 32-bit offset }.
intrinsic("load_smem_amd", src_comp=[1, 1], dest_comp=0, bit_sizes=[32],