nir_ssa_for_src(b, intr->src[0], 1),
nir_imm_int(b, 4));
- nir_ssa_def *upper = nir_build_load_kernel_input(
- b, 1, 32, offset);
+ nir_ssa_def *upper = nir_load_kernel_input(b, 1, 32, offset);
return nir_pack_64_2x32_split(b, def, upper);
}
*/
if (intr->intrinsic == nir_intrinsic_global_atomic) {
- return nir_build_global_atomic_ir3(
+ return nir_global_atomic_ir3(
b, nir_dest_bit_size(intr->dest), addr,
nir_ssa_for_src(b, intr->src[1], 1),
.atomic_op = nir_intrinsic_atomic_op(intr));
} else if (intr->intrinsic == nir_intrinsic_global_atomic_swap) {
- return nir_build_global_atomic_swap_ir3(
+ return nir_global_atomic_swap_ir3(
b, nir_dest_bit_size(intr->dest), addr,
nir_ssa_for_src(b, intr->src[1], 1),
nir_ssa_for_src(b, intr->src[2], 1),
@@ ... @@
nir_ssa_def *components[num_comp];
for (unsigned off = 0; off < num_comp;) {
unsigned c = MIN2(num_comp - off, 4);
- nir_ssa_def *val = nir_build_load_global_ir3(
+ nir_ssa_def *val = nir_load_global_ir3(
b, c, nir_dest_bit_size(intr->dest),
addr, nir_imm_int(b, off));
for (unsigned i = 0; i < c; i++) {
@@ ... @@
for (unsigned off = 0; off < num_comp; off += 4) {
unsigned c = MIN2(num_comp - off, 4);
nir_ssa_def *v = nir_channels(b, value, BITFIELD_MASK(c) << off);
- nir_build_store_global_ir3(b, v, addr, nir_imm_int(b, off));
+ nir_store_global_ir3(b, v, addr, nir_imm_int(b, off));
}
return NIR_LOWER_INSTR_PROGRESS_REPLACE;
}