{
uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- return nir_ishl(b, &intr->dest.ssa, nir_imm_int(b, ssbo_size_to_bytes_shift));
+ return nir_ishl_imm(b, &intr->dest.ssa, ssbo_size_to_bytes_shift);
}
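/* For reference: the *_imm helpers used throughout this change build
 * the immediate operand themselves. A minimal sketch, not the
 * verbatim nir_builder implementation:
 *
 *    static inline nir_ssa_def *
 *    nir_ishl_imm(nir_builder *b, nir_ssa_def *x, uint32_t y)
 *    {
 *       return nir_ishl(b, x, nir_imm_int(b, y));
 *    }
 */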
static bool
if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation) {
return nir_iand(
b, nir_load_local_invocation_index(b),
- nir_isub(b, nir_load_subgroup_size(b), nir_imm_int(b, 1)));
+ nir_iadd_imm(b, nir_load_subgroup_size(b), -1));
} else if (intr->intrinsic == nir_intrinsic_load_subgroup_id) {
return nir_ishr(b, nir_load_local_invocation_index(b),
nir_load_subgroup_id_shift_ir3(b));
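/* Both lowerings assume the subgroup size is a power of two:
 * x & (size - 1) computes x % size, and the shift by
 * subgroup_id_shift recovers x / size.
 */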
uniform_offset = new_offset;
} else {
uniform_offset = shift > 0
- ? nir_ishl(b, ubo_offset, nir_imm_int(b, shift))
- : nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
+ ? nir_ishl_imm(b, ubo_offset, shift)
+ : nir_ushr_imm(b, ubo_offset, -shift);
}
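/* shift is signed here: a positive value scales the offset up
 * (ishl), a negative one scales it down, hence the ushr by -shift.
 */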
assert(!(const_offset & 0x3));
nir_intrinsic_set_write_mask(store, 0x3);
nir_builder_instr_insert(b, &store->instr);
- off = nir_iadd(b, off, nir_imm_intN_t(b, 8, off->bit_size));
+ off = nir_iadd_imm(b, off, 8);
}
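/* nir_iadd_imm() builds its constant at the first operand's bit size
 * (roughly nir_iadd(b, x, nir_imm_intN_t(b, y, x->bit_size))), which
 * is why the explicit nir_imm_intN_t() no longer needs to be spelled
 * out.
 */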
return NIR_LOWER_INSTR_PROGRESS_REPLACE;
if (intr->intrinsic == nir_intrinsic_load_kernel_input) {
assert(num_comp == 1);
- nir_ssa_def *offset = nir_iadd(b,
- nir_ssa_for_src(b, intr->src[0], 1),
- nir_imm_int(b, 4));
+ nir_ssa_def *offset = nir_iadd_imm(b,
+ nir_ssa_for_src(b, intr->src[0], 1), 4);
nir_ssa_def *upper = nir_load_kernel_input(b, 1, 32, offset);
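/* The upper 32 bits of a 64-bit kernel input sit 4 bytes past the
 * lower half, hence the folded +4 on the byte offset before the
 * second 32-bit load.
 */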
components[i] = nir_pack_64_2x32(b, &load->dest.ssa);
- off = nir_iadd(b, off, nir_imm_intN_t(b, 8, off->bit_size));
+ off = nir_iadd_imm(b, off, 8);
}
} else {
/* The remaining (non load/store) intrinsics just get zero-
 * extended from 32b to 64b:
 */
/* Add or subtract shift depending on the final direction (SHR vs. SHL). */
if (shift * direction < 0)
- shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
+ shift_ssa = nir_iadd_imm(b, shift_ssa, -abs(shift));
else
- shift_ssa = nir_iadd(b, shift_ssa, nir_imm_int(b, abs(shift)));
+ shift_ssa = nir_iadd_imm(b, shift_ssa, abs(shift));
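/* shift * direction < 0 means the constant adjustment opposes the
 * dynamic shift direction, so it is applied as a negated immediate;
 * using iadd_imm for both signs keeps the two branches symmetric.
 */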
return shift_ssa;
}
for (unsigned i = 0; i < intrinsic->dest.ssa.num_components; i++) {
results[i] =
nir_load_ssbo_ir3(b, 1, intrinsic->dest.ssa.bit_size, descriptor,
- nir_iadd(b, offset, nir_imm_int(b, i * comp_size)),
- nir_iadd(b, new_offset, nir_imm_int(b, i)),
+ nir_iadd_imm(b, offset, i * comp_size),
+ nir_iadd_imm(b, new_offset, i),
.access = nir_intrinsic_access(intrinsic),
.align_mul = nir_intrinsic_align_mul(intrinsic),
.align_offset = nir_intrinsic_align_offset(intrinsic));
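/* Each scalarized load advances the byte offset by comp_size and the
 * element offset by one component; both now fold as immediates.
 */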
if (new_offset)
offset = new_offset;
else
- offset = nir_ushr(b, offset, nir_imm_int(b, shift));
+ offset = nir_ushr_imm(b, offset, shift);
/* Insert the new intrinsic right before the old one. */
nir_builder_instr_insert(b, &new_intrinsic->instr);
static nir_ssa_def *
bitfield_extract(nir_builder *b, nir_ssa_def *v, uint32_t start, uint32_t mask)
{
- return nir_iand(b, nir_ushr(b, v, nir_imm_int(b, start)),
- nir_imm_int(b, mask));
+ return nir_iand_imm(b, nir_ushr_imm(b, v, start), mask);
}
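/* Example use (hypothetical values): extract bits [16..23] of v:
 *
 *    nir_ssa_def *byte2 = bitfield_extract(b, v, 16, 0xff);
 */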
static nir_ssa_def *
case MESA_SHADER_TESS_CTRL:
case MESA_SHADER_GEOMETRY:
vertex_stride = nir_load_vs_vertex_stride_ir3(b);
- attr_offset = nir_iadd(b, nir_load_primitive_location_ir3(b, index),
- nir_imm_int(b, comp * 4));
+ attr_offset = nir_iadd_imm(b, nir_load_primitive_location_ir3(b, index),
+ comp * 4);
break;
default:
unreachable("bad shader stage");
return nir_iadd(
b, nir_iadd(b, primitive_offset, vertex_offset),
- nir_iadd(b, attr_offset, nir_ishl(b, offset, nir_imm_int(b, 4))));
+ nir_iadd(b, attr_offset, nir_ishl_imm(b, offset, 4)));
}
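/* The final address is primitive_offset + vertex_offset + attr_offset
 * + (offset << 4); the shift by 4 presumably scales a vec4 index to a
 * byte offset (16 bytes per vec4).
 */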
static nir_intrinsic_instr *
/* Offset is in vec4s, but we need it in units of components for the
* load/store_global_ir3 offset.
*/
- offset = nir_ishl(b, offset, nir_imm_int(b, 2));
+ offset = nir_ishl_imm(b, offset, 2);
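/* (A vec4 holds four components, so the shift by 2 multiplies the
 * vec4 offset by 4.)
 */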
}
nir_ssa_def *vertex_offset;
attr_offset = nir_imm_int(b, state->map.loc[index] + comp);
break;
case MESA_SHADER_TESS_EVAL:
- attr_offset = nir_iadd(b, nir_load_primitive_location_ir3(b, index),
- nir_imm_int(b, comp));
+ attr_offset = nir_iadd_imm(b, nir_load_primitive_location_ir3(b, index),
+ comp);
break;
default:
unreachable("bad shader state");
attr_offset = nir_iadd(b, attr_offset,
nir_imul24(b, offset, build_tcs_out_vertices(b)));
- vertex_offset = nir_ishl(b, vertex, nir_imm_int(b, 2));
+ vertex_offset = nir_ishl_imm(b, vertex, 2);
} else {
assert(location >= VARYING_SLOT_PATCH0 &&
location <= VARYING_SLOT_TESS_MAX);
unsigned index = location - VARYING_SLOT_PATCH0;
- attr_offset = nir_iadd(b, nir_imm_int(b, index * 4 + comp), offset);
+ attr_offset = nir_iadd_imm(b, offset, index * 4 + comp);
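/* nir_iadd_imm() takes the nir_ssa_def operand first, so the operands
 * are swapped relative to the old code; iadd is commutative, so the
 * result is unchanged.
 */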
vertex_offset = nir_imm_int(b, 0);
}
unreachable("bad");
}
- return nir_iadd(b, patch_offset, nir_imm_int(b, offset + comp));
+ return nir_iadd_imm(b, patch_offset, offset + comp);
}
static void
unsigned stream = nir_intrinsic_stream_id(intr);
/* vertex_flags_out |= stream */
nir_store_var(b, state->vertex_flags_out,
- nir_ior(b, nir_load_var(b, state->vertex_flags_out),
- nir_imm_int(b, stream)),
+ nir_ior_imm(b, nir_load_var(b, state->vertex_flags_out),
+ stream),
0x1 /* .x */);
copy_vars(b, &state->emit_outputs, &state->old_outputs);
nir_instr_remove(&intr->instr);
nir_store_var(b, state->emitted_vertex_var,
- nir_iadd(b, nir_load_var(b, state->emitted_vertex_var),
- nir_imm_int(b, 1)),
+ nir_iadd_imm(b, nir_load_var(b, state->emitted_vertex_var), 1),
0x1);
nir_pop_if(b, NULL);
/* Increment the vertex count by 1 */
nir_store_var(b, state->vertex_count_var,
- nir_iadd(b, count, nir_imm_int(b, 1)), 0x1); /* .x */
+ nir_iadd_imm(b, count, 1), 0x1); /* .x */
nir_store_var(b, state->vertex_flags_out, nir_imm_int(b, 0), 0x1);
break;
nir_ssa_def *load =
nir_load_uniform(b, instr->num_components,
instr->dest.ssa.bit_size,
- nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)),
+ nir_ushr_imm(b, instr->src[0].ssa, 2),
.base = base);
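/* The ushr by 2 converts the byte offset into the 4-byte units that
 * this load_uniform expects.
 */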
nir_ssa_def_rewrite_uses(&instr->dest.ssa, load);
intrin->intrinsic == nir_intrinsic_load_ssbo &&
(nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER) &&
intrin->dest.ssa.bit_size > 16) {
- descriptor_idx = nir_iadd(b, descriptor_idx, nir_imm_int(b, 1));
+ descriptor_idx = nir_iadd_imm(b, descriptor_idx, 1);
}
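/* Reorderable wider-than-16-bit SSBO loads select the descriptor one
 * slot past the default one, hence the +1.
 */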
nir_ssa_def *results[MAX_SETS + 1] = { NULL };
return nir_imm_int(b, idx);
nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
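/* Each array element appears to occupy two descriptor slots, hence
 * the scale by 2 before adding the base index.
 */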
- return nir_iadd(b, nir_imm_int(b, idx),
- nir_imul_imm(b, arr_index, 2));
+ return nir_iadd_imm(b, nir_imul_imm(b, arr_index, 2), idx);
}
shader->active_desc_sets |= 1u << set;
unsigned set = ir3_shader_descriptor_set(b->shader->info.stage);
nir_ssa_def *src = nir_ssa_for_src(b, intr->src[buffer_src], 1);
- src = nir_iadd(b, src, nir_imm_int(b, desc_offset));
+ src = nir_iadd_imm(b, src, desc_offset);
/* An out-of-bounds index into an SSBO/image array can cause a GPU fault
* on access to the descriptor (I don't see any hw mechanism to bound the
* access). We could just allow the resulting iova fault (it is a read