We have to use 1ull instead of 1u, otherwise MSVC warns about the result of
a 32-bit shift being implicitly widened to the helpers' uint64_t immediate.
Reviewed-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23855>
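A minimal sketch of what MSVC objects to; the _imm helpers in nir_builder.h
take their immediate as a uint64_t, and q_hi/i here stand in for the udiv
quotient loop further down:

nir_ssa_def *nir_ior_imm(nir_builder *b, nir_ssa_def *x, uint64_t y);

/* The shift happens in 32 bits and is only widened to 64 bits when it is
 * passed as the immediate; MSVC flags this (warning C4334).
 */
q_hi = nir_ior_imm(b, q_hi, 1u << i);

/* Performing the shift in 64 bits from the start avoids the warning. */
q_hi = nir_ior_imm(b, q_hi, 1ull << i);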
{
if (s->num_components != 1) {
uint64_t mask = 1ull << (s->bit_size - 1);
- s = nir_iand(b, s, nir_imm_intN_t(b, mask, s->bit_size));
+ s = nir_iand_imm(b, s, mask);
}
return nir_bcsel(b, nir_ieq_imm(b, s, 0), x, y);
}
nir_clz_u(nir_builder *b, nir_ssa_def *a)
{
nir_ssa_def *val;
- val = nir_isub(b, nir_imm_intN_t(b, a->bit_size - 1, 32),
- nir_ufind_msb(b, nir_u2uN(b, a, MAX2(a->bit_size, 32))));
+ val = nir_isub_imm(b, a->bit_size - 1,
+ nir_ufind_msb(b, nir_u2uN(b, a,
+ MAX2(a->bit_size, 32))));
return nir_u2uN(b, val, a->bit_size);
}
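/* Worked example: for a 16-bit input whose highest set bit is bit 10,
 * nir_ufind_msb on the zero-extended 32-bit value returns 10, so
 * nir_isub_imm(b, 15, ...) yields clz = (16 - 1) - 10 = 5 before narrowing
 * back to 16 bits. The a == 0 case falls out naturally: ufind_msb returns
 * -1, giving 15 - (-1) = 16.
 */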
nir_shift_imm(nir_builder *b, nir_ssa_def *value, int left_shift)
{
if (left_shift > 0)
- return nir_ishl(b, value, nir_imm_int(b, left_shift));
+ return nir_ishl_imm(b, value, left_shift);
else if (left_shift < 0)
- return nir_ushr(b, value, nir_imm_int(b, -left_shift));
+ return nir_ushr_imm(b, value, -left_shift);
else
return value;
}
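/* Sign convention: a positive left_shift builds an ishl, a negative one a
 * logical ushr by its magnitude, and zero returns the value unchanged, so
 * nir_shift_imm(b, v, -8) drops the low 8 bits of v.
 */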
nir_mask_shift(struct nir_builder *b, nir_ssa_def *src,
uint32_t mask, int left_shift)
{
- return nir_shift_imm(b, nir_iand(b, src, nir_imm_int(b, mask)), left_shift);
+ return nir_shift_imm(b, nir_iand_imm(b, src, mask), left_shift);
}
static inline nir_ssa_def *
assert(src->num_components <= 4);
nir_ssa_def *comps[4];
for (unsigned i = 0; i < src->num_components; i++) {
- nir_ssa_def *shift = nir_imm_int(b, src->bit_size - bits[i]);
- comps[i] = nir_ishr(b, nir_ishl(b, nir_channel(b, src, i), shift), shift);
+ unsigned shift = src->bit_size - bits[i];
+ comps[i] = nir_ishr_imm(b, nir_ishl_imm(b,
+ nir_channel(b, src, i),
+ shift),
+ shift);
}
return nir_vec(b, comps, src->num_components);
}
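/* Worked example: sign-extending a 5-bit field held in a 32-bit channel
 * uses shift = 32 - 5 = 27; the ishl moves the field's sign bit up into
 * bit 31 and the arithmetic ishr replicates it back down across bits
 * 31..5.
 */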
assert(bits[i] < bit_size);
assert(offset + bits[i] <= bit_size);
nir_ssa_def *chan = nir_channel(b, packed, next_chan);
- nir_ssa_def *lshift = nir_imm_int(b, bit_size - (offset + bits[i]));
- nir_ssa_def *rshift = nir_imm_int(b, bit_size - bits[i]);
+ unsigned lshift = bit_size - (offset + bits[i]);
+ unsigned rshift = bit_size - bits[i];
if (sign_extend)
- comps[i] = nir_ishr(b, nir_ishl(b, chan, lshift), rshift);
+ comps[i] = nir_ishr_imm(b, nir_ishl_imm(b, chan, lshift), rshift);
else
- comps[i] = nir_ushr(b, nir_ishl(b, chan, lshift), rshift);
+ comps[i] = nir_ushr_imm(b, nir_ishl_imm(b, chan, lshift), rshift);
offset += bits[i];
if (offset >= bit_size) {
next_chan++;
unsigned shift = 0;
unsigned dst_idx = 0;
for (unsigned i = 0; i < src->num_components; i++) {
- nir_ssa_def *shifted = nir_ishl(b, nir_channel(b, src, i),
- nir_imm_int(b, shift));
+ nir_ssa_def *shifted = nir_ishl_imm(b, nir_channel(b, src, i),
+ shift);
if (shift == 0) {
dst_chan[dst_idx] = shifted;
} else {
}
}
} else {
- nir_ssa_def *mask = nir_imm_int(b, ~0u >> (32 - dst_bits));
+ unsigned mask = ~0u >> (32 - dst_bits);
unsigned src_idx = 0;
unsigned shift = 0;
for (unsigned i = 0; i < dst_components; i++) {
- dst_chan[i] = nir_iand(b, nir_ushr_imm(b, nir_channel(b, src, src_idx),
- shift),
- mask);
+ dst_chan[i] = nir_iand_imm(b,
+ nir_ushr_imm(b,
+ nir_channel(b, src, src_idx),
+ shift),
+ mask);
shift += dst_bits;
if (shift >= src_bits) {
src_idx++;
nir_channel(b, clamped, 2)));
/* maxrgb.u += maxrgb.u & (1 << (23-9)); */
- maxu = nir_iadd(b, maxu, nir_iand(b, maxu, nir_imm_int(b, 1 << 14)));
+ maxu = nir_iadd(b, maxu, nir_iand_imm(b, maxu, 1 << 14));
/* exp_shared = MAX2((maxrgb.u >> 23), -RGB9E5_EXP_BIAS - 1 + 127) +
* 1 + RGB9E5_EXP_BIAS - 127;
*/
nir_ssa_def *exp_shared =
- nir_iadd(b, nir_umax(b, nir_ushr_imm(b, maxu, 23),
- nir_imm_int(b, -RGB9E5_EXP_BIAS - 1 + 127)),
- nir_imm_int(b, 1 + RGB9E5_EXP_BIAS - 127));
+ nir_iadd_imm(b, nir_umax(b, nir_ushr_imm(b, maxu, 23),
+ nir_imm_int(b, -RGB9E5_EXP_BIAS - 1 + 127)),
+ 1 + RGB9E5_EXP_BIAS - 127);
/* revdenom_biasedexp = 127 - (exp_shared - RGB9E5_EXP_BIAS -
* RGB9E5_MANTISSA_BITS) + 1;
*/
nir_ssa_def *revdenom_biasedexp =
- nir_isub(b, nir_imm_int(b, 127 + RGB9E5_EXP_BIAS +
- RGB9E5_MANTISSA_BITS + 1),
- exp_shared);
+ nir_isub_imm(b, 127 + RGB9E5_EXP_BIAS +
+ RGB9E5_MANTISSA_BITS + 1,
+ exp_shared);
/* revdenom.u = revdenom_biasedexp << 23; */
nir_ssa_def *revdenom =
- nir_ishl(b, revdenom_biasedexp, nir_imm_int(b, 23));
+ nir_ishl_imm(b, revdenom_biasedexp, 23);
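/* Shifting the biased exponent into bits 30..23 over a zero mantissa
 * materializes the float 2^(revdenom_biasedexp - 127) directly.
 */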
/* rm = (int) (rc.f * revdenom.f);
* gm = (int) (gc.f * revdenom.f);
nir_ssa_def *src0_32 = nir_type_convert(b, src0, base_type, base_type | 32, nir_rounding_mode_undef);
nir_ssa_def *src1_32 = nir_type_convert(b, src1, base_type, base_type | 32, nir_rounding_mode_undef);
nir_ssa_def *dest_32 = nir_imul(b, src0_32, src1_32);
- nir_ssa_def *dest_shifted = nir_ishr(b, dest_32, nir_imm_int(b, src0->bit_size));
+ nir_ssa_def *dest_shifted = nir_ishr_imm(b, dest_32, src0->bit_size);
lowered = nir_type_convert(b, dest_shifted, base_type, base_type | src0->bit_size, nir_rounding_mode_undef);
} else {
nir_ssa_def *cshift = nir_imm_int(b, src0->bit_size / 2);
* the low 32 bits are always 0 so we can construct the correct high 32
* bits and then pack it together with zero low 32 bits.
*/
- nir_ssa_def *inf_hi = nir_ior(b, nir_imm_int(b, 0x7ff00000), zero_hi);
+ nir_ssa_def *inf_hi = nir_ior_imm(b, zero_hi, 0x7ff00000);
return nir_pack_64_2x32_split(b, nir_imm_int(b, 0), inf_hi);
}
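/* An IEEE-754 double infinity is an all-ones exponent (0x7ff) with a zero
 * mantissa: high dword sign | 0x7ff00000, low dword 0. ORing the exponent
 * field into zero_hi keeps whatever sign bit it carries.
 */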
* small below.
*/
nir_ssa_def *new_exp = nir_isub(b, get_exponent(b, ra),
- nir_isub(b, get_exponent(b, src),
- nir_imm_int(b, 1023)));
+ nir_iadd_imm(b, get_exponent(b, src),
+ -1023));
ra = set_exponent(b, ra, new_exp);
* shifting right by 1.
*/
- nir_ssa_def *unbiased_exp = nir_isub(b, get_exponent(b, src),
- nir_imm_int(b, 1023));
+ nir_ssa_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
+ -1023);
nir_ssa_def *even = nir_iand_imm(b, unbiased_exp, 1);
nir_ssa_def *half = nir_ishr_imm(b, unbiased_exp, 1);
nir_ssa_def *src_norm = set_exponent(b, src,
- nir_iadd(b, nir_imm_int(b, 1023),
- even));
+ nir_iadd_imm(b, even, 1023));
nir_ssa_def *ra = nir_f2f64(b, nir_frsq(b, nir_f2f32(b, src_norm)));
nir_ssa_def *new_exp = nir_isub(b, get_exponent(b, ra), half);
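/* The split satisfies e == 2 * half + even, including for negative
 * exponents: e = -3 gives even = 1 and half = -2 (arithmetic shift).
 * src_norm then has unbiased exponent `even`, and since
 * rsq(x * 2^(2 * half)) == rsq(x) * 2^(-half), subtracting half from ra's
 * exponent rescales the result.
 */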
nir_ssa_def *r_1 = nir_ffma(b, nir_fneg(b, g_1), g_1, src);
res = nir_ffma(b, h_1, r_1, g_1);
} else {
- nir_ssa_def *y_1 = nir_fmul(b, nir_imm_double(b, 2.0), h_1);
+ nir_ssa_def *y_1 = nir_fmul_imm(b, h_1, 2.0);
nir_ssa_def *r_1 = nir_ffma(b, nir_fneg(b, y_1), nir_fmul(b, h_1, src),
one_half);
res = nir_ffma(b, y_1, r_1, y_1);
static nir_ssa_def *
lower_trunc(nir_builder *b, nir_ssa_def *src)
{
- nir_ssa_def *unbiased_exp = nir_isub(b, get_exponent(b, src),
- nir_imm_int(b, 1023));
+ nir_ssa_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
+ -1023);
- nir_ssa_def *frac_bits = nir_isub(b, nir_imm_int(b, 52), unbiased_exp);
+ nir_ssa_def *frac_bits = nir_isub_imm(b, 52, unbiased_exp);
/*
* Decide the operation to apply depending on the unbiased exponent:
nir_imm_int(b, ~0),
nir_ishl(b,
nir_imm_int(b, ~0),
- nir_isub(b, frac_bits, nir_imm_int(b, 32))));
+ nir_iadd_imm(b, frac_bits, -32)));
nir_ssa_def *src_lo = nir_unpack_64_2x32_split_x(b, src);
nir_ssa_def *src_hi = nir_unpack_64_2x32_split_y(b, src);
return nir_bcsel(b,
nir_ior(b, negative, nir_feq(b, src, tr)),
tr,
- nir_fadd(b, tr, nir_imm_double(b, 1.0)));
+ nir_fadd_imm(b, tr, 1.0));
}
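/* This is ceil built from trunc: for negative or already-integral inputs
 * trunc(src) is already the answer; otherwise truncation moved toward
 * zero, i.e. downward, so adding 1.0 lands on the ceiling.
 */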
static nir_ssa_def *
{
/* Add and subtract 2**52 to round off any fractional bits. */
nir_ssa_def *two52 = nir_imm_double(b, (double)(1ull << 52));
- nir_ssa_def *sign = nir_iand(b, nir_unpack_64_2x32_split_y(b, src),
- nir_imm_int(b, 1ull << 31));
+ nir_ssa_def *sign = nir_iand_imm(b, nir_unpack_64_2x32_split_y(b, src),
+ 1ull << 31);
b->exact = true;
nir_ssa_def *res = nir_fsub(b, nir_fadd(b, nir_fabs(b, src), two52), two52);
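/* The 2**52 trick works because a double's 52-bit mantissa cannot
 * represent fractional bits at that magnitude: for |src| < 2**52,
 * (|src| + 2**52) - 2**52 is |src| rounded to an integer by the current
 * rounding mode (round-to-nearest-even in practice). b->exact keeps the
 * add/sub pair from being algebraically folded away.
 */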
nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
y = nir_iand_imm(b, y, 0x3f);
- nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+ nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
nir_ssa_def *lo_shifted = nir_ishl(b, x_lo, y);
nir_ssa_def *hi_shifted = nir_ishl(b, x_hi, y);
nir_ssa_def *lo_shifted_hi = nir_ushr(b, x_lo, reverse_count);
nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
y = nir_iand_imm(b, y, 0x3f);
- nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+ nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
nir_ssa_def *hi_shifted = nir_ishr(b, x_hi, y);
nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
y = nir_iand_imm(b, y, 0x3f);
- nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+ nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
nir_ssa_def *hi_shifted = nir_ushr(b, x_hi, y);
nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
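/* All three 64-bit shift lowerings share this shape: y is masked to six
 * bits, each 32-bit half is shifted by y, and reverse_count = |y - 32|
 * sizes the cross-over term. For y < 32 the bits that spill between
 * halves come from shifting the other half the opposite way by 32 - y;
 * for y >= 32 the same |y - 32| serves as the shift count of the half
 * that supplies the entire result.
 */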
* quot.y |= 1U << i;
* }
*/
- nir_ssa_def *d_shift = nir_ishl(b, d_lo, nir_imm_int(b, i));
+ nir_ssa_def *d_shift = nir_ishl_imm(b, d_lo, i);
nir_ssa_def *new_n_hi = nir_isub(b, n_hi, d_shift);
- nir_ssa_def *new_q_hi = nir_ior(b, q_hi, nir_imm_int(b, 1u << i));
+ nir_ssa_def *new_q_hi = nir_ior_imm(b, q_hi, 1ull << i);
nir_ssa_def *cond = nir_iand(b, need_high_div,
nir_uge(b, n_hi, d_shift));
if (i != 0) {
* quot.x |= 1U << i;
* }
*/
- nir_ssa_def *d_shift = nir_ishl(b, d, nir_imm_int(b, i));
+ nir_ssa_def *d_shift = nir_ishl_imm(b, d, i);
nir_ssa_def *new_n = nir_isub(b, n, d_shift);
- nir_ssa_def *new_q_lo = nir_ior(b, q_lo, nir_imm_int(b, 1u << i));
+ nir_ssa_def *new_q_lo = nir_ior_imm(b, q_lo, 1ull << i);
nir_ssa_def *cond = nir_uge(b, n, d_shift);
if (i != 0) {
/* log2_denom is always <= 31, so we don't need to bother with it
if (b->shader->options->lower_uadd_sat) {
nir_ssa_def *valid_hi_bits = nir_ine_imm(b, x_hi, 0);
- nir_ssa_def *hi_res = nir_iadd(b, nir_imm_intN_t(b, 32, 32), hi_count);
+ nir_ssa_def *hi_res = nir_iadd_imm(b, hi_count, 32);
return nir_bcsel(b, valid_hi_bits, hi_res, lo_count);
} else {
/* If hi_count was -1, it will still be -1 after this uadd_sat. As a
/* Use umin so that -1 (no bits found) becomes larger (0xFFFFFFFF)
* than any actual bit position, so we return a found bit instead.
*/
- return nir_umin(b, lo_lsb, nir_iadd(b, hi_lsb, nir_imm_int(b, 32)));
+ return nir_umin(b, lo_lsb, nir_iadd_imm(b, hi_lsb, 32));
}
static nir_ssa_def *
}
nir_ssa_def *discard =
- nir_imax(b, nir_isub(b, exp, nir_imm_int(b, significand_bits)),
+ nir_imax(b, nir_iadd_imm(b, exp, -significand_bits),
nir_imm_int(b, 0));
nir_ssa_def *significand = COND_LOWER_OP(b, ushr, x, discard);
if (significand_bits < 32)
* unrounded input manually.
*/
nir_ssa_def *shift =
- nir_imax(b, nir_isub(b, nir_imm_int(b, significand_bits), exp),
+ nir_imax(b, nir_isub_imm(b, significand_bits, exp),
nir_imm_int(b, 0));
significand = COND_LOWER_OP(b, ishl, significand, shift);
*/
nir_ssa_def *biased_exp = nir_bcsel(b, nir_ilt_imm(b, exp, 0),
nir_imm_int(b, 0),
- nir_iadd(b, exp, nir_imm_int(b, 1023)));
+ nir_iadd_imm(b, exp, 1023));
/* Pack the significand and exponent manually. */
nir_ssa_def *lo = nir_unpack_64_2x32_split_x(b, significand);
nir_ssa_def *x_low =
nir_u2u32(b, nir_iand_imm(b, x, 0xffffff));
nir_ssa_def *x_mid =
- nir_u2u32(b, nir_iand_imm(b, nir_ushr(b, x, nir_imm_int(b, 24)),
+ nir_u2u32(b, nir_iand_imm(b, nir_ushr_imm(b, x, 24),
0xffffff));
nir_ssa_def *x_hi =
- nir_u2u32(b, nir_ushr(b, x, nir_imm_int(b, 48)));
+ nir_u2u32(b, nir_ushr_imm(b, x, 48));
nir_ssa_def *scan_low =
build_scan_intrinsic(b, intrin->intrinsic, nir_op_iadd,
case nir_address_format_62bit_generic: {
assert(addr->num_components == 1);
assert(addr->bit_size == 64);
- nir_ssa_def *mode_enum = nir_ushr(b, addr, nir_imm_int(b, 62));
+ nir_ssa_def *mode_enum = nir_ushr_imm(b, addr, 62);
switch (mode) {
case nir_var_function_temp:
case nir_var_shader_temp:
}
index = nir_iadd(b, index,
- nir_imul(b, nir_imm_int(b, array_elements),
- nir_ssa_for_src(b, deref->arr.index, 1)));
+ nir_imul_imm(b,
+ nir_ssa_for_src(b, deref->arr.index, 1),
+ array_elements));
}
array_elements *= glsl_get_length(parent->type);
break;
case nir_intrinsic_quad_broadcast:
assert(intrin->src[1].is_ssa);
- index = nir_ior(b, nir_iand(b, index, nir_imm_int(b, ~0x3)),
+ index = nir_ior(b, nir_iand_imm(b, index, ~0x3),
intrin->src[1].ssa);
break;
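/* Subgroup invocations are grouped into quads of four, so index & ~0x3 is
 * the first invocation of the caller's quad and ORing in src[1] (the quad
 * lane, 0..3) selects the invocation to broadcast from.
 */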
case nir_intrinsic_quad_swap_horizontal:
}
if (i)
- dst = nir_bcsel(b, nir_ieq(b, intrin->src[1].ssa,
- nir_src_for_ssa(nir_imm_int(b, i)).ssa),
+ dst = nir_bcsel(b, nir_ieq_imm(b, intrin->src[1].ssa, i),
qbcst_dst, dst);
else
dst = qbcst_dst;
if (intr->intrinsic == nir_intrinsic_load_ubo &&
!b->shader->info.first_ubo_is_default_ubo) {
nir_ssa_def *old_idx = nir_ssa_for_src(b, intr->src[0], 1);
- nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, 1));
+ nir_ssa_def *new_idx = nir_iadd_imm(b, old_idx, 1);
nir_instr_rewrite_src(&intr->instr, &intr->src[0],
nir_src_for_ssa(new_idx));
return true;
if (d == 0) {
return nir_imm_intN_t(b, 0, n->bit_size);
} else if (util_is_power_of_two_or_zero64(d)) {
- return nir_iand(b, n, nir_imm_intN_t(b, d - 1, n->bit_size));
+ return nir_iand_imm(b, n, d - 1);
} else {
- return nir_isub(b, n, nir_imul(b, build_udiv(b, n, d),
- nir_imm_intN_t(b, d, n->bit_size)));
+ return nir_isub(b, n, nir_imul_imm(b, build_udiv(b, n, d), d));
}
}
nir_iadd_imm(b, n, d - 1), n);
return nir_isub(b, n, nir_iand_imm(b, tmp, -d));
} else {
- return nir_isub(b, n, nir_imul(b, build_idiv(b, n, d),
- nir_imm_intN_t(b, d, n->bit_size)));
+ return nir_isub(b, n, nir_imul_imm(b, build_idiv(b, n, d), d));
}
}
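/* Worked example for the power-of-two fast paths above: with d == 8, umod
 * reduces to nir_iand_imm(b, n, 7); the non-power-of-two fallback instead
 * computes the remainder as n - (n / d) * d from the lowered division.
 */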
}
nir_ssa_def *is_zero = nir_ieq_imm(b, n, 0);
return nir_bcsel(b, nir_ior(b, is_neg_not_int_min, is_zero), n, nir_iadd(b, int_min_def, n));
} else if (d > 0 && util_is_power_of_two_or_zero64(d)) {
- return nir_iand(b, n, nir_imm_intN_t(b, d - 1, n->bit_size));
+ return nir_iand_imm(b, n, d - 1);
} else if (d < 0 && util_is_power_of_two_or_zero64(-d)) {
nir_ssa_def *d_def = nir_imm_intN_t(b, d, n->bit_size);
nir_ssa_def *res = nir_ior(b, n, d_def);
#include "nir.h"
#include "nir_builder.h"
+#include "util/u_math.h"
+
static bool
nir_scale_fdiv_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
{
nir_ssa_def *orig_a = nir_ssa_for_alu_src(b, alu, 0);
nir_ssa_def *orig_b = nir_ssa_for_alu_src(b, alu, 1);
nir_ssa_def *fabs = nir_fabs(b, orig_b);
- nir_ssa_def *big = nir_flt(b, nir_imm_int(b, 0x7e800000), fabs);
- nir_ssa_def *small = nir_flt(b, fabs, nir_imm_int(b, 0x00800000));
+ nir_ssa_def *big = nir_fgt_imm(b, fabs, uif(0x7e800000));
+ nir_ssa_def *small = nir_flt_imm(b, fabs, uif(0x00800000));
nir_ssa_def *scaled_down_a = nir_fmul_imm(b, orig_a, 0.25);
nir_ssa_def *scaled_down_b = nir_fmul_imm(b, orig_b, 0.25);
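/* uif() (from util/u_math.h) reinterprets a uint32_t bit pattern as a
 * float: uif(0x7e800000) is 2^126 and uif(0x00800000) is 2^-126
 * (FLT_MIN), the same constants the old code compared against as raw
 * immediates. Scaling both operands by the same factor leaves the
 * quotient unchanged while pulling an out-of-range divisor back toward
 * representable territory.
 */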
nir_deref_instr *level0 = nir_build_deref_array_imm(b, temp_deref, i);
for (int j = 0; j < 6; j++) {
/* just add the inner index to get some different derefs */
- nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd(b, &ind_deref->dest.ssa, nir_imm_int(b, j)));
+ nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd_imm(b, &ind_deref->dest.ssa, j));
nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
}
}