break;
case nir_deref_type_array: {
- nir_const_value *const_deref_index =
- nir_src_as_const_value(deref->arr.index);
-
- if (const_deref_index && index == NULL) {
+ if (nir_src_is_const(deref->arr.index) && index == NULL) {
/* We're still building a direct index */
- base_index += const_deref_index->u32[0] * array_elements;
+ base_index += nir_src_as_uint(deref->arr.index) * array_elements;
} else {
if (index == NULL) {
/* We used to be direct but not anymore */
return true;
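
Every hunk below is the same mechanical substitution: instead of fetching a nir_const_value pointer and null-checking it, callers ask nir_src_is_const() up front and then read the value through a typed getter. As a minimal sketch of the predicate, simplified from Mesa's nir.h:

    static inline bool
    nir_src_is_const(nir_src src)
    {
       /* A source is constant iff it is SSA and produced by a load_const */
       return src.is_ssa &&
              src.ssa->parent_instr->type == nir_instr_type_load_const;
    }
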
if (instr->deref_type == nir_deref_type_array &&
- !nir_src_as_const_value(instr->arr.index))
+ !nir_src_is_const(instr->arr.index))
return true;
instr = nir_deref_instr_parent(instr);
unsigned offset = 0;
for (nir_deref_instr **p = &path.path[1]; *p; p++) {
if ((*p)->deref_type == nir_deref_type_array) {
- offset += nir_src_as_const_value((*p)->arr.index)->u32[0] *
+ offset += nir_src_as_uint((*p)->arr.index) *
type_get_array_stride((*p)->type, size_align);
} else if ((*p)->deref_type == nir_deref_type_struct) {
/* p starts at path[1], so this is safe */
b_tail->deref_type == nir_deref_type_array);
assert(a_tail->arr.index.is_ssa && b_tail->arr.index.is_ssa);
- nir_const_value *a_index_const =
- nir_src_as_const_value(a_tail->arr.index);
- nir_const_value *b_index_const =
- nir_src_as_const_value(b_tail->arr.index);
- if (a_index_const && b_index_const) {
+ if (nir_src_is_const(a_tail->arr.index) &&
+ nir_src_is_const(b_tail->arr.index)) {
/* If they're both direct and have different offsets, they
* don't even alias much less anything else.
*/
- if (a_index_const->u32[0] != b_index_const->u32[0])
+ if (nir_src_as_uint(a_tail->arr.index) !=
+ nir_src_as_uint(b_tail->arr.index))
return 0;
} else if (a_tail->arr.index.ssa == b_tail->arr.index.ssa) {
/* They're the same indirect, continue on */
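
The aliasing hunk above compares the two indices with nir_src_as_uint(). Roughly, the getter asserts that the source is a constant scalar and returns its value; a sketch under the nir_const_value layout in use at the time of this diff (the real helper also handles bit sizes other than 32):

    static inline uint64_t
    nir_src_as_uint(nir_src src)
    {
       assert(nir_src_is_const(src));
       nir_load_const_instr *load =
          nir_instr_as_load_const(src.ssa->parent_instr);
       assert(load->def.num_components == 1);
       return load->value.u32[0];   /* 32-bit case only in this sketch */
    }
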
for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
if (d->deref_type == nir_deref_type_array) {
- nir_const_value *const_index = nir_src_as_const_value(d->arr.index);
-
- if (!const_index)
+ if (!nir_src_is_const(d->arr.index))
return -1;
offset += glsl_count_attribute_slots(d->type, is_vertex_input) *
- const_index->u32[0];
+ nir_src_as_uint(d->arr.index);
}
/* TODO: we can get the offset for structs here see nir_lower_io() */
}
if (!intrin)
continue;
- nir_const_value *val = nir_src_as_const_value(intrin->src[0]);
/* We've found a non-constant value. Bail. */
- if (!val)
+ if (!nir_src_is_const(intrin->src[0]))
return -1;
if (count == -1)
- count = val->i32[0];
+ count = nir_src_as_int(intrin->src[0]);
/* We've found contradictory set_vertex_count intrinsics.
* This can happen if there are early-returns in main() and
* different paths emit different numbers of vertices.
*/
- if (count != val->i32[0])
+ if (count != nir_src_as_int(intrin->src[0]))
return -1;
}
}
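
One detail in the set_vertex_count hunk above: the old code read val->i32[0], so the faithful replacement is the signed getter nir_src_as_int(), not nir_src_as_uint(). It mirrors the unsigned sketch, again simplified to the 32-bit case:

    static inline int64_t
    nir_src_as_int(nir_src src)
    {
       assert(nir_src_is_const(src));
       nir_load_const_instr *load =
          nir_instr_as_load_const(src.ssa->parent_instr);
       assert(load->def.num_components == 1);
       return load->value.i32[0];   /* 32-bit case only in this sketch */
    }
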
if ((intr->intrinsic == nir_intrinsic_store_output) &&
nir_intrinsic_base(intr) == drvloc) {
assert(intr->src[0].is_ssa);
- assert(nir_src_as_const_value(intr->src[1]));
+ assert(nir_src_is_const(intr->src[1]));
return intr->src[0].ssa;
}
}
for (; *deref_arr; deref_arr++) {
nir_deref_instr *deref = *deref_arr;
if (deref->deref_type == nir_deref_type_array &&
- nir_src_as_const_value(deref->arr.index) == NULL) {
+ !nir_src_is_const(deref->arr.index)) {
int length = glsl_get_length(parent->type);
emit_indirect_load_store_deref(b, orig_instr, parent, deref_arr,
nir_deref_instr *base = deref;
while (base->deref_type != nir_deref_type_var) {
if (base->deref_type == nir_deref_type_array &&
- nir_src_as_const_value(base->arr.index) == NULL)
+ !nir_src_is_const(base->arr.index))
has_indirect = true;
base = nir_deref_instr_parent(base);
assert(glsl_type_is_scalar((*p)->type));
/* We always lower indirect dereferences for "compact" array vars. */
- nir_const_value *const_index = nir_src_as_const_value((*p)->arr.index);
- assert(const_index);
-
- const unsigned total_offset = *component + const_index->u32[0];
+ const unsigned index = nir_src_as_uint((*p)->arr.index);
+ const unsigned total_offset = *component + index;
const unsigned slot_offset = total_offset / 4;
*component = total_offset % 4;
return nir_imm_int(b, type_size(glsl_vec4_type()) * slot_offset);
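
As a usage sketch (a hypothetical nir_builder snippet, not part of this diff): any value built with nir_imm_int() is a load_const, so the predicate and getters see straight through nir_src_for_ssa():

    nir_ssa_def *twenty = nir_imm_int(b, 20);
    nir_src src = nir_src_for_ssa(twenty);
    assert(nir_src_is_const(src));       /* immediates are load_const */
    assert(nir_src_as_int(src) == 20);   /* typed readback */
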
unsigned offset = 0;
for (; *p; p++) {
if ((*p)->deref_type == nir_deref_type_array) {
- nir_const_value *c = nir_src_as_const_value((*p)->arr.index);
-
- assert(c); /* must not be indirect dereference */
+ /* must not be indirect dereference */
+ unsigned index = nir_src_as_uint((*p)->arr.index);
unsigned size = glsl_count_attribute_slots((*p)->type, false);
- offset += size * c->u32[0];
+ offset += size * index;
unsigned num_elements = glsl_type_is_array((*p)->type) ?
glsl_get_aoa_size((*p)->type) : 1;
num_elements *= glsl_type_is_matrix(glsl_without_array((*p)->type)) ?
glsl_get_matrix_columns(glsl_without_array((*p)->type)) : 1;
- *element_index += num_elements * c->u32[0];
+ *element_index += num_elements * index;
} else if ((*p)->deref_type == nir_deref_type_struct) {
/* TODO: we could also add struct splitting support to this pass */
break;
if ((*p)->deref_type != nir_deref_type_array)
continue;
- if (!nir_src_as_const_value((*p)->arr.index))
+ if (!nir_src_is_const((*p)->arr.index))
return true;
}
if (d->deref_type != nir_deref_type_array)
continue;
- nir_const_value *const_index = nir_src_as_const_value(d->arr.index);
- if (const_index && !src.reg.indirect) {
- src.reg.base_offset += const_index->u32[0] * inner_array_size;
+ if (nir_src_is_const(d->arr.index) && !src.reg.indirect) {
+ src.reg.base_offset += nir_src_as_uint(d->arr.index) *
+ inner_array_size;
} else {
if (src.reg.indirect) {
assert(src.reg.base_offset == 0);
unsigned drvloc =
state->colors[idx].front->data.driver_location;
if (nir_intrinsic_base(intr) == drvloc) {
- assert(nir_src_as_const_value(intr->src[0]));
+ assert(nir_src_is_const(intr->src[0]));
break;
}
}
return parent->children[deref->strct.index];
case nir_deref_type_array: {
- nir_const_value *const_index = nir_src_as_const_value(deref->arr.index);
- if (const_index) {
- uint32_t index = const_index->u32[0];
+ if (nir_src_is_const(deref->arr.index)) {
+ uint32_t index = nir_src_as_uint(deref->arr.index);
/* This is possible if a loop unrolls and generates an
* out-of-bounds offset. We need to handle this at least
* somewhat gracefully.
return;
case nir_deref_type_array: {
- nir_const_value *const_index = nir_src_as_const_value((*path)->arr.index);
- assert(const_index);
- uint32_t index = const_index->u32[0];
+ uint32_t index = nir_src_as_uint((*path)->arr.index);
if (node->children[index]) {
foreach_deref_node_worker(node->children[index],
}
case nir_deref_type_array: {
- nir_const_value *const_index = nir_src_as_const_value((*path)->arr.index);
- if (!const_index)
+ if (!nir_src_is_const((*path)->arr.index))
return true;
- uint32_t index = const_index->u32[0];
+ uint32_t index = nir_src_as_uint((*path)->arr.index);
/* If there is an indirect at this level, we're aliased. */
if (node->indirect)
return true;
}
- nir_const_value *const_value =
- nir_src_as_const_value(following_if->condition);
-
- if (!const_value)
+ if (!nir_src_is_const(following_if->condition))
return false;
- opt_constant_if(following_if, const_value->u32[0] != 0);
+ opt_constant_if(following_if, nir_src_as_bool(following_if->condition));
return true;
}
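
The opt_constant_if change relies on nir_src_as_bool() matching the old u32[0] != 0 test. With the 32-bit NIR_TRUE/NIR_FALSE booleans in use at the time of this diff, a behaviorally equivalent sketch is:

    static inline bool
    nir_src_as_bool(nir_src src)
    {
       /* 32-bit boolean: NIR_FALSE (0) or NIR_TRUE (~0) */
       return nir_src_as_uint(src) != 0;
    }
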
case nir_deref_type_array:
assert(b->arr.index.is_ssa && d->arr.index.is_ssa);
- nir_const_value *const_b_idx = nir_src_as_const_value(b->arr.index);
- nir_const_value *const_d_idx = nir_src_as_const_value(d->arr.index);
+ const bool const_b_idx = nir_src_is_const(b->arr.index);
+ const bool const_d_idx = nir_src_is_const(d->arr.index);
+ const unsigned b_idx = const_b_idx ? nir_src_as_uint(b->arr.index) : 0;
+ const unsigned d_idx = const_d_idx ? nir_src_as_uint(d->arr.index) : 0;
/* If we don't have an index into the path yet or if this entry in
* the path is at the array index, see if this is a candidate. We're
* in the search deref.
*/
if ((*path_array_idx < 0 || *path_array_idx == i) &&
- const_b_idx && const_b_idx->u32[0] == 0 &&
- const_d_idx && const_d_idx->u32[0] == arr_idx) {
+ const_b_idx && b_idx == 0 &&
+ const_d_idx && d_idx == arr_idx) {
*path_array_idx = i;
continue;
}
* earlier.
*/
if (b->arr.index.ssa == d->arr.index.ssa ||
- (const_b_idx && const_d_idx &&
- const_b_idx->u32[0] == const_d_idx->u32[0]))
+ (const_b_idx && const_d_idx && b_idx == d_idx))
continue;
goto fail;
switch (intrin->intrinsic) {
case nir_intrinsic_vote_any:
case nir_intrinsic_vote_all:
- if (nir_src_as_const_value(intrin->src[0]))
+ if (nir_src_is_const(intrin->src[0]))
replacement = nir_ssa_for_src(&b, intrin->src[0], 1);
break;
case nir_intrinsic_vote_feq:
case nir_intrinsic_vote_ieq:
- if (nir_src_as_const_value(intrin->src[0]))
+ if (nir_src_is_const(intrin->src[0]))
replacement = nir_imm_int(&b, NIR_TRUE);
break;
default:
switch (intrin->intrinsic) {
case nir_intrinsic_store_deref:
dst_deref = nir_src_as_deref(intrin->src[0]);
- src_is_const = nir_src_as_const_value(intrin->src[1]);
+ src_is_const = nir_src_is_const(intrin->src[1]);
break;
case nir_intrinsic_load_deref:
for (unsigned i = 0; i < info->num_levels; i++) {
nir_deref_instr *p = path.path[i + 1];
if (p->deref_type == nir_deref_type_array &&
- nir_src_as_const_value(p->arr.index) == NULL)
+ !nir_src_is_const(p->arr.index))
info->levels[i].split = false;
}
}
if (p->deref_type == nir_deref_type_array_wildcard)
continue;
- nir_const_value *const_index = nir_src_as_const_value(p->arr.index);
- if (const_index && const_index->u32[0] >= info->levels[i].array_len)
+ if (nir_src_is_const(p->arr.index) &&
+ nir_src_as_uint(p->arr.index) >= info->levels[i].array_len)
return true;
}
for (unsigned i = 0; i < info->num_levels; i++) {
if (info->levels[i].split) {
nir_deref_instr *p = path.path[i + 1];
- unsigned index = nir_src_as_const_value(p->arr.index)->u32[0];
+ unsigned index = nir_src_as_uint(p->arr.index);
assert(index < info->levels[i].array_len);
split = &split->splits[index];
}
unsigned max_used;
if (deref->deref_type == nir_deref_type_array) {
- nir_const_value *const_index =
- nir_src_as_const_value(deref->arr.index);
- max_used = const_index ? const_index->u32[0] : UINT_MAX;
+ max_used = nir_src_is_const(deref->arr.index) ?
+ nir_src_as_uint(deref->arr.index) : UINT_MAX;
} else {
/* For wildcards, we read or wrote the whole thing. */
assert(deref->deref_type == nir_deref_type_array_wildcard);
if (p->deref_type == nir_deref_type_array_wildcard)
continue;
- nir_const_value *const_index = nir_src_as_const_value(p->arr.index);
- if (const_index && const_index->u32[0] >= usage->levels[i].array_len) {
+ if (nir_src_is_const(p->arr.index) &&
+ nir_src_as_uint(p->arr.index) >= usage->levels[i].array_len) {
oob = true;
break;
}
if (nir_intrinsic_get_var(store_to_v1, 0) == v[1]) {
ASSERT_TRUE(store_to_v1->src[1].is_ssa);
- nir_const_value *const_index = nir_src_as_const_value(store_to_v1->src[1]);
- ASSERT_TRUE(const_index);
- ASSERT_EQ(const_index->u32[1], 20);
+ ASSERT_TRUE(nir_src_is_const(store_to_v1->src[1]));
+ ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
break;
}
}
if (nir_intrinsic_get_var(store_to_v1, 0) == v[1]) {
ASSERT_TRUE(store_to_v1->src[1].is_ssa);
- nir_const_value *const_index = nir_src_as_const_value(store_to_v1->src[1]);
- ASSERT_TRUE(const_index);
- ASSERT_EQ(const_index->u32[1], 20);
+ ASSERT_TRUE(nir_src_is_const(store_to_v1->src[1]));
+ ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
break;
}
}
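
The two test hunks above are the one place where the scalar getter does not fit: the old assertions read u32[1], i.e. component 1 of a vector source, while nir_src_as_uint() asserts a single-component source. That is why the conversion uses the per-component variant, sketched here under the same assumptions as before:

    static inline uint64_t
    nir_src_comp_as_uint(nir_src src, unsigned comp)
    {
       assert(nir_src_is_const(src));
       nir_load_const_instr *load =
          nir_instr_as_load_const(src.ssa->parent_instr);
       assert(comp < load->def.num_components);
       return load->value.u32[comp];   /* 32-bit case only in this sketch */
    }
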