Remove the remaining is_ssa assertions that the Coccinelle semantic patch missed, via:
sed -i -e '/assert(.*\.is_ssa)/d' $(git grep -l is_ssa)
sed -i -e '/ASSERT.*\.is_ssa)/d' $(git grep -l is_ssa)
+ a manual fixup to restore the assert for parallel copy lowering.
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24432>
}
if (result) {
- assert(instr->dest.dest.is_ssa);
result = ac_to_integer_or_pointer(&ctx->ac, result);
ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
}
vindex =
LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
- assert(instr->dest.is_ssa);
bool can_speculate = access & ACCESS_CAN_REORDER;
res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0, num_channels,
args.access, can_speculate,
args.dmask = 15;
args.attributes = access & ACCESS_CAN_REORDER ? AC_ATTR_INVARIANT_LOAD : 0;
- assert(instr->dest.is_ssa);
args.d16 = instr->dest.ssa.bit_size == 16;
res = ac_build_image_opcode(&ctx->ac, &args);
args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler, dword0, ctx->ac.i32_0, "");
}
- assert(instr->dest.is_ssa);
args.d16 = instr->dest.ssa.bit_size == 16;
args.tfe = instr->is_sparse;
result = ac_build_concat(&ctx->ac, result, code);
if (result) {
- assert(instr->dest.is_ssa);
result = ac_to_integer(&ctx->ac, result);
for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
if (intr->intrinsic != nir_intrinsic_load_front_face)
return false;
- assert(intr->dest.is_ssa);
nir_ssa_def *def = &intr->dest.ssa;
assert(def->bit_size == 1);
{
struct hash_entry *entry;
- assert(src.is_ssa);
nir_intrinsic_instr *load = nir_load_reg_for_def(src.ssa);
if (load == NULL) {
assert(i < src.ssa->num_components);
static struct nir_alu_instr *
ntq_get_alu_parent(nir_src src)
{
- assert(src.is_ssa);
if (src.ssa->parent_instr->type != nir_instr_type_alu)
return NULL;
nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
* src.
*/
for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
- assert(instr->src[i].src.is_ssa);
if (nir_load_reg_for_def(instr->src[i].src.ssa))
return NULL;
}
/* Limit the number of channels returned to both how many the NIR
* instruction writes and how many the instruction could produce.
*/
- assert(instr->dest.is_ssa);
nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->dest.ssa);
if (store == NULL) {
p0_unpacked.return_words_of_texture_data =
/* We compute first the offsets */
nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
while (deref->deref_type != nir_deref_type_var) {
- assert(deref->parent.is_ssa);
nir_deref_instr *parent =
nir_instr_as_deref(deref->parent.ssa->parent_instr);
unsigned base_index = 0;
while (deref->deref_type != nir_deref_type_var) {
- assert(deref->parent.is_ssa);
nir_deref_instr *parent =
nir_instr_as_deref(deref->parent.ssa->parent_instr);
{
switch (instr->type) {
case nir_instr_type_alu:
- assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
return &nir_instr_as_alu(instr)->dest.dest.ssa;
case nir_instr_type_deref:
- assert(nir_instr_as_deref(instr)->dest.is_ssa);
return &nir_instr_as_deref(instr)->dest.ssa;
case nir_instr_type_tex:
- assert(nir_instr_as_tex(instr)->dest.is_ssa);
return &nir_instr_as_tex(instr)->dest.ssa;
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
- assert(intrin->dest.is_ssa);
return &intrin->dest.ssa;
} else {
return NULL;
}
case nir_instr_type_phi:
- assert(nir_instr_as_phi(instr)->dest.is_ssa);
return &nir_instr_as_phi(instr)->dest.ssa;
case nir_instr_type_parallel_copy:
bool
nir_alu_instr_is_copy(nir_alu_instr *instr)
{
- assert(instr->src[0].src.is_ssa);
if (instr->op == nir_op_mov) {
return !instr->src[0].abs &&
assert(s.comp < s.def->num_components);
assert(alu->dest.write_mask & (1u << s.comp));
- assert(alu->src[alu_src_idx].src.is_ssa);
out.def = alu->src[alu_src_idx].src.ssa;
if (nir_op_infos[alu->op].input_sizes[alu_src_idx] == 0) {
static inline uint64_t
nir_alu_src_as_uint(nir_alu_src src)
{
- assert(src.src.is_ssa && "precondition");
nir_ssa_scalar scalar = nir_get_ssa_scalar(src.src.ssa, src.swizzle[0]);
return nir_ssa_scalar_as_uint(scalar);
}
nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
int64_t index)
{
- assert(parent->dest.is_ssa);
nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
parent->dest.ssa.bit_size);
nir_deref_instr *leader)
{
/* If the derefs would have the same parent, don't make a new one */
- assert(leader->parent.is_ssa);
if (leader->parent.ssa == &parent->dest.ssa)
return leader;
glsl_get_length(leader_parent->type));
if (leader->deref_type == nir_deref_type_array) {
- assert(leader->arr.index.is_ssa);
nir_ssa_def *index = nir_i2iN(b, leader->arr.index.ssa,
parent->dest.ssa.bit_size);
return nir_build_deref_array(b, parent, index);
if (idx < 0)
return NULL;
- assert(tex->src[idx].src.is_ssa);
nir_ssa_def *ssa = tex->src[idx].src.ssa;
nir_tex_instr_remove_src(tex, idx);
return ssa;
for (nir_deref_instr *d = instr; d; d = nir_deref_instr_parent(d)) {
/* If anyone is using this deref, leave it alone */
- assert(d->dest.is_ssa);
if (!nir_ssa_def_is_unused(&d->dest.ssa))
break;
} else {
assert(a[*i]->deref_type == nir_deref_type_array &&
b[*i]->deref_type == nir_deref_type_array);
- assert(a[*i]->arr.index.is_ssa && b[*i]->arr.index.is_ssa);
if (nir_src_is_const(a[*i]->arr.index) &&
nir_src_is_const(b[*i]->arr.index)) {
bool progress = false;
for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
- assert(alu->src[i].src.is_ssa);
nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;
if (src_instr->type != nir_instr_type_deref)
continue;
if (src_deref->deref_type != nir_deref_type_cast)
continue;
- assert(src_deref->parent.is_ssa);
nir_instr_rewrite_src_ssa(&alu->instr, &alu->src[i].src,
src_deref->parent.ssa);
progress = true;
bool trivial_array_cast = is_trivial_array_deref_cast(cast);
- assert(cast->dest.is_ssa);
- assert(cast->parent.is_ssa);
nir_foreach_use_including_if_safe(use_src, &cast->dest.ssa) {
assert(!use_src->is_if && "there cannot be if-uses");
parent->deref_type != nir_deref_type_ptr_as_array)
return false;
- assert(parent->parent.is_ssa);
- assert(parent->arr.index.is_ssa);
- assert(deref->arr.index.is_ssa);
deref->arr.in_bounds &= parent->arr.in_bounds;
/* Stomp it to reference the parent */
nir_instr_rewrite_src(&load->instr, &load->src[0],
nir_src_for_ssa(&parent->dest.ssa));
- assert(load->dest.is_ssa);
load->dest.ssa.bit_size = new_bit_size;
load->dest.ssa.num_components = new_num_comps;
load->num_components = new_num_comps;
* results in a LOT of vec4->vec3 casts on loads and stores.
*/
if (is_vector_bitcast_deref(deref, write_mask, true)) {
- assert(store->src[1].is_ssa);
nir_ssa_def *data = store->src[1].ssa;
const unsigned old_bit_size = data->bit_size;
nir_instr_insert_after(&last_phi->instr, &block_pcopy->instr);
nir_foreach_phi(phi, block) {
- assert(phi->dest.is_ssa);
nir_foreach_phi_src(src, phi) {
if (nir_src_is_undef(src->src))
continue;
entry->dest.dest.ssa.divergent = nir_src_is_divergent(src->src);
exec_list_push_tail(&pcopy->entries, &entry->node);
- assert(src->src.is_ssa);
nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);
nir_instr_rewrite_src(&phi->instr, &src->src,
if (nir_src_is_undef(src->src))
continue;
- assert(src->src.is_ssa);
entry = _mesa_hash_table_search(state->merge_node_table, src->src.ssa);
assert(entry != NULL);
merge_node *src_node = (merge_node *)entry->data;
nir_foreach_parallel_copy_entry(entry, pcopy) {
assert(!entry->dest_is_reg);
- assert(entry->dest.dest.is_ssa);
assert(nir_ssa_def_is_unused(&entry->dest.dest.ssa));
/* Parallel copy destinations will always be registers */
nir_foreach_parallel_copy_entry(entry, pcopy) {
assert(!entry->src_is_reg);
- assert(entry->src.is_ssa);
nir_ssa_def *reg = reg_for_ssa_def(entry->src.ssa, state);
if (reg == NULL)
continue;
unsigned num_copies = 0;
nir_foreach_parallel_copy_entry(entry, pcopy) {
/* Sources may be SSA but destinations are always registers */
- assert(entry->src.is_ssa);
- assert(entry->dest_is_reg && entry->dest.dest.is_ssa);
+ assert(entry->dest_is_reg);
if (entry->src_is_reg && entry->src.ssa == entry->dest.reg.ssa)
continue;
if (entry->src_is_reg && entry->src.ssa == entry->dest.reg.ssa)
continue;
- assert(entry->src.is_ssa);
struct copy_value src_value = {
.is_reg = entry->src_is_reg,
.ssa = entry->src.ssa,
values[src_idx] = src_value;
}
- assert(entry->dest_is_reg && entry->dest.dest.is_ssa);
+ assert(entry->dest_is_reg);
struct copy_value dest_value = {
.is_reg = true,
.ssa = entry->dest.reg.ssa,
bool progress = false;
nir_foreach_phi_safe(phi, block) {
- assert(phi->dest.is_ssa);
nir_ssa_def *reg = decl_reg_for_ssa_def(&b, &phi->dest.ssa);
b.cursor = nir_after_instr(&phi->instr);
nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_load_reg(&b, reg));
nir_foreach_phi_src(src, phi) {
- assert(src->src.is_ssa);
_mesa_set_add(visited_blocks, src->src.ssa->parent_instr->block);
place_phi_read(&b, reg, src->src.ssa, src->pred, visited_blocks);
if (load->intrinsic != nir_intrinsic_load_reg)
return false;
- assert(load->src[0].is_ssa);
nir_ssa_def *reg = load->src[0].ssa;
return reg->index >= old_num_ssa;
unsigned param_idx = nir_intrinsic_param_idx(load);
assert(param_idx < impl->function->num_params);
- assert(load->dest.is_ssa);
nir_ssa_def_rewrite_uses(&load->dest.ssa,
params[param_idx]);
* this for loads in the same block as the use because uses of loads
* which cross block boundaries aren't trivial anyway.
*/
- assert(alu->src[0].src.is_ssa);
nir_intrinsic_instr *load = nir_load_reg_for_def(alu->src[0].src.ssa);
if (load != NULL) {
/* Duplicate the load before changing it in case there are other
case nir_intrinsic_reduce:
case nir_intrinsic_inclusive_scan:
case nir_intrinsic_exclusive_scan: {
- assert(intrin->src[0].is_ssa && intrin->dest.is_ssa);
const unsigned old_bit_size = intrin->dest.ssa.bit_size;
assert(old_bit_size < bit_size);
lower_convert_alu_types_instr(nir_builder *b, nir_intrinsic_instr *conv)
{
assert(conv->intrinsic == nir_intrinsic_convert_alu_types);
- assert(conv->src[0].is_ssa && conv->dest.is_ssa);
b->cursor = nir_instr_remove(&conv->instr);
nir_ssa_def *val =
loc.reg, .base = loc.base_offset);
}
- assert(intrin->dest.is_ssa);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, value);
nir_instr_remove(&intrin->instr);
state->progress = true;
struct reg_location loc = get_deref_reg_location(deref, state);
nir_intrinsic_instr *decl = nir_reg_get_decl(loc.reg);
- assert(intrin->src[1].is_ssa);
nir_ssa_def *val = intrin->src[1].ssa;
unsigned num_array_elems = nir_intrinsic_num_array_elems(decl);
unsigned write_mask = nir_intrinsic_write_mask(intrin);
nir_lower_mem_access_bit_sizes_cb mem_access_size_align_cb,
const void *cb_data, bool allow_unaligned_stores_as_atomics)
{
- assert(intrin->src[0].is_ssa);
nir_ssa_def *value = intrin->src[0].ssa;
assert(intrin->num_components == value->num_components);
} else {
assert(intrin->intrinsic == nir_intrinsic_store_deref);
- assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
if (value->bit_size == 1)
value = nir_b2b32(b, value);
if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
return NULL;
- assert(intrin->dest.is_ssa);
const unsigned bit_size = intrin->dest.ssa.bit_size;
switch (intrin->intrinsic) {
case nir_intrinsic_interp_deref_at_centroid:
return nir_load_barycentric_coord_centroid(b, 32, .interp_mode = interp_mode);
case nir_intrinsic_interp_deref_at_sample:
- assert(intrin->src[1].is_ssa);
return nir_load_barycentric_coord_at_sample(b, 32, intrin->src[1].ssa,
.interp_mode = interp_mode);
case nir_intrinsic_interp_deref_at_offset:
- assert(intrin->src[1].is_ssa);
return nir_load_barycentric_coord_at_offset(b, 32, intrin->src[1].ssa,
.interp_mode = interp_mode);
default:
* couple of ray-tracing intrinsics which are matrices.
*/
assert(deref->deref_type == nir_deref_type_array);
- assert(deref->arr.index.is_ssa);
column = deref->arr.index.ssa;
nir_deref_instr *arr_deref = deref;
deref = nir_deref_instr_parent(deref);
/* Unfortunately, there's just no good way to handle wildcards except to
* flip the chain around and walk the list from variable to final pointer.
*/
- assert(copy->src[0].is_ssa && copy->src[1].is_ssa);
nir_deref_instr *dst = nir_instr_as_deref(copy->src[0].ssa->parent_instr);
nir_deref_instr *src = nir_instr_as_deref(copy->src[1].ssa->parent_instr);
continue;
case nir_deref_type_array: {
- assert(b->arr.index.is_ssa && d->arr.index.is_ssa);
const bool const_b_idx = nir_src_is_const(b->arr.index);
const bool const_d_idx = nir_src_is_const(d->arr.index);
const unsigned b_idx = const_b_idx ? nir_src_as_uint(b->arr.index) : 0;
alu->op != nir_op_irem)
return false;
- assert(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa);
if (alu->dest.dest.ssa.bit_size < *min_bit_size)
return false;
nir_schedule_regs_freed_state *state)
{
assert(nir_is_store_reg(store));
- assert(store->src[0].is_ssa && store->src[1].is_ssa);
nir_schedule_regs_freed_src_cb(&store->src[0], state);
if (store->intrinsic == nir_intrinsic_store_reg_indirect)
nir_schedule_scoreboard *scoreboard)
{
assert(nir_is_store_reg(store));
- assert(store->src[0].is_ssa && store->src[1].is_ssa);
nir_ssa_def *reg = store->src[1].ssa;
nir_schedule_mark_src_scheduled(&store->src[0], scoreboard);
validate_assert(state, instr->cast.align_offset == 0);
}
} else {
- /* We require the parent to be SSA. This may be lifted in the future */
- validate_assert(state, instr->parent.is_ssa);
-
/* The parent pointer value must have the same number of components
* as the destination.
*/
unsigned bit_size,
validate_state *state)
{
- if (!validate_assert(state, handle_src.is_ssa))
- return;
-
nir_ssa_def *handle = handle_src.ssa;
nir_instr *parent = handle->parent_instr;
{
state->instr = &instr->instr;
- validate_assert(state, instr->dest.is_ssa);
exec_list_validate(&instr->srcs);
nir_foreach_phi_src(src, instr) {
if (src->pred == pred) {
- validate_assert(state, src->src.is_ssa);
validate_src(&src->src, state, instr->dest.ssa.bit_size,
instr->dest.ssa.num_components);
state->instr = NULL;
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
- ASSERT_TRUE(first_store->src[1].is_ssa);
nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
- ASSERT_TRUE(third_store->src[1].is_ssa);
EXPECT_EQ(first_store->src[1].ssa, third_store->src[1].ssa);
}
ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);
nir_intrinsic_instr *first_copy = get_intrinsic(nir_intrinsic_copy_deref, 0);
- ASSERT_TRUE(first_copy->src[1].is_ssa);
nir_intrinsic_instr *second_copy = get_intrinsic(nir_intrinsic_copy_deref, 1);
- ASSERT_TRUE(second_copy->src[1].is_ssa);
EXPECT_EQ(first_copy->src[1].ssa, second_copy->src[1].ssa);
}
for (int i = 0; i < 2; i++) {
nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
- ASSERT_TRUE(store->src[1].is_ssa);
EXPECT_EQ(store->src[1].ssa, stored_value);
}
}
/* Store to v[1] should use second_value directly. */
nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
- ASSERT_TRUE(store_to_v1->src[1].is_ssa);
EXPECT_EQ(store_to_v1->src[1].ssa, second_value);
}
*/
nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 3);
ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
- ASSERT_TRUE(store_to_v1->src[1].is_ssa);
EXPECT_EQ(store_to_v1->src[1].ssa, third_value);
}
for (int i = 0; i < 2; i++) {
nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
- ASSERT_TRUE(store->src[1].is_ssa);
EXPECT_EQ(store->src[1].ssa, stored_value);
}
}
ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);
nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 2);
- ASSERT_TRUE(store->src[1].is_ssa);
/* NOTE: The ALU instruction is how we get the vec.y. */
ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);
nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 1);
- ASSERT_TRUE(store->src[1].is_ssa);
ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
}
/* Third store will just use the value from first store. */
nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
- ASSERT_TRUE(third_store->src[1].is_ssa);
EXPECT_EQ(third_store->src[1].ssa, first_store->src[1].ssa);
/* Fourth store will compose first and second store values. */
nir_intrinsic_instr *fourth_store = get_intrinsic(nir_intrinsic_store_deref, 3);
- ASSERT_TRUE(fourth_store->src[1].is_ssa);
EXPECT_TRUE(nir_src_as_alu_instr(fourth_store->src[1]));
}
/* Store to vec[idx] propagated to out. */
nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
- ASSERT_TRUE(first->src[1].is_ssa);
- ASSERT_TRUE(second->src[1].is_ssa);
EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
}
/* Store to vec[idx] propagated to out. */
nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
- ASSERT_TRUE(second->src[1].is_ssa);
- ASSERT_TRUE(third->src[1].is_ssa);
EXPECT_EQ(second->src[1].ssa, third->src[1].ssa);
}
/* Store to arr[idx] propagated to out. */
nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
- ASSERT_TRUE(first->src[1].is_ssa);
- ASSERT_TRUE(second->src[1].is_ssa);
EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
}
/* Store to b0.x propagated to out. */
nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
- ASSERT_TRUE(first->src[1].is_ssa);
- ASSERT_TRUE(third->src[1].is_ssa);
EXPECT_EQ(first->src[1].ssa, third->src[1].ssa);
}
/* Store to b0.x propagated to out. */
nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
- ASSERT_TRUE(first->src[1].is_ssa);
- ASSERT_TRUE(third->src[1].is_ssa);
EXPECT_EQ(first->src[1].ssa, third->src[1].ssa);
}
EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
- ASSERT_TRUE(store->src[1].is_ssa);
EXPECT_EQ(store->src[1].ssa, load_v2);
}
EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
- ASSERT_TRUE(store->src[1].is_ssa);
EXPECT_EQ(store->src[1].ssa, load_v2);
}
EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
- ASSERT_TRUE(store->src[1].is_ssa);
EXPECT_EQ(store->src[1].ssa, load_v2);
}
EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
- ASSERT_TRUE(store->src[1].is_ssa);
EXPECT_EQ(store->src[1].ssa, load_v2);
}
EXPECT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));
nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
- ASSERT_TRUE(first_store->src[1].is_ssa);
EXPECT_EQ(first_store->src[1].ssa, load_v2);
nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
- ASSERT_TRUE(second_store->src[1].is_ssa);
EXPECT_EQ(second_store->src[1].ssa, load_v3);
}
if (opc == OPC_META_TEX_PREFETCH) {
int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
- compile_assert(ctx, tex->src[idx].src.is_ssa);
sam = ir3_SAM(ctx->in_block, opc, type, MASK(ncomp), 0, NULL,
get_barycentric(ctx, IJ_PERSP_PIXEL), 0);
nir_instr_remove(&instr->instr);
for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
/* If anyone is using this deref, leave it alone */
- assert(d->dest.is_ssa);
if (!list_is_empty(&d->dest.ssa.uses))
break;
auto buf_id = nir_imm_int(b, R600_BUFFER_INFO_CONST_BUFFER);
- assert(intr->src[0].is_ssa);
auto clip_vtx = intr->src[0].ssa;
for (int i = 0; i < 8; ++i) {
{
auto intr = nir_instr_as_intrinsic(instr);
assert(intr->intrinsic == nir_intrinsic_load_ubo_vec4);
- assert(intr->src[0].is_ssa);
auto parent = intr->src[0].ssa->parent_instr;
assert(glsl_get_vector_elements(glsl_without_array(var2->type)) < 4);
if (srcs[var2->data.location_frac] == &instr_undef->def) {
- assert(intr2->src[1].is_ssa);
assert(intr2->src[1].ssa);
srcs[var2->data.location_frac] = intr2->src[1].ssa;
}
}
/* Ignore WPOS; it doesn't require interpolation. */
- assert(intrin->dest.is_ssa);
if (!is_used_in_not_interp_frag_coord(&intrin->dest.ssa))
continue;
nir_ssa_bind_infos[instr->dest.ssa.index].binding =
nir_intrinsic_binding(instr);
- assert(instr->src[1].is_ssa);
if (nir_intrinsic_resource_access_intel(instr) &
nir_resource_intel_non_uniform) {
nir_resource_values[instr->dest.ssa.index] = fs_reg();
* assuming an alpha of 1.0 and letting the sample mask pass through
* unaltered seems like the kindest thing to do to apps.
*/
- assert(color0_write->src[0].is_ssa);
nir_ssa_def *color0 = color0_write->src[0].ssa;
if (color0->num_components < 4)
goto skip;
- assert(sample_mask_write->src[0].is_ssa);
nir_ssa_def *sample_mask = sample_mask_write->src[0].ssa;
if (sample_mask_write_first) {
if (add->exact)
return false;
- assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);
/* This, is the case a + a. We would rather handle this with an
* algebraic reduction than fuse it. Also, we want to only fuse
{
b->cursor = nir_before_instr(&intrin->instr);
- assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
nir_ssa_def *index =
build_res_reindex(b, intrin->src[0].ssa,
intrin->src[1].ssa);
nir_address_format addr_format =
addr_format_for_desc_type(nir_intrinsic_desc_type(intrin), state);
- assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
nir_ssa_def *index =
build_res_reindex(b, intrin->src[0].ssa,
intrin->src[1].ssa,
if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
return false;
- assert(intrin->dest.is_ssa);
const struct dxil_spirv_runtime_conf *conf =
(const struct dxil_spirv_runtime_conf *)cb_data;
break;
case nir_intrinsic_vulkan_resource_reindex:
- assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
res = build_res_reindex(b, intrin->src[0].ssa, intrin->src[1].ssa,
addr_format);
break;