We only see SSA now, so the is_ssa assertions are trivially true.
Remove them tree-wide via this Coccinelle patch:

    @@
    expression x;
    @@

    -assert(x.is_ssa);
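
For reference, a semantic patch like this is applied tree-wide with
Coccinelle's spatch tool, along these lines (the .cocci file name is
illustrative, not part of this change):

    spatch --sp-file is_ssa.cocci --in-place --dir src/
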
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24432>
static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
- assert(src.is_ssa);
return nir->ssa_defs[src.ssa->index];
}
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
- assert(instr->dest.is_ssa);
-
/* Buffers don't support A16. */
if (args->a16)
args->coords[0] = LLVMBuildZExt(ctx->ac.builder, args->coords[0], ctx->ac.i32, "");
b->cursor = nir_after_instr(&instr->instr);
- assert(instr->dest.is_ssa);
nir_ssa_def *result = &instr->dest.ssa;
if (util_format_is_pure_uint(format)) {
result = nir_format_unpack_uint(b, result, bits16, 4);
intr->num_components != 4)
continue;
- assert(intr->src[0].is_ssa);
-
lower_line_smooth_intrinsic(state, &b, intr);
progress = true;
}
for (nir_deref_instr *d = deref; d->deref_type != nir_deref_type_var;
d = nir_deref_instr_parent(d)) {
assert(d->deref_type == nir_deref_type_array);
- assert(d->arr.index.is_ssa);
unsigned array_stride = ATOMIC_COUNTER_SIZE;
if (glsl_type_is_array(d->type))
break;
/* We use nir_address_format_32bit_index_offset */
- assert(deref->dest.is_ssa);
assert(deref->dest.ssa.bit_size == 32);
deref->dest.ssa.num_components = 2;
* from the SSBO.
*/
if (glsl_type_is_boolean(deref->type)) {
- assert(intrin->dest.is_ssa);
b.cursor = nir_after_instr(&intrin->instr);
intrin->dest.ssa.bit_size = 32;
nir_ssa_def *bval = nir_i2b(&b, &intrin->dest.ssa);
* step but in practice it doesn't cost much.
*/
if (glsl_type_is_boolean(deref->type)) {
- assert(intrin->src[1].is_ssa);
b.cursor = nir_before_instr(&intrin->instr);
nir_ssa_def *ival = nir_b2i32(&b, intrin->src[1].ssa);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
b->cursor = nir_before_instr(&instr->instr);
if (texture_idx >= 0) {
- assert(instr->src[texture_idx].src.is_ssa);
-
nir_deref_instr *texture_deref =
lower_deref(b, state, nir_src_as_deref(instr->src[texture_idx].src));
/* only lower non-bindless: */
}
if (sampler_idx >= 0) {
- assert(instr->src[sampler_idx].src.is_ssa);
nir_deref_instr *sampler_deref =
lower_deref(b, state, nir_src_as_deref(instr->src[sampler_idx].src));
/* only lower non-bindless: */
/* Remove from this list */
list_del(&src->src.use_link);
- assert(src->src.is_ssa);
src->src.ssa = remap_local(state, src->src.ssa);
list_addtail(&src->src.use_link, &src->src.ssa->uses);
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
nir_foreach_phi(phi, block) {
- assert(phi->dest.is_ssa);
merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);
nir_foreach_phi_src(src, phi) {
- assert(src->src.is_ssa);
if (nir_src_is_undef(src->src))
continue;
{
nir_foreach_parallel_copy_entry(entry, pcopy) {
assert(!entry->src_is_reg);
- assert(entry->src.is_ssa);
assert(!entry->dest_is_reg);
- assert(entry->dest.dest.is_ssa);
assert(entry->dest.dest.ssa.num_components ==
entry->src.ssa->num_components);
#ifndef NDEBUG
nir_phi_instr *phi = nir_instr_as_phi(instr);
- assert(phi->dest.is_ssa);
struct hash_entry *entry =
_mesa_hash_table_search(state->merge_node_table, &phi->dest.ssa);
assert(entry != NULL);
{
switch (instr->type) {
case nir_instr_type_alu:
- assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
return &nir_instr_as_alu(instr)->dest.dest.ssa;
case nir_instr_type_deref:
- assert(nir_instr_as_deref(instr)->dest.is_ssa);
return &nir_instr_as_deref(instr)->dest.ssa;
case nir_instr_type_load_const:
return &nir_instr_as_load_const(instr)->def;
case nir_instr_type_phi:
- assert(nir_instr_as_phi(instr)->dest.is_ssa);
return &nir_instr_as_phi(instr)->dest.ssa;
case nir_instr_type_intrinsic:
- assert(nir_instr_as_intrinsic(instr)->dest.is_ssa);
return &nir_instr_as_intrinsic(instr)->dest.ssa;
case nir_instr_type_tex:
- assert(nir_instr_as_tex(instr)->dest.is_ssa);
return &nir_instr_as_tex(instr)->dest.ssa;
default:
unreachable("We never ask for any of these");
nir_legacy_float_mod_folds(nir_alu_instr *mod)
{
assert(mod->op == nir_op_fabs || mod->op == nir_op_fneg);
- assert(mod->dest.dest.is_ssa);
/* No legacy user supports fp64 modifiers */
if (mod->dest.dest.ssa.bit_size == 64)
/* Otherwise, we're good */
nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
- assert(alu->dest.dest.is_ssa);
*def = &alu->dest.dest.ssa;
return true;
}
memcpy(live, succ->live_in, state->bitset_words * sizeof *live);
nir_foreach_phi(phi, succ) {
- assert(phi->dest.is_ssa);
set_ssa_def_dead(&phi->dest.ssa, live);
}
{
nir_alu_instr *first = NULL;
nir_foreach_phi_src(src, phi) {
- assert(src->src.is_ssa);
if (src->src.ssa->parent_instr->type != nir_instr_type_alu)
return NULL;
alu_src_has_identity_swizzle(nir_alu_instr *alu, unsigned src_idx)
{
assert(nir_op_infos[alu->op].input_sizes[src_idx] == 0);
- assert(alu->dest.dest.is_ssa);
for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
if (alu->src[src_idx].swizzle[i] != i)
return false;
if (d->deref_type != nir_deref_type_array)
continue;
- assert(d->arr.index.is_ssa);
nir_loop_variable *array_index = get_loop_var(d->arr.index.ssa, state);
if (array_index->type != basic_induction)
list_for_each_entry(nir_loop_terminator, terminator,
&state->loop->info->loop_terminator_list,
loop_terminator_link) {
- assert(terminator->nif->condition.is_ssa);
nir_ssa_scalar cond = { terminator->nif->condition.ssa, 0 };
if (!nir_ssa_scalar_is_alu(cond)) {
nir_ssa_def *lowered = NULL;
- assert(instr->dest.dest.is_ssa);
-
b->cursor = nir_before_instr(&instr->instr);
b->exact = instr->exact;
/* There is no ALU instruction which has a scalar destination, scalar
* src[0], and some other vector source.
*/
- assert(alu->dest.dest.is_ssa);
- assert(alu->src[0].src.is_ssa);
return alu->dest.dest.ssa.num_components > 1 ||
nir_op_infos[alu->op].input_sizes[0] > 1;
}
unsigned num_src = nir_op_infos[alu->op].num_inputs;
unsigned i, chan;
- assert(alu->dest.dest.is_ssa);
assert(alu->dest.write_mask != 0);
b->exact = alu->exact;
b.cursor = nir_after_instr(&intrin->instr);
if (intrin->intrinsic == nir_intrinsic_store_deref) {
- assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
if (nir_src_is_const(deref->arr.index)) {
lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
nir_phi_instr *last_phi)
{
- assert(phi->dest.is_ssa);
unsigned old_bit_size = phi->dest.ssa.bit_size;
assert(old_bit_size < bit_size);
nir_foreach_phi_src(src, phi) {
b->cursor = nir_after_block_before_jump(src->pred);
- assert(src->src.is_ssa);
nir_ssa_def *new_src = nir_u2uN(b, src->src.ssa, bit_size);
nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));
return false;
nir_phi_instr *phi = nir_instr_as_phi(instr);
- assert(phi->dest.is_ssa);
if (phi->dest.ssa.bit_size <= 32)
return false;
/* Grab the input color. We always want 4 channels during blend. Dead
* code will clean up any channels we don't need.
*/
- assert(store->src[0].is_ssa);
nir_ssa_def *src = nir_pad_vector(b, store->src[0].ssa, 4);
assert(nir_src_as_uint(store->src[1]) == 0 && "store_output invariant");
if (dst_bit_size == 0) {
dst_bit_size = src_bit_size;
} else if (src_bit_size != dst_bit_size) {
- assert(phi_src->src.is_ssa);
b->cursor = nir_before_src(&phi_src->src);
nir_op convert_op = get_bool_convert_opcode(dst_bit_size);
nir_ssa_def *new_src =
{
const nir_op_info *op_info = &nir_op_infos[alu->op];
- assert(alu->dest.dest.is_ssa);
-
switch (alu->op) {
case nir_op_mov:
case nir_op_vec2:
NIR_SRC_INIT);
continue;
} else {
- assert(tex->src[i].src.is_ssa);
b.cursor = nir_before_instr(&tex->instr);
/* Back-ends expect a 32-bit thing, not 64-bit */
nir_ssa_def *offset = nir_u2u32(&b, tex->src[i].src.ssa);
if (!lower_image_derefs)
break;
- assert(intrin->src[0].is_ssa);
b.cursor = nir_before_instr(&intrin->instr);
/* Back-ends expect a 32-bit thing, not 64-bit */
nir_ssa_def *offset = nir_u2u32(&b, intrin->src[0].ssa);
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if ((intr->intrinsic == nir_intrinsic_store_output) &&
nir_intrinsic_base(intr) == drvloc) {
- assert(intr->src[0].is_ssa);
assert(nir_src_is_const(intr->src[1]));
return intr->src[0].ssa;
}
if (!(options & nir_lower_fp64_full_software))
return NULL;
-
- assert(instr->dest.dest.is_ssa);
-
const char *name;
const char *mangled_name;
const struct glsl_type *return_type = glsl_uint64_t_type();
const nir_alu_instr *alu = nir_instr_as_alu(instr);
- assert(alu->dest.dest.is_ssa);
bool is_64 = alu->dest.dest.ssa.bit_size == 64;
unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
nir_tex_instr *tex;
nir_ssa_def *def;
- assert(intr->dest.is_ssa);
-
b->cursor = nir_before_instr(&intr->instr);
texcoord = get_texcoord(b, state);
return false;
b->cursor = nir_after_instr(&instr->instr);
- assert(instr->src[1].is_ssa);
nir_ssa_def *frag_color = instr->src[1].ssa;
ralloc_free(out->name);
break;
}
else {
- assert(condition.is_ssa);
nir_ssa_def *ssa_def = condition.ssa;
assert(ssa_def->bit_size == 1);
assert(ssa_def->num_components == 1);
nir_pop_if(b, NULL);
if (has_dest) {
- assert(intr->dest.is_ssa);
nir_ssa_def *phi = nir_if_phi(b, &intr->dest.ssa, undef);
/* We can't use nir_ssa_def_rewrite_uses_after on phis, so use the global
assert(path.path[0] == base);
if (intrin->intrinsic == nir_intrinsic_store_deref) {
- assert(intrin->src[1].is_ssa);
emit_load_store_deref(b, intrin, base, &path.path[1],
NULL, intrin->src[1].ssa);
} else {
case nir_op_u2u8:
case nir_op_u2u16:
case nir_op_u2u32:
- assert(alu->src[0].src.is_ssa);
if (alu->src[0].src.ssa->bit_size != 64)
return false;
break;
case nir_op_bcsel:
- assert(alu->src[1].src.is_ssa);
- assert(alu->src[2].src.is_ssa);
assert(alu->src[1].src.ssa->bit_size ==
alu->src[2].src.ssa->bit_size);
if (alu->src[1].src.ssa->bit_size != 64)
case nir_op_ilt:
case nir_op_uge:
case nir_op_ige:
- assert(alu->src[0].src.is_ssa);
- assert(alu->src[1].src.is_ssa);
assert(alu->src[0].src.ssa->bit_size ==
alu->src[1].src.ssa->bit_size);
if (alu->src[0].src.ssa->bit_size != 64)
case nir_op_ufind_msb:
case nir_op_find_lsb:
case nir_op_bit_count:
- assert(alu->src[0].src.is_ssa);
if (alu->src[0].src.ssa->bit_size != 64)
return false;
break;
case nir_op_amul:
- assert(alu->dest.dest.is_ssa);
if (options->has_imul24)
return false;
if (alu->dest.dest.ssa.bit_size != 64)
case nir_op_u2f32:
case nir_op_i2f16:
case nir_op_u2f16:
- assert(alu->src[0].src.is_ssa);
if (alu->src[0].src.ssa->bit_size != 64)
return false;
break;
case nir_op_f2i64:
FALLTHROUGH;
default:
- assert(alu->dest.dest.is_ssa);
if (alu->dest.dest.ssa.bit_size != 64)
return false;
break;
* no larger than 256 which seems reasonable.) We can then scan on each of
* the chunks and add them back together at the end.
*/
- assert(intrin->src[0].is_ssa);
nir_ssa_def *x = intrin->src[0].ssa;
nir_ssa_def *x_low =
nir_u2u32(b, nir_iand_imm(b, x, 0xffffff));
case nir_intrinsic_quad_swap_horizontal:
case nir_intrinsic_quad_swap_vertical:
case nir_intrinsic_quad_swap_diagonal:
- assert(intrin->dest.is_ssa);
return intrin->dest.ssa.bit_size == 64 &&
(options->lower_int64_options & nir_lower_subgroup_shuffle64);
case nir_intrinsic_vote_ieq:
- assert(intrin->src[0].is_ssa);
return intrin->src[0].ssa->bit_size == 64 &&
(options->lower_int64_options & nir_lower_vote_ieq64);
case nir_intrinsic_reduce:
case nir_intrinsic_inclusive_scan:
case nir_intrinsic_exclusive_scan:
- assert(intrin->dest.is_ssa);
if (intrin->dest.ssa.bit_size != 64)
return false;
return split_64bit_subgroup_op(b, intrin);
case nir_intrinsic_vote_ieq:
- assert(intrin->src[0].is_ssa);
return lower_vote_ieq(b, intrin->src[0].ssa);
case nir_intrinsic_reduce:
if (intr->intrinsic != nir_intrinsic_load_interpolated_input)
return false;
- assert(intr->dest.is_ssa);
- assert(intr->src[0].is_ssa);
- assert(intr->src[1].is_ssa);
-
nir_intrinsic_instr *bary_intrinsic =
nir_instr_as_intrinsic(intr->src[0].ssa->parent_instr);
nir_ssa_def *array_index, nir_variable *var, nir_ssa_def *offset,
unsigned component, const struct glsl_type *type)
{
- assert(intrin->dest.is_ssa);
if (intrin->dest.ssa.bit_size == 64 &&
(state->options & nir_lower_io_lower_64bit_to_32)) {
nir_builder *b = &state->builder;
nir_ssa_def *array_index, nir_variable *var, nir_ssa_def *offset,
unsigned component, const struct glsl_type *type)
{
- assert(intrin->src[1].is_ssa);
if (intrin->src[1].ssa->bit_size == 64 &&
(state->options & nir_lower_io_lower_64bit_to_32)) {
nir_builder *b = &state->builder;
var->data.precision == GLSL_PRECISION_MEDIUM ||
var->data.precision == GLSL_PRECISION_LOW;
- assert(intrin->dest.is_ssa);
nir_ssa_def *load =
nir_load_interpolated_input(&state->builder,
intrin->dest.ssa.num_components,
nir_intrinsic_set_range(load, range);
}
- assert(intrin->dest.is_ssa);
load->num_components = num_components;
nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
nir_ssa_def *base_addr,
nir_address_format addr_format)
{
- assert(deref->dest.is_ssa);
switch (deref->deref_type) {
case nir_deref_type_var:
return build_addr_for_var(b, deref->var, addr_format);
}
case nir_intrinsic_store_deref: {
- assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
if (vec_stride > scalar_size) {
}
case nir_intrinsic_store_deref_block_intel: {
- assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
const nir_component_mask_t write_mask = 0;
build_explicit_io_store(b, intrin, addr, addr_format,
nir_ssa_def *base_addr = NULL;
if (deref->deref_type != nir_deref_type_var) {
- assert(deref->parent.is_ssa);
base_addr = deref->parent.ssa;
}
lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
nir_address_format addr_format)
{
- assert(intrin->src[0].is_ssa);
nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
}
return;
}
- assert(intrin->src[0].is_ssa);
nir_ssa_def *addr = intrin->src[0].ssa;
b->cursor = nir_instr_remove(&intrin->instr);
{
b->cursor = nir_before_instr(&intr->instr);
- assert(intr->dest.is_ssa);
-
nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < intr->num_components; i++) {
{
b->cursor = nir_before_instr(&intr->instr);
- assert(intr->dest.is_ssa);
-
nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
nir_ssa_def *base_offset = nir_get_io_offset_src(intr)->ssa;
{
b->cursor = nir_before_instr(&intr->instr);
- assert(intr->dest.is_ssa);
-
nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
nir_variable **chan_vars;
nir_component_mask_t old_wrmask = nir_intrinsic_write_mask(intrin);
- assert(intrin->src[1].is_ssa);
nir_ssa_def *old_value = intrin->src[1].ssa;
nir_ssa_scalar comps[4];
for (unsigned c = 0; c < intrin->num_components; c++) {
nir_src *intrin_offset_src = nir_get_io_offset_src(intrin);
for (unsigned i = 0; i < info->num_srcs; i++) {
- assert(intrin->src[i].is_ssa);
if (i == 0 && data != NULL) {
assert(!info->has_dest);
assert(&intrin->src[i] != intrin_offset_src);
nir_intrinsic_set_align(dup, align_mul, align_offset);
if (info->has_dest) {
- assert(intrin->dest.is_ssa);
nir_ssa_dest_init(&dup->instr, &dup->dest, num_components, bit_size);
} else {
nir_intrinsic_set_write_mask(dup, (1 << num_components) - 1);
nir_lower_mem_access_bit_sizes_cb mem_access_size_align_cb,
const void *cb_data)
{
- assert(intrin->dest.is_ssa);
const unsigned bit_size = intrin->dest.ssa.bit_size;
const unsigned num_components = intrin->dest.ssa.num_components;
const unsigned bytes_read = num_components * (bit_size / 8);
}
} else {
found_non_const_memcpy = true;
- assert(cpy->src[2].is_ssa);
nir_ssa_def *size = cpy->src[2].ssa;
/* In this case, we don't have any idea what the size is so we
switch (intrin->intrinsic) {
case nir_intrinsic_load_view_index: {
- assert(intrin->dest.is_ssa);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, view_index);
break;
}
if (nir_src_is_const(deref->arr.index))
return false;
- assert(deref->arr.index.is_ssa);
h->handle = deref->arr.index.ssa;
h->parent_deref = parent;
b->cursor = nir_before_instr(instr);
- assert(intr->src[1].is_ssa);
assert(intr->src[1].ssa->num_components == 1);
nir_ssa_def *psiz = intr->src[1].ssa;
out_src_idx = 1;
}
- assert(intr->src[out_src_idx].is_ssa);
assert(intr->num_components == 4);
b->cursor = nir_before_instr(&intr->instr);
nir_intrinsic_src_type(intr) != nir_type_float32)
return false;
- assert(intr->src[0].is_ssa);
assert(intr->num_components == 4);
b->cursor = nir_before_instr(&intr->instr);
switch (intrin->intrinsic) {
case nir_intrinsic_image_deref_load: {
- assert(intrin->src[1].is_ssa);
nir_ssa_def *coord =
nir_trim_vector(b, intrin->src[1].ssa, coord_components);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
tex->coord_components = coord_components;
- assert(intrin->src[3].is_ssa);
nir_ssa_def *lod = intrin->src[3].ssa;
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
}
case nir_intrinsic_image_deref_size: {
- assert(intrin->src[1].is_ssa);
nir_ssa_def *lod = intrin->src[1].ssa;
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
/* We compute first the offsets */
nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
while (deref->deref_type != nir_deref_type_var) {
- assert(deref->parent.is_ssa);
nir_deref_instr *parent =
nir_instr_as_deref(deref->parent.ssa->parent_instr);
nir_foreach_phi_src(phi_src, phi) {
if (phi_src->pred == pred) {
found = true;
- assert(phi_src->src.is_ssa);
nir_ssa_def_rewrite_uses(&phi->dest.ssa, phi_src->src.ssa);
break;
}
case nir_intrinsic_interp_deref_at_centroid:
case nir_intrinsic_interp_deref_at_sample:
b->cursor = nir_before_instr(instr);
- assert(intrin->src[0].is_ssa);
lowered = nir_load_deref(b, nir_src_as_deref(intrin->src[0]));
break;
static nir_ssa_def *
lower_vote_eq_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin)
{
- assert(intrin->src[0].is_ssa);
nir_ssa_def *value = intrin->src[0].ssa;
nir_ssa_def *result = NULL;
static nir_ssa_def *
lower_vote_eq(nir_builder *b, nir_intrinsic_instr *intrin)
{
- assert(intrin->src[0].is_ssa);
nir_ssa_def *value = intrin->src[0].ssa;
/* We have to implicitly lower to scalar */
bool is_shuffle = false;
switch (intrin->intrinsic) {
case nir_intrinsic_shuffle_xor:
- assert(intrin->src[1].is_ssa);
index = nir_ixor(b, index, intrin->src[1].ssa);
is_shuffle = true;
break;
case nir_intrinsic_shuffle_up:
- assert(intrin->src[1].is_ssa);
index = nir_isub(b, index, intrin->src[1].ssa);
is_shuffle = true;
break;
case nir_intrinsic_shuffle_down:
- assert(intrin->src[1].is_ssa);
index = nir_iadd(b, index, intrin->src[1].ssa);
is_shuffle = true;
break;
case nir_intrinsic_quad_broadcast:
- assert(intrin->src[1].is_ssa);
index = nir_ior(b, nir_iand_imm(b, index, ~0x3),
intrin->src[1].ssa);
break;
static nir_ssa_def *
lower_shuffle(nir_builder *b, nir_intrinsic_instr *intrin)
{
- assert(intrin->src[0].is_ssa);
- assert(intrin->src[1].is_ssa);
nir_ssa_def *val = intrin->src[0].ssa;
nir_ssa_def *id = intrin->src[1].ssa;
case nir_intrinsic_ballot_bit_count_reduce:
case nir_intrinsic_ballot_find_lsb:
case nir_intrinsic_ballot_find_msb: {
- assert(intrin->src[0].is_ssa);
nir_ssa_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
options);
switch (intrin->intrinsic) {
case nir_intrinsic_ballot_bitfield_extract: {
- assert(intrin->src[1].is_ssa);
nir_ssa_def *idx = intrin->src[1].ssa;
if (int_val->num_components > 1) {
/* idx will be truncated by nir_ushr, so we just need to select
case nir_intrinsic_ballot_bit_count_exclusive:
case nir_intrinsic_ballot_bit_count_inclusive: {
- assert(intrin->src[0].is_ssa);
nir_ssa_def *int_val = ballot_type_to_uint(b, intrin->src[0].ssa,
options);
if (options->lower_ballot_bit_count_to_mbcnt_amd) {
static nir_ssa_def *
sanitize_32bit_sysval(nir_builder *b, nir_intrinsic_instr *intrin)
{
- assert(intrin->dest.is_ssa);
const unsigned bit_size = intrin->dest.ssa.bit_size;
if (bit_size == 32)
return NULL;
if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
return NULL;
- assert(intrin->dest.is_ssa);
const unsigned bit_size = intrin->dest.ssa.bit_size;
switch (intrin->intrinsic) {
int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
assert(coord_index >= 0);
- assert(tex->src[coord_index].src.is_ssa);
nir_ssa_def *coord = tex->src[coord_index].src.ssa;
b->cursor = nir_before_instr(&tex->instr);
sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
const nir_lower_tex_options *options)
{
- assert(tex->dest.is_ssa);
assert(nir_tex_instr_dest_size(tex) == 4);
assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
assert(tex->op == nir_texop_tex);
{
assert(tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE);
assert(tex->op == nir_texop_txd);
- assert(tex->dest.is_ssa);
/* Use textureSize() to get the width and height of LOD 0 */
nir_ssa_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));
assert(tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
assert(tex->op == nir_texop_txd);
- assert(tex->dest.is_ssa);
/* Use textureSize() to get the width and height of LOD 0 */
unsigned component_mask;
static void
swizzle_tg4_broadcom(nir_builder *b, nir_tex_instr *tex)
{
- assert(tex->dest.is_ssa);
-
b->cursor = nir_after_instr(&tex->instr);
assert(nir_tex_instr_dest_size(tex) == 4);
static void
swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
{
- assert(tex->dest.is_ssa);
-
b->cursor = nir_after_instr(&tex->instr);
nir_ssa_def *swizzled;
static void
linearize_srgb_result(nir_builder *b, nir_tex_instr *tex)
{
- assert(tex->dest.is_ssa);
assert(nir_tex_instr_dest_size(tex) == 4);
assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
b->cursor = nir_after_instr(&tex->instr);
- assert(tex->dest.is_ssa);
assert(tex->dest.ssa.num_components == 3);
nir_ssa_def *size = &tex->dest.ssa;
size = nir_vec3(b, nir_channel(b, size, 1),
if ((*index) == 0)
continue;
- assert(tex->src[i].src.is_ssa);
nir_ssa_def *sum = nir_iadd_imm(b, tex->src[i].src.ssa, *index);
nir_instr_rewrite_src(&tex->instr, &tex->src[i].src,
nir_src_for_ssa(sum));
}
nir_ssa_def *color = nir_bcsel(b, face, front, back);
- assert(intr->dest.is_ssa);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, color);
return true;
for (unsigned i = intrin->num_components; i < NIR_MAX_VEC_COMPONENTS; i++)
mov->src[0].swizzle[i] = 0;
- assert(intrin->dest.is_ssa);
-
mov->dest.write_mask = (1 << intrin->num_components) - 1;
nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
intrin->num_components,
/* Should have been removed before rename_variables(). */
assert(node != UNDEF_NODE);
- assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
if (!node->lower_to_ssa)
if (!nir_deref_mode_is_in_set(deref, modes))
break;
- assert(intrin->dest.is_ssa);
intrin->num_components = 4;
intrin->dest.ssa.num_components = 4;
if (!nir_deref_mode_is_in_set(deref, modes))
break;
- assert(intrin->src[1].is_ssa);
nir_ssa_def *data = intrin->src[1].ssa;
b->cursor = nir_before_instr(&intrin->instr);
unsigned start_idx)
{
assert(start_idx < nir_op_infos[vec->op].num_inputs);
- assert(vec->src[start_idx].src.is_ssa);
nir_ssa_def *src = vec->src[start_idx].src.ssa;
unsigned num_components = nir_dest_num_components(vec->dest.dest);
unsigned start_idx, struct data *data)
{
assert(start_idx < nir_op_infos[vec->op].num_inputs);
- assert(vec->src[start_idx].src.is_ssa);
/* If we are going to do a reswizzle, then the vecN operation must be the
* only use of the source value.
if (vec->op == nir_op_mov || !nir_op_is_vec(vec->op))
return false;
- assert(vec->dest.dest.is_ssa);
unsigned num_components = nir_dest_num_components(vec->dest.dest);
/* Special case: if all sources are the same, just swizzle instead to avoid
unsigned swiz[NIR_MAX_VEC_COMPONENTS] = {0};
for (unsigned i = 0; i < num_components; ++i) {
- assert(vec->src[i].src.is_ssa);
swiz[i] = vec->src[i].swizzle[0];
}
{
nir_ssa_def *wpos = &intr->dest.ssa;
- assert(intr->dest.is_ssa);
-
b->cursor = nir_after_instr(&intr->instr);
nir_ssa_def *spos = nir_load_sample_pos_or_center(b);
nir_builder *b = &state->b;
nir_ssa_def *wpostrans, *wpos_temp, *wpos_temp_y, *wpos_input;
- assert(intr->dest.is_ssa);
wpos_input = &intr->dest.ssa;
b->cursor = nir_after_instr(&intr->instr);
nir_intrinsic_instr *store = combo->stores[i];
if (combo->write_mask & (1 << i)) {
assert(store);
- assert(store->src[1].is_ssa);
/* If store->num_components == 1 then we are in the deref-of-vec case
* and store->src[1] is a scalar. Otherwise, we're a regular vector
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
nir_intrinsic_op op = intrin->intrinsic;
- assert(if_stmt->condition.is_ssa);
nir_ssa_def *cond = if_stmt->condition.ssa;
b->cursor = nir_before_cf_node(prev_node);
case nir_intrinsic_discard_if:
case nir_intrinsic_demote_if:
case nir_intrinsic_terminate_if:
- assert(intrin->src[0].is_ssa);
cond = nir_iand(b, cond, intrin->src[0].ssa);
break;
default:
if (phi_src->pred != last_block)
continue;
- assert(phi_src->src.is_ssa);
def = phi_src->src.ssa;
}
assert(def);
- assert(phi->dest.is_ssa);
nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);
nir_instr_remove(&phi->instr);
}
alu->op != nir_op_irem)
return false;
- assert(alu->dest.dest.is_ssa);
assert(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa);
if (alu->dest.dest.ssa.bit_size < *min_bit_size)
nalu->dest.write_mask = alu->dest.write_mask;
for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
- assert(alu->src[i].src.is_ssa);
nalu->src[i].src = nir_src_for_ssa(src_defs[i]);
nalu->src[i].negate = alu->src[i].negate;
nalu->src[i].abs = alu->src[i].abs;
bool progress = false;
/* Evaluate any uses of the if condition inside the if branches */
- assert(nif->condition.is_ssa);
nir_foreach_use_including_if_safe(use_src, nif->condition.ssa) {
if (!(use_src->is_if && use_src->parent_if == nif))
progress |= evaluate_condition_use(b, nif, use_src);
if (nir_ssa_def_used_by_if(&shuffle->dest.ssa))
return false;
- assert(shuffle->src[0].is_ssa);
- assert(shuffle->src[1].is_ssa);
-
*data = shuffle->src[0].ssa;
*index = shuffle->src[1].ssa;
assert(exec_list_length(&phi->srcs) == 2);
nir_foreach_phi_src(src, phi) {
assert(src->pred == then_block || src->pred == else_block);
- assert(src->src.is_ssa);
unsigned idx = src->pred == then_block ? 1 : 2;
nir_src_copy(&sel->src[idx].src, &src->src, &sel->instr);
{
nir_op op = INVALID_OP;
- assert(phi->dest.is_ssa);
-
/* If the phi has already been narrowed, nothing more to do: */
if (phi->dest.ssa.bit_size != 32)
return false;
/* Push the conversion into the new phi sources: */
nir_foreach_phi_src (src, phi) {
- assert(src->src.is_ssa);
-
/* insert new conversion instr in block of original phi src: */
b->cursor = nir_after_instr_and_phis(src->src.ssa->parent_instr);
nir_ssa_def *old_src = src->src.ssa;
*bit_size = 0;
nir_foreach_phi_src (src, phi) {
- assert(src->src.is_ssa);
-
nir_instr *instr = src->src.ssa->parent_instr;
if (instr->type == nir_instr_type_load_const) {
has_load_const = true;
* sequence to make the rest of the transformation possible:
*/
nir_foreach_phi_src (src, phi) {
- assert(src->src.is_ssa);
-
nir_instr *instr = src->src.ssa->parent_instr;
if (instr->type != nir_instr_type_load_const)
continue;
static bool
try_move_widening_src(nir_builder *b, nir_phi_instr *phi)
{
- assert(phi->dest.is_ssa);
-
/* If the phi has already been narrowed, nothing more to do: */
if (phi->dest.ssa.bit_size != 32)
return false;
/* Remove the widening conversions from the phi sources: */
nir_foreach_phi_src (src, phi) {
- assert(src->src.is_ssa);
-
nir_instr *instr = src->src.ssa->parent_instr;
nir_ssa_def *new_src;
bool srcs_same = true;
nir_foreach_phi_src(src, phi) {
- assert(src->src.is_ssa);
-
/* For phi nodes at the beginning of loops, we may encounter some
* sources from backedges that point back to the destination of the
* same phi, i.e. something like:
def = nir_mov_alu(b, mov->src[0], def->num_components);
}
- assert(phi->dest.is_ssa);
nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);
nir_instr_remove(&phi->instr);
if (!nir_op_is_selection(instr->op))
return false;
- assert(instr->dest.dest.is_ssa);
-
for (int i = 1; i <= 2; i++) {
if (!instr->src[i].src.is_ssa)
continue;
if (!nir_op_is_vec(alu->op))
return false;
- assert(alu->dest.dest.is_ssa);
-
for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
if (!alu->src[i].src.is_ssa ||
alu->src[i].src.ssa->parent_instr->type != nir_instr_type_ssa_undef)
return;
}
- assert(instr->parent.is_ssa);
nir_deref_instr *parent =
nir_instr_as_deref(instr->parent.ssa->parent_instr);
case nir_intrinsic_quad_swap_vertical:
case nir_intrinsic_quad_swap_diagonal:
if (src_idx == 0) {
- assert(use_intrin->dest.is_ssa);
bits_used |= ssa_def_bits_used(&use_intrin->dest.ssa, recur);
} else {
if (use_intrin->intrinsic == nir_intrinsic_quad_broadcast) {
nir_schedule_load_reg_deps(nir_intrinsic_instr *load,
nir_deps_state *state)
{
- assert(load->src[0].is_ssa);
nir_ssa_def *reg = load->src[0].ssa;
(void)nir_reg_get_decl(reg);
nir_schedule_store_reg_deps(nir_intrinsic_instr *store,
nir_deps_state *state)
{
- assert(store->src[1].is_ssa);
nir_ssa_def *reg = store->src[1].ssa;
(void)nir_reg_get_decl(reg);
nir_schedule_regs_freed_state *state)
{
assert(nir_is_load_reg(load));
- assert(load->src[0].is_ssa);
if (load->intrinsic == nir_intrinsic_load_reg_indirect)
nir_schedule_regs_freed_src_cb(&load->src[1], state);
state->regs_freed += nir_schedule_reg_pressure(reg);
}
- assert(load->dest.is_ssa);
nir_schedule_regs_freed_def_cb(&load->dest.ssa, state);
}
nir_schedule_scoreboard *scoreboard)
{
assert(nir_is_load_reg(load));
- assert(load->src[0].is_ssa);
nir_ssa_def *reg = load->src[0].ssa;
if (load->intrinsic == nir_intrinsic_load_reg_indirect)
nir_schedule_mark_use(scoreboard, reg, &load->instr,
nir_schedule_reg_pressure(reg));
- assert(load->dest.is_ssa);
nir_schedule_mark_def_scheduled(&load->dest.ssa, scoreboard);
}
{
uint8_t new_swizzle[NIR_MAX_VEC_COMPONENTS];
- /* Searching only works on SSA values because, if it's not SSA, we can't
- * know if the value changed between one instance of that value in the
- * expression and another. Also, the replace operation will place reads of
- * that value right before the last instruction in the expression we're
- * replacing so those reads will happen after the original reads and may
- * not be valid if they're register reads.
- */
- assert(instr->src[src].src.is_ssa);
-
/* If the source is an explicitly sized source, then we need to reset
* both the number of components and the swizzle.
*/
if (!nir_op_matches_search_op(instr->op, expr->opcode))
return false;
- assert(instr->dest.dest.is_ssa);
-
if (expr->value.bit_size > 0 &&
instr->dest.dest.ssa.bit_size != expr->value.bit_size)
return false;
for (unsigned i = 0; i < instr->dest.dest.ssa.num_components; ++i)
swizzle[i] = i;
- assert(instr->dest.dest.is_ssa);
-
struct match_state state;
state.inexact_match = false;
state.has_exact_alu = false;
header.alu.op = alu->op;
header.alu.packed_src_ssa_16bit = is_alu_src_ssa_16bit(ctx, alu);
- assert(alu->dest.dest.is_ssa);
-
if (header.alu.packed_src_ssa_16bit) {
/* For packed srcs of SSA ALUs, this field stores the swizzles. */
header.alu.writemask_or_two_swizzles = alu->src[0].swizzle[0];
if (header.alu.packed_src_ssa_16bit) {
for (unsigned i = 0; i < num_srcs; i++) {
- assert(alu->src[i].src.is_ssa);
unsigned idx = write_lookup_object(ctx, alu->src[i].src.ssa);
assert(idx < (1 << 16));
blob_write_uint16(ctx->blob, idx);
} else if (deref->deref_type == nir_deref_type_cast) {
deref->modes = decode_deref_modes(header.deref.modes);
} else {
- assert(deref->parent.is_ssa);
deref->modes = nir_instr_as_deref(deref->parent.ssa->parent_instr)->modes;
}
write_dest(ctx, &phi->dest, header, phi->instr.type);
nir_foreach_phi_src(src, phi) {
- assert(src->src.is_ssa);
size_t blob_offset = blob_reserve_uint32(ctx->blob);
ASSERTED size_t blob_offset2 = blob_reserve_uint32(ctx->blob);
assert(blob_offset + sizeof(uint32_t) == blob_offset2);
if (load == NULL || load->intrinsic != nir_intrinsic_load_deref)
return false;
- assert(load->src[0].is_ssa);
-
return load->src[0].ssa == deref_src.ssa;
}
{
nir_component_mask_t comps = nir_intrinsic_write_mask(store);
- assert(store->src[1].is_ssa);
nir_instr *src_instr = store->src[1].ssa->parent_instr;
if (src_instr->type != nir_instr_type_alu)
return comps;
if (nir_src_is_const(src))
return nir_src_comp_as_int(src, comp);
- assert(src.is_ssa);
nir_ssa_scalar s = { src.ssa, comp };
assert(nir_op_is_vec(nir_ssa_scalar_alu_op(s)));
return nir_ssa_scalar_as_int(nir_ssa_scalar_chase_alu_src(s, comp));
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
int32_t direction, int32_t shift)
{
- assert(alu_instr->src[1].src.is_ssa);
nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;
/* Only propagate if the shift is a const value so we can check value range
/* 'offset_src_idx' holds the index of the source that represent the offset. */
new_intrinsic = nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);
- assert(intrinsic->src[offset_src_idx].is_ssa);
nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;
/* Since we don't have value range checking, we first try to propagate
*target_src = nir_src_for_ssa(offset);
if (has_dest) {
- assert(intrinsic->dest.is_ssa);
nir_ssa_def *dest = &intrinsic->dest.ssa;
nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
dest->num_components, dest->bit_size);
int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
/* First source should be the sampling coordinate. */
nir_tex_src *coord = &tex->src[idx];
- assert(coord->src.is_ssa);
if (ir3_nir_coord_offset(coord->src.ssa) >= 0) {
tex->op = nir_texop_tex_prefetch;
continue;
}
- assert(intr->dest.is_ssa);
-
move_instruction_to_start_block(state, instr);
progress = true;
if (var->data.location != VARYING_SLOT_POS)
continue;
- assert(intrin->src[1].is_ssa);
nir_ssa_def *orig_src = intrin->src[1].ssa;
b.cursor = nir_before_instr(instr);
break;
}
if (result[0]) {
- assert(instr->dest.is_ssa);
assign_ssa_dest(bld_base, &instr->dest.ssa, result);
}
}
params.resource = resource;
bld_base->tex_size(bld_base, &params);
- assert(instr->dest.is_ssa);
assign_ssa_dest(bld_base, &instr->dest.ssa,
&sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}
}
}
- assert(instr->dest.is_ssa);
assign_ssa_dest(bld_base, &instr->dest.ssa, texel);
}
if (tex_src < 0)
return;
- assert(instr->src[tex_src].src.is_ssa);
-
nir_ssa_def *def = instr->src[tex_src].src.ssa;
for (int i = 0; i < def->num_components; i++) {
s->channels[s->i++] = nir_get_ssa_scalar(def, i);
b.cursor = nir_before_instr(instr);
- assert(load->src[0].is_ssa);
-
if (load->src[0].ssa == temp_ubo_name) {
nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
nir_instr_rewrite_src(instr, &load->src[0],
pos = nir_vector_insert_imm(b, pos, depth, 2);
- assert(intr->dest.is_ssa);
nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, pos,
pos->parent_instr);
}
} break;
case nir_intrinsic_load_front_face:
case nir_intrinsic_load_frag_coord:
- assert(intr->dest.is_ssa); /* TODO - lower phis could cause this */
break;
case nir_intrinsic_load_input:
case nir_intrinsic_load_instance_id:
struct ir2_instr *instr;
if (nir_src_as_const_value(src)) {
- assert(src.is_ssa);
instr = instr_create_alu(ctx, nir_op_mov, src.ssa->num_components);
instr->src[0] = make_src(ctx, src);
return ir2_src(instr->idx, 0, IR2_SRC_SSA);
b.cursor = nir_before_instr(instr);
- assert(load->src[0].is_ssa);
-
if (load->src[0].ssa == temp_ubo_name) {
nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
nir_instr_rewrite_src(instr, &load->src[0],
{
gpir_node *child = gpir_node_find(block, &instr->src[0], 0);
assert(child);
- assert(instr->src[0].is_ssa);
- assert(instr->src[1].is_ssa);
register_node_reg(block, child, instr->src[1].ssa->index);
return true;
}
nir_intrinsic_instr *new_intrin =
nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intrin->instr));
- assert(new_intrin->dest.is_ssa);
-
unsigned num_srcs = nir_intrinsic_infos[new_intrin->intrinsic].num_srcs;
for (unsigned i = 0; i < num_srcs; i++) {
- assert(new_intrin->src[i].is_ssa);
}
nir_builder_instr_insert(b, &new_intrin->instr);
/* Add parent node as a the folded dest ssa node to keep
* the dependency chain */
nir_alu_src *ns = &instr->src[0];
- assert(ns->src.is_ssa);
ppir_node *parent = block->comp->var_nodes[ns->src.ssa->index];
assert(parent);
block->comp->var_nodes[dst->ssa.index] = parent;
case nir_intrinsic_load_frag_coord:
case nir_intrinsic_load_point_coord:
case nir_intrinsic_load_front_face: {
- assert(instr->dest.is_ssa);
mask = u_bit_consecutive(0, instr->num_components);
ppir_op op;
for (nir_deref_instr *d = deref; d->deref_type != nir_deref_type_var;
d = nir_deref_instr_parent(d)) {
assert(d->deref_type == nir_deref_type_array);
- assert(d->arr.index.is_ssa);
unsigned array_stride = 1;
if (glsl_type_is_array(d->type))
auto intr = nir_instr_as_intrinsic(instr);
int old_components = nir_dest_num_components(intr->dest);
assert(old_components <= 2);
- assert(intr->dest.is_ssa);
intr->dest.ssa.num_components *= 2;
intr->dest.ssa.bit_size = 32;
intr->num_components *= 2;
static uint32_t
get_dest_usee_mask(nir_intrinsic_instr *op)
{
- assert(op->dest.is_ssa);
-
MaskQuery mq = {0};
mq.full_mask = (1 << nir_dest_num_components(op->dest)) - 1;
b->cursor = nir_before_instr(&intr->instr);
- assert(intr->dest.is_ssa);
-
nir_intrinsic_instr *new_intr = nir_intrinsic_instr_create(b->shader, intr->intrinsic);
nir_ssa_dest_init(&new_intr->instr, &new_intr->dest, num_comps,
intr->dest.ssa.bit_size);
void
ValueFactory::inject_value(const nir_dest& dest, int chan, PVirtualValue value)
{
- assert(dest.is_ssa);
RegisterKey key(dest.ssa.index, chan, vp_ssa);
sfn_log << SfnLog::reg << "Inject value with key " << key << "\n";
assert(m_values.find(key) == m_values.end());
PRegister
ValueFactory::dest(const nir_dest& dst, int chan, Pin pin_channel, uint8_t chan_mask)
{
- assert(dst.is_ssa);
return dest(dst.ssa, chan, pin_channel, chan_mask);
}
{
if (pin != pin_group && pin != pin_chgr)
pin = pin_chan;
- assert(dst.is_ssa);
PRegister x = dest(dst, 0, pin);
PRegister y = dest(dst, 1, pin);
PRegister z = dest(dst, 2, pin);
{
sfn_log << SfnLog::reg << "search (ref) " << (void *)&src << "\n";
- assert(src.is_ssa);
sfn_log << SfnLog::reg << "search ssa " << src.ssa->index << " c:" << chan
<< " got ";
auto val = ssa_src(*src.ssa, chan);
nir_ssa_def *coord = NULL, *sample_index = NULL;
for (int i = 0; i < txf_ms->num_srcs; i++) {
- assert(txf_ms->src[i].src.is_ssa);
-
switch (txf_ms->src[i].src_type) {
case nir_tex_src_coord:
coord = txf_ms->src[i].src.ssa;
{
struct hash_entry *entry;
- assert(src.is_ssa);
nir_intrinsic_instr *load = nir_load_reg_for_def(src.ssa);
if (load == NULL) {
entry = _mesa_hash_table_search(c->def_ht, src.ssa);
/* If packing from a vec4 op (as expected), identify it so that we can
* peek back at what generated its sources.
*/
- assert(instr->src[0].src.is_ssa);
if (instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
nir_op_vec4) {
static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
struct qreg *src)
{
- assert(instr->src[0].src.is_ssa);
if (nir_load_reg_for_def(instr->src[0].src.ssa))
goto out;
if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
SpvId type = get_dest_type(ctx, &intr->dest, nir_type_uint);
/* this will always be stored with the ssa index of the parent instr */
- assert(intr->src[0].is_ssa);
nir_ssa_def *ssa = intr->src[0].ssa;
assert(ssa->parent_instr->type == nir_instr_type_alu);
nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
- assert(alu->src[0].src.is_ssa);
unsigned index = alu->src[0].src.ssa->index;
assert(index < ctx->num_defs);
assert(ctx->resident_defs[index] != 0);
case nir_deref_type_var:
return new;
case nir_deref_type_array:
- assert(old->arr.index.is_ssa);
return nir_build_deref_array(b, replicate_derefs(b, parent, new), old->arr.index.ssa);
case nir_deref_type_struct:
return nir_build_deref_struct(b, replicate_derefs(b, parent, new), old->strct.index);
gl_varying_slot location = var->data.location;
unsigned location_frac = var->data.location_frac;
assert(state->varyings[location][location_frac]);
- assert(intrin->src[1].is_ssa);
nir_ssa_def *pos_counter = nir_load_var(b, state->pos_counter);
nir_ssa_def *index = lower_pv_mode_gs_ring_index(b, state, pos_counter);
nir_deref_instr *varying_deref = nir_build_deref_var(b, state->varyings[location][location_frac]);
unsigned location_frac = var->data.location_frac;
if (location != VARYING_SLOT_POS) {
assert(state->varyings[location]);
- assert(intrin->src[1].is_ssa);
nir_store_var(b, state->varyings[location][location_frac],
intrin->src[1].ssa,
nir_intrinsic_write_mask(intrin));
if (nir_src_is_const(lod_src) && nir_src_as_const_value(lod_src)->u32 == 0)
return false;
- assert(lod_src.is_ssa);
nir_ssa_def *lod = lod_src.ssa;
int offset_idx = nir_tex_instr_src_index(txf, nir_tex_src_texture_offset);
switch (intrins->intrinsic) {
case nir_intrinsic_image_deref_format:
case nir_intrinsic_image_deref_order: {
- assert(intrins->src[0].is_ssa);
-
int32_t offset;
nir_deref_instr *deref;
nir_ssa_def *val;
/* Scalarize the load_global_constant. */
b->cursor = nir_before_instr(&intr->instr);
- assert(intr->dest.is_ssa);
assert(intr->num_components > 1);
nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
const fs_reg &result)
{
- assert(instr->src[0].src.is_ssa);
if (!instr->src[0].src.ssa->parent_instr)
return false;
fs_reg temp = result;
bool need_extra_copy = false;
- assert(instr->dest.dest.is_ssa);
nir_intrinsic_instr *store_reg =
nir_store_reg_for_def(&instr->dest.dest.ssa);
if (store_reg != NULL) {
nir_ssa_def *dest_reg = store_reg->src[1].ssa;
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
- assert(instr->src[i].src.is_ssa);
nir_intrinsic_instr *load_reg =
nir_load_reg_for_def(instr->src[i].src.ssa);
if (load_reg == NULL)
bool
fs_visitor::get_nir_src_bindless(const nir_src &src)
{
- assert(src.is_ssa);
-
return nir_ssa_bind_infos[src.ssa->index].bindless;
}
unsigned
fs_visitor::get_nir_src_block(const nir_src &src)
{
- assert(src.is_ssa);
-
return nir_ssa_bind_infos[src.ssa->index].valid ?
nir_ssa_bind_infos[src.ssa->index].block :
UINT32_MAX;
static bool
is_resource_src(nir_src src)
{
- assert(src.is_ssa);
return src.ssa->parent_instr->type == nir_instr_type_intrinsic &&
nir_instr_as_intrinsic(src.ssa->parent_instr)->intrinsic == nir_intrinsic_resource_intel;
}
fs_reg
fs_visitor::get_nir_src(const nir_src &src)
{
- assert(src.is_ssa);
nir_intrinsic_instr *load_reg = nir_load_reg_for_def(src.ssa);
fs_reg reg;
fs_reg
fs_visitor::get_nir_dest(const nir_dest &dest)
{
- assert(dest.is_ssa);
nir_intrinsic_instr *store_reg = nir_store_reg_for_def(&dest.ssa);
if (!store_reg) {
const brw_reg_type reg_type =
nir_component_mask_t
fs_visitor::get_nir_write_mask(const nir_alu_dest &dest)
{
- assert(dest.dest.is_ssa);
assert(dest.write_mask == nir_component_mask(dest.dest.ssa.num_components));
nir_intrinsic_instr *store_reg = nir_store_reg_for_def(&dest.dest.ssa);
const unsigned dest_size = nir_tex_instr_dest_size(instr);
if (devinfo->ver >= 9 &&
instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
- assert(instr->dest.is_ssa);
unsigned write_mask = nir_ssa_def_components_read(&instr->dest.ssa);
assert(write_mask != 0); /* dead code should have been eliminated */
if (instr->is_sparse) {
nir_ssa_def *src = NULL, *dest = NULL;
if (write) {
- assert(intr->src[0].is_ssa);
assert(intr->num_components == intr->src[0].ssa->num_components);
} else {
- assert(intr->dest.is_ssa);
assert(intr->num_components == intr->dest.ssa.num_components);
}
* 32-bit and so the bit size of the instruction is given by the
* source.
*/
- assert(alu->src[0].src.is_ssa);
return alu->src[0].src.ssa->bit_size >= 32 ? 0 : 32;
default:
break;
}
- assert(alu->dest.dest.is_ssa);
if (alu->dest.dest.ssa.bit_size >= 32)
return 0;
nir_ssa_def *base_addr, unsigned off)
{
assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
- assert(load_uniform->dest.is_ssa);
- assert(load_uniform->src[0].is_ssa);
unsigned bit_size = load_uniform->dest.ssa.bit_size;
assert(bit_size >= 8 && bit_size % 8 == 0);
return false;
nir_alu_instr *alu = nir_instr_as_alu(instr);
- assert(alu->dest.dest.is_ssa);
if (!nir_op_infos[alu->op].is_conversion)
return false;
b->cursor = is_store ? nir_before_instr(instr) : nir_after_instr(instr);
if (is_store) {
- assert(intrin->src[0].is_ssa);
nir_ssa_def *bit_field = intrin->src[0].ssa;
nir_ssa_def *fp16_x =
nir_i2f16(b,
case nir_op_mov:
case nir_op_fneg:
case nir_op_fabs:
- assert(use_alu->dest.dest.is_ssa);
if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))
return false;
break;
if (add->op != nir_op_fadd)
return false;
- assert(add->dest.dest.is_ssa);
if (add->exact)
return false;
}
nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src], ffma);
- assert(add->dest.dest.is_ssa);
-
nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
add->dest.dest.ssa.num_components, bit_size);
nir_ssa_def_rewrite_uses(&add->dest.dest.ssa, &ffma->dest.dest.ssa);
resize_deref(nir_builder *b, nir_deref_instr *deref,
unsigned num_components, unsigned bit_size)
{
- assert(deref->dest.is_ssa);
if (deref->dest.ssa.num_components == num_components &&
deref->dest.ssa.bit_size == bit_size)
return false;
(deref->deref_type == nir_deref_type_array ||
deref->deref_type == nir_deref_type_ptr_as_array)) {
b->cursor = nir_before_instr(&deref->instr);
- assert(deref->arr.index.is_ssa);
nir_ssa_def *idx;
if (nir_src_is_const(deref->arr.index)) {
idx = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index), bit_size);
b.cursor = nir_before_instr(&intrin->instr);
nir_ssa_def *global_arg_addr =
load_trampoline_param(&b, rt_disp_globals_addr, 1, 64);
- assert(intrin->dest.is_ssa);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
global_arg_addr);
nir_instr_remove(instr);
dst_reg
vec4_visitor::get_nir_dest(const nir_dest &dest)
{
- assert(dest.is_ssa);
nir_intrinsic_instr *store_reg = nir_store_reg_for_def(&dest.ssa);
if (!store_reg) {
dst_reg dst =
vec4_visitor::get_nir_src(const nir_src &src, enum brw_reg_type type,
unsigned num_components)
{
- assert(src.is_ssa);
nir_intrinsic_instr *load_reg = nir_load_reg_for_def(src.ssa);
dst_reg reg;
if (new_derefs[location] == NULL)
return false;
- assert(deref->dest.is_ssa);
- assert(new_derefs[location]->dest.is_ssa);
-
nir_instr_remove(&deref->instr);
nir_ssa_def_rewrite_uses(&deref->dest.ssa, &new_derefs[location]->dest.ssa);
if (deref->deref_type != nir_deref_type_var) {
assert(deref->deref_type == nir_deref_type_array);
assert(nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var);
- assert(deref->arr.index.is_ssa);
array_index = deref->arr.index.ssa;
} else {
array_index = nir_imm_int(b, 0);
{
if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
b->cursor = nir_before_instr(&intrin->instr);
- assert(intrin->src[0].is_ssa);
*set = nir_intrinsic_desc_set(intrin);
*binding = nir_intrinsic_binding(intrin);
return build_res_index(b, *set, *binding, intrin->src[0].ssa, state);
b->cursor = nir_before_instr(&intrin->instr);
- assert(intrin->src[1].is_ssa);
return build_res_reindex(b, index, intrin->src[1].ssa);
}
}
/* Acceleration structure descriptors are always uint64_t */
nir_ssa_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state);
- assert(load_desc->dest.is_ssa);
assert(load_desc->dest.ssa.bit_size == 64);
assert(load_desc->dest.ssa.num_components == 1);
nir_ssa_def_rewrite_uses(&load_desc->dest.ssa, desc);
{
b->cursor = nir_before_instr(&intrin->instr);
- assert(intrin->src[0].is_ssa);
nir_ssa_def *index =
build_res_index(b, nir_intrinsic_desc_set(intrin),
nir_intrinsic_binding(intrin),
intrin->src[0].ssa,
state);
- assert(intrin->dest.is_ssa);
assert(intrin->dest.ssa.bit_size == index->bit_size);
assert(intrin->dest.ssa.num_components == index->num_components);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, index);
build_res_reindex(b, intrin->src[0].ssa,
intrin->src[1].ssa);
- assert(intrin->dest.is_ssa);
assert(intrin->dest.ssa.bit_size == index->bit_size);
assert(intrin->dest.ssa.num_components == index->num_components);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, index);
const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
nir_address_format addr_format = addr_format_for_desc_type(desc_type, state);
- assert(intrin->src[0].is_ssa);
nir_ssa_def *desc =
build_buffer_addr_for_res_index(b,
desc_type, intrin->src[0].ssa,
addr_format, state);
- assert(intrin->dest.is_ssa);
assert(intrin->dest.ssa.bit_size == desc->bit_size);
assert(intrin->dest.ssa.num_components == desc->num_components);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
const nir_address_format addr_format =
nir_address_format_64bit_bounded_global;
- assert(intrin->src[0].is_ssa);
nir_ssa_def *desc_addr =
nir_build_addr_iadd_imm(
b,
if (deref->deref_type != nir_deref_type_var) {
assert(deref->deref_type == nir_deref_type_array);
assert(nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var);
- assert(deref->arr.index.is_ssa);
array_index = deref->arr.index.ssa;
} else {
array_index = nir_imm_int(b, 0);
load->intrinsic != nir_intrinsic_load_view_index)
continue;
- assert(load->dest.is_ssa);
-
nir_ssa_def *value;
if (load->intrinsic == nir_intrinsic_load_instance_id) {
value = build_instance_id(&state);
if (deref->deref_type != nir_deref_type_var) {
assert(deref->deref_type == nir_deref_type_array);
assert(nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var);
- assert(deref->arr.index.is_ssa);
array_index = deref->arr.index.ssa;
} else {
array_index = nir_imm_int(b, 0);
{
if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
b->cursor = nir_before_instr(&intrin->instr);
- assert(intrin->src[0].is_ssa);
*set = nir_intrinsic_desc_set(intrin);
*binding = nir_intrinsic_binding(intrin);
return build_res_index(b, *set, *binding, intrin->src[0].ssa,
b->cursor = nir_before_instr(&intrin->instr);
- assert(intrin->src[1].is_ssa);
return build_res_reindex(b, index, intrin->src[1].ssa, addr_format);
}
}
/* Acceleration structure descriptors are always uint64_t */
nir_ssa_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state);
- assert(load_desc->dest.is_ssa);
assert(load_desc->dest.ssa.bit_size == 64);
assert(load_desc->dest.ssa.num_components == 1);
nir_ssa_def_rewrite_uses(&load_desc->dest.ssa, desc);
nir_address_format addr_format =
addr_format_for_desc_type(nir_intrinsic_desc_type(intrin), state);
- assert(intrin->src[0].is_ssa);
nir_ssa_def *index =
build_res_index(b, nir_intrinsic_desc_set(intrin),
nir_intrinsic_binding(intrin),
intrin->src[0].ssa,
addr_format, state);
- assert(intrin->dest.is_ssa);
assert(intrin->dest.ssa.bit_size == index->bit_size);
assert(intrin->dest.ssa.num_components == index->num_components);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, index);
intrin->src[1].ssa,
addr_format);
- assert(intrin->dest.is_ssa);
assert(intrin->dest.ssa.bit_size == index->bit_size);
assert(intrin->dest.ssa.num_components == index->num_components);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, index);
const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
nir_address_format addr_format = addr_format_for_desc_type(desc_type, state);
- assert(intrin->src[0].is_ssa);
nir_ssa_def *desc =
build_buffer_addr_for_res_index(b, desc_type, intrin->src[0].ssa,
addr_format, state);
- assert(intrin->dest.is_ssa);
assert(intrin->dest.ssa.bit_size == desc->bit_size);
assert(intrin->dest.ssa.num_components == desc->num_components);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, desc);
nir_address_format addr_format =
addr_format_for_desc_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, state);
- assert(intrin->src[0].is_ssa);
nir_ssa_def *desc =
build_buffer_addr_for_res_index(b, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
intrin->src[0].ssa, addr_format, state);
load->intrinsic != nir_intrinsic_load_view_index)
continue;
- assert(load->dest.is_ssa);
-
nir_ssa_def *value;
if (load->intrinsic == nir_intrinsic_load_instance_id) {
value = build_instance_id(&state);
switch (old_tex->src[i].src_type) {
case nir_tex_src_coord:
if (plane_format->has_chroma && conversion->state.chroma_reconstruction) {
- assert(old_tex->src[i].src.is_ssa);
tex->src[i].src =
nir_src_for_ssa(implicit_downsampled_coords(state,
old_tex->src[i].src.ssa,
def = nir_swizzle(b, def, swiz, intrin->num_components);
/* and rewrite uses of original instruction: */
- assert(intrin->dest.is_ssa);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, def);
/* at this point intrin should be unused. We need to remove it
static bool
lower_32b_offset_load(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var)
{
- assert(intr->dest.is_ssa);
unsigned bit_size = nir_dest_bit_size(intr->dest);
unsigned num_components = nir_dest_num_components(intr->dest);
unsigned num_bits = num_components * bit_size;
b->cursor = nir_before_instr(&intr->instr);
- assert(intr->src[0].is_ssa);
nir_ssa_def *offset = intr->src[0].ssa;
if (intr->intrinsic == nir_intrinsic_load_shared)
offset = nir_iadd_imm(b, offset, nir_intrinsic_base(intr));
static bool
lower_32b_offset_store(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var)
{
- assert(intr->src[0].is_ssa);
unsigned num_components = nir_src_num_components(intr->src[0]);
unsigned bit_size = nir_src_bit_size(intr->src[0]);
unsigned num_bits = num_components * bit_size;
{
b->cursor = nir_before_instr(&intr->instr);
- assert(intr->src[0].is_ssa);
nir_ssa_def *offset =
nir_iadd_imm(b, intr->src[0].ssa, nir_intrinsic_base(intr));
nir_ssa_def *index = nir_ushr_imm(b, offset, 2);
nir_foreach_block_reverse(block, impl) {
nir_foreach_phi_safe(phi, block) {
- assert(phi->dest.is_ssa);
-
if (phi->dest.ssa.bit_size == 1 ||
phi->dest.ssa.bit_size >= min_bit_size)
continue;
if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
return false;
- assert(intrin->dest.is_ssa);
-
zero_system_values_state* state = (zero_system_values_state*)cb_state;
for (uint32_t i = 0; i < state->count; ++i) {
gl_system_value value = state->values[i];
intr->intrinsic == nir_intrinsic_terminate) {
nir_demote(builder);
} else {
- assert(intr->src[0].is_ssa);
nir_demote_if(builder, intr->src[0].ssa);
}
{
enum gl_access_qualifier access = nir_intrinsic_access(intrin);
- assert(intrin->src[1].is_ssa);
nir_ssa_def *value = intrin->src[1].ssa;
unsigned comp_size = value->bit_size / 8;
unsigned num_comps = value->num_components;
nir_ssa_def *val;
if (intrin->intrinsic == nir_intrinsic_load_deref) {
- assert(intrin->dest.is_ssa);
val = &intrin->dest.ssa;
} else {
- assert(intrin->src[1].is_ssa);
val = intrin->src[1].ssa;
}
nir_foreach_block(block, impl) {
nir_if *following_if = nir_block_get_following_if(block);
if (following_if) {
- assert(following_if->condition.is_ssa);
add_instr_and_srcs_to_set(instr_set, following_if->condition.ssa->parent_instr);
}
nir_foreach_instr_safe(instr, block) {
emit_load_global_invocation_id(struct ntd_context *ctx,
nir_intrinsic_instr *intr)
{
- assert(intr->dest.is_ssa);
nir_component_mask_t comps = nir_ssa_def_components_read(&intr->dest.ssa);
for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
emit_load_local_invocation_id(struct ntd_context *ctx,
nir_intrinsic_instr *intr)
{
- assert(intr->dest.is_ssa);
nir_component_mask_t comps = nir_ssa_def_components_read(&intr->dest.ssa);
for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
emit_load_local_invocation_index(struct ntd_context *ctx,
nir_intrinsic_instr *intr)
{
- assert(intr->dest.is_ssa);
-
const struct dxil_value
*flattenedthreadidingroup = emit_flattenedthreadidingroup_call(ctx);
if (!flattenedthreadidingroup)
emit_load_local_workgroup_id(struct ntd_context *ctx,
nir_intrinsic_instr *intr)
{
- assert(intr->dest.is_ssa);
nir_component_mask_t comps = nir_ssa_def_components_read(&intr->dest.ssa);
for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
for (unsigned i = 0; i < vphi->num_components; ++i) {
size_t num_incoming = 0;
nir_foreach_phi_src(src, instr) {
- assert(src->src.is_ssa);
const struct dxil_value *val = get_src_ssa(ctx, src->src.ssa, i);
values[num_incoming] = val;
blocks[num_incoming] = src->pred->index;
DataType
Converter::getDType(nir_alu_instr *insn)
{
- assert(insn->dest.dest.is_ssa);
return getDType(insn->op, insn->dest.dest.ssa.bit_size);
}
break;
}
- assert(insn->dest.is_ssa);
return typeOfSize(insn->dest.ssa.bit_size / 8, isFloat, isSigned);
}
DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
- assert(src.is_ssa);
const uint8_t bitSize = src.ssa->bit_size;
DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
nir_pop_if(b, NULL);
if (has_dest) {
- assert(c_intr->dest.is_ssa);
nir_ssa_def *c_ssa = &c_intr->dest.ssa;
res = nir_if_phi(b, c_ssa, res);
}
b->cursor = nir_before_instr(instr);
- assert(intr->src[0].is_ssa);
nir_ssa_def *addr = intr->src[0].ssa;
nir_ssa_def *comps[MIR_VEC_COMPONENTS];
* up with unconsumed load_register instructions. Translate them here. 99%
* of the time, these moves will be DCE'd away.
*/
- assert(instr->src[0].is_ssa);
nir_ssa_def *handle = instr->src[0].ssa;
midgard_instruction ins =
return false;
BITSET_WORD *float_types = data;
- assert(alu->dest.dest.is_ssa);
if (BITSET_TEST(float_types, alu->dest.dest.ssa.index)) {
alu->op = nir_op_b32fcsel_mdg;
return true;
b->cursor = nir_after_instr(instr);
- assert(intr->dest.is_ssa);
intr->dest.ssa.bit_size = 32;
nir_ssa_def *conv = nir_u2u64(b, &intr->dest.ssa);
nir_ssa_def *res;
switch (intrin->intrinsic) {
case nir_intrinsic_vulkan_resource_index:
- assert(intrin->src[0].is_ssa);
res = build_res_index(b, nir_intrinsic_desc_set(intrin),
nir_intrinsic_binding(intrin), intrin->src[0].ssa,
addr_format, ctx);
break;
case nir_intrinsic_load_vulkan_descriptor:
- assert(intrin->src[0].is_ssa);
res = build_buffer_addr_for_res_index(b, intrin->src[0].ssa, addr_format,
ctx);
break;
unreachable("Unhandled resource intrinsic");
}
- assert(intrin->dest.is_ssa);
assert(intrin->dest.ssa.bit_size == res->bit_size);
assert(intrin->dest.ssa.num_components == res->num_components);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, res);
*index_ssa = NULL;
if (deref->deref_type == nir_deref_type_array) {
- assert(deref->arr.index.is_ssa);
if (index_imm != NULL && nir_src_is_const(deref->arr.index))
*index_imm = nir_src_as_uint(deref->arr.index);
else
if (intr->intrinsic == nir_intrinsic_image_deref_size ||
intr->intrinsic == nir_intrinsic_image_deref_samples) {
- assert(intr->dest.is_ssa);
-
const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(intr);
nir_ssa_def *res;
switch (old_tex->src[i].src_type) {
case nir_tex_src_coord:
if (format_plane->has_chroma && conversion->chroma_reconstruction) {
- assert(old_tex->src[i].src.is_ssa);
tex->src[i].src =
nir_src_for_ssa(implicit_downsampled_coords(state,
old_tex->src[i].src.ssa,