}
assert(replacement);
- nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+ nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(&intrin->instr);
return true;
}
nir_def *off = gs_per_vertex_input_offset(b, st, intrin);
if (st->gfx_level >= GFX9)
- return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
+ return nir_load_shared(b, intrin->def.num_components, intrin->def.bit_size, off);
unsigned wave_size = 64u; /* GFX6-8 only support wave64 */
nir_def *ring = nir_load_ring_esgs_amd(b);
return emit_split_buffer_load(b, ring, off, nir_imm_zero(b, 1, 32), 4u * wave_size,
- intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+ intrin->def.num_components, intrin->def.bit_size);
}
static bool
new_intrin->num_components = intrin->num_components;
if (op != nir_intrinsic_store_global_amd)
- nir_def_init(&new_intrin->instr, &new_intrin->dest.ssa,
- intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+ nir_def_init(&new_intrin->instr, &new_intrin->def,
+ intrin->def.num_components, intrin->def.bit_size);
unsigned num_src = nir_intrinsic_infos[intrin->intrinsic].num_srcs;
for (unsigned i = 0; i < num_src; i++)
nir_builder_instr_insert(b, &new_intrin->instr);
if (op != nir_intrinsic_store_global_amd)
- nir_def_rewrite_uses(&intrin->dest.ssa, &new_intrin->dest.ssa);
+ nir_def_rewrite_uses(&intrin->def, &new_intrin->def);
nir_instr_remove(&intrin->instr);
return true;
nir_def *desc = NULL, *result = NULL;
ASSERTED const char *intr_name;
- nir_def *dst = &intr->dest.ssa;
+ nir_def *dst = &intr->def;
b->cursor = nir_before_instr(instr);
switch (intr->intrinsic) {
case nir_intrinsic_image_load:
case nir_intrinsic_image_deref_load:
case nir_intrinsic_bindless_image_load:
- result = emulated_image_load(b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size,
+ result = emulated_image_load(b, intr->def.num_components, intr->def.bit_size,
desc, intr->src[1].ssa, access, dim, is_array, true);
nir_def_rewrite_uses_after(dst, result, instr);
nir_instr_remove(instr);
nir_tex_instr *new_tex;
nir_def *coord = NULL, *desc = NULL, *sampler_desc = NULL, *result = NULL;
- nir_def *dst = &tex->dest.ssa;
+ nir_def *dst = &tex->def;
b->cursor = nir_before_instr(instr);
switch (tex->op) {
new_tex->dest_type = nir_type_int32;
nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
new_tex->src[0].src_type = tex->src[i].src_type;
- nir_def_init(&new_tex->instr, &new_tex->dest.ssa,
+ nir_def_init(&new_tex->instr, &new_tex->def,
nir_tex_instr_dest_size(new_tex), 32);
nir_builder_instr_insert(b, &new_tex->instr);
- desc = &new_tex->dest.ssa;
+ desc = &new_tex->def;
break;
case nir_tex_src_sampler_deref:
new_tex->dest_type = nir_type_int32;
nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
new_tex->src[0].src_type = tex->src[i].src_type;
- nir_def_init(&new_tex->instr, &new_tex->dest.ssa,
+ nir_def_init(&new_tex->instr, &new_tex->def,
nir_tex_instr_dest_size(new_tex), 32);
nir_builder_instr_insert(b, &new_tex->instr);
- sampler_desc = &new_tex->dest.ssa;
+ sampler_desc = &new_tex->def;
break;
case nir_tex_src_coord:
switch (tex->op) {
case nir_texop_txf:
- result = emulated_image_load(b, tex->dest.ssa.num_components, tex->dest.ssa.bit_size,
+ result = emulated_image_load(b, tex->def.num_components, tex->def.bit_size,
desc, coord,
ACCESS_RESTRICT | ACCESS_NON_WRITEABLE | ACCESS_CAN_REORDER,
tex->sampler_dim, tex->is_array, true);
case nir_texop_tex:
case nir_texop_txl:
- result = emulated_tex_level_zero(b, tex->dest.ssa.num_components, tex->dest.ssa.bit_size,
+ result = emulated_tex_level_zero(b, tex->def.num_components, tex->def.bit_size,
desc, sampler_desc, coord, tex->sampler_dim, tex->is_array);
nir_def_rewrite_uses_after(dst, result, instr);
nir_instr_remove(instr);
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (!nir_intrinsic_can_reorder(intrin) ||
!nir_intrinsic_infos[intrin->intrinsic].has_dest ||
- intrin->dest.ssa.divergent)
+ intrin->def.divergent)
return NULL;
- return &intrin->dest.ssa;
+ return &intrin->def;
}
case nir_instr_type_phi: {
nir_phi_instr *phi = nir_instr_as_phi(instr);
- if (phi->dest.ssa.divergent)
+ if (phi->def.divergent)
return NULL;
- return &phi->dest.ssa;
+ return &phi->def;
}
default:
return NULL;
unsigned location = nir_intrinsic_io_semantics(intrin).location;
unsigned component_offset = nir_intrinsic_component(intrin);
- unsigned bit_size = intrin->dest.ssa.bit_size;
- unsigned num_components = intrin->dest.ssa.num_components;
+ unsigned bit_size = intrin->def.bit_size;
+ unsigned num_components = intrin->def.num_components;
unsigned load_bit_size = MAX2(bit_size, 32);
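/* Illustrative note (assumption): sub-dword (8/16-bit) destinations are
 * widened to 32-bit loads here and presumably narrowed back to bit_size
 * after the load completes.
 */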
nir_def *load =
b->cursor = nir_before_instr(&intrin->instr);
nir_def *replacement = nir_load_var(b, var);
- nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+ nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(&intrin->instr);
return true;
nir_def *sample_mask = nir_load_sample_mask_in(b);
nir_def *replacement = nir_iand(b, sample_mask, submask);
- nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+ nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(&intrin->instr);
return true;
bool is_array;
nir_def *desc = NULL;
- dst = &intr->dest.ssa;
+ dst = &intr->def;
b->cursor = nir_before_instr(instr);
switch (intr->intrinsic) {
nir_def *desc = NULL;
nir_src *lod = NULL;
- dst = &tex->dest.ssa;
+ dst = &tex->def;
b->cursor = nir_before_instr(instr);
switch (tex->op) {
new_tex->dest_type = nir_type_int32;
nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
new_tex->src[0].src_type = tex->src[i].src_type;
- nir_def_init(&new_tex->instr, &new_tex->dest.ssa,
+ nir_def_init(&new_tex->instr, &new_tex->def,
nir_tex_instr_dest_size(new_tex), 32);
nir_builder_instr_insert(b, &new_tex->instr);
- desc = &new_tex->dest.ssa;
+ desc = &new_tex->def;
break;
case nir_tex_src_lod:
return false;
}
- unsigned bit_size = intr->dest.ssa.bit_size;
+ unsigned bit_size = intr->def.bit_size;
if (bit_size >= 32)
return false;
nir_src *src_offset = nir_get_io_offset_src(intr);
nir_def *offset = src_offset->ssa;
- nir_def *result = &intr->dest.ssa;
+ nir_def *result = &intr->def;
/* Change the load to 32 bits per channel, update the channel count,
* and increase the declared load alignment.
*/
- intr->dest.ssa.bit_size = 32;
+ intr->def.bit_size = 32;
if (align_mul == 4 && align_offset == 0) {
- intr->num_components = intr->dest.ssa.num_components =
+ intr->num_components = intr->def.num_components =
DIV_ROUND_UP(num_components, comp_per_dword);
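/* Example (illustrative, assuming comp_per_dword == 32 / bit_size): four
 * 16-bit components pack two per dword, so DIV_ROUND_UP(4, 2) = 2 dwords.
 */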
/* Aligned loads. Just bitcast the vector and trim it if there are
b->cursor = nir_after_instr(instr);
result = nir_extract_bits(b, &result, 1, 0, num_components, bit_size);
- nir_def_rewrite_uses_after(&intr->dest.ssa, result,
+ nir_def_rewrite_uses_after(&intr->def, result,
result->parent_instr);
return true;
}
/* Multi-component unaligned loads may straddle the dword boundary.
* E.g. for 2 components, we need to load an extra dword, and so on.
*/
- intr->num_components = intr->dest.ssa.num_components =
+ intr->num_components = intr->def.num_components =
DIV_ROUND_UP(4 - align_mul + align_offset + num_components * component_size, 4);
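/* Example (illustrative): two 16-bit components at align_mul = 4,
 * align_offset = 2 give DIV_ROUND_UP(4 - 4 + 2 + 2 * 2, 4) = 2 dwords;
 * the 4-byte value starting at byte 2 straddles into a second dword.
 */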
nir_intrinsic_set_align(intr,
result = nir_extract_bits(b, &result, 1, comp_offset * bit_size,
num_components, bit_size);
- nir_def_rewrite_uses_after(&intr->dest.ssa, result,
+ nir_def_rewrite_uses_after(&intr->def, result,
result->parent_instr);
return true;
}
result = nir_vec(b, elems, intr->num_components);
result = nir_extract_bits(b, &result, 1, 0, num_components, bit_size);
- nir_def_rewrite_uses_after(&intr->dest.ssa, result,
+ nir_def_rewrite_uses_after(&intr->def, result,
result->parent_instr);
return true;
}
lower_tsms_io_state *s)
{
unsigned base = nir_intrinsic_base(intrin);
- unsigned num_components = intrin->dest.ssa.num_components;
- unsigned bit_size = intrin->dest.ssa.bit_size;
+ unsigned num_components = intrin->def.num_components;
+ unsigned bit_size = intrin->def.bit_size;
nir_def *ptr =
b->shader->info.stage == MESA_SHADER_TASK ?
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
nir_def *off = hs_per_vertex_input_lds_offset(b, st, intrin);
- return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
+ return nir_load_shared(b, intrin->def.num_components, intrin->def.bit_size, off);
}
static nir_def *
lower_tess_io_state *st)
{
nir_def *off = hs_output_lds_offset(b, st, intrin);
- return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
+ return nir_load_shared(b, intrin->def.num_components, intrin->def.bit_size, off);
}
static void
nir_def *zero = nir_imm_int(b, 0);
- return nir_load_buffer_amd(b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size, offchip_ring,
+ return nir_load_buffer_amd(b, intrin->def.num_components,
+ intrin->def.bit_size, offchip_ring,
off, offchip_offset, zero,
.access = ACCESS_COHERENT);
}
void
visit_load_interpolated_input(isel_context* ctx, nir_intrinsic_instr* instr)
{
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp coords = get_ssa_temp(ctx, instr->src[0].ssa);
unsigned idx = nir_intrinsic_base(instr);
unsigned component = nir_intrinsic_component(instr);
assert(nir_src_is_const(instr->src[1]) && !nir_src_as_uint(instr->src[1]));
- if (instr->dest.ssa.num_components == 1) {
+ if (instr->def.num_components == 1) {
emit_interp_instr(ctx, idx, component, coords, dst, prim_mask);
} else {
aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(
- aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.ssa.num_components, 1));
- for (unsigned i = 0; i < instr->dest.ssa.num_components; i++) {
- Temp tmp = ctx->program->allocateTmp(instr->dest.ssa.bit_size == 16 ? v2b : v1);
+ aco_opcode::p_create_vector, Format::PSEUDO, instr->def.num_components, 1));
+ for (unsigned i = 0; i < instr->def.num_components; i++) {
+ Temp tmp = ctx->program->allocateTmp(instr->def.bit_size == 16 ? v2b : v1);
emit_interp_instr(ctx, idx, component + i, coords, tmp, prim_mask);
vec->operands[i] = Operand(tmp);
}
visit_load_fs_input(isel_context* ctx, nir_intrinsic_instr* instr)
{
Builder bld(ctx->program, ctx->block);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
nir_src offset = *nir_get_io_offset_src(instr);
if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
if (instr->intrinsic == nir_intrinsic_load_input_vertex)
vertex_id = nir_src_as_uint(instr->src[0]);
- if (instr->dest.ssa.num_components == 1 && instr->dest.ssa.bit_size != 64) {
+ if (instr->def.num_components == 1 && instr->def.bit_size != 64) {
emit_interp_mov_instr(ctx, idx, component, vertex_id, dst, prim_mask);
} else {
- unsigned num_components = instr->dest.ssa.num_components;
- if (instr->dest.ssa.bit_size == 64)
+ unsigned num_components = instr->def.num_components;
+ if (instr->def.bit_size == 64)
num_components *= 2;
aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
for (unsigned i = 0; i < num_components; i++) {
unsigned chan_component = (component + i) % 4;
unsigned chan_idx = idx + (component + i) / 4;
- vec->operands[i] = Operand(bld.tmp(instr->dest.ssa.bit_size == 16 ? v2b : v1));
+ vec->operands[i] = Operand(bld.tmp(instr->def.bit_size == 16 ? v2b : v1));
emit_interp_mov_instr(ctx, chan_idx, chan_component, vertex_id, vec->operands[i].getTemp(),
prim_mask);
}
assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
Builder bld(ctx->program, ctx->block);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (load_input_from_temps(ctx, instr, dst))
return;
assert(ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
Builder bld(ctx->program, ctx->block);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Operand tes_u(get_arg(ctx, ctx->args->tes_u));
Operand tes_v(get_arg(ctx, ctx->args->tes_v));
void
visit_load_ubo(isel_context* ctx, nir_intrinsic_instr* instr)
{
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Builder bld(ctx->program, ctx->block);
Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
- unsigned size = instr->dest.ssa.bit_size / 8;
+ unsigned size = instr->def.bit_size / 8;
load_buffer(ctx, instr->num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr));
}
visit_load_push_constant(isel_context* ctx, nir_intrinsic_instr* instr)
{
Builder bld(ctx->program, ctx->block);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
unsigned offset = nir_intrinsic_base(instr);
- unsigned count = instr->dest.ssa.num_components;
+ unsigned count = instr->def.num_components;
nir_const_value* index_cv = nir_src_as_const_value(instr->src[0]);
- if (instr->dest.ssa.bit_size == 64)
+ if (instr->def.bit_size == 64)
count *= 2;
- if (index_cv && instr->dest.ssa.bit_size >= 32) {
+ if (index_cv && instr->def.bit_size >= 32) {
unsigned start = (offset + index_cv->u32) / 4u;
uint64_t mask = BITFIELD64_MASK(count) << start;
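/* Example (illustrative): base offset 4 with index 4 and count == 2 gives
 * start = 2 and mask = 0b1100, i.e. dwords 2-3; the load can only be
 * inlined if both bits are already set in inline_push_const_mask.
 */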
if ((ctx->args->inline_push_const_mask | mask) == ctx->args->inline_push_const_mask &&
bool trim = false;
bool aligned = true;
- if (instr->dest.ssa.bit_size == 8) {
+ if (instr->def.bit_size == 8) {
aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
bool fits_in_dword = count == 1 || (index_cv && ((offset + index_cv->u32) % 4 + count) <= 4);
if (!aligned)
vec = fits_in_dword ? bld.tmp(s1) : bld.tmp(s2);
- } else if (instr->dest.ssa.bit_size == 16) {
+ } else if (instr->def.bit_size == 16) {
aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
if (!aligned)
vec = count == 4 ? bld.tmp(s4) : count > 1 ? bld.tmp(s2) : bld.tmp(s1);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), emit_extract_vector(ctx, vec, 0, rc),
emit_extract_vector(ctx, vec, 1, rc), emit_extract_vector(ctx, vec, 2, rc));
}
- emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
+ emit_split_vector(ctx, dst, instr->def.num_components);
}
void
visit_load_constant(isel_context* ctx, nir_intrinsic_instr* instr)
{
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Builder bld(ctx->program, ctx->block);
Operand::c32(ctx->constant_data_offset)),
Operand::c32(MIN2(base + range, ctx->shader->constant_data_size)),
Operand::c32(desc_type));
- unsigned size = instr->dest.ssa.bit_size / 8;
+ unsigned size = instr->def.bit_size / 8;
// TODO: get alignment information for subdword constants
load_buffer(ctx, instr->num_components, size, dst, rsrc, offset, size, 0);
}
visit_bvh64_intersect_ray_amd(isel_context* ctx, nir_intrinsic_instr* instr)
{
Builder bld(ctx->program, ctx->block);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp resource = get_ssa_temp(ctx, instr->src[0].ssa);
Temp node = get_ssa_temp(ctx, instr->src[1].ssa);
Temp tmax = get_ssa_temp(ctx, instr->src[2].ssa);
mimg->unrm = true;
mimg->r128 = true;
- emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
+ emit_split_vector(ctx, dst, instr->def.num_components);
}
static std::vector<Temp>
const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
bool is_array = nir_intrinsic_image_array(instr);
bool is_sparse = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
memory_sync_info sync = get_memory_sync_info(instr, storage_image, 0);
unsigned access = nir_intrinsic_access(instr);
- unsigned result_size = instr->dest.ssa.num_components - is_sparse;
- unsigned expand_mask =
- nir_def_components_read(&instr->dest.ssa) & u_bit_consecutive(0, result_size);
+ unsigned result_size = instr->def.num_components - is_sparse;
+ unsigned expand_mask = nir_def_components_read(&instr->def) & u_bit_consecutive(0, result_size);
expand_mask = MAX2(expand_mask, 1); /* this can be zero in the case of sparse image loads */
if (dim == GLSL_SAMPLER_DIM_BUF)
expand_mask = (1u << util_last_bit(expand_mask)) - 1u;
unsigned dmask = expand_mask;
- if (instr->dest.ssa.bit_size == 64) {
+ if (instr->def.bit_size == 64) {
expand_mask &= 0x9;
/* only R64_UINT and R64_SINT supported. x is in xy of the result, w in zw */
dmask = ((expand_mask & 0x1) ? 0x3 : 0) | ((expand_mask & 0x8) ? 0xc : 0);
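/* Example (illustrative): if both x and w of the 64-bit result are read,
 * expand_mask == 0x9 and dmask becomes 0x3 | 0xc == 0xf, i.e. four 32-bit
 * channels holding two 64-bit values.
 */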
if (is_sparse)
expand_mask |= 1 << result_size;
- bool d16 = instr->dest.ssa.bit_size == 16;
+ bool d16 = instr->def.bit_size == 16;
assert(!d16 || !is_sparse);
unsigned num_bytes = util_bitcount(dmask) * (d16 ? 2 : 4) + is_sparse * 4;
}
}
- if (is_sparse && instr->dest.ssa.bit_size == 64) {
+ if (is_sparse && instr->def.bit_size == 64) {
/* The result components are 64-bit but the sparse residency code is
* 32-bit. So add a zero to the end so expand_vector() works correctly.
*/
Operand::zero());
}
- expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, expand_mask,
- instr->dest.ssa.bit_size == 64);
+ expand_vector(ctx, tmp, dst, instr->def.num_components, expand_mask, instr->def.bit_size == 64);
}
void
void
visit_image_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
{
- bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
+ bool return_previous = !nir_def_is_unused(&instr->def);
const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
bool is_array = nir_intrinsic_image_array(instr);
Builder bld(ctx->program, ctx->block);
data = bld.pseudo(aco_opcode::p_create_vector, bld.def(is_64bit ? v4 : v2),
get_ssa_temp(ctx, instr->src[4].ssa), data);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
memory_sync_info sync = get_memory_sync_info(instr, storage_image, semantic_atomicrmw);
if (dim == GLSL_SAMPLER_DIM_BUF) {
Builder bld(ctx->program, ctx->block);
unsigned num_components = instr->num_components;
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
unsigned access = nir_intrinsic_access(instr);
bool glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT);
- unsigned size = instr->dest.ssa.bit_size / 8;
+ unsigned size = instr->def.bit_size / 8;
bool allow_smem = access & ACCESS_CAN_REORDER;
visit_atomic_ssbo(isel_context* ctx, nir_intrinsic_instr* instr)
{
Builder bld(ctx->program, ctx->block);
- bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
+ bool return_previous = !nir_def_is_unused(&instr->def);
Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa));
const nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
Temp offset = get_ssa_temp(ctx, instr->src[1].ssa);
Temp rsrc = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
- aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
+ aco_opcode op = instr->def.bit_size == 32 ? op32 : op64;
aco_ptr<MUBUF_instruction> mubuf{
create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
mubuf->operands[0] = Operand(rsrc);
{
Builder bld(ctx->program, ctx->block);
unsigned num_components = instr->num_components;
- unsigned component_size = instr->dest.ssa.bit_size / 8;
+ unsigned component_size = instr->def.bit_size / 8;
Temp addr, offset;
uint32_t const_offset;
parse_global(ctx, instr, &addr, &const_offset, &offset);
- LoadEmitInfo info = {Operand(addr), get_ssa_temp(ctx, &instr->dest.ssa), num_components,
+ LoadEmitInfo info = {Operand(addr), get_ssa_temp(ctx, &instr->def), num_components,
component_size};
if (offset.id()) {
info.resource = addr;
visit_global_atomic(isel_context* ctx, nir_intrinsic_instr* instr)
{
Builder bld(ctx->program, ctx->block);
- bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
+ bool return_previous = !nir_def_is_unused(&instr->def);
Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
const nir_atomic_op nir_op = nir_intrinsic_atomic_op(instr);
data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
get_ssa_temp(ctx, instr->src[2].ssa), data);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
aco_opcode op32, op64;
default: unreachable("unsupported atomic operation");
}
- aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
+ aco_opcode op = instr->def.bit_size == 32 ? op32 : op64;
aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(
op, global ? Format::GLOBAL : Format::FLAT, 3, return_previous ? 1 : 0)};
if (addr.regClass() == s2) {
Temp rsrc = get_gfx6_global_rsrc(bld, addr);
- aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
+ aco_opcode op = instr->def.bit_size == 32 ? op32 : op64;
aco_ptr<MUBUF_instruction> mubuf{
create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
bool v_offset_zero = nir_src_is_const(intrin->src[1]) && !nir_src_as_uint(intrin->src[1]);
bool s_offset_zero = nir_src_is_const(intrin->src[2]) && !nir_src_as_uint(intrin->src[2]);
- Temp dst = get_ssa_temp(ctx, &intrin->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &intrin->def);
Temp descriptor = bld.as_uniform(get_ssa_temp(ctx, intrin->src[0].ssa));
Temp v_offset =
v_offset_zero ? Temp(0, v1) : as_vgpr(ctx, get_ssa_temp(ctx, intrin->src[1].ssa));
bool slc = nir_intrinsic_access(intrin) & ACCESS_NON_TEMPORAL;
unsigned const_offset = nir_intrinsic_base(intrin);
- unsigned elem_size_bytes = intrin->dest.ssa.bit_size / 8u;
- unsigned num_components = intrin->dest.ssa.num_components;
+ unsigned elem_size_bytes = intrin->def.bit_size / 8u;
+ unsigned num_components = intrin->def.num_components;
nir_variable_mode mem_mode = nir_intrinsic_memory_modes(intrin);
memory_sync_info sync(aco_storage_mode_from_nir_mem_mode(mem_mode));
visit_load_smem(isel_context* ctx, nir_intrinsic_instr* instr)
{
Builder bld(ctx->program, ctx->block);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp base = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
Temp offset = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
} else {
bld.smem(opcode, Definition(dst), base, offset);
}
- emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
+ emit_split_vector(ctx, dst, instr->def.num_components);
}
sync_scope
visit_load_shared(isel_context* ctx, nir_intrinsic_instr* instr)
{
// TODO: implement sparse reads using ds_read2_b32 and nir_def_components_read()
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
Builder bld(ctx->program, ctx->block);
- unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
- unsigned num_components = instr->dest.ssa.num_components;
+ unsigned elem_size_bytes = instr->def.bit_size / 8;
+ unsigned num_components = instr->def.num_components;
unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
load_lds(ctx, elem_size_bytes, num_components, dst, address, nir_intrinsic_base(instr), align);
}
default: unreachable("Unhandled shared atomic intrinsic");
}
- bool return_previous = !nir_def_is_unused(&instr->dest.ssa);
+ bool return_previous = !nir_def_is_unused(&instr->def);
aco_opcode op;
if (data.size() == 1) {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
op = return_previous ? op32_rtn : op32;
} else {
- assert(instr->dest.ssa.bit_size == 64);
+ assert(instr->def.bit_size == 64);
op = return_previous ? op64_rtn : op64;
}
ds->operands[num_operands - 1] = m;
ds->offset0 = offset;
if (return_previous)
- ds->definitions[0] = Definition(get_ssa_temp(ctx, &instr->dest.ssa));
+ ds->definitions[0] = Definition(get_ssa_temp(ctx, &instr->def));
ds->sync = memory_sync_info(storage_shared, semantic_atomicrmw);
if (m.isUndefined())
assert(bld.program->gfx_level >= GFX7);
- bool is64bit = (is_store ? instr->src[0].ssa->bit_size : instr->dest.ssa.bit_size) == 64;
+ bool is64bit = (is_store ? instr->src[0].ssa->bit_size : instr->def.bit_size) == 64;
uint8_t offset0 = nir_intrinsic_offset0(instr);
uint8_t offset1 = nir_intrinsic_offset1(instr);
bool st64 = nir_intrinsic_st64(instr);
Temp data1 = emit_extract_vector(ctx, data, 1, comp_rc);
ds = bld.ds(op, address, data0, data1, m, offset0, offset1);
} else {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Definition tmp_dst(dst.type() == RegType::vgpr ? dst : bld.tmp(is64bit ? v4 : v2));
aco_opcode op = st64 ? (is64bit ? aco_opcode::ds_read2st64_b64 : aco_opcode::ds_read2st64_b32)
: (is64bit ? aco_opcode::ds_read2_b64 : aco_opcode::ds_read2_b32);
ds->operands.pop_back();
if (!is_store) {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (dst.type() == RegType::sgpr) {
emit_split_vector(ctx, ds->definitions[0].getTemp(), dst.size());
Temp comp[4];
visit_load_scratch(isel_context* ctx, nir_intrinsic_instr* instr)
{
Builder bld(ctx->program, ctx->block);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
- LoadEmitInfo info = {Operand(v1), dst, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size / 8u};
+ LoadEmitInfo info = {Operand(v1), dst, instr->def.num_components, instr->def.bit_size / 8u};
info.align_mul = nir_intrinsic_align_mul(instr);
info.align_offset = nir_intrinsic_align_offset(instr);
info.swizzle_component_size = ctx->program->gfx_level <= GFX8 ? 4 : 0;
emit_uniform_subgroup(isel_context* ctx, nir_intrinsic_instr* instr, Temp src)
{
Builder bld(ctx->program, ctx->block);
- Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
+ Definition dst(get_ssa_temp(ctx, &instr->def));
assert(dst.regClass().type() != RegType::vgpr);
if (src.regClass().type() == RegType::vgpr)
bld.pseudo(aco_opcode::p_as_uniform, dst, src);
if (op == nir_op_iadd || op == nir_op_ixor || op == nir_op_fadd) {
Builder bld(ctx->program, ctx->block);
- Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
+ Definition dst(get_ssa_temp(ctx, &instr->def));
unsigned bit_size = instr->src[0].ssa->bit_size;
if (bit_size > 32)
return false;
emit_uniform_scan(isel_context* ctx, nir_intrinsic_instr* instr)
{
Builder bld(ctx->program, ctx->block);
- Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
+ Definition dst(get_ssa_temp(ctx, &instr->def));
nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
bool inc = instr->intrinsic == nir_intrinsic_inclusive_scan;
glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(instr);
Temp bary = get_interp_param(ctx, instr->intrinsic, mode);
assert(bary.size() == 2);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), bary);
emit_split_vector(ctx, dst, 2);
break;
case nir_intrinsic_load_barycentric_model: {
Temp model = get_arg(ctx, ctx->args->pull_model);
assert(model.size() == 3);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), model);
emit_split_vector(ctx, dst, 3);
break;
bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), offset);
Temp bary = get_interp_param(ctx, instr->intrinsic,
(glsl_interp_mode)nir_intrinsic_interp_mode(instr));
- emit_interp_center(ctx, get_ssa_temp(ctx, &instr->dest.ssa), bary, pos1, pos2);
+ emit_interp_center(ctx, get_ssa_temp(ctx, &instr->def), bary, pos1, pos2);
break;
}
case nir_intrinsic_load_front_face: {
- bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+ bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->def)),
Operand::zero(), get_arg(ctx, ctx->args->front_face));
break;
}
case nir_intrinsic_load_view_index: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->view_index)));
break;
}
case nir_intrinsic_load_frag_coord: {
- emit_load_frag_coord(ctx, get_ssa_temp(ctx, &instr->dest.ssa), 4);
+ emit_load_frag_coord(ctx, get_ssa_temp(ctx, &instr->def), 4);
break;
}
case nir_intrinsic_load_frag_shading_rate:
- emit_load_frag_shading_rate(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
+ emit_load_frag_shading_rate(ctx, get_ssa_temp(ctx, &instr->def));
break;
case nir_intrinsic_load_sample_pos: {
Temp posx = get_arg(ctx, ctx->args->frag_pos[0]);
Temp posy = get_arg(ctx, ctx->args->frag_pos[1]);
bld.pseudo(
- aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+ aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->def)),
posx.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posx) : Operand::zero(),
posy.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posy) : Operand::zero());
break;
case nir_intrinsic_store_scratch: visit_store_scratch(ctx, instr); break;
case nir_intrinsic_barrier: emit_barrier(ctx, instr); break;
case nir_intrinsic_load_num_workgroups: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (ctx->options->load_grid_size_from_user_sgpr) {
bld.copy(Definition(dst), get_arg(ctx, ctx->args->num_work_groups));
} else {
break;
}
case nir_intrinsic_load_ray_launch_size: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->rt.launch_size)));
emit_split_vector(ctx, dst, 3);
break;
}
case nir_intrinsic_load_ray_launch_id: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->rt.launch_id)));
emit_split_vector(ctx, dst, 3);
break;
}
case nir_intrinsic_load_ray_launch_size_addr_amd: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp addr = get_arg(ctx, ctx->args->rt.launch_size_addr);
assert(addr.regClass() == s2);
bld.copy(Definition(dst), Operand(addr));
break;
}
case nir_intrinsic_load_local_invocation_id: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (ctx->options->gfx_level >= GFX11) {
Temp local_ids[3];
break;
}
case nir_intrinsic_load_workgroup_id: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (ctx->stage.hw == AC_HW_COMPUTE_SHADER) {
const struct ac_arg* ids = ctx->args->workgroup_ids;
bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
Temp temp = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), wave_id,
Operand::c32(ctx->program->wave_size));
- emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa), Operand(), Operand(temp));
+ emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->def), Operand(), Operand(temp));
} else {
- bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+ bld.copy(Definition(get_ssa_temp(ctx, &instr->def)),
get_arg(ctx, ctx->args->vs_rel_patch_id));
}
break;
} else if (ctx->stage.hw == AC_HW_LEGACY_GEOMETRY_SHADER ||
ctx->stage.hw == AC_HW_NEXT_GEN_GEOMETRY_SHADER) {
- bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), thread_id_in_threadgroup(ctx));
+ bld.copy(Definition(get_ssa_temp(ctx, &instr->def)), thread_id_in_threadgroup(ctx));
break;
} else if (ctx->program->workgroup_size <= ctx->program->wave_size) {
- emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
+ emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->def));
break;
}
* feed that to v_or */
Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc),
Operand::c32(0xfc0u), get_arg(ctx, ctx->args->tg_size));
- bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num,
- id);
+ bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->def)), tg_num, id);
} else {
/* Extract the bit field and multiply the result by 32 (left shift by 5), then do the OR */
Temp tg_num =
bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
get_arg(ctx, ctx->args->tg_size), Operand::c32(0x6u | (0x6u << 16)));
- bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
- tg_num, Operand::c32(0x5u), id);
+ bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->def)), tg_num,
+ Operand::c32(0x5u), id);
}
break;
}
case nir_intrinsic_load_subgroup_invocation: {
- emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->dest.ssa));
+ emit_mbcnt(ctx, get_ssa_temp(ctx, &instr->def));
break;
}
case nir_intrinsic_ballot: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (instr->src[0].ssa->bit_size == 1) {
assert(src.regClass() == bld.lm);
if (instr->intrinsic == nir_intrinsic_read_invocation ||
!nir_src_is_divergent(instr->src[1]))
tid = bld.as_uniform(tid);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
- if (instr->dest.ssa.bit_size != 1)
+ if (instr->def.bit_size != 1)
src = as_vgpr(ctx, src);
if (src.regClass() == v1b || src.regClass() == v2b) {
hi = emit_wqm(bld, emit_bpermute(ctx, bld, tid, hi));
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
- } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == s1) {
+ } else if (instr->def.bit_size == 1 && tid.regClass() == s1) {
assert(src.regClass() == bld.lm);
Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src, tid);
bool_to_vector_condition(ctx, emit_wqm(bld, tmp), dst);
- } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == v1) {
+ } else if (instr->def.bit_size == 1 && tid.regClass() == v1) {
assert(src.regClass() == bld.lm);
Temp tmp;
if (ctx->program->gfx_level <= GFX7)
break;
}
case nir_intrinsic_load_sample_id: {
- bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+ bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->def)),
get_arg(ctx, ctx->args->ancillary), Operand::c32(8u), Operand::c32(4u));
break;
}
case nir_intrinsic_read_first_invocation: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (src.regClass() == v1b || src.regClass() == v2b || src.regClass() == v1) {
emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), src), dst);
} else if (src.regClass() == v2) {
hi = emit_wqm(bld, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), hi));
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
- } else if (instr->dest.ssa.bit_size == 1) {
+ } else if (instr->def.bit_size == 1) {
assert(src.regClass() == bld.lm);
Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src,
bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)));
}
case nir_intrinsic_vote_all: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
assert(src.regClass() == bld.lm);
assert(dst.regClass() == bld.lm);
}
case nir_intrinsic_vote_any: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
assert(src.regClass() == bld.lm);
assert(dst.regClass() == bld.lm);
case nir_intrinsic_inclusive_scan:
case nir_intrinsic_exclusive_scan: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
nir_op op = (nir_op)nir_intrinsic_reduction_op(instr);
unsigned cluster_size =
instr->intrinsic == nir_intrinsic_reduce ? nir_intrinsic_cluster_size(instr) : 0;
instr->intrinsic == nir_intrinsic_reduce && nir_intrinsic_include_helpers(instr);
if (!nir_src_is_divergent(instr->src[0]) && cluster_size == ctx->program->wave_size &&
- instr->dest.ssa.bit_size != 1) {
+ instr->def.bit_size != 1) {
/* We use divergence analysis to assign the regclass, so check if it's
* working as expected */
ASSERTED bool expected_divergent = instr->intrinsic == nir_intrinsic_exclusive_scan;
if (instr->intrinsic == nir_intrinsic_inclusive_scan)
expected_divergent = op == nir_op_iadd || op == nir_op_fadd || op == nir_op_ixor;
- assert(instr->dest.ssa.divergent == expected_divergent);
+ assert(instr->def.divergent == expected_divergent);
if (instr->intrinsic == nir_intrinsic_reduce) {
if (emit_uniform_reduce(ctx, instr))
}
}
- if (instr->dest.ssa.bit_size == 1) {
+ if (instr->def.bit_size == 1) {
if (op == nir_op_imul || op == nir_op_umin || op == nir_op_imin)
op = nir_op_iand;
else if (op == nir_op_iadd)
case nir_intrinsic_quad_swizzle_amd: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- if (!instr->dest.ssa.divergent) {
+ if (!instr->def.divergent) {
emit_uniform_subgroup(ctx, instr, src);
break;
}
/* Quad broadcast lane. */
unsigned lane = 0;
/* Use VALU for the bool instructions that don't have a SALU-only special case. */
- bool bool_use_valu = instr->dest.ssa.bit_size == 1;
+ bool bool_use_valu = instr->def.bit_size == 1;
uint16_t dpp_ctrl = 0;
default: break;
}
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp tmp(dst);
/* Setup source. */
if (bool_use_valu)
src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
Operand::c32(-1), src);
- else if (instr->dest.ssa.bit_size != 1)
+ else if (instr->def.bit_size != 1)
src = as_vgpr(ctx, src);
/* Setup temporary destination. */
else if (ctx->program->stage == fragment_fs)
tmp = bld.tmp(dst.regClass());
- if (instr->dest.ssa.bit_size == 1 && instr->intrinsic == nir_intrinsic_quad_broadcast) {
+ if (instr->def.bit_size == 1 && instr->intrinsic == nir_intrinsic_quad_broadcast) {
/* Special case for quad broadcast using SALU only. */
assert(src.regClass() == bld.lm && tmp.regClass() == bld.lm);
bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), mask_tmp, src);
bld.sop1(Builder::s_wqm, Definition(tmp), src);
- } else if (instr->dest.ssa.bit_size <= 32 || bool_use_valu) {
- unsigned excess_bytes = bool_use_valu ? 0 : 4 - instr->dest.ssa.bit_size / 8;
+ } else if (instr->def.bit_size <= 32 || bool_use_valu) {
+ unsigned excess_bytes = bool_use_valu ? 0 : 4 - instr->def.bit_size / 8;
Definition def = excess_bytes ? bld.def(v1) : Definition(tmp);
if (ctx->program->gfx_level >= GFX8)
if (excess_bytes)
bld.pseudo(aco_opcode::p_split_vector, Definition(tmp),
bld.def(RegClass::get(tmp.type(), excess_bytes)), def.getTemp());
- } else if (instr->dest.ssa.bit_size == 64) {
+ } else if (instr->def.bit_size == 64) {
Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
}
case nir_intrinsic_masked_swizzle_amd: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- if (!instr->dest.ssa.divergent) {
+ if (!instr->def.divergent) {
emit_uniform_subgroup(ctx, instr, src);
break;
}
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
uint32_t mask = nir_intrinsic_swizzle_mask(instr);
- if (instr->dest.ssa.bit_size != 1)
+ if (instr->def.bit_size != 1)
src = as_vgpr(ctx, src);
- if (instr->dest.ssa.bit_size == 1) {
+ if (instr->def.bit_size == 1) {
assert(src.regClass() == bld.lm);
src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand::zero(),
Operand::c32(-1), src);
Temp src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
Temp val = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
Temp lane = bld.as_uniform(get_ssa_temp(ctx, instr->src[2].ssa));
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (dst.regClass() == v1) {
/* src2 is ignored for writelane. RA assigns the same reg for dst */
emit_wqm(bld, bld.writelane(bld.def(v1), val, lane, src), dst);
case nir_intrinsic_mbcnt_amd: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
Temp add_src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
/* Fit 64-bit mask for wave32 */
src = emit_extract_vector(ctx, src, 0, RegClass(src.type(), bld.lm.size()));
Temp wqm_tmp = emit_mbcnt(ctx, bld.tmp(v1), Operand(src), Operand(add_src));
}
case nir_intrinsic_lane_permute_16_amd: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
assert(ctx->program->gfx_level >= GFX10);
if (src.regClass() == s1) {
case nir_intrinsic_is_helper_invocation: {
/* load_helper() after demote() gets lowered to is_helper().
* Otherwise, these two behave the same. */
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.pseudo(aco_opcode::p_is_helper, Definition(dst), Operand(exec, bld.lm));
ctx->block->kind |= block_kind_needs_lowering;
ctx->program->needs_exact = true;
}
case nir_intrinsic_first_invocation: {
emit_wqm(bld, bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)),
- get_ssa_temp(ctx, &instr->dest.ssa));
+ get_ssa_temp(ctx, &instr->def));
break;
}
case nir_intrinsic_last_invocation: {
Temp flbit = bld.sop1(Builder::s_flbit_i32, bld.def(s1), Operand(exec, bld.lm));
Temp last = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc),
Operand::c32(ctx->program->wave_size - 1u), flbit);
- emit_wqm(bld, last, get_ssa_temp(ctx, &instr->dest.ssa));
+ emit_wqm(bld, last, get_ssa_temp(ctx, &instr->def));
break;
}
case nir_intrinsic_elect: {
* two p_elect with different exec masks as the same.
*/
Temp elected = bld.pseudo(aco_opcode::p_elect, bld.def(bld.lm), Operand(exec, bld.lm));
- emit_wqm(bld, elected, get_ssa_temp(ctx, &instr->dest.ssa));
+ emit_wqm(bld, elected, get_ssa_temp(ctx, &instr->def));
ctx->block->kind |= block_kind_needs_lowering;
break;
}
case nir_intrinsic_shader_clock: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (nir_intrinsic_memory_scope(instr) == SCOPE_SUBGROUP &&
ctx->options->gfx_level >= GFX10_3) {
/* "((size - 1) << 11) | register" (SHADER_CYCLES is encoded as register 29) */
break;
}
case nir_intrinsic_load_vertex_id_zero_base: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), get_arg(ctx, ctx->args->vertex_id));
break;
}
case nir_intrinsic_load_first_vertex: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), get_arg(ctx, ctx->args->base_vertex));
break;
}
case nir_intrinsic_load_base_instance: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), get_arg(ctx, ctx->args->start_instance));
break;
}
case nir_intrinsic_load_instance_id: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), get_arg(ctx, ctx->args->instance_id));
break;
}
case nir_intrinsic_load_draw_id: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.copy(Definition(dst), get_arg(ctx, ctx->args->draw_id));
break;
}
case nir_intrinsic_load_invocation_id: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
if (ctx->options->gfx_level >= GFX10)
break;
}
case nir_intrinsic_load_primitive_id: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
switch (ctx->shader->info.stage) {
case MESA_SHADER_GEOMETRY:
break;
}
case nir_intrinsic_load_gs_wave_id_amd: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
if (ctx->args->merged_wave_info.used)
bld.pseudo(aco_opcode::p_extract, Definition(dst), bld.def(s1, scc),
get_arg(ctx, ctx->args->merged_wave_info), Operand::c32(2u), Operand::c32(8u),
}
case nir_intrinsic_is_subgroup_invocation_lt_amd: {
Temp src = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
- bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), lanecount_to_mask(ctx, src));
+ bld.copy(Definition(get_ssa_temp(ctx, &instr->def)), lanecount_to_mask(ctx, src));
break;
}
case nir_intrinsic_gds_atomic_add_amd: {
break;
}
case nir_intrinsic_load_sbt_base_amd: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp addr = get_arg(ctx, ctx->args->rt.sbt_descriptors);
assert(addr.regClass() == s2);
bld.copy(Definition(dst), Operand(addr));
}
case nir_intrinsic_bvh64_intersect_ray_amd: visit_bvh64_intersect_ray_amd(ctx, instr); break;
case nir_intrinsic_load_rt_dynamic_callable_stack_base_amd:
- bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
+ bld.copy(Definition(get_ssa_temp(ctx, &instr->def)),
get_arg(ctx, ctx->args->rt.dynamic_callable_stack_base));
break;
case nir_intrinsic_load_resume_shader_address_amd: {
- bld.pseudo(aco_opcode::p_resume_shader_address,
- Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc),
- Operand::c32(nir_intrinsic_call_idx(instr)));
+ bld.pseudo(aco_opcode::p_resume_shader_address, Definition(get_ssa_temp(ctx, &instr->def)),
+ bld.def(s1, scc), Operand::c32(nir_intrinsic_call_idx(instr)));
break;
}
case nir_intrinsic_overwrite_vs_arguments_amd: {
case nir_intrinsic_load_scalar_arg_amd:
case nir_intrinsic_load_vector_arg_amd: {
assert(nir_intrinsic_base(instr) < ctx->args->arg_count);
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp src = ctx->arg_temps[nir_intrinsic_base(instr)];
assert(src.id());
assert(src.type() == (instr->intrinsic == nir_intrinsic_load_scalar_arg_amd ? RegType::sgpr
break;
}
case nir_intrinsic_ordered_xfb_counter_add_amd: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp ordered_id = get_ssa_temp(ctx, instr->src[0].ssa);
Temp counter = get_ssa_temp(ctx, instr->src[1].ssa);
break;
}
case nir_intrinsic_strict_wqm_coord_amd: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
Temp tmp = bld.tmp(RegClass::get(RegType::vgpr, dst.bytes()));
unsigned begin_size = nir_intrinsic_base(instr);
break;
}
case nir_intrinsic_load_lds_ngg_scratch_base_amd: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.sop1(aco_opcode::p_load_symbol, Definition(dst),
Operand::c32(aco_symbol_lds_ngg_scratch_base));
break;
}
case nir_intrinsic_load_lds_ngg_gs_out_vertex_base_amd: {
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
bld.sop1(aco_opcode::p_load_symbol, Definition(dst),
Operand::c32(aco_symbol_lds_ngg_gs_out_vertex_base));
break;
}
/* Build tex instruction */
- unsigned dmask = nir_def_components_read(&instr->dest.ssa) & 0xf;
+ unsigned dmask = nir_def_components_read(&instr->def) & 0xf;
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
dmask = u_bit_consecutive(0, util_last_bit(dmask));
if (instr->is_sparse)
dmask = MAX2(dmask, 1) | 0x10;
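/* Presumably bit 4 of dmask requests the 32-bit sparse residency word,
 * while MAX2(dmask, 1) keeps at least one data channel even when no
 * result component is read.
 */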
- bool d16 = instr->dest.ssa.bit_size == 16;
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ bool d16 = instr->def.bit_size == 16;
+ Temp dst = get_ssa_temp(ctx, &instr->def);
Temp tmp_dst = dst;
/* gather4 selects the component by dmask and always returns vec4 (vec5 if sparse) */
if (instr->op == nir_texop_tg4) {
- assert(instr->dest.ssa.num_components == (4 + instr->is_sparse));
+ assert(instr->def.num_components == (4 + instr->is_sparse));
if (instr->is_shadow)
dmask = 1;
else
tmp_dst = bld.tmp(instr->is_sparse ? v5 : (d16 ? v2 : v4));
} else if (instr->op == nir_texop_fragment_mask_fetch_amd) {
tmp_dst = bld.tmp(v1);
- } else if (util_bitcount(dmask) != instr->dest.ssa.num_components ||
- dst.type() == RegType::sgpr) {
- unsigned bytes = util_bitcount(dmask) * instr->dest.ssa.bit_size / 8;
+ } else if (util_bitcount(dmask) != instr->def.num_components || dst.type() == RegType::sgpr) {
+ unsigned bytes = util_bitcount(dmask) * instr->def.bit_size / 8;
tmp_dst = bld.tmp(RegClass::get(RegType::vgpr, bytes));
}
mubuf->operands[3] = emit_tfe_init(bld, tmp_dst);
ctx->block->instructions.emplace_back(std::move(mubuf));
- expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
+ expand_vector(ctx, tmp_dst, dst, instr->def.num_components, dmask);
return;
}
bld.copy(bld.def(v1), Operand::c32(0x76543210)), tmp_dst, is_not_null);
}
} else {
- expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
+ expand_vector(ctx, tmp_dst, dst, instr->def.num_components, dmask);
}
return;
}
val[3]);
}
unsigned mask = instr->op == nir_texop_tg4 ? (instr->is_sparse ? 0x1F : 0xF) : dmask;
- expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, mask);
+ expand_vector(ctx, tmp_dst, dst, instr->def.num_components, mask);
}
Operand
visit_phi(isel_context* ctx, nir_phi_instr* instr)
{
aco_ptr<Pseudo_instruction> phi;
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == ctx->program->lane_mask);
+ Temp dst = get_ssa_temp(ctx, &instr->def);
+ assert(instr->def.bit_size != 1 || dst.regClass() == ctx->program->lane_mask);
- bool logical = !dst.is_linear() || instr->dest.ssa.divergent;
+ bool logical = !dst.is_linear() || instr->def.divergent;
logical |= (ctx->block->kind & block_kind_merge) != 0;
aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
return false;
nir_phi_instr* phi = nir_instr_as_phi(src->parent_instr);
- if (!only_used_by_cross_lane_instrs(&phi->dest.ssa, false))
+ if (!only_used_by_cross_lane_instrs(&phi->def, false))
return false;
continue;
if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
break;
if (intrinsic->intrinsic == nir_intrinsic_strict_wqm_coord_amd) {
- regclasses[intrinsic->dest.ssa.index] =
- RegClass::get(RegType::vgpr, intrinsic->dest.ssa.num_components * 4 +
+ regclasses[intrinsic->def.index] =
+ RegClass::get(RegType::vgpr, intrinsic->def.num_components * 4 +
nir_intrinsic_base(intrinsic))
.as_linear();
break;
* it is beneficial to use a VGPR destination. This is because it allows
* putting the s_waitcnt further down, which decreases latency.
*/
- if (only_used_by_cross_lane_instrs(&intrinsic->dest.ssa)) {
+ if (only_used_by_cross_lane_instrs(&intrinsic->def)) {
type = RegType::vgpr;
break;
}
case nir_intrinsic_load_ubo:
case nir_intrinsic_load_ssbo:
case nir_intrinsic_load_global_amd:
- type = intrinsic->dest.ssa.divergent ? RegType::vgpr : RegType::sgpr;
+ type = intrinsic->def.divergent ? RegType::vgpr : RegType::sgpr;
break;
case nir_intrinsic_load_view_index:
type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
}
break;
}
- RegClass rc = get_reg_class(ctx, type, intrinsic->dest.ssa.num_components,
- intrinsic->dest.ssa.bit_size);
- regclasses[intrinsic->dest.ssa.index] = rc;
+ RegClass rc =
+ get_reg_class(ctx, type, intrinsic->def.num_components, intrinsic->def.bit_size);
+ regclasses[intrinsic->def.index] = rc;
break;
}
case nir_instr_type_tex: {
nir_tex_instr* tex = nir_instr_as_tex(instr);
- RegType type = tex->dest.ssa.divergent ? RegType::vgpr : RegType::sgpr;
+ RegType type = tex->def.divergent ? RegType::vgpr : RegType::sgpr;
if (tex->op == nir_texop_texture_samples) {
- assert(!tex->dest.ssa.divergent);
+ assert(!tex->def.divergent);
}
- RegClass rc =
- get_reg_class(ctx, type, tex->dest.ssa.num_components, tex->dest.ssa.bit_size);
- regclasses[tex->dest.ssa.index] = rc;
+ RegClass rc = get_reg_class(ctx, type, tex->def.num_components, tex->def.bit_size);
+ regclasses[tex->def.index] = rc;
break;
}
case nir_instr_type_ssa_undef: {
case nir_instr_type_phi: {
nir_phi_instr* phi = nir_instr_as_phi(instr);
RegType type = RegType::sgpr;
- unsigned num_components = phi->dest.ssa.num_components;
- assert((phi->dest.ssa.bit_size != 1 || num_components == 1) &&
+ unsigned num_components = phi->def.num_components;
+ assert((phi->def.bit_size != 1 || num_components == 1) &&
"Multiple components not supported on boolean phis.");
- if (phi->dest.ssa.divergent) {
+ if (phi->def.divergent) {
type = RegType::vgpr;
} else {
nir_foreach_phi_src (src, phi) {
}
}
- RegClass rc = get_reg_class(ctx, type, num_components, phi->dest.ssa.bit_size);
- if (rc != regclasses[phi->dest.ssa.index])
+ RegClass rc = get_reg_class(ctx, type, num_components, phi->def.bit_size);
+ if (rc != regclasses[phi->def.index])
done = false;
- regclasses[phi->dest.ssa.index] = rc;
+ regclasses[phi->def.index] = rc;
break;
}
default: break;
assert((!args->tfe || !args->d16) && "unsupported");
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
- unsigned mask = nir_def_components_read(&instr->dest.ssa);
+ unsigned mask = nir_def_components_read(&instr->def);
/* Buffers don't support A16. */
if (args->a16)
return ac_build_buffer_load_format(&ctx->ac, args->resource, args->coords[0], ctx->ac.i32_0,
util_last_bit(mask), 0, true,
- instr->dest.ssa.bit_size == 16,
+ instr->def.bit_size == 16,
args->tfe);
}
/* Load constant values from user SGPRs when possible, otherwise
* fall back to the default path that loads directly from memory.
*/
- if (LLVMIsConstant(src0) && instr->dest.ssa.bit_size >= 32) {
- unsigned count = instr->dest.ssa.num_components;
+ if (LLVMIsConstant(src0) && instr->def.bit_size >= 32) {
+ unsigned count = instr->def.num_components;
unsigned offset = index;
- if (instr->dest.ssa.bit_size == 64)
+ if (instr->def.bit_size == 64)
count *= 2;
offset += LLVMConstIntGetZExtValue(src0);
for (unsigned i = 0; i < count; i++)
push_constants[i] = ac_get_arg(&ctx->ac, ctx->args->inline_push_consts[arg_index++]);
LLVMValueRef res = ac_build_gather_values(&ctx->ac, push_constants, count);
- return instr->dest.ssa.bit_size == 64
- ? LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "")
+ return instr->def.bit_size == 64
+ ? LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->def), "")
: res;
}
}
struct ac_llvm_pointer pc = ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->push_constants);
ptr = LLVMBuildGEP2(ctx->ac.builder, pc.t, pc.v, &addr, 1, "");
- if (instr->dest.ssa.bit_size == 8) {
- unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
+ if (instr->def.bit_size == 8) {
+ unsigned load_dwords = instr->def.num_components > 1 ? 2 : 1;
LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i8, 4 * load_dwords);
ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
res = LLVMBuildTrunc(
ctx->ac.builder, res,
- LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), "");
- if (instr->dest.ssa.num_components > 1)
+ LLVMIntTypeInContext(ctx->ac.context, instr->def.num_components * 8), "");
+ if (instr->def.num_components > 1)
res = LLVMBuildBitCast(ctx->ac.builder, res,
- LLVMVectorType(ctx->ac.i8, instr->dest.ssa.num_components), "");
+ LLVMVectorType(ctx->ac.i8, instr->def.num_components), "");
return res;
- } else if (instr->dest.ssa.bit_size == 16) {
- unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
+ } else if (instr->def.bit_size == 16) {
+ unsigned load_dwords = instr->def.num_components / 2 + 1;
LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i16, 2 * load_dwords);
ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
ctx->ac.i32_0, ctx->ac.i32_1,
LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
LLVMConstInt(ctx->ac.i32, 4, false)};
- LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
- LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
+ LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->def.num_components);
+ LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->def.num_components);
LLVMValueRef shuffle_aligned =
LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
LLVMValueRef shuffle_unaligned =
LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
- return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
+ return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->def), "");
}
- LLVMTypeRef ptr_type = get_def_type(ctx, &instr->dest.ssa);
+ LLVMTypeRef ptr_type = get_def_type(ctx, &instr->def);
ptr = ac_cast_ptr(&ctx->ac, ptr, ptr_type);
return LLVMBuildLoad2(ctx->ac.builder, ptr_type, ptr, "");
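To make the small-bit-size paths above concrete, the numbers fall straight out of the code (a sketch; `cond` is computed in lines elided here):

/* 16-bit example: instr->def has num_components = 3, bit_size = 16.
 *   load_dwords = 3/2 + 1 = 2        -> load a <4 x i16> vector
 *   aligned shuffle mask:   {0, 1, 2}
 *   unaligned shuffle mask: {1, 2, 3}
 * cond selects the unaligned shuffle when the byte offset lands in the
 * upper half of a dword. */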
struct waterfall_context wctx;
LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
- int elem_size_bytes = instr->dest.ssa.bit_size / 8;
+ int elem_size_bytes = instr->def.bit_size / 8;
int num_components = instr->num_components;
enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
ctx->abi->load_ssbo(ctx->abi, rsrc_base, false, false) : rsrc_base;
LLVMValueRef vindex = ctx->ac.i32_0;
- LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
+ LLVMTypeRef def_type = get_def_type(ctx, &instr->def);
LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;
LLVMValueRef results[4];
static LLVMValueRef visit_load_global(struct ac_nir_context *ctx,
nir_intrinsic_instr *instr)
{
- LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
+ LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
LLVMValueRef val;
LLVMValueRef addr = get_global_address(ctx, instr, result_type);
LLVMValueRef offset = get_src(ctx, instr->src[1]);
int num_components = instr->num_components;
- assert(instr->dest.ssa.bit_size >= 32 && instr->dest.ssa.bit_size % 32 == 0);
+ assert(instr->def.bit_size >= 32 && instr->def.bit_size % 32 == 0);
if (ctx->abi->load_ubo)
rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);
/* Convert to a 32-bit load. */
- if (instr->dest.ssa.bit_size == 64)
+ if (instr->def.bit_size == 64)
num_components *= 2;
ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset, NULL,
ctx->ac.f32, 0, true, true);
- ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
+ ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
return exit_waterfall(ctx, &wctx, ret);
}
args.tfe = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
if (dim == GLSL_SAMPLER_DIM_BUF) {
- unsigned num_channels = util_last_bit(nir_def_components_read(&instr->dest.ssa));
- if (instr->dest.ssa.bit_size == 64)
+ unsigned num_channels = util_last_bit(nir_def_components_read(&instr->def));
+ if (instr->def.bit_size == 64)
num_channels = num_channels < 4 ? 2 : 4;
LLVMValueRef rsrc, vindex;
bool can_speculate = access & ACCESS_CAN_REORDER;
res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0, num_channels,
args.access, can_speculate,
- instr->dest.ssa.bit_size == 16,
+ instr->def.bit_size == 16,
args.tfe);
res = ac_build_expand(&ctx->ac, res, num_channels, args.tfe ? 5 : 4);
- res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
+ res = ac_trim_vector(&ctx->ac, res, instr->def.num_components);
res = ac_to_integer(&ctx->ac, res);
} else if (instr->intrinsic == nir_intrinsic_bindless_image_fragment_mask_load_amd) {
assert(ctx->ac.gfx_level < GFX11);
args.dmask = 15;
args.attributes = access & ACCESS_CAN_REORDER ? AC_ATTR_INVARIANT_LOAD : 0;
- args.d16 = instr->dest.ssa.bit_size == 16;
+ args.d16 = instr->def.bit_size == 16;
res = ac_build_image_opcode(&ctx->ac, &args);
}
- if (instr->dest.ssa.bit_size == 64) {
+ if (instr->def.bit_size == 64) {
LLVMValueRef code = NULL;
if (args.tfe) {
code = ac_llvm_extract_elem(&ctx->ac, res, 4);
params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
ctx->ac.i32_0, ""); /* vindex */
params[param_count++] = ctx->ac.i32_0; /* voffset */
- if (cmpswap && instr->dest.ssa.bit_size == 64) {
+ if (cmpswap && instr->def.bit_size == 64) {
result = emit_ssbo_comp_swap_64(ctx, params[2], params[3], params[1], params[0], true);
} else {
LLVMTypeRef data_type = LLVMTypeOf(params[0]);
LLVMValueRef values[16], derived_ptr, index, ret;
unsigned const_off = nir_intrinsic_base(instr);
- LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
+ LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], const_off);
for (int chan = 0; chan < instr->num_components; chan++) {
ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
- return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
+ return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
}
static void visit_store_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
static LLVMValueRef visit_load_shared2_amd(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr)
{
- LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
+ LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], 0);
LLVMValueRef values[2];
}
LLVMValueRef ret = ac_build_gather_values(&ctx->ac, values, 2);
- return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
+ return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
}
static void visit_store_shared2_amd(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
bool is_output)
{
LLVMValueRef values[8];
- LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);
+ LLVMTypeRef dest_type = get_def_type(ctx, &instr->def);
LLVMTypeRef component_type;
unsigned base = nir_intrinsic_base(instr);
unsigned component = nir_intrinsic_component(instr);
- unsigned count = instr->dest.ssa.num_components;
+ unsigned count = instr->def.num_components;
nir_src *vertex_index_src = nir_get_io_arrayed_index_src(instr);
LLVMValueRef vertex_index = vertex_index_src ? get_src(ctx, *vertex_index_src) : NULL;
nir_src offset = *nir_get_io_offset_src(instr);
LLVMValueRef indir_index = NULL;
- switch (instr->dest.ssa.bit_size) {
+ switch (instr->def.bit_size) {
case 16:
case 32:
break;
vertex_index, indir_index,
base, component,
count, !is_output);
- if (instr->dest.ssa.bit_size == 16) {
+ if (instr->def.bit_size == 16) {
result = ac_to_integer(&ctx->ac, result);
result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
}
values[chan] = ac_build_fs_interp_mov(&ctx->ac, vertex_id, llvm_chan, attr_number,
ac_get_arg(&ctx->ac, ctx->args->prim_mask));
values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
- if (instr->dest.ssa.bit_size == 16 &&
+ if (instr->def.bit_size == 16 &&
nir_intrinsic_io_semantics(instr).high_16bits)
values[chan] = LLVMBuildLShr(ctx->ac.builder, values[chan], LLVMConstInt(ctx->ac.i32, 16, 0), "");
values[chan] =
LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
- instr->dest.ssa.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
+ instr->def.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
}
LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, count);
switch (instr->intrinsic) {
case nir_intrinsic_ballot:
result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
- if (instr->dest.ssa.bit_size > ctx->ac.wave_size) {
- LLVMTypeRef dest_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
+ if (instr->def.bit_size > ctx->ac.wave_size) {
+ LLVMTypeRef dest_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
result = LLVMBuildZExt(ctx->ac.builder, result, dest_type, "");
}
break;
values[i] = ctx->args->workgroup_ids[i].used
? ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i])
: ctx->ac.i32_0;
- if (instr->dest.ssa.bit_size == 64)
+ if (instr->def.bit_size == 64)
values[i] = LLVMBuildZExt(ctx->ac.builder, values[i], ctx->ac.i64, "");
}
result = ac_build_load_invariant(&ctx->ac,
ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->num_work_groups), ctx->ac.i32_0);
}
- if (instr->dest.ssa.bit_size == 64)
+ if (instr->def.bit_size == 64)
result = LLVMBuildZExt(ctx->ac.builder, result, LLVMVectorType(ctx->ac.i64, 3), "");
break;
case nir_intrinsic_load_local_invocation_index:
unsigned index = nir_intrinsic_base(instr);
unsigned component = nir_intrinsic_component(instr);
result = load_interpolated_input(ctx, interp_param, index, component,
- instr->dest.ssa.num_components, instr->dest.ssa.bit_size,
+ instr->def.num_components, instr->def.bit_size,
nir_intrinsic_io_semantics(instr).high_16bits);
break;
}
case nir_intrinsic_load_scratch: {
LLVMValueRef offset = get_src(ctx, instr->src[0]);
LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
- LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
- LLVMTypeRef vec_type = instr->dest.ssa.num_components == 1
+ LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
+ LLVMTypeRef vec_type = instr->def.num_components == 1
? comp_type
- : LLVMVectorType(comp_type, instr->dest.ssa.num_components);
+ : LLVMVectorType(comp_type, instr->def.num_components);
result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
break;
}
offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");
LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data, offset);
- LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
- LLVMTypeRef vec_type = instr->dest.ssa.num_components == 1
+ LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
+ LLVMTypeRef vec_type = instr->def.num_components == 1
? comp_type
- : LLVMVectorType(comp_type, instr->dest.ssa.num_components);
+ : LLVMVectorType(comp_type, instr->def.num_components);
result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
break;
}
LLVMValueRef addr_voffset = get_src(ctx, instr->src[src_base + 1]);
LLVMValueRef addr_soffset = get_src(ctx, instr->src[src_base + 2]);
LLVMValueRef vidx = idxen ? get_src(ctx, instr->src[src_base + 3]) : NULL;
- unsigned num_components = instr->dest.ssa.num_components;
+ unsigned num_components = instr->def.num_components;
unsigned const_offset = nir_intrinsic_base(instr);
bool reorder = nir_intrinsic_can_reorder(instr);
enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
LLVMConstInt(ctx->ac.i32, const_offset, 0), "");
if (instr->intrinsic == nir_intrinsic_load_buffer_amd && uses_format) {
- assert(instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 16 || instr->def.bit_size == 32);
result = ac_build_buffer_load_format(&ctx->ac, descriptor, vidx, voffset, num_components,
access, reorder,
- instr->dest.ssa.bit_size == 16, false);
+ instr->def.bit_size == 16, false);
result = ac_to_integer(&ctx->ac, result);
} else if (instr->intrinsic == nir_intrinsic_store_buffer_amd && uses_format) {
assert(instr->src[0].ssa->bit_size == 16 || instr->src[0].ssa->bit_size == 32);
* Work around this by using i32 and casting to the correct type later.
*/
const unsigned fetch_num_components =
- num_components * MAX2(32, instr->dest.ssa.bit_size) / 32;
+ num_components * MAX2(32, instr->def.bit_size) / 32;
LLVMTypeRef channel_type =
- LLVMIntTypeInContext(ctx->ac.context, MIN2(32, instr->dest.ssa.bit_size));
+ LLVMIntTypeInContext(ctx->ac.context, MIN2(32, instr->def.bit_size));
if (instr->intrinsic == nir_intrinsic_load_buffer_amd) {
result = ac_build_buffer_load(&ctx->ac, descriptor, fetch_num_components, vidx, voffset,
result = ac_trim_vector(&ctx->ac, result, fetch_num_components);
/* Cast to larger than 32-bit sized components if needed. */
- if (instr->dest.ssa.bit_size > 32) {
+ if (instr->def.bit_size > 32) {
LLVMTypeRef cast_channel_type =
- LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
+ LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
LLVMTypeRef cast_type =
num_components == 1 ? cast_channel_type :
LLVMVectorType(cast_channel_type, num_components);
arg.used = true;
result = ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, arg));
if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(result)) != 32)
- result = LLVMBuildBitCast(ctx->ac.builder, result, get_def_type(ctx, &instr->dest.ssa), "");
+ result = LLVMBuildBitCast(ctx->ac.builder, result, get_def_type(ctx, &instr->def), "");
break;
}
case nir_intrinsic_load_smem_amd: {
bool is_addr_32bit = nir_src_bit_size(instr->src[0]) == 32;
int addr_space = is_addr_32bit ? AC_ADDR_SPACE_CONST_32BIT : AC_ADDR_SPACE_CONST;
- LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
+ LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
LLVMTypeRef byte_ptr_type = LLVMPointerType(ctx->ac.i8, addr_space);
LLVMValueRef addr = LLVMBuildIntToPtr(ctx->ac.builder, base, byte_ptr_type, "");
return false;
}
if (result) {
- ctx->ssa_defs[instr->dest.ssa.index] = result;
+ ctx->ssa_defs[instr->def.index] = result;
}
return true;
}
args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler, dword0, ctx->ac.i32_0, "");
}
- args.d16 = instr->dest.ssa.bit_size == 16;
+ args.d16 = instr->def.bit_size == 16;
args.tfe = instr->is_sparse;
result = build_tex_intrinsic(ctx, instr, &args);
LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, ""),
LLVMConstInt(ctx->ac.i32, 0x76543210, false), "");
} else if (nir_tex_instr_result_size(instr) != 4)
- result = ac_trim_vector(&ctx->ac, result, instr->dest.ssa.num_components);
+ result = ac_trim_vector(&ctx->ac, result, instr->def.num_components);
if (instr->is_sparse)
result = ac_build_concat(&ctx->ac, result, code);
result = exit_waterfall(ctx, wctx + i, result);
}
- ctx->ssa_defs[instr->dest.ssa.index] = result;
+ ctx->ssa_defs[instr->def.index] = result;
}
}
static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
- LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
+ LLVMTypeRef type = get_def_type(ctx, &instr->def);
LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");
- ctx->ssa_defs[instr->dest.ssa.index] = result;
+ ctx->ssa_defs[instr->def.index] = result;
_mesa_hash_table_insert(ctx->phis, instr, result);
}
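The phi is created with no incoming values and stashed in ctx->phis because its sources may live in blocks that have not been translated yet; they are filled in once every block exists. A rough sketch of the deferred fixup (get_block is an assumed helper, not shown in this diff):

hash_table_foreach(ctx->phis, entry) {
   nir_phi_instr *phi = (nir_phi_instr *)entry->key;
   LLVMValueRef llvm_phi = (LLVMValueRef)entry->data;

   nir_foreach_phi_src (src, phi) {
      LLVMValueRef val = get_src(ctx, src->src);
      LLVMBasicBlockRef pred = get_block(ctx, src->pred); /* assumed */
      LLVMAddIncoming(llvm_phi, &val, &pred, 1);
   }
}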
nir_def *coord = nir_replicate(&b, tmp, 4);
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), outval,
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32), outval,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
return b.shader;
nir_def *img_coord = nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1),
is_3d ? nir_channel(&b, coord, 2) : nir_undef(&b, 1, 32), nir_undef(&b, 1, 32));
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), outval,
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_undef(&b, 1, 32), outval,
nir_imm_int(&b, 0), .image_dim = dim);
return b.shader;
nir_def *coord = nir_replicate(&b, local_pos, 4);
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32),
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32),
nir_channel(&b, outval, chan), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
}
is_3d ? nir_channel(&b, dst_coord, 2) : nir_undef(&b, 1, 32), nir_undef(&b, 1, 32));
for (uint32_t i = 0; i < samples; i++) {
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_imm_int(&b, i),
- tex_vals[i], nir_imm_int(&b, 0), .image_dim = dim);
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_imm_int(&b, i), tex_vals[i],
+ nir_imm_int(&b, 0), .image_dim = dim);
}
return b.shader;
nir_def *dst_coord = nir_replicate(&b, dst_local_pos, 4);
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, dst_coord, nir_undef(&b, 1, 32),
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, dst_coord, nir_undef(&b, 1, 32),
nir_channel(&b, outval, 0), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
}
global_id = nir_vec(&b, comps, 4);
for (uint32_t i = 0; i < samples; i++) {
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_imm_int(&b, i),
- clear_val, nir_imm_int(&b, 0), .image_dim = dim);
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, global_id, nir_imm_int(&b, i), clear_val,
+ nir_imm_int(&b, 0), .image_dim = dim);
}
return b.shader;
nir_def *coord = nir_replicate(&b, local_pos, 4);
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32),
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32),
nir_channel(&b, clear_val, chan), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
}
/* Store the clear color values. */
nir_def *sample_id = is_msaa ? nir_imm_int(&b, 0) : nir_undef(&b, 1, 32);
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, sample_id, data, nir_imm_int(&b, 0),
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, sample_id, data, nir_imm_int(&b, 0),
.image_dim = dim, .image_array = true);
return b.shader;
output_dcc->data.descriptor_set = 0;
output_dcc->data.binding = 1;
- nir_def *input_dcc_ref = &nir_build_deref_var(&b, input_dcc)->dest.ssa;
- nir_def *output_dcc_ref = &nir_build_deref_var(&b, output_dcc)->dest.ssa;
+ nir_def *input_dcc_ref = &nir_build_deref_var(&b, input_dcc)->def;
+ nir_def *output_dcc_ref = &nir_build_deref_var(&b, output_dcc)->def;
nir_def *coord = get_global_ids(&b, 2);
nir_def *zero = nir_imm_int(&b, 0);
nir_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
- nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, global_id,
+ nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->def, global_id,
nir_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
/* We need a SCOPE_DEVICE memory_scope because ACO will avoid
nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_undef(&b, 1, 32), data,
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, global_id, nir_undef(&b, 1, 32), data,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
return b.shader;
}
nir_push_if(&b, is_3d);
{
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_3d)->dest.ssa, img_coord, nir_undef(&b, 1, 32),
- outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_3D);
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_3d)->def, img_coord, nir_undef(&b, 1, 32), outval,
+ nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_3D);
}
nir_push_else(&b, NULL);
{
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_2d)->dest.ssa, img_coord, nir_undef(&b, 1, 32),
- outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true);
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_2d)->def, img_coord, nir_undef(&b, 1, 32), outval,
+ nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true);
}
nir_pop_if(&b, NULL);
return b.shader;
nir_def *img_coord = nir_vec4(&b, nir_channel(&b, global_id, 0), nir_channel(&b, global_id, 1), nir_undef(&b, 1, 32),
nir_undef(&b, 1, 32));
- nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, img_coord,
+ nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->def, img_coord,
nir_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
/* We need a SCOPE_DEVICE memory_scope because ACO will avoid
nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), data,
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_undef(&b, 1, 32), data,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
return b.shader;
}
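All of these meta shaders follow the same nir_builder recipe; a stripped-down sketch of the shape (names and types illustrative, not taken from this diff):

const struct glsl_type *img_type =
   glsl_image_type(GLSL_SAMPLER_DIM_2D, false, GLSL_TYPE_FLOAT);
nir_builder b =
   nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_sketch");
nir_variable *output_img =
   nir_variable_create(b.shader, nir_var_image, img_type, "out_img");
/* ... compute coordinates, then emit image loads/stores as above ... */
return b.shader;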
nir_def *outval = nir_build_tex_deref_instr(&b, nir_texop_fragment_fetch_amd, nir_build_deref_var(&b, input_img),
NULL, ARRAY_SIZE(frag_fetch_srcs), frag_fetch_srcs);
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, dst_coord, sample_id, outval,
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, dst_coord, sample_id, outval,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_MS);
radv_break_on_count(&b, counter, max_sample_index);
output_img->data.access = ACCESS_NON_READABLE;
nir_deref_instr *input_img_deref = nir_build_deref_var(&b, input_img);
- nir_def *output_img_deref = &nir_build_deref_var(&b, output_img)->dest.ssa;
+ nir_def *output_img_deref = &nir_build_deref_var(&b, output_img)->def;
nir_def *tex_coord = get_global_ids(&b, 3);
nir_def *img_coord = nir_vec4(&b, nir_channel(&b, dst_coord, 0), nir_channel(&b, dst_coord, 1), nir_undef(&b, 1, 32),
nir_undef(&b, 1, 32));
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), outval,
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_undef(&b, 1, 32), outval,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
return b.shader;
}
nir_def *coord = nir_vec4(&b, nir_channel(&b, img_coord, 0), nir_channel(&b, img_coord, 1),
nir_channel(&b, img_coord, 2), nir_undef(&b, 1, 32));
- nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), outval,
+ nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32), outval,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true);
return b.shader;
}
if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
assert(stride == 16);
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
+ nir_def_rewrite_uses(&intrin->def, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
} else {
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_vec3(b, set_ptr, binding_ptr, nir_imm_int(b, stride)));
+ nir_def_rewrite_uses(&intrin->def, nir_vec3(b, set_ptr, binding_ptr, nir_imm_int(b, stride)));
}
nir_instr_remove(&intrin->instr);
}
binding_ptr = nir_iadd_nuw(b, binding_ptr, index);
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
+ nir_def_rewrite_uses(&intrin->def, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
} else {
assert(desc_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
binding_ptr = nir_iadd_nuw(b, binding_ptr, index);
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, binding_ptr, 1));
+ nir_def_rewrite_uses(&intrin->def, nir_vector_insert_imm(b, intrin->src[0].ssa, binding_ptr, 1));
}
nir_instr_remove(&intrin->instr);
}
nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa)));
nir_def *desc = nir_build_load_global(b, 1, 64, addr, .access = ACCESS_NON_WRITEABLE);
- nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+ nir_def_rewrite_uses(&intrin->def, desc);
} else {
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, nir_imm_int(b, 0), 2));
+ nir_def_rewrite_uses(&intrin->def, nir_vector_insert_imm(b, intrin->src[0].ssa, nir_imm_int(b, 0), 2));
}
nir_instr_remove(&intrin->instr);
}
size = nir_channel(b, desc, 2);
}
- nir_def_rewrite_uses(&intrin->dest.ssa, size);
+ nir_def_rewrite_uses(&intrin->def, size);
nir_instr_remove(&intrin->instr);
}
nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM, NULL, !is_load);
if (intrin->intrinsic == nir_intrinsic_image_deref_descriptor_amd) {
- nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+ nir_def_rewrite_uses(&intrin->def, desc);
nir_instr_remove(&intrin->instr);
} else {
nir_rewrite_image_intrinsic(intrin, desc, true);
}
if (tex->op == nir_texop_descriptor_amd) {
- nir_def_rewrite_uses(&tex->dest.ssa, image);
+ nir_def_rewrite_uses(&tex->def, image);
nir_instr_remove(&tex->instr);
return;
}
return false;
if (replacement)
- nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+ nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(instr);
nir_instr_free(instr);
}
}
- nir_def_rewrite_uses(&intrin->dest.ssa, new_dest);
+ nir_def_rewrite_uses(&intrin->def, new_dest);
nir_instr_remove(&intrin->instr);
return true;
def = sample_coverage;
}
- nir_def_rewrite_uses(&intrin->dest.ssa, def);
+ nir_def_rewrite_uses(&intrin->def, def);
nir_instr_remove(instr);
progress = true;
if (!key->adjust_frag_coord_z)
continue;
- if (!(nir_def_components_read(&intrin->dest.ssa) & (1 << 2)))
+ if (!(nir_def_components_read(&intrin->def) & (1 << 2)))
continue;
- nir_def *frag_z = nir_channel(&b, &intrin->dest.ssa, 2);
+ nir_def *frag_z = nir_channel(&b, &intrin->def, 2);
/* adjusted_frag_z = fddx_fine(frag_z) * 0.0625 + frag_z */
nir_def *adjusted_frag_z = nir_fddx_fine(&b, frag_z);
nir_def *cond = nir_ieq_imm(&b, x_rate, 1);
frag_z = nir_bcsel(&b, cond, adjusted_frag_z, frag_z);
- nir_def *new_dest = nir_vector_insert_imm(&b, &intrin->dest.ssa, frag_z, 2);
- nir_def_rewrite_uses_after(&intrin->dest.ssa, new_dest, new_dest->parent_instr);
+ nir_def *new_dest = nir_vector_insert_imm(&b, &intrin->def, frag_z, 2);
+ nir_def_rewrite_uses_after(&intrin->def, new_dest, new_dest->parent_instr);
progress = true;
break;
}
}
- nir_def_rewrite_uses(&intrin->dest.ssa, new_dest);
+ nir_def_rewrite_uses(&intrin->def, new_dest);
nir_instr_remove(instr);
progress = true;
continue;
}
- nir_def_rewrite_uses(&intrin->dest.ssa, def);
+ nir_def_rewrite_uses(&intrin->def, def);
nir_instr_remove(instr);
progress = true;
}
if (new_dest)
- nir_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
+ nir_def_rewrite_uses(&intrinsic->def, new_dest);
nir_instr_remove(instr);
nir_instr_free(instr);
layer->data.per_primitive = per_primitive;
b.cursor = nir_before_instr(instr);
nir_def *def = nir_load_var(&b, layer);
- nir_def_rewrite_uses(&load->dest.ssa, def);
+ nir_def_rewrite_uses(&load->def, def);
/* Update inputs_read to reflect that the pass added a new input. */
nir->info.inputs_read |= VARYING_BIT_LAYER;
b.cursor = nir_before_instr(instr);
- nir_def_rewrite_uses(&intr->dest.ssa, nir_imm_zero(&b, 1, 32));
+ nir_def_rewrite_uses(&intr->def, nir_imm_zero(&b, 1, 32));
progress = true;
break;
}
const unsigned base_offset = nir_src_as_uint(*offset_src);
const unsigned driver_location = base + base_offset - VERT_ATTRIB_GENERIC0;
const unsigned component = nir_intrinsic_component(intrin);
- const unsigned bit_size = intrin->dest.ssa.bit_size;
- const unsigned num_components = intrin->dest.ssa.num_components;
+ const unsigned bit_size = intrin->def.bit_size;
+ const unsigned num_components = intrin->def.num_components;
/* 64-bit inputs: they occupy twice as many 32-bit components.
* 16-bit inputs: they occupy a 32-bit component (not packed).
const unsigned base = nir_intrinsic_base(intrin);
const unsigned base_offset = nir_src_as_uint(*offset_src);
const unsigned location = base + base_offset - VERT_ATTRIB_GENERIC0;
- const unsigned bit_size = intrin->dest.ssa.bit_size;
- const unsigned dest_num_components = intrin->dest.ssa.num_components;
+ const unsigned bit_size = intrin->def.bit_size;
+ const unsigned dest_num_components = intrin->def.num_components;
/* Convert the component offset to bit_size units.
* (Intrinsic component offset is in 32-bit units.)
/* Bitmask of components in bit_size units
* of the current input load that are actually used.
*/
- const unsigned dest_use_mask = nir_def_components_read(&intrin->dest.ssa) << component;
+ const unsigned dest_use_mask = nir_def_components_read(&intrin->def) << component;
/* If the input is entirely unused, just replace it with undef.
* This is just in case we debug this pass without running DCE first.
replacement = lower_load_vs_input(b, intrin, s);
}
- nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+ nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(instr);
nir_instr_free(instr);
b.cursor = nir_before_instr(&deref->instr);
nir_deref_instr *replacement =
nir_build_deref_cast(&b, arg_offset, nir_var_function_temp, deref->var->type, 0);
- nir_def_rewrite_uses(&deref->dest.ssa, &replacement->dest.ssa);
+ nir_def_rewrite_uses(&deref->def, &replacement->def);
nir_instr_remove(&deref->instr);
}
}
}
if (ret)
- nir_def_rewrite_uses(&intr->dest.ssa, ret);
+ nir_def_rewrite_uses(&intr->def, ret);
nir_instr_remove(instr);
break;
}
b->cursor = nir_after_instr(instr);
if (intrin->intrinsic == nir_intrinsic_load_deref) {
- uint32_t num_components = intrin->dest.ssa.num_components;
- uint32_t bit_size = intrin->dest.ssa.bit_size;
+ uint32_t num_components = intrin->def.num_components;
+ uint32_t bit_size = intrin->def.bit_size;
nir_def *components[NIR_MAX_VEC_COMPONENTS];
}
}
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_vec(b, components, num_components));
+ nir_def_rewrite_uses(&intrin->def, nir_vec(b, components, num_components));
} else {
nir_def *value = intrin->src[1].ssa;
uint32_t num_components = value->num_components;
break;
case nir_intrinsic_load_ray_t_max:
- nir_def_rewrite_uses(&intrin->dest.ssa, hit_t);
+ nir_def_rewrite_uses(&intrin->def, hit_t);
nir_instr_remove(&intrin->instr);
break;
case nir_intrinsic_load_ray_hit_kind:
- nir_def_rewrite_uses(&intrin->dest.ssa, hit_kind);
+ nir_def_rewrite_uses(&intrin->def, hit_kind);
nir_instr_remove(&intrin->instr);
break;
break;
case nir_intrinsic_load_rt_arg_scratch_offset_amd:
b->cursor = nir_after_instr(instr);
- nir_def *arg_offset = nir_isub(b, &intrin->dest.ssa, scratch_offset);
- nir_def_rewrite_uses_after(&intrin->dest.ssa, arg_offset, arg_offset->parent_instr);
+ nir_def *arg_offset = nir_isub(b, &intrin->def, scratch_offset);
+ nir_def_rewrite_uses_after(&intrin->def, arg_offset, arg_offset->parent_instr);
break;
default:
nir_push_if(b, nir_inot(b, nir_load_intersection_opaque_amd(b)));
{
nir_def *params[] = {
- &nir_build_deref_var(b, commit_tmp)->dest.ssa,
+ &nir_build_deref_var(b, commit_tmp)->def,
hit_t,
hit_kind,
nir_imm_int(b, intersection->scratch_size),
nir_pop_if(b, NULL);
nir_def *accepted = nir_load_var(b, commit_tmp);
- nir_def_rewrite_uses(&intrin->dest.ssa, accepted);
+ nir_def_rewrite_uses(&intrin->def, accepted);
}
}
nir_metadata_preserve(impl, nir_metadata_none);
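nir_metadata_none is required here because the pass inserted control flow (nir_push_if/nir_pop_if), which invalidates dominance and block-index metadata. For reference, the usual idiom looks like this (generic sketch):

if (progress)
   nir_metadata_preserve(impl, nir_metadata_none);
else
   nir_metadata_preserve(impl, nir_metadata_all);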
case MESA_SHADER_VERTEX: {
unsigned idx = nir_intrinsic_io_semantics(instr).location;
unsigned component = nir_intrinsic_component(instr);
- unsigned mask = nir_def_components_read(&instr->dest.ssa);
- mask = (instr->dest.ssa.bit_size == 64 ? util_widen_mask(mask, 2) : mask) << component;
+ unsigned mask = nir_def_components_read(&instr->def);
+ mask = (instr->def.bit_size == 64 ? util_widen_mask(mask, 2) : mask) << component;
info->vs.input_usage_mask[idx] |= mask & 0xf;
if (mask >> 4)
{
info->loads_push_constants = true;
- if (nir_src_is_const(instr->src[0]) && instr->dest.ssa.bit_size >= 32) {
+ if (nir_src_is_const(instr->src[0]) && instr->def.bit_size >= 32) {
uint32_t start = (nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[0])) / 4u;
- uint32_t size = instr->num_components * (instr->dest.ssa.bit_size / 32u);
+ uint32_t size = instr->num_components * (instr->def.bit_size / 32u);
if (start + size <= (MAX_PUSH_CONSTANTS_SIZE / 4u)) {
info->inline_push_constant_mask |= u_bit_consecutive64(start, size);
break;
case nir_intrinsic_load_local_invocation_id:
case nir_intrinsic_load_workgroup_id: {
- unsigned mask = nir_def_components_read(&instr->dest.ssa);
+ unsigned mask = nir_def_components_read(&instr->def);
while (mask) {
unsigned i = u_bit_scan(&mask);
break;
}
case nir_intrinsic_load_frag_coord:
- info->ps.reads_frag_coord_mask |= nir_def_components_read(&instr->dest.ssa);
+ info->ps.reads_frag_coord_mask |= nir_def_components_read(&instr->def);
break;
case nir_intrinsic_load_sample_pos:
- info->ps.reads_sample_pos_mask |= nir_def_components_read(&instr->dest.ssa);
+ info->ps.reads_sample_pos_mask |= nir_def_components_read(&instr->def);
break;
case nir_intrinsic_load_push_constant:
gather_push_constant_info(nir, instr, info);
nir_src *offset = nir_get_io_offset_src(instr);
assert(nir_src_is_const(*offset) && "no indirects");
- assert(nir_def_components_read(&instr->dest.ssa) ==
+ assert(nir_def_components_read(&instr->def) ==
nir_component_mask(components) &&
"iter does not handle write-after-write hazards");
b->shader->did_writeout = true;
b->shader->out->reads_tib = true;
- unsigned nr_comps = instr->dest.ssa.num_components;
+ unsigned nr_comps = instr->def.num_components;
agx_ld_tile_to(b, dest, agx_src_index(&instr->src[0]),
agx_format_for_pipe(nir_intrinsic_format(instr)),
BITFIELD_MASK(nr_comps), nir_intrinsic_base(instr));
offset = agx_abs(offset);
agx_device_load_to(b, dest, addr, offset, fmt,
- BITFIELD_MASK(instr->dest.ssa.num_components), shift, 0);
- agx_emit_cached_split(b, dest, instr->dest.ssa.num_components);
+ BITFIELD_MASK(instr->def.num_components), shift, 0);
+ agx_emit_cached_split(b, dest, instr->def.num_components);
}
static void
nir_intrinsic_instr *instr)
{
agx_index srcs[4] = {agx_null()};
- unsigned dim = instr->dest.ssa.num_components;
+ unsigned dim = instr->def.num_components;
assert(dim <= ARRAY_SIZE(srcs) && "shouldn't see larger vectors");
unsigned base = nir_intrinsic_base(instr);
agx_load_compute_dimension(agx_builder *b, agx_index dst,
nir_intrinsic_instr *instr, enum agx_sr base)
{
- unsigned dim = instr->dest.ssa.num_components;
- unsigned size = instr->dest.ssa.bit_size;
+ unsigned dim = instr->def.num_components;
+ unsigned size = instr->def.bit_size;
assert(size == 16 || size == 32);
agx_index srcs[] = {
agx_index index = agx_zero(); /* TODO: optimize address arithmetic */
assert(base.size == AGX_SIZE_16);
- enum agx_format format = format_for_bitsize(instr->dest.ssa.bit_size);
- unsigned nr = instr->dest.ssa.num_components;
+ enum agx_format format = format_for_bitsize(instr->def.bit_size);
+ unsigned nr = instr->def.num_components;
unsigned mask = BITFIELD_MASK(nr);
agx_local_load_to(b, dst, base, index, format, mask);
agx_instr *I = agx_image_load_to(
b, tmp, coords, lod, bindless, texture, agx_txf_sampler(b->shader),
agx_null(), agx_tex_dim(dim, is_array), lod_mode, 0, 0, false);
- I->mask = agx_expand_tex_to(b, &intr->dest.ssa, tmp, true);
+ I->mask = agx_expand_tex_to(b, &intr->def, tmp, true);
return NULL;
}
agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr)
{
agx_index dst = nir_intrinsic_infos[instr->intrinsic].has_dest
- ? agx_def_index(&instr->dest.ssa)
+ ? agx_def_index(&instr->def)
: agx_null();
gl_shader_stage stage = b->shader->stage;
}
}
- agx_index dst = agx_def_index(&instr->dest.ssa);
+ agx_index dst = agx_def_index(&instr->def);
/* Pack shadow reference value (compare) and packed offset together */
agx_index compare_offset = agx_null();
* textureGatherOffsets. Don't try to mask the destination for gathers.
*/
bool masked = (instr->op != nir_texop_tg4);
- I->mask = agx_expand_tex_to(b, &instr->dest.ssa, tmp, masked);
+ I->mask = agx_expand_tex_to(b, &instr->def, tmp, masked);
}
static void
agx_emit_phi(agx_builder *b, nir_phi_instr *instr)
{
- agx_instr *I = agx_phi_to(b, agx_def_index(&instr->dest.ssa),
- exec_list_length(&instr->srcs));
+ agx_instr *I =
+ agx_phi_to(b, agx_def_index(&instr->def), exec_list_length(&instr->srcs));
/* Deferred */
I->phi = instr;
nir_phi_instr *phi = I->phi;
/* Guaranteed by lower_phis_to_scalar */
- assert(phi->dest.ssa.num_components == 1);
+ assert(phi->def.num_components == 1);
nir_foreach_phi_src(src, phi) {
agx_block *pred = agx_from_nir_block(ctx, src->pred);
if (intr->intrinsic != nir_intrinsic_load_front_face)
return false;
- nir_def *def = &intr->dest.ssa;
+ nir_def *def = &intr->def;
assert(def->bit_size == 1);
b->cursor = nir_before_instr(&intr->instr);
static inline agx_index
agx_vec_for_intr(agx_context *ctx, nir_intrinsic_instr *instr)
{
- return agx_vec_for_def(ctx, &instr->dest.ssa);
+ return agx_vec_for_def(ctx, &instr->def);
}
static inline unsigned
unsigned bitsize = intr->intrinsic == nir_intrinsic_store_global
? nir_src_bit_size(intr->src[0])
- : intr->dest.ssa.bit_size;
+ : intr->def.bit_size;
enum pipe_format format = format_for_bitsize(bitsize);
unsigned format_shift = util_logbase2(util_format_get_blocksize(format));
nir_def *repl = NULL;
bool has_dest = (intr->intrinsic != nir_intrinsic_store_global);
- unsigned num_components = has_dest ? intr->dest.ssa.num_components : 0;
- unsigned bit_size = has_dest ? intr->dest.ssa.bit_size : 0;
+ unsigned num_components = has_dest ? intr->def.num_components : 0;
+ unsigned bit_size = has_dest ? intr->def.bit_size : 0;
if (intr->intrinsic == nir_intrinsic_load_global) {
repl =
}
if (repl)
- nir_def_rewrite_uses(&intr->dest.ssa, repl);
+ nir_def_rewrite_uses(&intr->def, repl);
nir_instr_remove(instr);
return true;
.interp_mode = interp_mode_for_load(load), .io_semantics = sem);
if (load->intrinsic == nir_intrinsic_load_input) {
- assert(load->dest.ssa.bit_size == 32);
+ assert(load->def.bit_size == 32);
return interpolate_flat(b, coefficients);
} else {
nir_intrinsic_instr *bary = nir_src_as_intrinsic(load->src[0]);
b, coefficients, bary->src[0].ssa,
nir_intrinsic_interp_mode(bary) != INTERP_MODE_NOPERSPECTIVE);
- return nir_f2fN(b, interp, load->dest.ssa.bit_size);
+ return nir_f2fN(b, interp, load->def.bit_size);
}
}
/* Each component is loaded separately */
nir_def *values[NIR_MAX_VEC_COMPONENTS] = {NULL};
- for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < intr->def.num_components; ++i) {
values[i] = interpolate_channel(b, intr, i);
}
- return nir_vec(b, values, intr->dest.ssa.num_components);
+ return nir_vec(b, values, intr->def.num_components);
}
bool
if (intr->intrinsic != nir_intrinsic_load_interpolated_input)
return false;
- unsigned mask = nir_def_components_read(&intr->dest.ssa);
+ unsigned mask = nir_def_components_read(&intr->def);
if (mask == 0 || mask == nir_component_mask(intr->num_components))
return false;
b->cursor = nir_before_instr(instr);
- unsigned bit_size = intr->dest.ssa.bit_size;
+ unsigned bit_size = intr->def.bit_size;
nir_def *comps[4] = {NULL};
for (unsigned c = 0; c < intr->num_components; ++c) {
nir_intrinsic_instr *clone_intr = nir_instr_as_intrinsic(clone);
/* Shrink the load to count contiguous components */
- nir_def_init(clone, &clone_intr->dest.ssa, count, bit_size);
- nir_def *clone_vec = &clone_intr->dest.ssa;
+ nir_def_init(clone, &clone_intr->def, count, bit_size);
+ nir_def *clone_vec = &clone_intr->def;
clone_intr->num_components = count;
/* The load starts from component c relative to the original load */
}
}
- nir_def_rewrite_uses(&intr->dest.ssa,
- nir_vec(b, comps, intr->num_components));
+ nir_def_rewrite_uses(&intr->def, nir_vec(b, comps, intr->num_components));
return true;
}
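A concrete reading of the shrink pass above (component count illustrative):

/* A 4-component interpolated load where only .x and .z are read:
 *   mask = 0b0101 -> two one-component clone loads, starting at
 *   components 0 and 2; nir_vec() then rebuilds the full-width
 *   destination from the live lanes. */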
height = depth;
/* How we finish depends on the size of the result */
- unsigned nr_comps = tex->dest.ssa.num_components;
+ unsigned nr_comps = tex->def.num_components;
assert(nr_comps <= 3);
/* Adjust for LOD, do not adjust array size */
return false;
nir_def *res = agx_txs(b, tex);
- nir_def_rewrite_uses_after(&tex->dest.ssa, res, instr);
+ nir_def_rewrite_uses_after(&tex->def, res, instr);
nir_instr_remove(instr);
return true;
}
nir_iand_imm(b, nir_ushr_imm(b, desc_hi, 2), BITFIELD64_MASK(36));
nir_def *base = nir_ishl_imm(b, base_shr4, 4);
- nir_def *raw = nir_load_constant_agx(b, 3, tex->dest.ssa.bit_size, base,
+ nir_def *raw = nir_load_constant_agx(b, 3, tex->def.bit_size, base,
nir_imul_imm(b, coordinate, 3),
.format = AGX_INTERNAL_FORMAT_I32);
nir_pop_if(b, nif);
/* Put it together with a phi */
- nir_def *phi = nir_if_phi(b, rgb32, &tex->dest.ssa);
- nir_def_rewrite_uses(&tex->dest.ssa, phi);
+ nir_def *phi = nir_if_phi(b, rgb32, &tex->def);
+ nir_def_rewrite_uses(&tex->def, phi);
nir_phi_instr *phi_instr = nir_instr_as_phi(phi->parent_instr);
nir_phi_src *else_src = nir_phi_get_src_from_block(phi_instr, else_block);
- nir_instr_rewrite_src_ssa(phi->parent_instr, &else_src->src, &tex->dest.ssa);
+ nir_instr_rewrite_src_ssa(phi->parent_instr, &else_src->src, &tex->def);
return true;
}
query->op = nir_texop_lod_bias_agx;
query->dest_type = nir_type_float16;
- nir_def_init(instr, &query->dest.ssa, 1, 16);
- return &query->dest.ssa;
+ nir_def_init(instr, &query->def, 1, 16);
+ return &query->def;
}
static bool
nir_tex_src_for_ssa(nir_tex_src_texture_offset, intr->src[0].ssa);
}
- nir_def_init(&tex->instr, &tex->dest.ssa, num_components, bit_size);
+ nir_def_init(&tex->instr, &tex->def, num_components, bit_size);
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
static nir_def *
case nir_intrinsic_image_size:
case nir_intrinsic_bindless_image_size:
- nir_def_rewrite_uses(&intr->dest.ssa,
- txs_for_image(b, intr, intr->dest.ssa.num_components,
- intr->dest.ssa.bit_size));
+ nir_def_rewrite_uses(
+ &intr->def,
+ txs_for_image(b, intr, intr->def.num_components, intr->def.bit_size));
return true;
case nir_intrinsic_image_texel_address:
case nir_intrinsic_bindless_image_texel_address:
- nir_def_rewrite_uses(&intr->dest.ssa,
- image_texel_address(b, intr, false));
+ nir_def_rewrite_uses(&intr->def, image_texel_address(b, intr, false));
return true;
default:
nir_iadd(b, nir_load_ubo_base_agx(b, ubo_index), nir_u2u64(b, offset));
nir_def *value =
nir_load_global_constant(b, address, nir_intrinsic_align(intr),
- intr->num_components, intr->dest.ssa.bit_size);
+ intr->num_components, intr->def.bit_size);
- nir_def_rewrite_uses(&intr->dest.ssa, value);
+ nir_def_rewrite_uses(&intr->def, value);
return true;
}
tex->coord_components = 2;
tex->texture_index = rt;
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- return nir_trim_vector(b, &tex->dest.ssa, nr);
+ return nir_trim_vector(b, &tex->def, nr);
} else {
assert(op == AGX_META_OP_CLEAR);
switch (intr->intrinsic) {
case nir_intrinsic_load_sample_id: {
- unsigned size = intr->dest.ssa.bit_size;
- nir_def_rewrite_uses(&intr->dest.ssa, nir_u2uN(b, sample_id, size));
+ unsigned size = intr->def.bit_size;
+ nir_def_rewrite_uses(&intr->def, nir_u2uN(b, sample_id, size));
nir_instr_remove(instr);
return true;
}
if (intr->intrinsic != nir_intrinsic_load_sample_mask_in)
return false;
- nir_def *old = &intr->dest.ssa;
+ nir_def *old = &intr->def;
nir_def *lowered = nir_iand(
b, old, nir_u2uN(b, nir_load_api_sample_mask_agx(b), old->bit_size));
xy[i] = nir_fmul_imm(b, nir_u2f16(b, nibble), 1.0 / 16.0);
/* Upconvert if necessary */
- xy[i] = nir_f2fN(b, xy[i], intr->dest.ssa.bit_size);
+ xy[i] = nir_f2fN(b, xy[i], intr->def.bit_size);
}
/* Collect and rewrite */
- nir_def_rewrite_uses(&intr->dest.ssa, nir_vec2(b, xy[0], xy[1]));
+ nir_def_rewrite_uses(&intr->def, nir_vec2(b, xy[0], xy[1]));
nir_instr_remove(instr);
return true;
}
* by the sample ID to make that happen.
*/
b->cursor = nir_after_instr(instr);
- nir_def *old = &intr->dest.ssa;
+ nir_def *old = &intr->def;
nir_def *lowered = mask_by_sample_id(b, old);
nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
return true;
* interpolateAtSample() with the sample ID
*/
b->cursor = nir_after_instr(instr);
- nir_def *old = &intr->dest.ssa;
+ nir_def *old = &intr->def;
nir_def *lowered = nir_load_barycentric_at_sample(
- b, intr->dest.ssa.bit_size, nir_load_sample_id(b),
+ b, intr->def.bit_size, nir_load_sample_id(b),
.interp_mode = nir_intrinsic_interp_mode(intr));
nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
return NIR_LOWER_INSTR_PROGRESS_REPLACE;
} else {
- uint8_t bit_size = intr->dest.ssa.bit_size;
+ uint8_t bit_size = intr->def.bit_size;
/* Loads from non-existent render targets are undefined in NIR but not
* possible to encode in the hardware, so delete them.
util_format_is_pure_uint(interchange_format) &&
!util_format_is_pure_uint(attrib.format)
? (interchange_align * 8)
- : intr->dest.ssa.bit_size;
+ : intr->def.bit_size;
/* Non-UNORM R10G10B10A2 loaded as a scalar and unpacked */
if (interchange_format == PIPE_FORMAT_R32_UINT && !desc->is_array)
b, interchange_comps, interchange_register_size, base, stride_offset_el,
.format = interchange_format, .base = shift);
- unsigned dest_size = intr->dest.ssa.bit_size;
+ unsigned dest_size = intr->def.bit_size;
/* Unpack but do not convert non-native non-array formats */
if (is_rgb10_a2(desc) && interchange_format == PIPE_FORMAT_R32_UINT) {
channels[i] = apply_swizzle_channel(b, memory, desc->swizzle[i], is_int);
nir_def *logical = nir_vec(b, channels, intr->num_components);
- nir_def_rewrite_uses(&intr->dest.ssa, logical);
+ nir_def_rewrite_uses(&intr->def, logical);
return true;
}
tmu_op, has_index,
&tmu_writes);
} else if (is_load) {
- type_size = instr->dest.ssa.bit_size / 8;
+ type_size = instr->def.bit_size / 8;
}
/* For atomics we use 32-bit, except for CMPXCHG, where we need
*/
const uint32_t component_mask =
(1 << dest_components) - 1;
- ntq_add_pending_tmu_flush(c, &instr->dest.ssa,
+ ntq_add_pending_tmu_flush(c, &instr->def,
component_mask);
}
}
unreachable("Bad sampler type");
}
- ntq_store_def(c, &instr->dest.ssa, i, size);
+ ntq_store_def(c, &instr->def, i, size);
}
}
*/
switch (instr->op) {
case nir_texop_query_levels:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
return;
case nir_texop_texture_samples:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_TEXTURE_SAMPLES, unit));
return;
case nir_texop_txs:
struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
array_len * num_components);
- nir_def *nir_reg = &decl->dest.ssa;
+ nir_def *nir_reg = &decl->def;
_mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
for (int i = 0; i < array_len * num_components; i++)
assert(nir_src_as_uint(instr->src[1]) == 0);
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_IMAGE_WIDTH, image_index));
if (instr->num_components > 1) {
- ntq_store_def(c, &instr->dest.ssa, 1,
+ ntq_store_def(c, &instr->def, 1,
vir_uniform(c,
instr->num_components == 2 && is_array ?
QUNIFORM_IMAGE_ARRAY_SIZE :
image_index));
}
if (instr->num_components > 2) {
- ntq_store_def(c, &instr->dest.ssa, 2,
+ ntq_store_def(c, &instr->def, 2,
vir_uniform(c,
is_array ?
QUNIFORM_IMAGE_ARRAY_SIZE :
}
assert(color_reads_for_sample[component].file != QFILE_NULL);
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_MOV(c, color_reads_for_sample[component]));
}
ntq_emit_load_uniform(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
/* We scalarize general TMU access for anything that is not 32-bit. */
- assert(instr->dest.ssa.bit_size == 32 ||
+ assert(instr->def.bit_size == 32 ||
instr->num_components == 1);
/* Try to emit ldunif if possible, otherwise fallback to general TMU */
nir_src_as_uint(instr->src[0]));
if (try_emit_uniform(c, offset, instr->num_components,
- &instr->dest.ssa, QUNIFORM_UNIFORM)) {
+ &instr->def, QUNIFORM_UNIFORM)) {
return;
}
}
return false;
/* We scalarize general TMU access for anything that is not 32-bit */
- assert(instr->dest.ssa.bit_size == 32 ||
+ assert(instr->def.bit_size == 32 ||
instr->num_components == 1);
if (nir_src_is_const(instr->src[1])) {
int offset = nir_src_as_uint(instr->src[1]);
if (try_emit_uniform(c, offset, instr->num_components,
- &instr->dest.ssa,
+ &instr->def,
QUNIFORM_INLINE_UBO_0 + index)) {
return true;
}
index += nir_intrinsic_component(instr);
for (int i = 0; i < instr->num_components; i++) {
struct qreg vpm_offset = vir_uniform_ui(c, index++);
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
vir_LDVPMV_IN(c, vpm_offset));
}
} else {
for (int i = 0; i < instr->num_components; i++) {
int comp = nir_intrinsic_component(instr) + i;
struct qreg input = c->inputs[offset * 4 + comp];
- ntq_store_def(c, &instr->dest.ssa, i, vir_MOV(c, input));
+ ntq_store_def(c, &instr->def, i, vir_MOV(c, input));
if (c->s->info.stage == MESA_SHADER_FRAGMENT &&
input.file == c->payload_z.file &&
* use ldunifa if we can verify alignment, which we can only do for
* loads with a constant offset.
*/
- uint32_t bit_size = instr->dest.ssa.bit_size;
+ uint32_t bit_size = instr->def.bit_size;
uint32_t value_skips = 0;
if (bit_size < 32) {
if (dynamic_src) {
if (bit_size == 32) {
assert(value_skips == 0);
- ntq_store_def(c, &instr->dest.ssa, i, vir_MOV(c, data));
+ ntq_store_def(c, &instr->def, i, vir_MOV(c, data));
i++;
} else {
assert((bit_size == 16 && value_skips <= 1) ||
uint32_t mask = (1 << bit_size) - 1;
tmp = vir_AND(c, vir_MOV(c, data),
vir_uniform_ui(c, mask));
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
vir_MOV(c, tmp));
i++;
valid_count--;
break;
case nir_intrinsic_get_ssbo_size:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_GET_SSBO_SIZE,
nir_src_comp_as_uint(instr->src[0], 0)));
break;
case nir_intrinsic_get_ubo_size:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_GET_UBO_SIZE,
nir_src_comp_as_uint(instr->src[0], 0)));
break;
case nir_intrinsic_load_user_clip_plane:
for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) {
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
nir_intrinsic_ucp_id(instr) *
4 + i));
break;
case nir_intrinsic_load_viewport_x_scale:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE, 0));
break;
case nir_intrinsic_load_viewport_y_scale:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEWPORT_Y_SCALE, 0));
break;
case nir_intrinsic_load_viewport_z_scale:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0));
break;
case nir_intrinsic_load_viewport_z_offset:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0));
break;
case nir_intrinsic_load_line_coord:
- ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->line_x));
+ ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->line_x));
break;
case nir_intrinsic_load_line_width:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_LINE_WIDTH, 0));
break;
case nir_intrinsic_load_aa_line_width:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_AA_LINE_WIDTH, 0));
break;
case nir_intrinsic_load_sample_mask_in:
- ntq_store_def(c, &instr->dest.ssa, 0, vir_MSF(c));
+ ntq_store_def(c, &instr->def, 0, vir_MSF(c));
break;
case nir_intrinsic_load_helper_invocation:
vir_set_pf(c, vir_MSF_dest(c, vir_nop_reg()), V3D_QPU_PF_PUSHZ);
struct qreg qdest = ntq_emit_cond_to_bool(c, V3D_QPU_COND_IFA);
- ntq_store_def(c, &instr->dest.ssa, 0, qdest);
+ ntq_store_def(c, &instr->def, 0, qdest);
break;
case nir_intrinsic_load_front_face:
/* The register contains 0 (front) or 1 (back), and we need to
* turn it into a NIR bool where true means front.
*/
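/* Spelling out the arithmetic below: REVF is 0 for front and 1 for
 * back, so -1 + REVF yields ~0 (NIR true) for front-facing and 0
 * (false) for back-facing. */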
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_ADD(c,
vir_uniform_ui(c, -1),
vir_REVF(c)));
break;
case nir_intrinsic_load_base_instance:
- ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->biid));
+ ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->biid));
break;
case nir_intrinsic_load_instance_id:
- ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->iid));
+ ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->iid));
break;
case nir_intrinsic_load_vertex_id:
- ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->vid));
+ ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->vid));
break;
case nir_intrinsic_load_tlb_color_v3d:
case nir_intrinsic_load_num_workgroups:
for (int i = 0; i < 3; i++) {
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
vir_uniform(c, QUNIFORM_NUM_WORK_GROUPS,
i));
}
case nir_intrinsic_load_workgroup_id: {
struct qreg x = vir_AND(c, c->cs_payload[0],
vir_uniform_ui(c, 0xffff));
- ntq_store_def(c, &instr->dest.ssa, 0, x);
+ ntq_store_def(c, &instr->def, 0, x);
struct qreg y = vir_SHR(c, c->cs_payload[0],
vir_uniform_ui(c, 16));
- ntq_store_def(c, &instr->dest.ssa, 1, y);
+ ntq_store_def(c, &instr->def, 1, y);
struct qreg z = vir_AND(c, c->cs_payload[1],
vir_uniform_ui(c, 0xffff));
- ntq_store_def(c, &instr->dest.ssa, 2, z);
+ ntq_store_def(c, &instr->def, 2, z);
break;
}
case nir_intrinsic_load_base_workgroup_id: {
struct qreg x = vir_uniform(c, QUNIFORM_WORK_GROUP_BASE, 0);
- ntq_store_def(c, &instr->dest.ssa, 0, x);
+ ntq_store_def(c, &instr->def, 0, x);
struct qreg y = vir_uniform(c, QUNIFORM_WORK_GROUP_BASE, 1);
- ntq_store_def(c, &instr->dest.ssa, 1, y);
+ ntq_store_def(c, &instr->def, 1, y);
struct qreg z = vir_uniform(c, QUNIFORM_WORK_GROUP_BASE, 2);
- ntq_store_def(c, &instr->dest.ssa, 2, z);
+ ntq_store_def(c, &instr->def, 2, z);
break;
}
case nir_intrinsic_load_local_invocation_index:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
emit_load_local_invocation_index(c));
break;
STATIC_ASSERT(IS_POT(V3D_CHANNELS) && V3D_CHANNELS > 0);
const uint32_t divide_shift = ffs(V3D_CHANNELS) - 1;
struct qreg lii = emit_load_local_invocation_index(c);
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_SHR(c, lii,
vir_uniform_ui(c, divide_shift)));
break;
struct qreg col = ntq_get_src(c, instr->src[0], 0);
for (int i = 0; i < instr->num_components; i++) {
struct qreg row = vir_uniform_ui(c, row_idx++);
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
vir_LDVPMG_IN(c, row, col));
}
break;
* using ldvpm(v,d)_in (See Table 71).
*/
assert(c->s->info.stage == MESA_SHADER_GEOMETRY);
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_LDVPMV_IN(c, vir_uniform_ui(c, 0)));
break;
}
case nir_intrinsic_load_invocation_id:
- ntq_store_def(c, &instr->dest.ssa, 0, vir_IID(c));
+ ntq_store_def(c, &instr->def, 0, vir_IID(c));
break;
case nir_intrinsic_load_fb_layers_v3d:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_FB_LAYERS, 0));
break;
case nir_intrinsic_load_sample_id:
- ntq_store_def(c, &instr->dest.ssa, 0, vir_SAMPID(c));
+ ntq_store_def(c, &instr->def, 0, vir_SAMPID(c));
break;
case nir_intrinsic_load_sample_pos:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_FSUB(c, vir_FXCD(c), vir_ITOF(c, vir_XCD(c))));
- ntq_store_def(c, &instr->dest.ssa, 1,
+ ntq_store_def(c, &instr->def, 1,
vir_FSUB(c, vir_FYCD(c), vir_ITOF(c, vir_YCD(c))));
break;
case nir_intrinsic_load_barycentric_at_offset:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_MOV(c, ntq_get_src(c, instr->src[0], 0)));
- ntq_store_def(c, &instr->dest.ssa, 1,
+ ntq_store_def(c, &instr->def, 1,
vir_MOV(c, ntq_get_src(c, instr->src[0], 1)));
break;
case nir_intrinsic_load_barycentric_pixel:
- ntq_store_def(c, &instr->dest.ssa, 0, vir_uniform_f(c, 0.0f));
- ntq_store_def(c, &instr->dest.ssa, 1, vir_uniform_f(c, 0.0f));
+ ntq_store_def(c, &instr->def, 0, vir_uniform_f(c, 0.0f));
+ ntq_store_def(c, &instr->def, 1, vir_uniform_f(c, 0.0f));
break;
case nir_intrinsic_load_barycentric_at_sample: {
if (!c->fs_key->msaa) {
- ntq_store_def(c, &instr->dest.ssa, 0, vir_uniform_f(c, 0.0f));
- ntq_store_def(c, &instr->dest.ssa, 1, vir_uniform_f(c, 0.0f));
+ ntq_store_def(c, &instr->def, 0, vir_uniform_f(c, 0.0f));
+ ntq_store_def(c, &instr->def, 1, vir_uniform_f(c, 0.0f));
return;
}
struct qreg sample_idx = ntq_get_src(c, instr->src[0], 0);
ntq_get_sample_offset(c, sample_idx, &offset_x, &offset_y);
- ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, offset_x));
- ntq_store_def(c, &instr->dest.ssa, 1, vir_MOV(c, offset_y));
+ ntq_store_def(c, &instr->def, 0, vir_MOV(c, offset_x));
+ ntq_store_def(c, &instr->def, 1, vir_MOV(c, offset_y));
break;
}
struct qreg offset_y =
vir_FSUB(c, vir_FYCD(c), vir_ITOF(c, vir_YCD(c)));
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_FSUB(c, offset_x, vir_uniform_f(c, 0.5f)));
- ntq_store_def(c, &instr->dest.ssa, 1,
+ ntq_store_def(c, &instr->def, 1,
vir_FSUB(c, offset_y, vir_uniform_f(c, 0.5f)));
break;
}
case nir_intrinsic_load_barycentric_centroid: {
struct qreg offset_x, offset_y;
ntq_get_barycentric_centroid(c, &offset_x, &offset_y);
- ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, offset_x));
- ntq_store_def(c, &instr->dest.ssa, 1, vir_MOV(c, offset_y));
+ ntq_store_def(c, &instr->def, 0, vir_MOV(c, offset_x));
+ ntq_store_def(c, &instr->def, 1, vir_MOV(c, offset_y));
break;
}
*/
if (!c->fs_key->msaa ||
c->interp[input_idx].vp.file == QFILE_NULL) {
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
vir_MOV(c, c->inputs[input_idx]));
continue;
}
ntq_emit_load_interpolated_input(c, p, C,
offset_x, offset_y,
interp_mode);
- ntq_store_def(c, &instr->dest.ssa, i, result);
+ ntq_store_def(c, &instr->def, i, result);
}
break;
}
case nir_intrinsic_load_subgroup_size:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform_ui(c, V3D_CHANNELS));
break;
case nir_intrinsic_load_subgroup_invocation:
- ntq_store_def(c, &instr->dest.ssa, 0, vir_EIDX(c));
+ ntq_store_def(c, &instr->def, 0, vir_EIDX(c));
break;
case nir_intrinsic_elect: {
first, vir_uniform_ui(c, 1)),
V3D_QPU_PF_PUSHZ);
struct qreg result = ntq_emit_cond_to_bool(c, V3D_QPU_COND_IFA);
- ntq_store_def(c, &instr->dest.ssa, 0, result);
+ ntq_store_def(c, &instr->def, 0, result);
break;
}
break;
case nir_intrinsic_load_view_index:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEW_INDEX, 0));
break;
* instruction writes and how many the instruction could produce.
*/
p1_unpacked.return_words_of_texture_data =
- nir_def_components_read(&instr->dest.ssa);
+ nir_def_components_read(&instr->def);
uint32_t p0_packed;
V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_pack(NULL,
for (int i = 0; i < 4; i++) {
if (p1_unpacked.return_words_of_texture_data & (1 << i))
- ntq_store_def(c, &instr->dest.ssa, i, vir_LDTMU(c));
+ ntq_store_def(c, &instr->def, i, vir_LDTMU(c));
}
}
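For example, if a shader reads only the .x and .z components of this result, nir_def_components_read(&instr->def) yields 0b0101, so the loop above issues exactly two vir_LDTMU reads and stores them into components 0 and 2 of the def.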
/* Limit the number of channels returned to both how many the NIR
* instruction writes and how many the instruction could produce.
*/
- nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->dest.ssa);
+ nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->def);
if (store == NULL) {
p0_unpacked.return_words_of_texture_data =
- nir_def_components_read(&instr->dest.ssa);
+ nir_def_components_read(&instr->def);
} else {
nir_def *reg = store->src[1].ssa;
nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
}
retiring->ldtmu_count = p0_unpacked.return_words_of_texture_data;
- ntq_add_pending_tmu_flush(c, &instr->dest.ssa,
+ ntq_add_pending_tmu_flush(c, &instr->def,
p0_unpacked.return_words_of_texture_data);
}
struct qinst *retiring =
vir_image_emit_register_writes(c, instr, atomic_add_replaced, NULL);
retiring->ldtmu_count = p0_unpacked.return_words_of_texture_data;
- ntq_add_pending_tmu_flush(c, &instr->dest.ssa,
+ ntq_add_pending_tmu_flush(c, &instr->def,
p0_unpacked.return_words_of_texture_data);
}
b->cursor = nir_after_instr(&instr->instr);
- nir_def *result = &instr->dest.ssa;
+ nir_def *result = &instr->def;
if (util_format_is_pure_uint(format)) {
result = nir_format_unpack_uint(b, result, bits16, 4);
} else if (util_format_is_pure_sint(format)) {
nir_unpack_half_2x16_split_y(b, ba));
}
- nir_def_rewrite_uses_after(&instr->dest.ssa, result,
+ nir_def_rewrite_uses_after(&instr->def, result,
result->parent_instr);
return true;
lower_load_bitsize(nir_builder *b,
nir_intrinsic_instr *intr)
{
- uint32_t bit_size = intr->dest.ssa.bit_size;
+ uint32_t bit_size = intr->def.bit_size;
if (bit_size == 32)
return false;
}
}
- nir_def_init(&new_intr->instr, &new_intr->dest.ssa, 1,
+ nir_def_init(&new_intr->instr, &new_intr->def, 1,
bit_size);
- dest_components[component] = &new_intr->dest.ssa;
+ dest_components[component] = &new_intr->def;
nir_builder_instr_insert(b, &new_intr->instr);
}
nir_def *new_dst = nir_vec(b, dest_components, num_comp);
- nir_def_rewrite_uses(&intr->dest.ssa, new_dst);
+ nir_def_rewrite_uses(&intr->def, new_dst);
nir_instr_remove(&intr->instr);
return true;
nir_intrinsic_instr *chan_instr =
nir_intrinsic_instr_create(b->shader, instr->intrinsic);
chan_instr->num_components = 1;
- nir_def_init(&chan_instr->instr, &chan_instr->dest.ssa, 1,
- instr->dest.ssa.bit_size);
+ nir_def_init(&chan_instr->instr, &chan_instr->def, 1,
+ instr->def.bit_size);
chan_instr->src[0] = nir_src_for_ssa(chan_offset);
nir_builder_instr_insert(b, &chan_instr->instr);
- chans[i] = &chan_instr->dest.ssa;
+ chans[i] = &chan_instr->def;
}
nir_def *result = nir_vec(b, chans, instr->num_components);
- nir_def_rewrite_uses(&instr->dest.ssa, result);
+ nir_def_rewrite_uses(&instr->def, result);
nir_instr_remove(&instr->instr);
}
c->s->info.workgroup_size[1] *
c->s->info.workgroup_size[2], V3D_CHANNELS);
nir_def *result = nir_imm_int(b, num_subgroups);
- nir_def_rewrite_uses(&intr->dest.ssa, result);
+ nir_def_rewrite_uses(&intr->def, result);
nir_instr_remove(&intr->instr);
}
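These v3d hunks all follow the same lowering idiom, now spelled through ->def: build a replacement value, rewrite every use of the intrinsic's def, and delete the instruction. A minimal sketch of that idiom under the new field name (the pass and the constant it substitutes are illustrative, not v3d code):

   static bool
   lower_num_subgroups_to_one(nir_builder *b, nir_intrinsic_instr *intr)
   {
      if (intr->intrinsic != nir_intrinsic_load_num_subgroups)
         return false;

      b->cursor = nir_before_instr(&intr->instr);
      nir_def *result = nir_imm_int(b, 1); /* assumed: a single subgroup */
      nir_def_rewrite_uses(&intr->def, result);
      nir_instr_remove(&intr->instr);
      return true;
   }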
nir_iadd(&b, nir_iadd(&b, offset, x_offset),
nir_imul(&b, y_offset, stride));
- nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->dest.ssa;
+ nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->def;
nir_tex_instr *tex = nir_tex_instr_create(b.shader, 2);
tex->sampler_dim = GLSL_SAMPLER_DIM_BUF;
tex->op = nir_texop_txf;
tex->dest_type = nir_type_uint32;
tex->is_array = false;
tex->coord_components = 1;
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
uint32_t swiz[4];
component_swizzle_to_nir_swizzle(VK_COMPONENT_SWIZZLE_B, cswizzle->b);
swiz[3] =
component_swizzle_to_nir_swizzle(VK_COMPONENT_SWIZZLE_A, cswizzle->a);
- nir_def *s = nir_swizzle(&b, &tex->dest.ssa, swiz, 4);
+ nir_def *s = nir_swizzle(&b, &tex->def, swiz, 4);
nir_store_var(&b, fs_out_color, s, 0xf);
return b.shader;
sampler->data.descriptor_set = 0;
sampler->data.binding = 0;
- nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
+ nir_def *tex_deref = &nir_build_deref_var(b, sampler)->def;
nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3);
tex->sampler_dim = dim;
tex->op = nir_texop_tex;
tex->is_array = glsl_sampler_type_is_array(sampler_type);
tex->coord_components = tex_pos->num_components;
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
static nir_def *
tex->is_array = false;
tex->coord_components = tex_pos->num_components;
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
/* Fetches all samples at the given position and averages them */
const bool is_int = glsl_base_type_is_integer(tex_type);
nir_def *tmp = NULL;
- nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
+ nir_def *tex_deref = &nir_build_deref_var(b, sampler)->def;
for (uint32_t i = 0; i < src_samples; i++) {
nir_def *s =
build_nir_tex_op_ms_fetch_sample(b, sampler, tex_deref,
sampler->data.descriptor_set = 0;
sampler->data.binding = 0;
- nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
+ nir_def *tex_deref = &nir_build_deref_var(b, sampler)->def;
return build_nir_tex_op_ms_fetch_sample(b, sampler, tex_deref,
tex_type, tex_pos,
* vulkan_load_descriptor return a vec2 providing an index and
* offset. Our backend compiler only cares about the index part.
*/
- nir_def_rewrite_uses(&instr->dest.ssa,
+ nir_def_rewrite_uses(&instr->def,
nir_imm_ivec2(b, index, 0));
nir_instr_remove(&instr->instr);
}
/* Loading the descriptor happens as part of load/store instructions,
* so for us this is a no-op.
*/
- nir_def_rewrite_uses(&instr->dest.ssa, instr->src[0].ssa);
+ nir_def_rewrite_uses(&instr->def, instr->src[0].ssa);
nir_instr_remove(&instr->instr);
return true;
}
return false;
b->cursor = nir_after_instr(&intr->instr);
- nir_def *result = &intr->dest.ssa;
+ nir_def *result = &intr->def;
result =
nir_vector_insert_imm(b, result,
nir_fsub_imm(b, 1.0, nir_channel(b, result, 1)), 1);
- nir_def_rewrite_uses_after(&intr->dest.ssa,
+ nir_def_rewrite_uses_after(&intr->def,
result, result->parent_instr);
return true;
}
if (var->data.mode == nir_var_shader_temp) {
/* Create undef and rewrite the interp uses */
nir_def *undef =
- nir_undef(b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa, undef);
+ nir_undef(b, intrin->def.num_components,
+ intrin->def.bit_size);
+ nir_def_rewrite_uses(&intrin->def, undef);
nir_instr_remove(&intrin->instr);
return true;
break;
/* We use nir_address_format_32bit_index_offset */
- assert(deref->dest.ssa.bit_size == 32);
- deref->dest.ssa.num_components = 2;
+ assert(deref->def.bit_size == 32);
+ deref->def.num_components = 2;
progress = true;
cast->cast.align_mul = NIR_ALIGN_MUL_MAX;
cast->cast.align_offset = offset % NIR_ALIGN_MUL_MAX;
- nir_def_rewrite_uses(&deref->dest.ssa,
- &cast->dest.ssa);
+ nir_def_rewrite_uses(&deref->def,
+ &cast->def);
nir_deref_instr_remove_if_unused(deref);
break;
}
*/
if (glsl_type_is_boolean(deref->type)) {
b.cursor = nir_after_instr(&intrin->instr);
- intrin->dest.ssa.bit_size = 32;
- nir_def *bval = nir_i2b(&b, &intrin->dest.ssa);
- nir_def_rewrite_uses_after(&intrin->dest.ssa,
+ intrin->def.bit_size = 32;
+ nir_def *bval = nir_i2b(&b, &intrin->def);
+ nir_def_rewrite_uses_after(&intrin->def,
bval,
bval->parent_instr);
progress = true;
swizzle_values[i] = i + location_frac;
}
- nir_def *ssa_def = &packed_deref->dest.ssa;
+ nir_def *ssa_def = &packed_deref->def;
ssa_def = nir_load_deref(&state->b, packed_deref);
nir_def *swizzle =
nir_swizzle(&state->b, ssa_def, swizzle_values, components);
/* only lower non-bindless: */
if (texture_deref) {
nir_instr_rewrite_src(&instr->instr, &instr->src[texture_idx].src,
- nir_src_for_ssa(&texture_deref->dest.ssa));
+ nir_src_for_ssa(&texture_deref->def));
record_textures_used(&b->shader->info, texture_deref, instr->op);
}
}
/* only lower non-bindless: */
if (sampler_deref) {
nir_instr_rewrite_src(&instr->instr, &instr->src[sampler_idx].src,
- nir_src_for_ssa(&sampler_deref->dest.ssa));
+ nir_src_for_ssa(&sampler_deref->def));
record_samplers_used(&b->shader->info, sampler_deref, instr->op);
}
}
if (!deref)
return false;
nir_instr_rewrite_src(&instr->instr, &instr->src[0],
- nir_src_for_ssa(&deref->dest.ssa));
+ nir_src_for_ssa(&deref->def));
return true;
}
if (instr->intrinsic == nir_intrinsic_image_deref_order ||
unsigned i = nir_src_as_uint(deref->arr.index);
nir_deref_instr *new_deref =
nir_build_deref_var(b, rv_data->new_texcoord[i]);
- nir_def_rewrite_uses(&deref->dest.ssa, &new_deref->dest.ssa);
+ nir_def_rewrite_uses(&deref->def, &new_deref->def);
return;
}
}
}
nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
- nir_def *ret = &instr->dest.ssa;
+ nir_def *ret = &instr->def;
switch (op) {
case nir_intrinsic_deref_atomic:
nir_deref = nir_build_deref_array_imm(&b, nir_deref,
swizzle->mask.x);
}
- instr->src[0] = nir_src_for_ssa(&nir_deref->dest.ssa);
+ instr->src[0] = nir_src_for_ssa(&nir_deref->def);
nir_intrinsic_set_atomic_op(instr, atomic_op);
nir_intrinsic_set_access(instr, deref_get_qualifier(nir_deref));
/* Atomic result */
assert(ir->return_deref);
if (ir->return_deref->type->is_integer_64()) {
- nir_def_init(&instr->instr, &instr->dest.ssa,
+ nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 64);
} else {
- nir_def_init(&instr->instr, &instr->dest.ssa,
+ nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 32);
}
nir_builder_instr_insert(&b, &instr->instr);
exec_node *param = ir->actual_parameters.get_head();
ir_dereference *counter = (ir_dereference *)param;
- instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->dest.ssa);
+ instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->def);
param = param->get_next();
/* Set the intrinsic destination. */
if (ir->return_deref) {
- nir_def_init(&instr->instr, &instr->dest.ssa, 1, 32);
+ nir_def_init(&instr->instr, &instr->def, 1, 32);
}
/* Set the intrinsic parameters. */
nir_intrinsic_set_atomic_op(instr, atomic_op);
}
- instr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ instr->src[0] = nir_src_for_ssa(&deref->def);
param = param->get_next();
nir_intrinsic_set_image_dim(instr,
(glsl_sampler_dim)type->sampler_dimensionality);
} else
num_components = ir->return_deref->type->vector_elements;
- nir_def_init(&instr->instr, &instr->dest.ssa, num_components, 32);
+ nir_def_init(&instr->instr, &instr->def, num_components, 32);
}
if (op == nir_intrinsic_image_deref_size) {
- instr->num_components = instr->dest.ssa.num_components;
+ instr->num_components = instr->def.num_components;
} else if (op == nir_intrinsic_image_deref_load ||
op == nir_intrinsic_image_deref_sparse_load) {
- instr->num_components = instr->dest.ssa.num_components;
+ instr->num_components = instr->def.num_components;
nir_intrinsic_set_dest_type(instr,
nir_get_nir_type_for_glsl_base_type(type->sampled_type));
} else if (op == nir_intrinsic_image_deref_store) {
break;
}
case nir_intrinsic_shader_clock:
- nir_def_init(&instr->instr, &instr->dest.ssa, 2, 32);
+ nir_def_init(&instr->instr, &instr->def, 2, 32);
nir_intrinsic_set_memory_scope(instr, SCOPE_SUBGROUP);
nir_builder_instr_insert(&b, &instr->instr);
break;
/* Setup destination register */
unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
- nir_def_init(&instr->instr, &instr->dest.ssa, type->vector_elements,
+ nir_def_init(&instr->instr, &instr->def, type->vector_elements,
bit_size);
nir_builder_instr_insert(&b, &instr->instr);
/* The value in shared memory is a 32-bit value */
if (type->is_boolean())
- ret = nir_b2b1(&b, &instr->dest.ssa);
+ ret = nir_b2b1(&b, &instr->def);
break;
}
case nir_intrinsic_store_shared: {
FALLTHROUGH;
case nir_intrinsic_vote_any:
case nir_intrinsic_vote_all: {
- nir_def_init(&instr->instr, &instr->dest.ssa, 1, 1);
+ nir_def_init(&instr->instr, &instr->def, 1, 1);
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
}
case nir_intrinsic_ballot: {
- nir_def_init(&instr->instr, &instr->dest.ssa,
+ nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 64);
instr->num_components = ir->return_deref->type->vector_elements;
break;
}
case nir_intrinsic_read_invocation: {
- nir_def_init(&instr->instr, &instr->dest.ssa,
+ nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 32);
instr->num_components = ir->return_deref->type->vector_elements;
break;
}
case nir_intrinsic_read_first_invocation: {
- nir_def_init(&instr->instr, &instr->dest.ssa,
+ nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 32);
instr->num_components = ir->return_deref->type->vector_elements;
break;
}
case nir_intrinsic_is_helper_invocation: {
- nir_def_init(&instr->instr, &instr->dest.ssa, 1, 1);
+ nir_def_init(&instr->instr, &instr->def, 1, 1);
nir_builder_instr_insert(&b, &instr->instr);
break;
}
case nir_intrinsic_is_sparse_texels_resident: {
- nir_def_init(&instr->instr, &instr->dest.ssa, 1, 1);
+ nir_def_init(&instr->instr, &instr->def, 1, 1);
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
nir_local_variable_create(this->impl, ir->return_deref->type,
"return_tmp");
ret_deref = nir_build_deref_var(&b, ret_tmp);
- call->params[i++] = nir_src_for_ssa(&ret_deref->dest.ssa);
+ call->params[i++] = nir_src_for_ssa(&ret_deref->def);
}
foreach_two_lists(formal_node, &ir->callee->parameters,
if (sig_param->data.mode == ir_var_function_out) {
nir_deref_instr *out_deref = evaluate_deref(param_rvalue);
- call->params[i] = nir_src_for_ssa(&out_deref->dest.ssa);
+ call->params[i] = nir_src_for_ssa(&out_deref->def);
} else if (sig_param->data.mode == ir_var_function_in) {
nir_def *val = evaluate_rvalue(param_rvalue);
nir_src src = nir_src_for_ssa(val);
case nir_instr_type_intrinsic:
intrinsic_instr = nir_instr_as_intrinsic(instr);
if (nir_intrinsic_infos[intrinsic_instr->intrinsic].has_dest)
- return &intrinsic_instr->dest.ssa;
+ return &intrinsic_instr->def;
else
return NULL;
case nir_instr_type_tex:
tex_instr = nir_instr_as_tex(instr);
- return &tex_instr->dest.ssa;
+ return &tex_instr->def;
default:
unreachable("not reached");
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(shader, op);
intrin->num_components = deref->type->vector_elements;
- intrin->src[0] = nir_src_for_ssa(&this->deref->dest.ssa);
+ intrin->src[0] = nir_src_for_ssa(&this->deref->def);
if (intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
intrin->intrinsic == nir_intrinsic_interp_deref_at_sample)
nir_intrinsic_deref_buffer_array_length);
ir_dereference *deref = ir->operands[0]->as_dereference();
- intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->dest.ssa);
+ intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->def);
add_instr(&intrin->instr, 1, 32);
return;
instr->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_handle, load);
} else {
instr->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &sampler_deref->dest.ssa);
+ &sampler_deref->def);
instr->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &sampler_deref->dest.ssa);
+ &sampler_deref->def);
}
unsigned src_number = 2;
return &nir_instr_as_alu(instr)->def;
case nir_instr_type_deref:
- return &nir_instr_as_deref(instr)->dest.ssa;
+ return &nir_instr_as_deref(instr)->def;
case nir_instr_type_tex:
- return &nir_instr_as_tex(instr)->dest.ssa;
+ return &nir_instr_as_tex(instr)->def;
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
- return &intrin->dest.ssa;
+ return &intrin->def;
} else {
return NULL;
}
}
case nir_instr_type_phi:
- return &nir_instr_as_phi(instr)->dest.ssa;
+ return &nir_instr_as_phi(instr)->def;
case nir_instr_type_parallel_copy:
unreachable("Parallel copies are unsupported by this function");
return false;
}
-typedef struct {
- nir_def ssa;
-} nir_dest;
-
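This removal is the heart of the series: nir_dest had become a wrapper around a single nir_def, so every destination-bearing instruction now embeds the nir_def directly and each dest.ssa path collapses to def. For any such instruction the access pattern changes as follows:

   /* before */ unsigned bits = intrin->dest.ssa.bit_size;
   /* after  */ unsigned bits = intrin->def.bit_size;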
static inline nir_src
nir_src_for_ssa(nir_def *def)
{
};
/** Destination to store the resulting "pointer" */
- nir_dest dest;
+ nir_def def;
} nir_deref_instr;
/** Returns true if deref might have one of the given modes
nir_intrinsic_op intrinsic;
- nir_dest dest;
+ nir_def def;
/** number of components if this is a vectorized intrinsic
*
nir_texop op;
/** Destination */
- nir_dest dest;
+ nir_def def;
/** Array of sources
*
struct exec_list srcs; /** < list of nir_phi_src */
- nir_dest dest;
+ nir_def def;
} nir_phi_instr;
static inline nir_phi_src *
bool dest_is_reg;
nir_src src;
union {
- nir_dest dest;
+ nir_def def;
nir_src reg;
} dest;
} nir_parallel_copy_entry;
#define nir_foreach_reg_load(load, reg) \
assert(reg->intrinsic == nir_intrinsic_decl_reg); \
\
- nir_foreach_use(load, &reg->dest.ssa) \
+ nir_foreach_use(load, &reg->def) \
if (nir_is_load_reg(nir_instr_as_intrinsic(load->parent_instr)))
#define nir_foreach_reg_store(store, reg) \
assert(reg->intrinsic == nir_intrinsic_decl_reg); \
\
- nir_foreach_use(store, &reg->dest.ssa) \
+ nir_foreach_use(store, &reg->def) \
if (nir_is_store_reg(nir_instr_as_intrinsic(store->parent_instr)))
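With the wrapper gone, a decl_reg intrinsic exposes its register handle as reg->def, and these macros simply walk that def's use list. A small usage sketch (the counting helper is hypothetical):

   static unsigned
   count_reg_stores(nir_intrinsic_instr *reg)
   {
      unsigned n = 0;
      nir_foreach_reg_store(store, reg)
         n++;
      return n;
   }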
static inline nir_intrinsic_instr *
unsigned src_idx = 0;
tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &texture->dest.ssa);
+ &texture->def);
if (sampler != NULL) {
assert(glsl_type_is_sampler(sampler->type));
tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &sampler->dest.ssa);
+ &sampler->def);
}
for (unsigned i = 0; i < num_extra_srcs; i++) {
switch (extra_srcs[i].src_type) {
}
assert(src_idx == num_srcs);
- nir_def_init(&tex->instr, &tex->dest.ssa, nir_tex_instr_dest_size(tex),
+ nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex),
nir_alu_type_get_type_size(tex->dest_type));
nir_builder_instr_insert(build, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
nir_def *
load->num_components = num_components;
load->const_index[0] = index;
- nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
+ nir_def_init(&load->instr, &load->def, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
void
assert(then_def->num_components == else_def->num_components);
assert(then_def->bit_size == else_def->bit_size);
- nir_def_init(&phi->instr, &phi->dest.ssa, then_def->num_components,
+ nir_def_init(&phi->instr, &phi->def, then_def->num_components,
then_def->bit_size);
nir_builder_instr_insert(build, &phi->instr);
- return &phi->dest.ssa;
+ return &phi->def;
}
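Callers of this helper (this appears to be nir_if_phi) are untouched by the rename; only its internals change. Typical usage, for reference:

   nir_push_if(b, cond);
   nir_def *t = nir_imm_int(b, 1);
   nir_push_else(b, NULL);
   nir_def *e = nir_imm_int(b, 0);
   nir_pop_if(b, NULL);
   nir_def *v = nir_if_phi(b, t, e); /* returns &phi->def */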
nir_loop *
deref->type = var->type;
deref->var = var;
- nir_def_init(&deref->instr, &deref->dest.ssa, 1,
+ nir_def_init(&deref->instr, &deref->def, 1,
nir_get_ptr_bitsize(build->shader));
nir_builder_instr_insert(build, &deref->instr);
glsl_type_is_matrix(parent->type) ||
glsl_type_is_vector(parent->type));
- assert(index->bit_size == parent->dest.ssa.bit_size);
+ assert(index->bit_size == parent->def.bit_size);
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_array);
deref->modes = parent->modes;
deref->type = glsl_get_array_element(parent->type);
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
deref->arr.index = nir_src_for_ssa(index);
- nir_def_init(&deref->instr, &deref->dest.ssa,
- parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
int64_t index)
{
nir_def *idx_ssa = nir_imm_intN_t(build, index,
- parent->dest.ssa.bit_size);
+ parent->def.bit_size);
return nir_build_deref_array(build, parent, idx_ssa);
}
parent->deref_type == nir_deref_type_ptr_as_array ||
parent->deref_type == nir_deref_type_cast);
- assert(index->bit_size == parent->dest.ssa.bit_size);
+ assert(index->bit_size == parent->def.bit_size);
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array);
deref->modes = parent->modes;
deref->type = parent->type;
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
deref->arr.index = nir_src_for_ssa(index);
- nir_def_init(&deref->instr, &deref->dest.ssa,
- parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
deref->modes = parent->modes;
deref->type = glsl_get_array_element(parent->type);
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
- nir_def_init(&deref->instr, &deref->dest.ssa,
- parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
deref->modes = parent->modes;
deref->type = glsl_get_struct_field(parent->type, index);
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
deref->strct.index = index;
- nir_def_init(&deref->instr, &deref->dest.ssa,
- parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
deref->parent = nir_src_for_ssa(parent);
deref->cast.ptr_stride = ptr_stride;
- nir_def_init(&deref->instr, &deref->dest.ssa, parent->num_components,
+ nir_def_init(&deref->instr, &deref->def, parent->num_components,
parent->bit_size);
nir_builder_instr_insert(build, &deref->instr);
deref->modes = parent->modes;
deref->type = parent->type;
- deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ deref->parent = nir_src_for_ssa(&parent->def);
deref->cast.ptr_stride = nir_deref_instr_array_stride(deref);
deref->cast.align_mul = align_mul;
deref->cast.align_offset = align_offset;
- nir_def_init(&deref->instr, &deref->dest.ssa,
- parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
+ nir_def_init(&deref->instr, &deref->def,
+ parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
nir_deref_instr *leader)
{
/* If the derefs would have the same parent, don't make a new one */
- if (leader->parent.ssa == &parent->dest.ssa)
+ if (leader->parent.ssa == &parent->def)
return leader;
UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent);
if (leader->deref_type == nir_deref_type_array) {
nir_def *index = nir_i2iN(b, leader->arr.index.ssa,
- parent->dest.ssa.bit_size);
+ parent->def.bit_size);
return nir_build_deref_array(b, parent, index);
} else {
return nir_build_deref_array_wildcard(b, parent);
enum gl_access_qualifier access)
{
return nir_build_load_deref(build, glsl_get_vector_elements(deref->type),
- glsl_get_bit_size(deref->type), &deref->dest.ssa,
+ glsl_get_bit_size(deref->type), &deref->def,
access);
}
enum gl_access_qualifier access)
{
writemask &= (1u << value->num_components) - 1u;
- nir_build_store_deref(build, &deref->dest.ssa, value, writemask, access);
+ nir_build_store_deref(build, &deref->def, value, writemask, access);
}
#undef nir_store_deref
enum gl_access_qualifier dest_access,
enum gl_access_qualifier src_access)
{
- nir_build_copy_deref(build, &dest->dest.ssa, &src->dest.ssa, dest_access, src_access);
+ nir_build_copy_deref(build, &dest->def, &src->def, dest_access, src_access);
}
#undef nir_copy_deref
enum gl_access_qualifier dest_access,
enum gl_access_qualifier src_access)
{
- nir_build_memcpy_deref(build, &dest->dest.ssa, &src->dest.ssa,
+ nir_build_memcpy_deref(build, &dest->def, &src->def,
size, dest_access, src_access);
}
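Each deref builder above now copies num_components and bit_size from parent->def into the child deref's def. A short chain for illustration (var and its array type are assumed):

   nir_deref_instr *base = nir_build_deref_var(b, var);
   nir_deref_instr *elem = nir_build_deref_array_imm(b, base, 3);
   nir_def *value = nir_load_deref(b, elem); /* addressed via &elem->def */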
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
- nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
+ nir_def_init(&load->instr, &load->def, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
#undef nir_store_global
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
- nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
+ nir_def_init(&load->instr, &load->def, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
#undef nir_load_param
nir_intrinsic_set_bit_size(decl, bit_size);
nir_intrinsic_set_num_array_elems(decl, num_array_elems);
nir_intrinsic_set_divergent(decl, true);
- nir_def_init(&decl->instr, &decl->dest.ssa, 1, 32);
+ nir_def_init(&decl->instr, &decl->def, 1, 32);
nir_instr_insert(nir_before_cf_list(&b->impl->body), &decl->instr);
- return &decl->dest.ssa;
+ return &decl->def;
}
#undef nir_load_reg
{
unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
- nir_def_init(&bary->instr, &bary->dest.ssa, num_components, 32);
+ nir_def_init(&bary->instr, &bary->def, num_components, 32);
nir_intrinsic_set_interp_mode(bary, interp_mode);
nir_builder_instr_insert(build, &bary->instr);
- return &bary->dest.ssa;
+ return &bary->def;
}
static inline void
% endif
% if opcode.has_dest:
% if opcode.dest_components == 0:
- nir_def_init(&intrin->instr, &intrin->dest.ssa, intrin->num_components, ${get_intrinsic_bitsize(opcode)});
+ nir_def_init(&intrin->instr, &intrin->def, intrin->num_components, ${get_intrinsic_bitsize(opcode)});
% else:
- nir_def_init(&intrin->instr, &intrin->dest.ssa, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)});
+ nir_def_init(&intrin->instr, &intrin->def, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)});
% endif
% endif
% for i in range(opcode.num_srcs):
indices.align_mul = src${opcode.src_components.index(0)}->bit_size / 8u;
% elif ALIGN_MUL in opcode.indices and opcode.dest_components == 0:
if (!indices.align_mul)
- indices.align_mul = intrin->dest.ssa.bit_size / 8u;
+ indices.align_mul = intrin->def.bit_size / 8u;
% endif
% for index in opcode.indices:
nir_intrinsic_set_${index.name}(intrin, indices.${index.name});
nir_builder_instr_insert(build, &intrin->instr);
% if opcode.has_dest:
- return &intrin->dest.ssa;
+ return &intrin->def;
% else:
return intrin;
% endif
/* Add in an LOD because some back-ends require it */
txs->src[idx] = nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(b, 0));
- nir_def_init(&txs->instr, &txs->dest.ssa, nir_tex_instr_dest_size(txs), 32);
+ nir_def_init(&txs->instr, &txs->def, nir_tex_instr_dest_size(txs), 32);
nir_builder_instr_insert(b, &txs->instr);
- return &txs->dest.ssa;
+ return &txs->def;
}
nir_def *
}
}
- nir_def_init(&tql->instr, &tql->dest.ssa, 2, 32);
+ nir_def_init(&tql->instr, &tql->def, 2, 32);
nir_builder_instr_insert(b, &tql->instr);
/* The LOD is the y component of the result */
- return nir_channel(b, &tql->dest.ssa, 1);
+ return nir_channel(b, &tql->def, 1);
}
nir_deref_instr *nderef =
nir_deref_instr_create(state->ns, deref->deref_type);
- __clone_def(state, &nderef->instr, &nderef->dest.ssa, &deref->dest.ssa);
+ __clone_def(state, &nderef->instr, &nderef->def, &deref->def);
nderef->modes = deref->modes;
nderef->type = deref->type;
unsigned num_srcs = nir_intrinsic_infos[itr->intrinsic].num_srcs;
if (nir_intrinsic_infos[itr->intrinsic].has_dest)
- __clone_def(state, &nitr->instr, &nitr->dest.ssa, &itr->dest.ssa);
+ __clone_def(state, &nitr->instr, &nitr->def, &itr->def);
nitr->num_components = itr->num_components;
memcpy(nitr->const_index, itr->const_index, sizeof(nitr->const_index));
ntex->sampler_dim = tex->sampler_dim;
ntex->dest_type = tex->dest_type;
ntex->op = tex->op;
- __clone_def(state, &ntex->instr, &ntex->dest.ssa, &tex->dest.ssa);
+ __clone_def(state, &ntex->instr, &ntex->def, &tex->def);
for (unsigned i = 0; i < ntex->num_srcs; i++) {
ntex->src[i].src_type = tex->src[i].src_type;
__clone_src(state, &ntex->instr, &ntex->src[i].src, &tex->src[i].src);
{
nir_phi_instr *nphi = nir_phi_instr_create(state->ns);
- __clone_def(state, &nphi->instr, &nphi->dest.ssa, &phi->dest.ssa);
+ __clone_def(state, &nphi->instr, &nphi->def, &phi->def);
/* Cloning a phi node is a bit different from other instructions. The
* sources of phi instructions are the only time where we can use an SSA
nir_foreach_phi(phi, block) {
nir_undef_instr *undef =
nir_undef_instr_create(impl->function->shader,
- phi->dest.ssa.num_components,
- phi->dest.ssa.bit_size);
+ phi->def.num_components,
+ phi->def.bit_size);
nir_instr_insert_before_cf_list(&impl->body, &undef->instr);
nir_phi_src *src = nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(&undef->def));
list_addtail(&src->src.use_link, &undef->def.uses);
return cast->modes == parent->modes &&
cast->type == parent->type &&
- cast->dest.ssa.num_components == parent->dest.ssa.num_components &&
- cast->dest.ssa.bit_size == parent->dest.ssa.bit_size;
+ cast->def.num_components == parent->def.num_components &&
+ cast->def.bit_size == parent->def.bit_size;
}
void
for (nir_deref_instr *d = instr; d; d = nir_deref_instr_parent(d)) {
/* If anyone is using this deref, leave it alone */
- if (!nir_def_is_unused(&d->dest.ssa))
+ if (!nir_def_is_unused(&d->def))
break;
nir_instr_remove(&d->instr);
nir_deref_instr_has_complex_use(nir_deref_instr *deref,
nir_deref_instr_has_complex_use_options opts)
{
- nir_foreach_use_including_if(use_src, &deref->dest.ssa) {
+ nir_foreach_use_including_if(use_src, &deref->def) {
if (use_src->is_if)
return true;
nir_deref_path path;
nir_deref_path_init(&path, deref, NULL);
- nir_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size);
+ nir_def *offset = nir_imm_intN_t(b, 0, deref->def.bit_size);
for (nir_deref_instr **p = &path.path[1]; *p; p++) {
switch ((*p)->deref_type) {
case nir_deref_type_array:
nir_deref_instr *parent = nir_src_as_deref(deref->parent);
if (parent) {
parent = rematerialize_deref_in_block(parent, state);
- new_deref->parent = nir_src_for_ssa(&parent->dest.ssa);
+ new_deref->parent = nir_src_for_ssa(&parent->def);
} else {
nir_src_copy(&new_deref->parent, &deref->parent, &new_deref->instr);
}
unreachable("Invalid deref instruction type");
}
- nir_def_init(&new_deref->instr, &new_deref->dest.ssa,
- deref->dest.ssa.num_components, deref->dest.ssa.bit_size);
+ nir_def_init(&new_deref->instr, &new_deref->def,
+ deref->def.num_components, deref->def.bit_size);
nir_builder_instr_insert(b, &new_deref->instr);
return new_deref;
nir_deref_instr *block_deref = rematerialize_deref_in_block(deref, state);
if (block_deref != deref) {
nir_instr_rewrite_src(src->parent_instr, src,
- nir_src_for_ssa(&block_deref->dest.ssa));
+ nir_src_for_ssa(&block_deref->def));
nir_deref_instr_remove_if_unused(deref);
state->progress = true;
}
static void
nir_deref_instr_fixup_child_types(nir_deref_instr *parent)
{
- nir_foreach_use(use, &parent->dest.ssa) {
+ nir_foreach_use(use, &parent->def) {
if (use->parent_instr->type != nir_instr_type_deref)
continue;
/* We're a cast from a more detailed sampler type to a bare sampler or a
* texture type with the same dimensionality.
*/
- nir_def_rewrite_uses(&cast->dest.ssa,
- &parent->dest.ssa);
+ nir_def_rewrite_uses(&cast->def,
+ &parent->def);
nir_instr_remove(&cast->instr);
/* Recursively crawl the deref tree and clean up types */
return false;
nir_deref_instr *replace = nir_build_deref_struct(b, parent, 0);
- nir_def_rewrite_uses(&cast->dest.ssa, &replace->dest.ssa);
+ nir_def_rewrite_uses(&cast->def, &replace->def);
nir_deref_instr_remove_if_unused(cast);
return true;
}
bool trivial_array_cast = is_trivial_array_deref_cast(cast);
- nir_foreach_use_including_if_safe(use_src, &cast->dest.ssa) {
+ nir_foreach_use_including_if_safe(use_src, &cast->def) {
assert(!use_src->is_if && "there cannot be if-uses");
/* If this isn't a trivial array cast, we can't propagate into
parent->cast.align_mul == 0 &&
is_trivial_deref_cast(parent))
parent = nir_deref_instr_parent(parent);
- nir_def_rewrite_uses(&deref->dest.ssa,
- &parent->dest.ssa);
+ nir_def_rewrite_uses(&deref->def,
+ &parent->def);
nir_instr_remove(&deref->instr);
return true;
}
{
nir_deref_instr *deref = nir_src_as_deref(load->src[0]);
nir_component_mask_t read_mask =
- nir_def_components_read(&load->dest.ssa);
+ nir_def_components_read(&load->def);
/* LLVM loves to take advantage of the fact that vec3s in OpenCL are
* vec4-aligned and so it can just read/write them as vec4s. This
* results in a LOT of vec4->vec3 casts on loads and stores.
*/
if (is_vector_bitcast_deref(deref, read_mask, false)) {
- const unsigned old_num_comps = load->dest.ssa.num_components;
- const unsigned old_bit_size = load->dest.ssa.bit_size;
+ const unsigned old_num_comps = load->def.num_components;
+ const unsigned old_bit_size = load->def.bit_size;
nir_deref_instr *parent = nir_src_as_deref(deref->parent);
const unsigned new_num_comps = glsl_get_vector_elements(parent->type);
/* Stomp it to reference the parent */
nir_instr_rewrite_src(&load->instr, &load->src[0],
- nir_src_for_ssa(&parent->dest.ssa));
- load->dest.ssa.bit_size = new_bit_size;
- load->dest.ssa.num_components = new_num_comps;
+ nir_src_for_ssa(&parent->def));
+ load->def.bit_size = new_bit_size;
+ load->def.num_components = new_num_comps;
load->num_components = new_num_comps;
b->cursor = nir_after_instr(&load->instr);
- nir_def *data = &load->dest.ssa;
+ nir_def *data = &load->def;
if (old_bit_size != new_bit_size)
- data = nir_bitcast_vector(b, &load->dest.ssa, old_bit_size);
+ data = nir_bitcast_vector(b, &load->def, old_bit_size);
data = resize_vector(b, data, old_num_comps);
- nir_def_rewrite_uses_after(&load->dest.ssa, data,
+ nir_def_rewrite_uses_after(&load->def, data,
data->parent_instr);
return true;
}
const unsigned new_bit_size = glsl_get_bit_size(parent->type);
nir_instr_rewrite_src(&store->instr, &store->src[0],
- nir_src_for_ssa(&parent->dest.ssa));
+ nir_src_for_ssa(&parent->def));
/* Restrict things down as needed so the bitcast doesn't fail */
data = nir_trim_vector(b, data, util_last_bit(write_mask));
if (deref_is == NULL)
return false;
- nir_def_rewrite_uses(&intrin->dest.ssa, deref_is);
+ nir_def_rewrite_uses(&intrin->def, deref_is);
nir_instr_remove(&intrin->instr);
return true;
}
if (!nir_intrinsic_infos[instr->intrinsic].has_dest)
return false;
- if (instr->dest.ssa.divergent)
+ if (instr->def.divergent)
return false;
nir_divergence_options options = shader->options->divergence_analysis_options;
#endif
}
- instr->dest.ssa.divergent = is_divergent;
+ instr->def.divergent = is_divergent;
return is_divergent;
}
static bool
visit_tex(nir_tex_instr *instr)
{
- if (instr->dest.ssa.divergent)
+ if (instr->def.divergent)
return false;
bool is_divergent = false;
}
}
- instr->dest.ssa.divergent = is_divergent;
+ instr->def.divergent = is_divergent;
return is_divergent;
}
static bool
visit_deref(nir_shader *shader, nir_deref_instr *deref)
{
- if (deref->dest.ssa.divergent)
+ if (deref->def.divergent)
return false;
bool is_divergent = false;
break;
}
- deref->dest.ssa.divergent = is_divergent;
+ deref->def.divergent = is_divergent;
return is_divergent;
}
static bool
visit_if_merge_phi(nir_phi_instr *phi, bool if_cond_divergent)
{
- if (phi->dest.ssa.divergent)
+ if (phi->def.divergent)
return false;
unsigned defined_srcs = 0;
nir_foreach_phi_src(src, phi) {
/* if any source value is divergent, the resulting value is divergent */
if (src->src.ssa->divergent) {
- phi->dest.ssa.divergent = true;
+ phi->def.divergent = true;
return true;
}
if (src->src.ssa->parent_instr->type != nir_instr_type_ssa_undef) {
/* if the condition is divergent and two sources defined, the definition is divergent */
if (defined_srcs > 1 && if_cond_divergent) {
- phi->dest.ssa.divergent = true;
+ phi->def.divergent = true;
return true;
}
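Concretely: after 'if (divergent_cond) x = a; else x = b;', the merge phi for x is divergent even when a and b are each uniform, because invocations in the same wave reach the phi from different predecessors; that is exactly the defined_srcs > 1 && if_cond_divergent case handled above.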
static bool
visit_loop_header_phi(nir_phi_instr *phi, nir_block *preheader, bool divergent_continue)
{
- if (phi->dest.ssa.divergent)
+ if (phi->def.divergent)
return false;
nir_def *same = NULL;
nir_foreach_phi_src(src, phi) {
/* if any source value is divergent, the resulting value is divergent */
if (src->src.ssa->divergent) {
- phi->dest.ssa.divergent = true;
+ phi->def.divergent = true;
return true;
}
/* if this loop is uniform, we're done here */
if (!same)
same = src->src.ssa;
else if (same != src->src.ssa) {
- phi->dest.ssa.divergent = true;
+ phi->def.divergent = true;
return true;
}
}
static bool
visit_loop_exit_phi(nir_phi_instr *phi, bool divergent_break)
{
- if (phi->dest.ssa.divergent)
+ if (phi->def.divergent)
return false;
if (divergent_break) {
- phi->dest.ssa.divergent = true;
+ phi->def.divergent = true;
return true;
}
/* if any source value is divergent, the resulting value is divergent */
nir_foreach_phi_src(src, phi) {
if (src->src.ssa->divergent) {
- phi->dest.ssa.divergent = true;
+ phi->def.divergent = true;
return true;
}
}
/* handle phis after the IF */
nir_foreach_phi(phi, nir_cf_node_cf_tree_next(&if_stmt->cf_node)) {
if (state->first_visit)
- phi->dest.ssa.divergent = false;
+ phi->def.divergent = false;
progress |= visit_if_merge_phi(phi, if_stmt->condition.ssa->divergent);
}
/* handle loop header phis first: we have no knowledge yet about
* the loop's control flow or any loop-carried sources. */
nir_foreach_phi(phi, loop_header) {
- if (!state->first_visit && phi->dest.ssa.divergent)
+ if (!state->first_visit && phi->def.divergent)
continue;
nir_foreach_phi_src(src, phi) {
if (src->pred == loop_preheader) {
- phi->dest.ssa.divergent = src->src.ssa->divergent;
+ phi->def.divergent = src->src.ssa->divergent;
break;
}
}
- progress |= phi->dest.ssa.divergent;
+ progress |= phi->def.divergent;
}
/* setup loop state */
/* handle phis after the loop */
nir_foreach_phi(phi, nir_cf_node_cf_tree_next(&loop->cf_node)) {
if (state->first_visit)
- phi->dest.ssa.divergent = false;
+ phi->def.divergent = false;
progress |= visit_loop_exit_phi(phi, loop_state.divergent_loop_break);
}
nir_parallel_copy_entry);
entry->src_is_reg = false;
entry->dest_is_reg = false;
- nir_def_init(&pcopy->instr, &entry->dest.dest.ssa,
- phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
- entry->dest.dest.ssa.divergent = nir_src_is_divergent(src->src);
+ nir_def_init(&pcopy->instr, &entry->dest.def,
+ phi->def.num_components, phi->def.bit_size);
+ entry->dest.def.divergent = nir_src_is_divergent(src->src);
exec_list_push_tail(&pcopy->entries, &entry->node);
nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);
nir_instr_rewrite_src(&phi->instr, &src->src,
- nir_src_for_ssa(&entry->dest.dest.ssa));
+ nir_src_for_ssa(&entry->dest.def));
}
nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
nir_parallel_copy_entry);
entry->src_is_reg = false;
entry->dest_is_reg = false;
- nir_def_init(&block_pcopy->instr, &entry->dest.dest.ssa,
- phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
- entry->dest.dest.ssa.divergent = phi->dest.ssa.divergent;
+ nir_def_init(&block_pcopy->instr, &entry->dest.def,
+ phi->def.num_components, phi->def.bit_size);
+ entry->dest.def.divergent = phi->def.divergent;
exec_list_push_tail(&block_pcopy->entries, &entry->node);
- nir_def_rewrite_uses(&phi->dest.ssa,
- &entry->dest.dest.ssa);
+ nir_def_rewrite_uses(&phi->def,
+ &entry->dest.def);
nir_instr_rewrite_src(&block_pcopy->instr, &entry->src,
- nir_src_for_ssa(&phi->dest.ssa));
+ nir_src_for_ssa(&phi->def));
}
return true;
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
nir_foreach_phi(phi, block) {
- merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);
+ merge_node *dest_node = get_merge_node(&phi->def, state);
nir_foreach_phi_src(src, phi) {
if (nir_src_is_undef(src->src))
nir_foreach_parallel_copy_entry(entry, pcopy) {
assert(!entry->src_is_reg);
assert(!entry->dest_is_reg);
- assert(entry->dest.dest.ssa.num_components ==
+ assert(entry->dest.def.num_components ==
entry->src.ssa->num_components);
/* Since load_const instructions are SSA only, we can't replace their
continue;
merge_node *src_node = get_merge_node(entry->src.ssa, state);
- merge_node *dest_node = get_merge_node(&entry->dest.dest.ssa, state);
+ merge_node *dest_node = get_merge_node(&entry->dest.def, state);
if (src_node->set == dest_node->set)
continue;
if (intr->intrinsic == nir_intrinsic_load_reg &&
intr->src[0].ssa == reg &&
nir_intrinsic_base(intr) == 0)
- load = &intr->dest.ssa;
+ load = &intr->def;
}
}
nir_phi_instr *phi = nir_instr_as_phi(instr);
struct hash_entry *entry =
- _mesa_hash_table_search(state->merge_node_table, &phi->dest.ssa);
+ _mesa_hash_table_search(state->merge_node_table, &phi->def);
assert(entry != NULL);
merge_node *node = (merge_node *)entry->data;
nir_foreach_parallel_copy_entry(entry, pcopy) {
assert(!entry->dest_is_reg);
- assert(nir_def_is_unused(&entry->dest.dest.ssa));
+ assert(nir_def_is_unused(&entry->dest.def));
/* Parallel copy destinations will always be registers */
- nir_def *reg = reg_for_ssa_def(&entry->dest.dest.ssa, state);
+ nir_def *reg = reg_for_ssa_def(&entry->dest.def, state);
assert(reg != NULL);
entry->dest_is_reg = true;
bool progress = false;
nir_foreach_phi_safe(phi, block) {
- nir_def *reg = decl_reg_for_ssa_def(&b, &phi->dest.ssa);
+ nir_def *reg = decl_reg_for_ssa_def(&b, &phi->def);
b.cursor = nir_after_instr(&phi->instr);
- nir_def_rewrite_uses(&phi->dest.ssa, nir_load_reg(&b, reg));
+ nir_def_rewrite_uses(&phi->def, nir_load_reg(&b, reg));
nir_foreach_phi_src(src, phi) {
nir_tex_instr_src_type(tex, i),
float_types, int_types, &progress);
}
- set_type(tex->dest.ssa.index, tex->dest_type,
+ set_type(tex->def.index, tex->dest_type,
float_types, int_types, &progress);
break;
}
nir_alu_type dest_type = nir_intrinsic_instr_dest_type(intrin);
if (dest_type != nir_type_invalid) {
- set_type(intrin->dest.ssa.index, dest_type,
+ set_type(intrin->def.index, dest_type,
float_types, int_types, &progress);
}
case nir_instr_type_phi: {
nir_phi_instr *phi = nir_instr_as_phi(instr);
nir_foreach_phi_src(src, phi) {
- copy_types(src->src, &phi->dest.ssa,
+ copy_types(src->src, &phi->def,
float_types, int_types, &progress);
}
break;
unsigned param_idx = nir_intrinsic_param_idx(load);
assert(param_idx < impl->function->num_params);
- nir_def_rewrite_uses(&load->dest.ssa,
+ nir_def_rewrite_uses(&load->def,
params[param_idx]);
/* Remove any left-over load_param intrinsics because they're soon
case nir_instr_type_alu:
return cb(&nir_instr_as_alu(instr)->def, state);
case nir_instr_type_deref:
- return cb(&nir_instr_as_deref(instr)->dest.ssa, state);
+ return cb(&nir_instr_as_deref(instr)->def, state);
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (nir_intrinsic_infos[intrin->intrinsic].has_dest)
- return cb(&intrin->dest.ssa, state);
+ return cb(&intrin->def, state);
return true;
}
case nir_instr_type_tex:
- return cb(&nir_instr_as_tex(instr)->dest.ssa, state);
+ return cb(&nir_instr_as_tex(instr)->def, state);
case nir_instr_type_phi:
- return cb(&nir_instr_as_phi(instr)->dest.ssa, state);
+ return cb(&nir_instr_as_phi(instr)->def, state);
case nir_instr_type_parallel_copy: {
nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
- if (!entry->dest_is_reg && !cb(&entry->dest.dest.ssa, state))
+ if (!entry->dest_is_reg && !cb(&entry->dest.def, state))
return false;
}
return true;
nir_src_is_const(intr->src[1]) &&
nir_src_as_uint(intr->src[1]) <= max_offset &&
/* TODO: Can't handle other bit sizes for now. */
- intr->dest.ssa.bit_size == 32) {
+ intr->def.bit_size == 32) {
/* num_offsets can be NULL if-and-only-if uni_offsets is NULL. */
assert((num_offsets == NULL) == (uni_offsets == NULL));
nir_src_as_uint(intr->src[0]) == 0 &&
nir_src_is_const(intr->src[1]) &&
/* TODO: Can't handle other bit sizes for now. */
- intr->dest.ssa.bit_size == 32) {
- int num_components = intr->dest.ssa.num_components;
+ intr->def.bit_size == 32) {
+ int num_components = intr->def.num_components;
uint32_t offset = nir_src_as_uint(intr->src[1]) / 4;
if (num_components == 1) {
if (offset == uniform_dw_offsets[i]) {
b.cursor = nir_before_instr(&intr->instr);
nir_def *def = nir_imm_int(&b, uniform_values[i]);
- nir_def_rewrite_uses(&intr->dest.ssa, def);
+ nir_def_rewrite_uses(&intr->def, def);
nir_instr_remove(&intr->instr);
break;
}
for (unsigned i = 0; i < num_components; i++) {
if (!components[i]) {
uint32_t scalar_offset = (offset + i) * 4;
- components[i] = nir_load_ubo(&b, 1, intr->dest.ssa.bit_size,
+ components[i] = nir_load_ubo(&b, 1, intr->def.bit_size,
intr->src[0].ssa,
nir_imm_int(&b, scalar_offset));
nir_intrinsic_instr *load =
}
/* Replace the original uniform load. */
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def_rewrite_uses(&intr->def,
nir_vec(&b, components, num_components));
nir_instr_remove(&intr->instr);
}
hash = HASH(hash, instr->intrinsic);
if (info->has_dest) {
- hash = HASH(hash, instr->dest.ssa.num_components);
- hash = HASH(hash, instr->dest.ssa.bit_size);
+ hash = HASH(hash, instr->def.num_components);
+ hash = HASH(hash, instr->def.bit_size);
}
hash = XXH32(instr->const_index, info->num_indices * sizeof(instr->const_index[0]), hash);
/* In case of phis with no sources, the dest needs to be checked
* to ensure that phis with incompatible dests won't get merged
* during CSE. */
- if (phi1->dest.ssa.num_components != phi2->dest.ssa.num_components)
+ if (phi1->def.num_components != phi2->def.num_components)
return false;
- if (phi1->dest.ssa.bit_size != phi2->dest.ssa.bit_size)
+ if (phi1->def.bit_size != phi2->def.bit_size)
return false;
nir_foreach_phi_src(src1, phi1) {
intrinsic1->num_components != intrinsic2->num_components)
return false;
- if (info->has_dest && intrinsic1->dest.ssa.num_components !=
- intrinsic2->dest.ssa.num_components)
+ if (info->has_dest && intrinsic1->def.num_components !=
+ intrinsic2->def.num_components)
return false;
- if (info->has_dest && intrinsic1->dest.ssa.bit_size !=
- intrinsic2->dest.ssa.bit_size)
+ if (info->has_dest && intrinsic1->def.bit_size !=
+ intrinsic2->def.bit_size)
return false;
for (unsigned i = 0; i < info->num_srcs; i++) {
}
static nir_def *
-nir_instr_get_dest_ssa_def(nir_instr *instr)
+nir_instr_get_def_def(nir_instr *instr)
{
switch (instr->type) {
case nir_instr_type_alu:
return &nir_instr_as_alu(instr)->def;
case nir_instr_type_deref:
- return &nir_instr_as_deref(instr)->dest.ssa;
+ return &nir_instr_as_deref(instr)->def;
case nir_instr_type_load_const:
return &nir_instr_as_load_const(instr)->def;
case nir_instr_type_phi:
- return &nir_instr_as_phi(instr)->dest.ssa;
+ return &nir_instr_as_phi(instr)->def;
case nir_instr_type_intrinsic:
- return &nir_instr_as_intrinsic(instr)->dest.ssa;
+ return &nir_instr_as_intrinsic(instr)->def;
case nir_instr_type_tex:
- return &nir_instr_as_tex(instr)->dest.ssa;
+ return &nir_instr_as_tex(instr)->def;
default:
unreachable("We never ask for any of these");
}
if (!cond_function || cond_function(match, instr)) {
/* rewrite instruction if condition is matched */
- nir_def *def = nir_instr_get_dest_ssa_def(instr);
- nir_def *new_def = nir_instr_get_dest_ssa_def(match);
+ nir_def *def = nir_instr_get_def_def(instr);
+ nir_def *new_def = nir_instr_get_def_def(match);
/* It's safe to replace an exact instruction with an inexact one as
* long as we make it exact. If we got here, the two instructions are
assert(!use->is_if);
assert(use->parent_instr->type == nir_instr_type_alu);
nir_alu_src *alu_use = list_entry(use, nir_alu_src, src);
- nir_src_rewrite(&alu_use->src, &load->dest.ssa);
+ nir_src_rewrite(&alu_use->src, &load->def);
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
alu_use->swizzle[i] = alu->src[0].swizzle[alu_use->swizzle[i]];
}
/* Add new const to replace the input */
nir_def *nconst = nir_build_imm(&b, store_intr->num_components,
- intr->dest.ssa.bit_size,
+ intr->def.bit_size,
out_const->value);
- nir_def_rewrite_uses(&intr->dest.ssa, nconst);
+ nir_def_rewrite_uses(&intr->def, nconst);
progress = true;
}
b.cursor = nir_before_instr(instr);
nir_def *load = nir_load_var(&b, input_var);
- nir_def_rewrite_uses(&intr->dest.ssa, load);
+ nir_def_rewrite_uses(&intr->def, load);
progress = true;
}
nir_load_const_instr *index =
nir_instr_as_load_const(deref->arr.index.ssa->parent_instr);
nir_def *ssa = nir_imm_intN_t(b, index->value->i64,
- parent->dest.ssa.bit_size);
+ parent->def.bit_size);
return nir_build_deref_ptr_as_array(b, parent, ssa);
}
case nir_deref_type_struct:
}
/* Replace load input with load uniform. */
- nir_def_rewrite_uses(&intr->dest.ssa, uni_def);
+ nir_def_rewrite_uses(&intr->def, uni_def);
progress = true;
}
memcpy(live, succ->live_in, state->bitset_words * sizeof *live);
nir_foreach_phi(phi, succ) {
- set_ssa_def_dead(&phi->dest.ssa, live);
+ set_ssa_def_dead(&phi->def, live);
}
nir_foreach_phi(phi, succ) {
/* Is one of the operands const or uniform, and the other the phi.
* The phi source can't be swizzled in any way.
*/
- if (alu->src[1 - i].src.ssa == &phi->dest.ssa &&
+ if (alu->src[1 - i].src.ssa == &phi->def &&
alu_src_has_identity_swizzle(alu, 1 - i)) {
if (is_only_uniform_src(&alu->src[i].src))
var->update_src = alu->src + i;
/* Turn the load into a vector load */
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
- nir_src_for_ssa(&vec_deref->dest.ssa));
- intrin->dest.ssa.num_components = num_components;
+ nir_src_for_ssa(&vec_deref->def));
+ intrin->def.num_components = num_components;
intrin->num_components = num_components;
nir_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
nir_def *scalar =
- nir_vector_extract(&b, &intrin->dest.ssa, index);
+ nir_vector_extract(&b, &intrin->def, index);
if (scalar->parent_instr->type == nir_instr_type_ssa_undef) {
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
scalar);
nir_instr_remove(&intrin->instr);
} else {
- nir_def_rewrite_uses_after(&intrin->dest.ssa,
+ nir_def_rewrite_uses_after(&intrin->def,
scalar,
scalar->parent_instr);
}
* num_components with one that has variable number. So
* best to take this from the dest:
*/
- new_instr->num_components = instr->dest.ssa.num_components;
+ new_instr->num_components = instr->def.num_components;
}
- nir_def_init(&new_instr->instr, &new_instr->dest.ssa,
- instr->dest.ssa.num_components, instr->dest.ssa.bit_size);
+ nir_def_init(&new_instr->instr, &new_instr->def,
+ instr->def.num_components, instr->def.bit_size);
nir_instr_insert_before(&instr->instr, &new_instr->instr);
nir_instr_remove(&instr->instr);
if (instr->intrinsic == nir_intrinsic_atomic_counter_pre_dec) {
b->cursor = nir_after_instr(&new_instr->instr);
- nir_def *result = nir_iadd(b, &new_instr->dest.ssa, temp);
- nir_def_rewrite_uses(&instr->dest.ssa, result);
+ nir_def *result = nir_iadd(b, &new_instr->def, temp);
+ nir_def_rewrite_uses(&instr->def, result);
} else {
- nir_def_rewrite_uses(&instr->dest.ssa, &new_instr->dest.ssa);
+ nir_def_rewrite_uses(&instr->def, &new_instr->def);
}
return true;
case nir_intrinsic_reduce:
case nir_intrinsic_inclusive_scan:
case nir_intrinsic_exclusive_scan: {
- const unsigned old_bit_size = intrin->dest.ssa.bit_size;
+ const unsigned old_bit_size = intrin->def.bit_size;
assert(old_bit_size < bit_size);
nir_alu_type type = nir_type_uint;
if (intrin->intrinsic == nir_intrinsic_vote_feq ||
intrin->intrinsic == nir_intrinsic_vote_ieq) {
/* These return a Boolean; it's always 1-bit */
- assert(new_intrin->dest.ssa.bit_size == 1);
+ assert(new_intrin->def.bit_size == 1);
} else {
/* These return the same bit size as the source; we need to adjust
* the size and then we'll have to emit a down-cast.
*/
- assert(intrin->src[0].ssa->bit_size == intrin->dest.ssa.bit_size);
- new_intrin->dest.ssa.bit_size = bit_size;
+ assert(intrin->src[0].ssa->bit_size == intrin->def.bit_size);
+ new_intrin->def.bit_size = bit_size;
}
nir_builder_instr_insert(b, &new_intrin->instr);
- nir_def *res = &new_intrin->dest.ssa;
+ nir_def *res = &new_intrin->def;
if (intrin->intrinsic == nir_intrinsic_exclusive_scan) {
/* For exclusive scan, we have to be careful because the identity
* value for the higher bit size may get added into the mix by
intrin->intrinsic != nir_intrinsic_vote_ieq)
res = nir_u2uN(b, res, old_bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa, res);
+ nir_def_rewrite_uses(&intrin->def, res);
break;
}
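The shape of that widening, sketched as NIR-style pseudo-IR for an illustrative 8-bit reduce (not taken from the patch):

   %w   = u2u32 %src8        ; widen the source
   %r   = reduce iadd %w     ; perform the scan/reduce at 32 bits
   %res = u2u8 %r            ; narrow back to the original bit size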
lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
nir_phi_instr *last_phi)
{
- unsigned old_bit_size = phi->dest.ssa.bit_size;
+ unsigned old_bit_size = phi->def.bit_size;
assert(old_bit_size < bit_size);
nir_foreach_phi_src(src, phi) {
nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));
}
- phi->dest.ssa.bit_size = bit_size;
+ phi->def.bit_size = bit_size;
b->cursor = nir_after_instr(&last_phi->instr);
- nir_def *new_dest = nir_u2uN(b, &phi->dest.ssa, old_bit_size);
- nir_def_rewrite_uses_after(&phi->dest.ssa, new_dest,
+ nir_def *new_dest = nir_u2uN(b, &phi->def, old_bit_size);
+ nir_def_rewrite_uses_after(&phi->def, new_dest,
new_dest->parent_instr);
}
nir_phi_instr_create(b->shader),
nir_phi_instr_create(b->shader)
};
- int num_components = phi->dest.ssa.num_components;
- assert(phi->dest.ssa.bit_size == 64);
+ int num_components = phi->def.num_components;
+ assert(phi->def.bit_size == 64);
nir_foreach_phi_src(src, phi) {
assert(num_components == src->src.ssa->num_components);
nir_phi_instr_add_src(lowered[1], src->pred, nir_src_for_ssa(y));
}
- nir_def_init(&lowered[0]->instr, &lowered[0]->dest.ssa, num_components, 32);
- nir_def_init(&lowered[1]->instr, &lowered[1]->dest.ssa, num_components, 32);
+ nir_def_init(&lowered[0]->instr, &lowered[0]->def, num_components, 32);
+ nir_def_init(&lowered[1]->instr, &lowered[1]->def, num_components, 32);
b->cursor = nir_before_instr(&phi->instr);
nir_builder_instr_insert(b, &lowered[0]->instr);
nir_builder_instr_insert(b, &lowered[1]->instr);
b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
- nir_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa);
- nir_def_rewrite_uses(&phi->dest.ssa, merged);
+ nir_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->def, &lowered[1]->def);
+ nir_def_rewrite_uses(&phi->def, merged);
nir_instr_remove(&phi->instr);
}
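What split_phi produces, again as illustrative pseudo-IR: the 64-bit phi becomes two 32-bit phis over the unpacked halves (the unpacks are emitted in each predecessor), repacked after the phis:

   %lo = phi pred1: unpack_64_2x32_split_x(%a), pred2: unpack_64_2x32_split_x(%b)
   %hi = phi pred1: unpack_64_2x32_split_y(%a), pred2: unpack_64_2x32_split_y(%b)
   %merged = pack_64_2x32_split %lo, %hi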
nir_phi_instr *phi = nir_instr_as_phi(instr);
- if (phi->dest.ssa.bit_size <= 32)
+ if (phi->def.bit_size <= 32)
return false;
split_phi(b, phi);
tex->coord_components = 2;
tex->dest_type = nir_type_float32;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &tex_deref->dest.ssa);
+ &tex_deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &tex_deref->dest.ssa);
+ &tex_deref->def);
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord,
nir_trim_vector(b, texcoord, tex->coord_components));
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
/* kill if tex != 0.0.. take .x or .w channel according to format: */
- cond = nir_fneu_imm(b, nir_channel(b, &tex->dest.ssa, options->swizzle_xxxx ? 0 : 3),
+ cond = nir_fneu_imm(b, nir_channel(b, &tex->def, options->swizzle_xxxx ? 0 : 3),
0.0);
nir_discard_if(b, cond);
static bool
lower_phi_instr(nir_builder *b, nir_phi_instr *phi)
{
- if (phi->dest.ssa.bit_size != 1)
+ if (phi->def.bit_size != 1)
return false;
/* Ensure all phi sources have a canonical bit-size. We choose the
}
}
- phi->dest.ssa.bit_size = dst_bit_size;
+ phi->def.bit_size = dst_bit_size;
return true;
}
lower_tex_instr(nir_tex_instr *tex)
{
bool progress = false;
- rewrite_1bit_ssa_def_to_32bit(&tex->dest.ssa, &progress);
+ rewrite_1bit_ssa_def_to_32bit(&tex->def, &progress);
if (tex->dest_type == nir_type_bool1) {
tex->dest_type = nir_type_bool32;
progress = true;
lower_tex_instr(nir_tex_instr *tex)
{
bool progress = false;
- rewrite_1bit_ssa_def_to_32bit(&tex->dest.ssa, &progress);
+ rewrite_1bit_ssa_def_to_32bit(&tex->def, &progress);
if (tex->dest_type == nir_type_bool1) {
tex->dest_type = nir_type_bool32;
progress = true;
lower_tex_instr(nir_tex_instr *tex)
{
bool progress = false;
- rewrite_1bit_ssa_def_to_32bit(&tex->dest.ssa, &progress);
+ rewrite_1bit_ssa_def_to_32bit(&tex->def, &progress);
if (tex->dest_type == nir_type_bool1) {
tex->dest_type = nir_type_bool32;
progress = true;
b.cursor = nir_instr_remove(&deref->instr);
nir_def *loc =
nir_imm_intN_t(&b, deref->var->data.driver_location,
- deref->dest.ssa.bit_size);
- nir_def_rewrite_uses(&deref->dest.ssa, loc);
+ deref->def.bit_size);
+ nir_def_rewrite_uses(&deref->def, loc);
progress = true;
break;
}
nir_def *new_def = nir_load_deref(&b, new_deref_instr);
- nir_def_rewrite_uses(&intrin->dest.ssa, new_def);
+ nir_def_rewrite_uses(&intrin->def, new_def);
nir_instr_remove(&intrin->instr);
}
}
nir_intrinsic_dest_type(conv),
nir_intrinsic_rounding_mode(conv),
nir_intrinsic_saturate(conv));
- nir_def_rewrite_uses(&conv->dest.ssa, val);
+ nir_def_rewrite_uses(&conv->def, val);
}
static bool
* we can assume there are none */
b->cursor = nir_before_instr(instr);
nir_def *zero = nir_imm_false(b);
- nir_def_rewrite_uses(&intrin->dest.ssa, zero);
+ nir_def_rewrite_uses(&intrin->def, zero);
nir_instr_remove_v(instr);
return true;
}
* top-level blocks to ensure correct behavior w.r.t. loops */
if (is_helper == NULL)
is_helper = insert_is_helper(b, instr);
- nir_def_rewrite_uses(&intrin->dest.ssa, is_helper);
+ nir_def_rewrite_uses(&intrin->def, is_helper);
nir_instr_remove_v(instr);
return true;
default:
nir_variable *ret_tmp =
nir_local_variable_create(b->impl, return_type, "return_tmp");
nir_deref_instr *ret_deref = nir_build_deref_var(b, ret_tmp);
- params[0] = &ret_deref->dest.ssa;
+ params[0] = &ret_deref->def;
assert(nir_op_infos[instr->op].num_inputs + 1 == func->num_params);
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
tex->coord_components = 2;
tex->dest_type = nir_type_float32;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &tex_deref->dest.ssa);
+ &tex_deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &tex_deref->dest.ssa);
+ &tex_deref->def);
tex->src[2] =
nir_tex_src_for_ssa(nir_tex_src_coord,
nir_trim_vector(b, texcoord, tex->coord_components));
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- def = &tex->dest.ssa;
+ def = &tex->def;
/* Apply the scale and bias. */
if (state->options->scale_and_bias) {
tex->texture_index = state->options->pixelmap_sampler;
tex->dest_type = nir_type_float32;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &pixelmap_deref->dest.ssa);
+ &pixelmap_deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &pixelmap_deref->dest.ssa);
+ &pixelmap_deref->def);
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord,
nir_trim_vector(b, def, 2));
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- def_xy = &tex->dest.ssa;
+ def_xy = &tex->def;
/* TEX def.zw, def.zwww, pixelmap_sampler, 2D; */
tex = nir_tex_instr_create(state->shader, 1);
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord,
nir_channels(b, def, 0xc));
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- def_zw = &tex->dest.ssa;
+ def_zw = &tex->def;
/* def = vec4(def.xy, def.zw); */
def = nir_vec4(b,
nir_channel(b, def_zw, 1));
}
- nir_def_rewrite_uses(&intr->dest.ssa, def);
+ nir_def_rewrite_uses(&intr->def, def);
return true;
}
b->cursor = nir_before_instr(&intr->instr);
nir_def *texcoord_const = get_texcoord_const(b, state);
- nir_def_rewrite_uses(&intr->dest.ssa, texcoord_const);
+ nir_def_rewrite_uses(&intr->def, texcoord_const);
return true;
}
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_texture_handle,
nir_imm_intN_t(b, io.location - FRAG_RESULT_DATA0, 32));
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- nir_def_rewrite_uses(&intr->dest.ssa, &tex->dest.ssa);
+ nir_def_rewrite_uses(&intr->def, &tex->def);
return true;
}
nir_intrinsic_dest_type(intrin) != nir_type_float16)
return false;
src = intrin->src[0].ssa;
- dst = &intrin->dest.ssa;
+ dst = &intrin->def;
mode = nir_intrinsic_rounding_mode(intrin);
} else {
return false;
nir_def *vec = nir_vec4(b, nir_channel(b, xy, 0), nir_channel(b, xy, 1),
nir_load_frag_coord_zw(b, .component = 2),
nir_load_frag_coord_zw(b, .component = 3));
- nir_def_rewrite_uses(&intr->dest.ssa, vec);
+ nir_def_rewrite_uses(&intr->def, vec);
return true;
}
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
return nir_vec4(b,
- nir_channel(b, &intr->dest.ssa, 0),
- nir_channel(b, &intr->dest.ssa, 1),
- nir_channel(b, &intr->dest.ssa, 2),
- nir_frcp(b, nir_channel(b, &intr->dest.ssa, 3)));
+ nir_channel(b, &intr->def, 0),
+ nir_channel(b, &intr->def, 1),
+ nir_channel(b, &intr->def, 2),
+ nir_frcp(b, nir_channel(b, &intr->def, 3)));
}
bool
*/
if (has_dest) {
nir_push_else(b, NULL);
- undef = nir_undef(b, intr->dest.ssa.num_components,
- intr->dest.ssa.bit_size);
+ undef = nir_undef(b, intr->def.num_components,
+ intr->def.bit_size);
}
nir_pop_if(b, NULL);
if (has_dest) {
- nir_def *phi = nir_if_phi(b, &intr->dest.ssa, undef);
+ nir_def *phi = nir_if_phi(b, &intr->def, undef);
/* We can't use nir_def_rewrite_uses_after on phis, so use the global
* version and fixup the phi manually
*/
- nir_def_rewrite_uses(&intr->dest.ssa, phi);
+ nir_def_rewrite_uses(&intr->def, phi);
nir_instr *phi_instr = phi->parent_instr;
nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
nir_phi_src *phi_src = nir_phi_get_src_from_block(phi_as_phi,
instr->block);
nir_instr_rewrite_src_ssa(phi->parent_instr, &phi_src->src,
- &intr->dest.ssa);
+ &intr->def);
}
return true;
nir_def *size = nir_instr_ssa_def(&_2darray_size->instr);
nir_scalar comps[NIR_MAX_VEC_COMPONENTS] = { 0 };
- unsigned coord_comps = intrin->dest.ssa.num_components;
+ unsigned coord_comps = intrin->def.num_components;
for (unsigned c = 0; c < coord_comps; c++) {
if (c == 2) {
comps[2] = nir_get_ssa_scalar(nir_idiv(b, nir_channel(b, size, 2), nir_imm_int(b, 6)), 0);
}
}
- nir_def *vec = nir_vec_scalars(b, comps, intrin->dest.ssa.num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, vec);
+ nir_def *vec = nir_vec_scalars(b, comps, intrin->def.num_components);
+ nir_def_rewrite_uses(&intrin->def, vec);
nir_instr_remove(&intrin->instr);
nir_instr_free(&intrin->instr);
}
break;
}
- nir_def_init(&fmask_load->instr, &fmask_load->dest.ssa, 1, 32);
+ nir_def_init(&fmask_load->instr, &fmask_load->def, 1, 32);
nir_builder_instr_insert(b, &fmask_load->instr);
- nir_def *samples_identical = nir_ieq_imm(b, &fmask_load->dest.ssa, 0);
- nir_def_rewrite_uses(&intrin->dest.ssa, samples_identical);
+ nir_def *samples_identical = nir_ieq_imm(b, &fmask_load->def, 0);
+ nir_def_rewrite_uses(&intrin->def, samples_identical);
nir_instr_remove(&intrin->instr);
nir_instr_free(&intrin->instr);
case nir_intrinsic_bindless_image_samples: {
if (options->lower_image_samples_to_one) {
b->cursor = nir_after_instr(&intrin->instr);
- nir_def *samples = nir_imm_intN_t(b, 1, intrin->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa, samples);
+ nir_def *samples = nir_imm_intN_t(b, 1, intrin->def.bit_size);
+ nir_def_rewrite_uses(&intrin->def, samples);
return true;
}
return false;
b->cursor = nir_before_instr(instr);
nir_atomic_op atomic_op = nir_intrinsic_atomic_op(intr);
enum pipe_format format = nir_intrinsic_format(intr);
- unsigned bit_size = intr->dest.ssa.bit_size;
+ unsigned bit_size = intr->def.bit_size;
/* Even for "formatless" access, we know the size of the texel accessed,
* since it's the size of the atomic. We can use that to synthesize a
/* Replace the image atomic with the global atomic. Remove the image
* explicitly because it has side effects so is not DCE'd.
*/
- nir_def_rewrite_uses(&intr->dest.ssa, global);
+ nir_def_rewrite_uses(&intr->def, global);
nir_instr_remove(instr);
return true;
}
nir_intrinsic_instr_create(b->shader, orig_instr->intrinsic);
load->num_components = orig_instr->num_components;
- load->src[0] = nir_src_for_ssa(&parent->dest.ssa);
+ load->src[0] = nir_src_for_ssa(&parent->def);
/* Copy over any other sources. This is needed for interp_deref_at */
for (unsigned i = 1;
i < nir_intrinsic_infos[orig_instr->intrinsic].num_srcs; i++)
nir_src_copy(&load->src[i], &orig_instr->src[i], &load->instr);
- nir_def_init(&load->instr, &load->dest.ssa,
- orig_instr->dest.ssa.num_components,
- orig_instr->dest.ssa.bit_size);
+ nir_def_init(&load->instr, &load->def,
+ orig_instr->def.num_components,
+ orig_instr->def.bit_size);
nir_builder_instr_insert(b, &load->instr);
- *dest = &load->dest.ssa;
+ *dest = &load->def;
} else {
assert(orig_instr->intrinsic == nir_intrinsic_store_deref);
nir_store_deref(b, parent, src, nir_intrinsic_write_mask(orig_instr));
nir_def *result;
emit_load_store_deref(b, intrin, base, &path.path[1],
&result, NULL);
- nir_def_rewrite_uses(&intrin->dest.ssa, result);
+ nir_def_rewrite_uses(&intrin->def, result);
}
nir_deref_path_finish(&path);
tex->sampler_index = 0;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &deref->dest.ssa);
+ &deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
tex->coord_components = 3;
tex->texture_non_uniform = nir_intrinsic_access(load) & ACCESS_NON_UNIFORM;
- nir_def_init(&tex->instr, &tex->dest.ssa, nir_tex_instr_dest_size(tex), 32);
+ nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex), 32);
nir_builder_instr_insert(b, &tex->instr);
if (tex->is_sparse) {
- unsigned load_result_size = load->dest.ssa.num_components - 1;
+ unsigned load_result_size = load->def.num_components - 1;
nir_component_mask_t load_result_mask = nir_component_mask(load_result_size);
nir_def *res = nir_channels(
- b, &tex->dest.ssa, load_result_mask | 0x10);
+ b, &tex->def, load_result_mask | 0x10);
- nir_def_rewrite_uses(&load->dest.ssa, res);
+ nir_def_rewrite_uses(&load->def, res);
} else {
- nir_def_rewrite_uses(&load->dest.ssa,
- &tex->dest.ssa);
+ nir_def_rewrite_uses(&load->def,
+ &tex->def);
}
return true;
nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa),
};
- assert(info->has_dest && intrin->dest.ssa.bit_size == 64);
+ assert(info->has_dest && intrin->def.bit_size == 64);
nir_def *res[2];
for (unsigned i = 0; i < 2; i++) {
memcpy(split->const_index, intrin->const_index,
sizeof(intrin->const_index));
- nir_def_init(&split->instr, &split->dest.ssa,
- intrin->dest.ssa.num_components, 32);
+ nir_def_init(&split->instr, &split->def,
+ intrin->def.num_components, 32);
nir_builder_instr_insert(b, &split->instr);
- res[i] = &split->dest.ssa;
+ res[i] = &split->def;
}
return nir_pack_64_2x32_split(b, res[0], res[1]);
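Several hunks in this patch share the same 64-bit lowering idiom: unpack the value into 32-bit halves, run the 32-bit operation on each half, and pack the results. Schematically, with process() as a stand-in callback for whatever the pass does per half:

static nir_def *
lower_64bit_via_2x32(nir_builder *b, nir_def *val64,
                     nir_def *(*process)(nir_builder *, nir_def *))
{
   assert(val64->bit_size == 64);
   nir_def *lo = process(b, nir_unpack_64_2x32_split_x(b, val64));
   nir_def *hi = process(b, nir_unpack_64_2x32_split_y(b, val64));
   return nir_pack_64_2x32_split(b, lo, hi);
}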
nir_intrinsic_instr_create(b->shader, nir_intrinsic_vote_ieq);
vote->src[0] = nir_src_for_ssa(x);
vote->num_components = x->num_components;
- nir_def_init(&vote->instr, &vote->dest.ssa, 1, 1);
+ nir_def_init(&vote->instr, &vote->def, 1, 1);
nir_builder_instr_insert(b, &vote->instr);
- return &vote->dest.ssa;
+ return &vote->def;
}
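The hand-rolled sequence above (create the intrinsic, fill in sources, nir_def_init, insert, return &instr->def) is the expansion of nir_builder's generated intrinsic helpers. Assuming the generated nir_vote_ieq() from nir_builder_opcodes.h takes the destination bit size first, as the other intrinsic builders do, the whole body collapses to:

static nir_def *
build_vote_ieq(nir_builder *b, nir_def *x)
{
   /* 1 = destination bit size: a 1-bit boolean vote result. */
   return nir_vote_ieq(b, 1, x);
}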
static nir_def *
nir_intrinsic_set_reduction_op(scan, reduction_op);
if (scan_op == nir_intrinsic_reduce)
nir_intrinsic_set_cluster_size(scan, cluster_size);
- nir_def_init(&scan->instr, &scan->dest.ssa, val->num_components,
+ nir_def_init(&scan->instr, &scan->def, val->num_components,
val->bit_size);
nir_builder_instr_insert(b, &scan->instr);
- return &scan->dest.ssa;
+ return &scan->def;
}
static nir_def *
case nir_intrinsic_quad_swap_horizontal:
case nir_intrinsic_quad_swap_vertical:
case nir_intrinsic_quad_swap_diagonal:
- return intrin->dest.ssa.bit_size == 64 &&
+ return intrin->def.bit_size == 64 &&
(options->lower_int64_options & nir_lower_subgroup_shuffle64);
case nir_intrinsic_vote_ieq:
case nir_intrinsic_reduce:
case nir_intrinsic_inclusive_scan:
case nir_intrinsic_exclusive_scan:
- if (intrin->dest.ssa.bit_size != 64)
+ if (intrin->def.bit_size != 64)
return false;
switch (nir_intrinsic_reduction_op(intrin)) {
comps[i] = val;
}
nir_def *vec = nir_vec(b, comps, intr->num_components);
- nir_def_rewrite_uses(&intr->dest.ssa, vec);
+ nir_def_rewrite_uses(&intr->def, vec);
return true;
}
load->src[0] = nir_src_for_ssa(offset);
}
- nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
+ nir_def_init(&load->instr, &load->def, num_components, bit_size);
nir_builder_instr_insert(b, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
static nir_def *
unsigned component, const struct glsl_type *type)
{
const bool lower_double = !glsl_type_is_integer(type) && state->options & nir_lower_io_lower_64bit_float_to_32;
- if (intrin->dest.ssa.bit_size == 64 &&
+ if (intrin->def.bit_size == 64 &&
(lower_double || (state->options & nir_lower_io_lower_64bit_to_32))) {
nir_builder *b = &state->builder;
nir_def *comp64[4];
assert(component == 0 || component == 2);
unsigned dest_comp = 0;
- while (dest_comp < intrin->dest.ssa.num_components) {
+ while (dest_comp < intrin->def.num_components) {
const unsigned num_comps =
- MIN2(intrin->dest.ssa.num_components - dest_comp,
+ MIN2(intrin->def.num_components - dest_comp,
(4 - component) / 2);
nir_def *data32 =
offset = nir_iadd_imm(b, offset, slot_size);
}
- return nir_vec(b, comp64, intrin->dest.ssa.num_components);
- } else if (intrin->dest.ssa.bit_size == 1) {
+ return nir_vec(b, comp64, intrin->def.num_components);
+ } else if (intrin->def.bit_size == 1) {
/* Booleans are 32-bit */
assert(glsl_type_is_boolean(type));
return nir_b2b1(&state->builder,
emit_load(state, array_index, var, offset, component,
- intrin->dest.ssa.num_components, 32,
+ intrin->def.num_components, 32,
nir_type_bool32));
} else {
return emit_load(state, array_index, var, offset, component,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size,
+ intrin->def.num_components,
+ intrin->def.bit_size,
nir_get_nir_type_for_glsl_type(type));
}
}
write_mask >>= num_comps;
offset = nir_iadd_imm(b, offset, slot_size);
}
- } else if (intrin->dest.ssa.bit_size == 1) {
+ } else if (intrin->def.bit_size == 1) {
/* Booleans are 32-bit */
assert(glsl_type_is_boolean(type));
nir_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
}
/* None of the supported APIs allow interpolation on 64-bit things */
- assert(intrin->dest.ssa.bit_size <= 32);
+ assert(intrin->def.bit_size <= 32);
nir_intrinsic_op bary_op;
switch (intrin->intrinsic) {
nir_intrinsic_instr *bary_setup =
nir_intrinsic_instr_create(state->builder.shader, bary_op);
- nir_def_init(&bary_setup->instr, &bary_setup->dest.ssa, 2, 32);
+ nir_def_init(&bary_setup->instr, &bary_setup->def, 2, 32);
nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
nir_def *load =
nir_load_interpolated_input(&state->builder,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size,
- &bary_setup->dest.ssa,
+ intrin->def.num_components,
+ intrin->def.bit_size,
+ &bary_setup->def,
offset,
.base = var->data.driver_location,
.component = component,
.io_semantics = semantics,
- .dest_type = nir_type_float | intrin->dest.ssa.bit_size);
+ .dest_type = nir_type_float | intrin->def.bit_size);
return load;
}
*/
if (intrin->intrinsic != nir_intrinsic_store_deref) {
nir_def *zero =
- nir_imm_zero(b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_imm_zero(b, intrin->def.num_components,
+ intrin->def.bit_size);
+ nir_def_rewrite_uses(&intrin->def,
zero);
}
}
if (replacement) {
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
replacement);
}
nir_instr_remove(&intrin->instr);
nir_intrinsic_set_range(load, glsl_get_explicit_size(var->type, false));
}
- unsigned bit_size = intrin->dest.ssa.bit_size;
+ unsigned bit_size = intrin->def.bit_size;
if (bit_size == 1) {
/* TODO: Make the native bool bit_size an option. */
bit_size = 32;
}
load->num_components = num_components;
- nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
+ nir_def_init(&load->instr, &load->def, num_components, bit_size);
assert(bit_size % 8 == 0);
nir_pop_if(b, NULL);
- result = nir_if_phi(b, &load->dest.ssa, zero);
+ result = nir_if_phi(b, &load->def, zero);
} else {
nir_builder_instr_insert(b, &load->instr);
- result = &load->dest.ssa;
+ result = &load->def;
}
- if (intrin->dest.ssa.bit_size == 1) {
+ if (intrin->def.bit_size == 1) {
/* For shared, we can go ahead and use NIR's and/or the back-end's
* standard encoding for booleans rather than forcing a 0/1 boolean.
* This should save an instruction or two.
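The boolean handling this comment describes: 1-bit values travel through shared memory as 32-bit values, with nir_b2b32/nir_b2b1 converting at the edges while preserving whatever boolean encoding the back-end uses. An illustrative round trip, assuming v is a 1-bit boolean:

static nir_def *
bool_mem_round_trip(nir_builder *b, nir_def *v)
{
   nir_def *in_memory = nir_b2b32(b, v); /* value as stored */
   return nir_b2b1(b, in_memory);        /* value a load hands back */
}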
if (nir_intrinsic_has_access(atomic))
nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
- assert(intrin->dest.ssa.num_components == 1);
- nir_def_init(&atomic->instr, &atomic->dest.ssa, 1,
- intrin->dest.ssa.bit_size);
+ assert(intrin->def.num_components == 1);
+ nir_def_init(&atomic->instr, &atomic->def, 1,
+ intrin->def.bit_size);
- assert(atomic->dest.ssa.bit_size % 8 == 0);
+ assert(atomic->def.bit_size % 8 == 0);
if (addr_format_needs_bounds_check(addr_format)) {
- const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
+ const unsigned atomic_size = atomic->def.bit_size / 8;
nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
nir_builder_instr_insert(b, &atomic->instr);
nir_pop_if(b, NULL);
- return nir_if_phi(b, &atomic->dest.ssa,
- nir_undef(b, 1, atomic->dest.ssa.bit_size));
+ return nir_if_phi(b, &atomic->def,
+ nir_undef(b, 1, atomic->def.bit_size));
} else {
nir_builder_instr_insert(b, &atomic->instr);
- return &atomic->dest.ssa;
+ return &atomic->def;
}
}
deref->modes, align_mul, align_offset,
intrin->num_components);
}
- nir_def_rewrite_uses(&intrin->dest.ssa, value);
+ nir_def_rewrite_uses(&intrin->def, value);
break;
}
deref->modes,
align_mul, align_offset,
intrin->num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, value);
+ nir_def_rewrite_uses(&intrin->def, value);
break;
}
default: {
nir_def *value =
build_explicit_io_atomic(b, intrin, addr, addr_format, deref->modes);
- nir_def_rewrite_uses(&intrin->dest.ssa, value);
+ nir_def_rewrite_uses(&intrin->def, value);
break;
}
}
* one deref which could break our list walking since we walk the list
* backwards.
*/
- if (nir_def_is_unused(&deref->dest.ssa)) {
+ if (nir_def_is_unused(&deref->def)) {
nir_instr_remove(&deref->instr);
return;
}
nir_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
addr_format);
- assert(addr->bit_size == deref->dest.ssa.bit_size);
- assert(addr->num_components == deref->dest.ssa.num_components);
+ assert(addr->bit_size == deref->def.bit_size);
+ assert(addr->num_components == deref->def.num_components);
nir_instr_remove(&deref->instr);
- nir_def_rewrite_uses(&deref->dest.ssa, addr);
+ nir_def_rewrite_uses(&deref->def, addr);
}
static void
unsigned stride = glsl_get_explicit_stride(deref->type);
assert(stride > 0);
- nir_def *addr = &deref->dest.ssa;
+ nir_def *addr = &deref->def;
nir_def *offset, *size;
switch (addr_format) {
nir_def *remaining = nir_usub_sat(b, size, offset);
nir_def *arr_size = nir_udiv_imm(b, remaining, stride);
- nir_def_rewrite_uses(&intrin->dest.ssa, arr_size);
+ nir_def_rewrite_uses(&intrin->def, arr_size);
nir_instr_remove(&intrin->instr);
}
build_runtime_addr_mode_check(b, addr, addr_format,
nir_intrinsic_memory_modes(intrin));
- nir_def_rewrite_uses(&intrin->dest.ssa, is_mode);
+ nir_def_rewrite_uses(&intrin->def, is_mode);
}
static bool
nir_src_num_components(intrin->src[0]) >= 3;
}
- return intrin->dest.ssa.bit_size == 64 &&
- intrin->dest.ssa.num_components >= 3;
+ return intrin->def.bit_size == 64 &&
+ intrin->def.num_components >= 3;
}
/**
load = nir_channels(&b, load, BITFIELD_RANGE(start, count));
}
- nir_def_rewrite_uses(&intrin->dest.ssa, load);
+ nir_def_rewrite_uses(&intrin->def, load);
nir_instr_remove(instr);
progress = true;
}
if (nir_deref_instr_is_known_out_of_bounds(nir_src_as_deref(intr->src[0]))) {
/* See Section 5.11 (Out-of-Bounds Accesses) of the GLSL 4.60 spec */
if (intr->intrinsic != nir_intrinsic_store_deref) {
- nir_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components,
- intr->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def *zero = nir_imm_zero(b, intr->def.num_components,
+ intr->def.bit_size);
+ nir_def_rewrite_uses(&intr->def,
zero);
}
nir_instr_remove(&intr->instr);
nir_intrinsic_instr *element_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
element_intr->num_components = intr->num_components;
- element_intr->src[0] = nir_src_for_ssa(&element_deref->dest.ssa);
+ element_intr->src[0] = nir_src_for_ssa(&element_deref->def);
if (intr->intrinsic != nir_intrinsic_store_deref) {
- nir_def_init(&element_intr->instr, &element_intr->dest.ssa,
- intr->num_components, intr->dest.ssa.bit_size);
+ nir_def_init(&element_intr->instr, &element_intr->def,
+ intr->num_components, intr->def.bit_size);
if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
&element_intr->instr);
}
- nir_def_rewrite_uses(&intr->dest.ssa,
- &element_intr->dest.ssa);
+ nir_def_rewrite_uses(&intr->def,
+ &element_intr->def);
} else {
nir_intrinsic_set_write_mask(element_intr,
nir_intrinsic_write_mask(intr));
unsigned newc = nir_intrinsic_component(intr);
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1,
- intr->dest.ssa.bit_size);
+ nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
+ intr->def.bit_size);
chan_intr->num_components = 1;
nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
nir_builder_instr_insert(b, &chan_intr->instr);
- loads[i] = &chan_intr->dest.ssa;
+ loads[i] = &chan_intr->def;
}
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def_rewrite_uses(&intr->def,
nir_vec(b, loads, intr->num_components));
nir_instr_remove(&intr->instr);
}
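The scalarization hunks here all follow one recipe: emit a single-component clone of the vector access per channel, then rebuild the vector and repoint the users. The generic load shape, with the per-pass source and index copying elided:

static void
scalarize_load(nir_builder *b, nir_intrinsic_instr *intr)
{
   nir_def *loads[NIR_MAX_VEC_COMPONENTS];

   for (unsigned i = 0; i < intr->num_components; i++) {
      nir_intrinsic_instr *chan =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      chan->num_components = 1;
      nir_def_init(&chan->instr, &chan->def, 1, intr->def.bit_size);
      /* ...copy sources and indices, offsetting per channel... */
      nir_builder_instr_insert(b, &chan->instr);
      loads[i] = &chan->def;
   }

   nir_def_rewrite_uses(&intr->def,
                        nir_vec(b, loads, intr->num_components));
   nir_instr_remove(&intr->instr);
}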
for (unsigned i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1,
- intr->dest.ssa.bit_size);
+ nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
+ intr->def.bit_size);
chan_intr->num_components = 1;
nir_intrinsic_set_align_offset(chan_intr,
(nir_intrinsic_align_offset(intr) +
- i * (intr->dest.ssa.bit_size / 8)) %
+ i * (intr->def.bit_size / 8)) %
nir_intrinsic_align_mul(intr));
nir_intrinsic_set_align_mul(chan_intr, nir_intrinsic_align_mul(intr));
if (nir_intrinsic_has_access(intr))
nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
/* increment offset per component */
- nir_def *offset = nir_iadd_imm(b, base_offset, i * (intr->dest.ssa.bit_size / 8));
+ nir_def *offset = nir_iadd_imm(b, base_offset, i * (intr->def.bit_size / 8));
*nir_get_io_offset_src(chan_intr) = nir_src_for_ssa(offset);
nir_builder_instr_insert(b, &chan_intr->instr);
- loads[i] = &chan_intr->dest.ssa;
+ loads[i] = &chan_intr->def;
}
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def_rewrite_uses(&intr->def,
nir_vec(b, loads, intr->num_components));
nir_instr_remove(&intr->instr);
}
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1,
- intr->dest.ssa.bit_size);
+ nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
+ intr->def.bit_size);
chan_intr->num_components = 1;
nir_deref_instr *deref = nir_build_deref_var(b, chan_var);
deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));
- chan_intr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ chan_intr->src[0] = nir_src_for_ssa(&deref->def);
if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
nir_builder_instr_insert(b, &chan_intr->instr);
- loads[i] = &chan_intr->dest.ssa;
+ loads[i] = &chan_intr->def;
}
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def_rewrite_uses(&intr->def,
nir_vec(b, loads, intr->num_components));
/* Remove the old load intrinsic */
deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));
- chan_intr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ chan_intr->src[0] = nir_src_for_ssa(&deref->def);
chan_intr->src[1] = nir_src_for_ssa(nir_channel(b, value, i));
nir_builder_instr_insert(b, &chan_intr->instr);
nir_intrinsic_instr *new_interp =
nir_intrinsic_instr_create(b->shader, interp->intrinsic);
- new_interp->src[0] = nir_src_for_ssa(&new_interp_deref->dest.ssa);
+ new_interp->src[0] = nir_src_for_ssa(&new_interp_deref->def);
if (interp->intrinsic == nir_intrinsic_interp_deref_at_sample ||
interp->intrinsic == nir_intrinsic_interp_deref_at_offset ||
interp->intrinsic == nir_intrinsic_interp_deref_at_vertex) {
}
new_interp->num_components = interp->num_components;
- nir_def_init(&new_interp->instr, &new_interp->dest.ssa,
- interp->dest.ssa.num_components, interp->dest.ssa.bit_size);
+ nir_def_init(&new_interp->instr, &new_interp->def,
+ interp->def.num_components, interp->def.bit_size);
nir_builder_instr_insert(b, &new_interp->instr);
- nir_store_deref(b, temp_deref, &new_interp->dest.ssa,
- (1 << interp->dest.ssa.num_components) - 1);
+ nir_store_deref(b, temp_deref, &new_interp->def,
+ (1 << interp->def.num_components) - 1);
}
static void
* correct part of the temporary.
*/
nir_def *load = nir_load_deref(b, nir_src_as_deref(interp->src[0]));
- nir_def_rewrite_uses(&interp->dest.ssa, load);
+ nir_def_rewrite_uses(&interp->def, load);
nir_instr_remove(&interp->instr);
nir_deref_path_finish(&interp_path);
return base;
case nir_deref_type_array: {
nir_def *index = nir_i2iN(b, deref->arr.index.ssa,
- deref->dest.ssa.bit_size);
+ deref->def.bit_size);
if (nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var &&
per_vertex)
assert(glsl_type_is_vector(new_deref->type));
}
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
- nir_src_for_ssa(&new_deref->dest.ssa));
+ nir_src_for_ssa(&new_deref->def));
intrin->num_components =
glsl_get_components(new_deref->type);
- intrin->dest.ssa.num_components = intrin->num_components;
+ intrin->def.num_components = intrin->num_components;
b.cursor = nir_after_instr(&intrin->instr);
- nir_def *new_vec = nir_channels(&b, &intrin->dest.ssa,
+ nir_def *new_vec = nir_channels(&b, &intrin->def,
vec4_comp_mask >> new_frac);
- nir_def_rewrite_uses_after(&intrin->dest.ssa,
+ nir_def_rewrite_uses_after(&intrin->def,
new_vec,
new_vec->parent_instr);
assert(glsl_type_is_vector(new_deref->type));
}
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
- nir_src_for_ssa(&new_deref->dest.ssa));
+ nir_src_for_ssa(&new_deref->def));
intrin->num_components =
glsl_get_components(new_deref->type);
unsigned vec_size = glsl_get_vector_elements(var->type);
b.cursor = nir_before_instr(instr);
- nir_def *new_deref = &nir_build_deref_var(&b, var)->dest.ssa;
+ nir_def *new_deref = &nir_build_deref_var(&b, var)->def;
nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(new_deref));
nir_deref_instr_remove_if_unused(deref);
if (intrin->intrinsic == nir_intrinsic_load_deref) {
/* Return undef from out of bounds loads. */
b.cursor = nir_after_instr(instr);
- nir_def *val = &intrin->dest.ssa;
+ nir_def *val = &intrin->def;
nir_def *u = nir_undef(&b, val->num_components, val->bit_size);
nir_def_rewrite_uses(val, u);
}
nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(new_val));
} else {
b.cursor = nir_after_instr(instr);
- nir_def *val = &intrin->dest.ssa;
+ nir_def *val = &intrin->def;
val->num_components = intrin->num_components;
nir_def *comp = nir_channel(&b, val, index);
nir_def_rewrite_uses_after(val, comp, comp->parent_instr);
case nir_intrinsic_is_helper_invocation: {
b->cursor = nir_before_instr(instr);
nir_def *is_helper = nir_load_deref(b, is_helper_deref);
- nir_def_rewrite_uses(&intrin->dest.ssa, is_helper);
+ nir_def_rewrite_uses(&intrin->def, is_helper);
nir_instr_remove_v(instr);
return true;
}
loc.reg, .base = loc.base_offset);
}
- nir_def_rewrite_uses(&intrin->dest.ssa, value);
+ nir_def_rewrite_uses(&intrin->def, value);
nir_instr_remove(&intrin->instr);
state->progress = true;
break;
/* Convert the 32-bit load into a 16-bit load. */
b.cursor = nir_after_instr(&intr->instr);
- intr->dest.ssa.bit_size = 16;
+ intr->def.bit_size = 16;
nir_intrinsic_set_dest_type(intr, (type & ~32) | 16);
- nir_def *dst = convert(&b, &intr->dest.ssa);
- nir_def_rewrite_uses_after(&intr->dest.ssa, dst,
+ nir_def *dst = convert(&b, &intr->def);
+ nir_def_rewrite_uses_after(&intr->def, dst,
dst->parent_instr);
}
switch (intrin->intrinsic) {
case nir_intrinsic_load_deref: {
- if (intrin->dest.ssa.bit_size != 32)
+ if (intrin->def.bit_size != 32)
break;
nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
if (glsl_get_bit_size(deref->type) != 16)
break;
- intrin->dest.ssa.bit_size = 16;
+ intrin->def.bit_size = 16;
b.cursor = nir_after_instr(&intrin->instr);
nir_def *replace = NULL;
switch (glsl_get_base_type(deref->type)) {
case GLSL_TYPE_FLOAT16:
- replace = nir_f2f32(&b, &intrin->dest.ssa);
+ replace = nir_f2f32(&b, &intrin->def);
break;
case GLSL_TYPE_INT16:
- replace = nir_i2i32(&b, &intrin->dest.ssa);
+ replace = nir_i2i32(&b, &intrin->def);
break;
case GLSL_TYPE_UINT16:
- replace = nir_u2u32(&b, &intrin->dest.ssa);
+ replace = nir_u2u32(&b, &intrin->def);
break;
default:
unreachable("Invalid 16-bit type");
}
- nir_def_rewrite_uses_after(&intrin->dest.ssa,
+ nir_def_rewrite_uses_after(&intrin->def,
replace,
replace->parent_instr);
progress = true;
if (!(nir_alu_type_get_base_type(dest_type) & allowed_types))
return false;
- if (!fold_16bit_destination(&instr->dest.ssa, dest_type, exec_mode, rdm))
+ if (!fold_16bit_destination(&instr->def, dest_type, exec_mode, rdm))
return false;
nir_intrinsic_set_dest_type(instr, (dest_type & ~32) | 16);
if (!(nir_alu_type_get_base_type(tex->dest_type) & allowed_types))
return false;
- if (!fold_16bit_destination(&tex->dest.ssa, tex->dest_type, exec_mode, rdm))
+ if (!fold_16bit_destination(&tex->def, tex->dest_type, exec_mode, rdm))
return false;
tex->dest_type = (tex->dest_type & ~32) | 16;
nir_intrinsic_set_align(dup, align_mul, align_offset);
if (info->has_dest) {
- nir_def_init(&dup->instr, &dup->dest.ssa, num_components, bit_size);
+ nir_def_init(&dup->instr, &dup->def, num_components, bit_size);
} else {
nir_intrinsic_set_write_mask(dup, (1 << num_components) - 1);
}
nir_lower_mem_access_bit_sizes_cb mem_access_size_align_cb,
const void *cb_data)
{
- const unsigned bit_size = intrin->dest.ssa.bit_size;
- const unsigned num_components = intrin->dest.ssa.num_components;
+ const unsigned bit_size = intrin->def.bit_size;
+ const unsigned num_components = intrin->def.num_components;
const unsigned bytes_read = num_components * (bit_size / 8);
const uint32_t align_mul = nir_intrinsic_align_mul(intrin);
const uint32_t whole_align_offset = nir_intrinsic_align_offset(intrin);
chunk_bytes = MIN2(bytes_left, requested_bytes - max_pad);
nir_def *shift = nir_imul_imm(b, pad, 8);
- nir_def *shifted = nir_ushr(b, &load->dest.ssa, shift);
+ nir_def *shifted = nir_ushr(b, &load->def, shift);
- if (load->dest.ssa.num_components > 1) {
+ if (load->def.num_components > 1) {
nir_def *rev_shift =
- nir_isub_imm(b, load->dest.ssa.bit_size, shift);
- nir_def *rev_shifted = nir_ishl(b, &load->dest.ssa, rev_shift);
+ nir_isub_imm(b, load->def.bit_size, shift);
+ nir_def *rev_shifted = nir_ishl(b, &load->def, rev_shift);
nir_def *comps[NIR_MAX_VEC_COMPONENTS];
- for (unsigned i = 1; i < load->dest.ssa.num_components; i++)
+ for (unsigned i = 1; i < load->def.num_components; i++)
comps[i - 1] = nir_channel(b, rev_shifted, i);
- comps[load->dest.ssa.num_components - 1] =
- nir_imm_zero(b, 1, load->dest.ssa.bit_size);
+ comps[load->def.num_components - 1] =
+ nir_imm_zero(b, 1, load->def.bit_size);
- rev_shifted = nir_vec(b, comps, load->dest.ssa.num_components);
- shifted = nir_bcsel(b, nir_ieq_imm(b, shift, 0), &load->dest.ssa,
+ rev_shifted = nir_vec(b, comps, load->def.num_components);
+ shifted = nir_bcsel(b, nir_ieq_imm(b, shift, 0), &load->def,
nir_ior(b, shifted, rev_shifted));
}
/* There's no guarantee that chunk_num_components is a valid NIR
* vector size, so just loop one chunk component at a time
*/
- nir_def *chunk_data = &load->dest.ssa;
+ nir_def *chunk_data = &load->def;
for (unsigned i = 0; i < chunk_num_components; i++) {
assert(num_chunks < ARRAY_SIZE(chunks));
chunks[num_chunks++] =
chunk_bytes = requested.num_components * (requested.bit_size / 8);
assert(num_chunks < ARRAY_SIZE(chunks));
- chunks[num_chunks++] = &load->dest.ssa;
+ chunks[num_chunks++] = &load->def;
}
chunk_start += chunk_bytes;
nir_def *result = nir_extract_bits(b, chunks, num_chunks, 0,
num_components, bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa, result);
+ nir_def_rewrite_uses(&intrin->def, result);
nir_instr_remove(&intrin->instr);
return true;
{
nir_deref_instr *deref;
- index = nir_i2iN(b, index, parent->dest.ssa.bit_size);
+ index = nir_i2iN(b, index, parent->def.bit_size);
assert(parent->deref_type == nir_deref_type_cast);
deref = nir_build_deref_ptr_as_array(b, parent, index);
memcpy_load_deref_elem_imm(nir_builder *b, nir_deref_instr *parent,
uint64_t index)
{
- nir_def *idx = nir_imm_intN_t(b, index, parent->dest.ssa.bit_size);
+ nir_def *idx = nir_imm_intN_t(b, index, parent->def.bit_size);
return memcpy_load_deref_elem(b, parent, idx);
}
{
nir_deref_instr *deref;
- index = nir_i2iN(b, index, parent->dest.ssa.bit_size);
+ index = nir_i2iN(b, index, parent->def.bit_size);
assert(parent->deref_type == nir_deref_type_cast);
deref = nir_build_deref_ptr_as_array(b, parent, index);
nir_store_deref(b, deref, value, ~0);
memcpy_store_deref_elem_imm(nir_builder *b, nir_deref_instr *parent,
uint64_t index, nir_def *value)
{
- nir_def *idx = nir_imm_intN_t(b, index, parent->dest.ssa.bit_size);
+ nir_def *idx = nir_imm_intN_t(b, index, parent->def.bit_size);
memcpy_store_deref_elem(b, parent, idx, value);
}
copy_type_for_byte_size(copy_size);
nir_deref_instr *copy_dst =
- nir_build_deref_cast(&b, &dst->dest.ssa, dst->modes,
+ nir_build_deref_cast(&b, &dst->def, dst->modes,
copy_type, copy_size);
nir_deref_instr *copy_src =
- nir_build_deref_cast(&b, &src->dest.ssa, src->modes,
+ nir_build_deref_cast(&b, &src->def, src->modes,
copy_type, copy_size);
uint64_t index = offset / copy_size;
* emit a loop which copies one byte at a time.
*/
nir_deref_instr *copy_dst =
- nir_build_deref_cast(&b, &dst->dest.ssa, dst->modes,
+ nir_build_deref_cast(&b, &dst->def, dst->modes,
glsl_uint8_t_type(), 1);
nir_deref_instr *copy_src =
- nir_build_deref_cast(&b, &src->dest.ssa, src->modes,
+ nir_build_deref_cast(&b, &src->def, src->modes,
glsl_uint8_t_type(), 1);
nir_variable *i = nir_local_variable_create(impl,
switch (intrin->intrinsic) {
case nir_intrinsic_load_view_index: {
- nir_def_rewrite_uses(&intrin->dest.ssa, view_index);
+ nir_def_rewrite_uses(&intrin->def, view_index);
break;
}
nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
nir_instr_rewrite_src(instr, &intrin->src[0],
- nir_src_for_ssa(&pos_deref->dest.ssa));
+ nir_src_for_ssa(&pos_deref->def));
/* Remove old deref since it has the wrong type. */
nir_deref_instr_remove_if_unused(old_deref);
/* Replicate the deref. */
nir_deref_instr *deref =
nir_build_deref_array(b, h->parent_deref, h->first);
- *(h->src) = nir_src_for_ssa(&deref->dest.ssa);
+ *(h->src) = nir_src_for_ssa(&deref->def);
} else {
*(h->src) = nir_src_for_ssa(h->first);
}
}
progress = true;
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def_rewrite_uses(&intr->def,
val);
nir_instr_remove(instr);
}
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
{
/* Already scalar */
- if (phi->dest.ssa.num_components == 1)
+ if (phi->def.num_components == 1)
return false;
if (state->lower_all)
if (!should_lower_phi(phi, state))
continue;
- unsigned bit_size = phi->dest.ssa.bit_size;
+ unsigned bit_size = phi->def.bit_size;
/* Create a vecN operation to combine the results. Most of these
* will be redundant, but copy propagation should clean them up for
* us. No need to add the complexity here.
*/
- nir_op vec_op = nir_op_vec(phi->dest.ssa.num_components);
+ nir_op vec_op = nir_op_vec(phi->def.num_components);
nir_alu_instr *vec = nir_alu_instr_create(state->shader, vec_op);
nir_def_init(&vec->instr, &vec->def,
- phi->dest.ssa.num_components, bit_size);
+ phi->def.num_components, bit_size);
- for (unsigned i = 0; i < phi->dest.ssa.num_components; i++) {
+ for (unsigned i = 0; i < phi->def.num_components; i++) {
nir_phi_instr *new_phi = nir_phi_instr_create(state->shader);
- nir_def_init(&new_phi->instr, &new_phi->dest.ssa, 1,
- phi->dest.ssa.bit_size);
+ nir_def_init(&new_phi->instr, &new_phi->def, 1,
+ phi->def.bit_size);
- vec->src[i].src = nir_src_for_ssa(&new_phi->dest.ssa);
+ vec->src[i].src = nir_src_for_ssa(&new_phi->def);
nir_foreach_phi_src(src, phi) {
/* We need to insert a mov to grab the i'th component of src */
nir_instr_insert_after(&last_phi->instr, &vec->instr);
- nir_def_rewrite_uses(&phi->dest.ssa,
+ nir_def_rewrite_uses(&phi->def,
&vec->def);
nir_instr_remove(&phi->instr);
nir_builder *b = &state->b;
b->cursor = nir_after_instr(&intr->instr);
- nir_def *pntc = &intr->dest.ssa;
+ nir_def *pntc = &intr->def;
nir_def *transform = get_pntc_transform(state);
nir_def *y = nir_channel(b, pntc, 1);
/* The offset is 1 if we're flipping, 0 otherwise. */
nir_channel(b, pntc, 0),
nir_fadd(b, offset, scaled));
- nir_def_rewrite_uses_after(&intr->dest.ssa, flipped_pntc,
+ nir_def_rewrite_uses_after(&intr->def, flipped_pntc,
flipped_pntc->parent_instr);
}
/* Increment the counter at the beginning of the buffer */
const unsigned counter_size = 4;
nir_deref_instr *counter = nir_build_deref_array_imm(b, buffer, 0);
- counter = nir_build_deref_cast(b, &counter->dest.ssa,
+ counter = nir_build_deref_cast(b, &counter->def,
nir_var_mem_global,
glsl_uint_type(), 0);
counter->cast.align_mul = 4;
nir_def *offset =
- nir_deref_atomic(b, 32, &counter->dest.ssa,
+ nir_deref_atomic(b, 32, &counter->def,
nir_imm_int(b, fmt_str_id_size + args_size),
.atomic_op = nir_atomic_op_iadd);
nir_i2iN(b, offset, ptr_bit_size);
nir_deref_instr *fmt_str_id_deref =
nir_build_deref_array(b, buffer, fmt_str_id_offset);
- fmt_str_id_deref = nir_build_deref_cast(b, &fmt_str_id_deref->dest.ssa,
+ fmt_str_id_deref = nir_build_deref_cast(b, &fmt_str_id_deref->def,
nir_var_mem_global,
glsl_uint_type(), 0);
fmt_str_id_deref->cast.align_mul = 4;
ptr_bit_size);
nir_deref_instr *dst_arg_deref =
nir_build_deref_array(b, buffer, arg_offset);
- dst_arg_deref = nir_build_deref_cast(b, &dst_arg_deref->dest.ssa,
+ dst_arg_deref = nir_build_deref_cast(b, &dst_arg_deref->def,
nir_var_mem_global, arg_type, 0);
assert(field_offset % 4 == 0);
dst_arg_deref->cast.align_mul = 4;
nir_pop_if(b, NULL);
nir_def *ret_val = nir_if_phi(b, printf_succ_val, printf_fail_val);
- nir_def_rewrite_uses(&prntf->dest.ssa, ret_val);
+ nir_def_rewrite_uses(&prntf->def, ret_val);
nir_instr_remove(&prntf->instr);
return true;
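The allocation step of the printf lowering above, in isolation: each record is claimed by atomically bumping the counter at the start of the buffer, and the returned pre-increment value is the record's byte offset. In this sketch, record_size stands for fmt_str_id_size + args_size in the pass:

static nir_def *
alloc_printf_record(nir_builder *b, nir_deref_instr *counter,
                    unsigned record_size)
{
   return nir_deref_atomic(b, 32, &counter->def,
                           nir_imm_int(b, record_size),
                           .atomic_op = nir_atomic_op_iadd);
}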
coord_components++;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &deref->dest.ssa);
+ &deref->def);
if (options->per_variable) {
assert(nir_deref_instr_get_variable(deref));
assert(num_srcs == 3);
tex->dest_type = nir_intrinsic_dest_type(intrin);
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
break;
}
assert(num_srcs == 2);
tex->dest_type = nir_type_uint32;
- nir_def_init(&tex->instr, &tex->dest.ssa, coord_components, 32);
+ nir_def_init(&tex->instr, &tex->def, coord_components, 32);
break;
}
nir_builder_instr_insert(b, &tex->instr);
- nir_def *res = nir_trim_vector(b, &tex->dest.ssa,
- intrin->dest.ssa.num_components);
+ nir_def *res = nir_trim_vector(b, &tex->def,
+ intrin->def.num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, res);
+ nir_def_rewrite_uses(&intrin->def, res);
nir_instr_remove(&intrin->instr);
return true;
static void
setup_reg(nir_intrinsic_instr *decl, struct regs_to_ssa_state *state)
{
- assert(state->values[decl->dest.ssa.index] == NULL);
+ assert(state->values[decl->def.index] == NULL);
if (!should_lower_reg(decl))
return;
nir_foreach_reg_store(store, decl)
BITSET_SET(state->defs, store->parent_instr->block->index);
- state->values[decl->dest.ssa.index] =
+ state->values[decl->def.index] =
nir_phi_builder_add_value(state->phi_builder, num_components,
bit_size, state->defs);
}
nir_intrinsic_instr *decl = nir_instr_as_intrinsic(reg->parent_instr);
nir_def *def = nir_phi_builder_value_get_block_def(value, block);
- nir_def_rewrite_uses(&load->dest.ssa, def);
+ nir_def_rewrite_uses(&load->def, def);
nir_instr_remove(&load->instr);
- if (nir_def_is_unused(&decl->dest.ssa))
+ if (nir_def_is_unused(&decl->def))
nir_instr_remove(&decl->instr);
}
nir_phi_builder_value_set_block_def(value, block, new_value);
nir_instr_remove(&store->instr);
- if (nir_def_is_unused(&decl->dest.ssa))
+ if (nir_def_is_unused(&decl->def))
nir_instr_remove(&decl->instr);
}
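The regs-to-SSA flow above in miniature: stores become the block-local definition of the register's phi-builder value, loads read whatever definition dominates their block, and nir_phi_builder materializes the phis. A sketch covering both access kinds, assuming value was registered with nir_phi_builder_add_value as in the hunk above:

static void
rewrite_reg_access(struct nir_phi_builder_value *value, nir_block *block,
                   nir_intrinsic_instr *access)
{
   if (access->intrinsic == nir_intrinsic_load_reg) {
      /* Loads read the dominating definition for this block. */
      nir_def *def = nir_phi_builder_value_get_block_def(value, block);
      nir_def_rewrite_uses(&access->def, def);
   } else {
      /* Stores define the value for this block. */
      nir_phi_builder_value_set_block_def(value, block,
                                          access->src[0].ssa);
   }
   nir_instr_remove(&access->instr);
}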
nir_def *res, *zero;
if (has_dest) {
- zero = nir_imm_zero(b, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size);
+ zero = nir_imm_zero(b, instr->def.num_components,
+ instr->def.bit_size);
}
nir_push_if(b, valid);
nir_builder_instr_insert(b, orig);
if (has_dest)
- res = &nir_instr_as_intrinsic(orig)->dest.ssa;
+ res = &nir_instr_as_intrinsic(orig)->def;
}
nir_pop_if(b, NULL);
if (has_dest)
- nir_def_rewrite_uses(&instr->dest.ssa, nir_if_phi(b, res, zero));
+ nir_def_rewrite_uses(&instr->def, nir_if_phi(b, res, zero));
/* We've cloned and wrapped, so drop original instruction */
nir_instr_remove(&instr->instr);
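The wrap-in-bounds-check pattern above, reduced to its skeleton: execute the instruction only under valid and substitute zero otherwise. Here orig is assumed to be an already-cloned, not-yet-inserted copy of the guarded intrinsic:

static void
guard_instr(nir_builder *b, nir_intrinsic_instr *instr, nir_instr *orig,
            nir_def *valid)
{
   nir_def *zero = nir_imm_zero(b, instr->def.num_components,
                                instr->def.bit_size);

   nir_push_if(b, valid);
   nir_builder_instr_insert(b, orig);
   nir_def *res = &nir_instr_as_intrinsic(orig)->def;
   nir_pop_if(b, NULL);

   nir_def_rewrite_uses(&instr->def, nir_if_phi(b, res, zero));
   nir_instr_remove(&instr->instr);
}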
nir_intrinsic_instr *instr,
const nir_lower_robust_access_options *opts)
{
- uint32_t type_sz = instr->dest.ssa.bit_size / 8;
+ uint32_t type_sz = instr->def.bit_size / 8;
nir_def *size;
nir_def *index = instr->src[0].ssa;
uint32_t type_sz, offset_src;
if (instr->intrinsic == nir_intrinsic_load_shared) {
offset_src = 0;
- type_sz = instr->dest.ssa.bit_size / 8;
+ type_sz = instr->def.bit_size / 8;
} else if (instr->intrinsic == nir_intrinsic_store_shared) {
offset_src = 1;
type_sz = nir_src_bit_size(instr->src[0]) / 8;
size_align(deref->type, &size, &align);
if (intrin->intrinsic == nir_intrinsic_load_deref) {
- unsigned bit_size = intrin->dest.ssa.bit_size;
+ unsigned bit_size = intrin->def.bit_size;
nir_def *value = nir_load_scratch(
b, intrin->num_components, bit_size == 1 ? 32 : bit_size, offset, .align_mul = align);
if (bit_size == 1)
value = nir_b2b1(b, value);
- nir_def_rewrite_uses(&intrin->dest.ssa, value);
+ nir_def_rewrite_uses(&intrin->def, value);
} else {
assert(intrin->intrinsic == nir_intrinsic_store_deref);
static bool
only_used_for_load_store(nir_deref_instr *deref)
{
- nir_foreach_use(src, &deref->dest.ssa) {
+ nir_foreach_use(src, &deref->def) {
if (!src->parent_instr)
return false;
if (src->parent_instr->type == nir_instr_type_deref) {
nir_foreach_phi_src(phi_src, phi) {
if (phi_src->pred == pred) {
found = true;
- nir_def_rewrite_uses(&phi->dest.ssa, phi_src->src.ssa);
+ nir_def_rewrite_uses(&phi->def, phi_src->src.ssa);
break;
}
}
nir_intrinsic_base(stack));
data = nir_load_global(b, addr,
nir_intrinsic_align_mul(stack),
- stack->dest.ssa.num_components,
- stack->dest.ssa.bit_size);
+ stack->def.num_components,
+ stack->def.bit_size);
} else {
assert(state->address_format == nir_address_format_32bit_offset);
data = nir_load_scratch(b,
}
}
- intrin->dest.ssa.num_components = intrin->num_components = swiz_count;
+ intrin->def.num_components = intrin->num_components = swiz_count;
progress = true;
}
if (intrin->intrinsic != nir_intrinsic_load_stack)
continue;
- nir_def *value = &intrin->dest.ssa;
+ nir_def *value = &intrin->def;
nir_block *new_block = find_last_dominant_use_block(impl, value);
if (new_block == block)
continue;
return false;
if (intrin->intrinsic == nir_intrinsic_load_stack &&
- intrin->dest.ssa.num_components == 1)
+ intrin->def.num_components == 1)
return false;
if (intrin->intrinsic == nir_intrinsic_store_stack &&
nir_def *components[NIR_MAX_VEC_COMPONENTS] = {
0,
};
- for (unsigned c = 0; c < intrin->dest.ssa.num_components; c++) {
- components[c] = nir_load_stack(b, 1, intrin->dest.ssa.bit_size,
+ for (unsigned c = 0; c < intrin->def.num_components; c++) {
+ components[c] = nir_load_stack(b, 1, intrin->def.bit_size,
.base = nir_intrinsic_base(intrin) +
- c * intrin->dest.ssa.bit_size / 8,
+ c * intrin->def.bit_size / 8,
.call_idx = nir_intrinsic_call_idx(intrin),
.value_id = nir_intrinsic_value_id(intrin),
.align_mul = nir_intrinsic_align_mul(intrin));
}
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
nir_vec(b, components,
- intrin->dest.ssa.num_components));
+ intrin->def.num_components));
} else {
assert(intrin->intrinsic == nir_intrinsic_store_stack);
for (unsigned c = 0; c < intrin->src[0].ssa->num_components; c++) {
return false;
}
- nir_def_rewrite_uses(&intrin->dest.ssa, lowered);
+ nir_def_rewrite_uses(&intrin->def, lowered);
nir_instr_remove(instr);
return true;
}
nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
load->num_components = 1;
nir_src_copy(&load->src[0], idx, &load->instr);
- nir_def_init(&load->instr, &load->dest.ssa, 1, bitsize);
+ nir_def_init(&load->instr, &load->def, 1, bitsize);
nir_builder_instr_insert(b, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
#define nir_ssbo_prop(b, prop, index, bitsize) \
nir_src_copy(&global->src[0], &intr->src[0], &global->instr);
nir_intrinsic_set_write_mask(global, nir_intrinsic_write_mask(intr));
} else {
- nir_def_init(&global->instr, &global->dest.ssa,
- intr->dest.ssa.num_components, intr->dest.ssa.bit_size);
+ nir_def_init(&global->instr, &global->def,
+ intr->def.num_components, intr->def.bit_size);
if (is_atomic) {
nir_src_copy(&global->src[1], &intr->src[2], &global->instr);
}
nir_builder_instr_insert(b, &global->instr);
- return is_store ? NULL : &global->dest.ssa;
+ return is_store ? NULL : &global->def;
}
static bool
nir_def *replace = lower_ssbo_instr(&b, intr);
if (replace) {
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def_rewrite_uses(&intr->def,
replace);
}
comp = nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa);
nir_intrinsic_instr *intr = nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
- nir_def_init(&intr->instr, &intr->dest.ssa, 1, 32);
+ nir_def_init(&intr->instr, &intr->def, 1, 32);
intr->const_index[0] = intrin->const_index[0];
intr->const_index[1] = intrin->const_index[1];
intr->src[0] = nir_src_for_ssa(comp);
assert(intrin->src[0].ssa->bit_size == 64);
nir_intrinsic_instr *intr_x = lower_subgroups_64bit_split_intrinsic(b, intrin, 0);
nir_intrinsic_instr *intr_y = lower_subgroups_64bit_split_intrinsic(b, intrin, 1);
- return nir_pack_64_2x32_split(b, &intr_x->dest.ssa, &intr_y->dest.ssa);
+ return nir_pack_64_2x32_split(b, &intr_x->def, &intr_y->def);
}
static nir_def *
bool lower_to_32bit)
{
/* This is safe to call on scalar things but it would be silly */
- assert(intrin->dest.ssa.num_components > 1);
+ assert(intrin->def.num_components > 1);
nir_def *value = nir_ssa_for_src(b, intrin->src[0],
intrin->num_components);
for (unsigned i = 0; i < intrin->num_components; i++) {
nir_intrinsic_instr *chan_intrin =
nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
- nir_def_init(&chan_intrin->instr, &chan_intrin->dest.ssa, 1,
- intrin->dest.ssa.bit_size);
+ nir_def_init(&chan_intrin->instr, &chan_intrin->def, 1,
+ intrin->def.bit_size);
chan_intrin->num_components = 1;
/* value */
reads[i] = lower_subgroup_op_to_32bit(b, chan_intrin);
} else {
nir_builder_instr_insert(b, &chan_intrin->instr);
- reads[i] = &chan_intrin->dest.ssa;
+ reads[i] = &chan_intrin->def;
}
}
for (unsigned i = 0; i < intrin->num_components; i++) {
nir_intrinsic_instr *chan_intrin =
nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
- nir_def_init(&chan_intrin->instr, &chan_intrin->dest.ssa, 1,
- intrin->dest.ssa.bit_size);
+ nir_def_init(&chan_intrin->instr, &chan_intrin->def, 1,
+ intrin->def.bit_size);
chan_intrin->num_components = 1;
chan_intrin->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
nir_builder_instr_insert(b, &chan_intrin->instr);
if (result) {
- result = nir_iand(b, result, &chan_intrin->dest.ssa);
+ result = nir_iand(b, result, &chan_intrin->def);
} else {
- result = &chan_intrin->dest.ssa;
+ result = &chan_intrin->def;
}
}
swizzle->num_components = intrin->num_components;
nir_src_copy(&swizzle->src[0], &intrin->src[0], &swizzle->instr);
nir_intrinsic_set_swizzle_mask(swizzle, (mask << 10) | 0x1f);
- nir_def_init(&swizzle->instr, &swizzle->dest.ssa,
- intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+ nir_def_init(&swizzle->instr, &swizzle->def,
+ intrin->def.num_components, intrin->def.bit_size);
if (options->lower_to_scalar && swizzle->num_components > 1) {
return lower_subgroup_op_to_scalar(b, swizzle, options->lower_shuffle_to_32bit);
return lower_subgroup_op_to_32bit(b, swizzle);
} else {
nir_builder_instr_insert(b, &swizzle->instr);
- return &swizzle->dest.ssa;
+ return &swizzle->def;
}
}
shuffle->num_components = intrin->num_components;
nir_src_copy(&shuffle->src[0], &intrin->src[0], &shuffle->instr);
shuffle->src[1] = nir_src_for_ssa(index);
- nir_def_init(&shuffle->instr, &shuffle->dest.ssa,
- intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+ nir_def_init(&shuffle->instr, &shuffle->def,
+ intrin->def.num_components, intrin->def.bit_size);
bool lower_to_32bit = options->lower_shuffle_to_32bit && is_shuffle;
if (options->lower_to_scalar && shuffle->num_components > 1) {
return lower_subgroup_op_to_32bit(b, shuffle);
} else {
nir_builder_instr_insert(b, &shuffle->instr);
- return &shuffle->dest.ssa;
+ return &shuffle->def;
}
}
qbcst->num_components = intrin->num_components;
qbcst->src[1] = nir_src_for_ssa(nir_imm_int(b, i));
nir_src_copy(&qbcst->src[0], &intrin->src[0], &qbcst->instr);
- nir_def_init(&qbcst->instr, &qbcst->dest.ssa,
- intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+ nir_def_init(&qbcst->instr, &qbcst->def,
+ intrin->def.num_components, intrin->def.bit_size);
nir_def *qbcst_dst = NULL;
qbcst_dst = lower_subgroup_op_to_scalar(b, qbcst, false);
} else {
nir_builder_instr_insert(b, &qbcst->instr);
- qbcst_dst = &qbcst->dest.ssa;
+ qbcst_dst = &qbcst->def;
}
if (i)
static nir_def *
lower_read_invocation_to_cond(nir_builder *b, nir_intrinsic_instr *intrin)
{
- return nir_read_invocation_cond_ir3(b, intrin->dest.ssa.bit_size,
+ return nir_read_invocation_cond_ir3(b, intrin->def.bit_size,
intrin->src[0].ssa,
nir_ieq(b, intrin->src[1].ssa,
nir_load_subgroup_invocation(b)));
}
return uint_to_ballot_type(b, val,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ intrin->def.num_components,
+ intrin->def.bit_size);
}
case nir_intrinsic_ballot: {
- if (intrin->dest.ssa.num_components == options->ballot_components &&
- intrin->dest.ssa.bit_size == options->ballot_bit_size)
+ if (intrin->def.num_components == options->ballot_components &&
+ intrin->def.bit_size == options->ballot_bit_size)
return NULL;
nir_def *ballot =
intrin->src[0].ssa);
return uint_to_ballot_type(b, ballot,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ intrin->def.num_components,
+ intrin->def.bit_size);
}
case nir_intrinsic_ballot_bitfield_extract:
static nir_def *
sanitize_32bit_sysval(nir_builder *b, nir_intrinsic_instr *intrin)
{
- const unsigned bit_size = intrin->dest.ssa.bit_size;
+ const unsigned bit_size = intrin->def.bit_size;
if (bit_size == 32)
return NULL;
- intrin->dest.ssa.bit_size = 32;
- return nir_u2uN(b, &intrin->dest.ssa, bit_size);
+ intrin->def.bit_size = 32;
+ return nir_u2uN(b, &intrin->def, bit_size);
}
static nir_def *
if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
return NULL;
- const unsigned bit_size = intrin->dest.ssa.bit_size;
+ const unsigned bit_size = intrin->def.bit_size;
switch (intrin->intrinsic) {
case nir_intrinsic_load_vertex_id:
nir_intrinsic_op op =
nir_intrinsic_from_system_value(var->data.location);
nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
- nir_def_init_for_type(&load->instr, &load->dest.ssa, var->type);
- load->num_components = load->dest.ssa.num_components;
+ nir_def_init_for_type(&load->instr, &load->def, var->type);
+ load->num_components = load->def.num_components;
nir_builder_instr_insert(b, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
case SYSTEM_VALUE_DEVICE_INDEX:
}
case SYSTEM_VALUE_MESH_VIEW_INDICES:
- return nir_load_mesh_view_indices(b, intrin->dest.ssa.num_components,
+ return nir_load_mesh_view_indices(b, intrin->def.num_components,
bit_size, column, .base = 0,
- .range = intrin->dest.ssa.num_components * bit_size / 8);
+ .range = intrin->def.num_components * bit_size / 8);
default:
break;
assert(nir_intrinsic_infos[sysval_op].index_map[NIR_INTRINSIC_COLUMN] > 0);
unsigned num_cols = glsl_get_matrix_columns(var->type);
ASSERTED unsigned num_rows = glsl_get_vector_elements(var->type);
- assert(num_rows == intrin->dest.ssa.num_components);
+ assert(num_rows == intrin->def.num_components);
nir_def *cols[4];
for (unsigned i = 0; i < num_cols; i++) {
cols[i] = nir_load_system_value(b, sysval_op, i,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ intrin->def.num_components,
+ intrin->def.bit_size);
assert(cols[i]->num_components == num_rows);
}
return nir_select_from_ssa_def_array(b, cols, num_cols, column);
} else if (glsl_type_is_array(var->type)) {
unsigned num_elems = glsl_get_length(var->type);
ASSERTED const struct glsl_type *elem_type = glsl_get_array_element(var->type);
- assert(glsl_get_components(elem_type) == intrin->dest.ssa.num_components);
+ assert(glsl_get_components(elem_type) == intrin->def.num_components);
nir_def *elems[4];
assert(ARRAY_SIZE(elems) >= num_elems);
for (unsigned i = 0; i < num_elems; i++) {
elems[i] = nir_load_system_value(b, sysval_op, i,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ intrin->def.num_components,
+ intrin->def.bit_size);
}
return nir_select_from_ssa_def_array(b, elems, num_elems, column);
} else {
return nir_load_system_value(b, sysval_op, 0,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ intrin->def.num_components,
+ intrin->def.bit_size);
}
}
if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
return NULL;
- const unsigned bit_size = intrin->dest.ssa.bit_size;
+ const unsigned bit_size = intrin->def.bit_size;
switch (intrin->intrinsic) {
case nir_intrinsic_load_local_invocation_id:
if (!b->shader->info.workgroup_size_variable && is_zero) {
nir_scalar defs[3];
for (unsigned i = 0; i < 3; i++) {
- defs[i] = is_zero & (1 << i) ? nir_get_ssa_scalar(nir_imm_zero(b, 1, 32), 0) : nir_get_ssa_scalar(&intrin->dest.ssa, i);
+ defs[i] = is_zero & (1 << i) ? nir_get_ssa_scalar(nir_imm_zero(b, 1, 32), 0) : nir_get_ssa_scalar(&intrin->def, i);
}
return nir_vec_scalars(b, defs, 3);
}
b->cursor = nir_after_instr(instr);
- nir_def *num_wgs = &intrin->dest.ssa;
+ nir_def *num_wgs = &intrin->def;
for (unsigned i = 0; i < 3; ++i) {
if (num_wgs_imm[i])
num_wgs = nir_vector_insert_imm(b, num_wgs, nir_imm_int(b, num_wgs_imm[i]), i);
nir_def *load =
nir_load_shared(b, 1, 32, nir_imm_int(b, 0),
.base = s->task_count_shared_addr);
- nir_def_rewrite_uses(&intrin->dest.ssa, load);
+ nir_def_rewrite_uses(&intrin->def, load);
nir_instr_remove(instr);
return true;
}
return true;
break;
case nir_intrinsic_load_task_payload:
- if (small_types && intrin->dest.ssa.bit_size < 32)
+ if (small_types && intrin->def.bit_size < 32)
return true;
break;
case nir_intrinsic_store_task_payload:
else
z = nir_imm_float(b, 0.0f);
- nir_def_rewrite_uses(&intr->dest.ssa, nir_vec3(b, x, y, z));
+ nir_def_rewrite_uses(&intr->def, nir_vec3(b, x, y, z));
return true;
}
b->cursor = nir_before_instr(&tex->instr);
if (tex->op == nir_texop_lod) {
- nir_def_rewrite_uses(&tex->dest.ssa, nir_imm_int(b, 0));
+ nir_def_rewrite_uses(&tex->def, nir_imm_int(b, 0));
nir_instr_remove(&tex->instr);
return;
}
nir_imm_int(b, plane));
plane_tex->op = nir_texop_tex;
plane_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
- plane_tex->dest_type = nir_type_float | tex->dest.ssa.bit_size;
+ plane_tex->dest_type = nir_type_float | tex->def.bit_size;
plane_tex->coord_components = 2;
plane_tex->texture_index = tex->texture_index;
plane_tex->sampler_index = tex->sampler_index;
- nir_def_init(&plane_tex->instr, &plane_tex->dest.ssa, 4,
- tex->dest.ssa.bit_size);
+ nir_def_init(&plane_tex->instr, &plane_tex->def, 4,
+ tex->def.bit_size);
nir_builder_instr_insert(b, &plane_tex->instr);
/* If scaling_factor is set, return a scaled value. */
if (options->scale_factors[tex->texture_index])
- return nir_fmul_imm(b, &plane_tex->dest.ssa,
+ return nir_fmul_imm(b, &plane_tex->def,
options->scale_factors[tex->texture_index]);
- return &plane_tex->dest.ssa;
+ return &plane_tex->def;
}
static void
}
}
- unsigned bit_size = tex->dest.ssa.bit_size;
+ unsigned bit_size = tex->def.bit_size;
nir_def *offset =
nir_vec4(b,
nir_def *result =
nir_ffma(b, y, m0, nir_ffma(b, u, m1, nir_ffma(b, v, m2, offset)));
- nir_def_rewrite_uses(&tex->dest.ssa, result);
+ nir_def_rewrite_uses(&tex->def, result);
}
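The nested nir_ffma chain above is a per-channel color-space multiply:
result = y*m0 + u*m1 + v*m2 + offset. One output channel as a scalar
sketch (m0..m2 and offset are whatever coefficients the caller chose;
no particular BT.601/709 constants are assumed):

#include <math.h>

/* One channel of the YUV->RGB transform as fused multiply-adds,
 * matching the nested nir_ffma chain. */
static float yuv_channel(float y, float u, float v,
                         float m0, float m1, float m2, float offset)
{
   return fmaf(y, m0, fmaf(u, m1, fmaf(v, m2, offset)));
}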
static void
txd->src[tex->num_srcs] = nir_tex_src_for_ssa(nir_tex_src_ddx, dfdx);
txd->src[tex->num_srcs + 1] = nir_tex_src_for_ssa(nir_tex_src_ddy, dfdy);
- nir_def_init(&txd->instr, &txd->dest.ssa,
- tex->dest.ssa.num_components,
- tex->dest.ssa.bit_size);
+ nir_def_init(&txd->instr, &txd->def,
+ tex->def.num_components,
+ tex->def.bit_size);
nir_builder_instr_insert(b, &txd->instr);
- nir_def_rewrite_uses(&tex->dest.ssa, &txd->dest.ssa);
+ nir_def_rewrite_uses(&tex->def, &txd->def);
nir_instr_remove(&tex->instr);
return txd;
}
lod = nir_fadd(b, nir_channel(b, lod, 1), nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
txl->src[tex->num_srcs - 1] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
- nir_def_init(&txl->instr, &txl->dest.ssa,
- tex->dest.ssa.num_components,
- tex->dest.ssa.bit_size);
+ nir_def_init(&txl->instr, &txl->def,
+ tex->def.num_components,
+ tex->def.bit_size);
nir_builder_instr_insert(b, &txl->instr);
- nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
+ nir_def_rewrite_uses(&tex->def, &txl->def);
nir_instr_remove(&tex->instr);
return txl;
}
assert(nir_tex_instr_dest_size(tex) == 4);
unsigned swiz[4] = { 2, 3, 1, 0 };
- nir_def *swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
+ nir_def *swizzled = nir_swizzle(b, &tex->def, swiz, 4);
- nir_def_rewrite_uses_after(&tex->dest.ssa, swizzled,
+ nir_def_rewrite_uses_after(&tex->def, swizzled,
swizzled->parent_instr);
}
swizzle[2] < 4 && swizzle[3] < 4) {
unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
/* We have no 0s or 1s, just emit a swizzling MOV */
- swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
+ swizzled = nir_swizzle(b, &tex->def, swiz, 4);
} else {
nir_scalar srcs[4];
for (unsigned i = 0; i < 4; i++) {
if (swizzle[i] < 4) {
- srcs[i] = nir_get_ssa_scalar(&tex->dest.ssa, swizzle[i]);
+ srcs[i] = nir_get_ssa_scalar(&tex->def, swizzle[i]);
} else {
srcs[i] = nir_get_ssa_scalar(get_zero_or_one(b, tex->dest_type, swizzle[i]), 0);
}
}
}
- nir_def_rewrite_uses_after(&tex->dest.ssa, swizzled,
+ nir_def_rewrite_uses_after(&tex->def, swizzled,
swizzled->parent_instr);
}
b->cursor = nir_after_instr(&tex->instr);
nir_def *rgb =
- nir_format_srgb_to_linear(b, nir_trim_vector(b, &tex->dest.ssa, 3));
+ nir_format_srgb_to_linear(b, nir_trim_vector(b, &tex->def, 3));
/* alpha is untouched: */
nir_def *result = nir_vec4(b,
nir_channel(b, rgb, 0),
nir_channel(b, rgb, 1),
nir_channel(b, rgb, 2),
- nir_channel(b, &tex->dest.ssa, 3));
+ nir_channel(b, &tex->def, 3));
- nir_def_rewrite_uses_after(&tex->dest.ssa, result,
+ nir_def_rewrite_uses_after(&tex->def, result,
result->parent_instr);
}
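nir_format_srgb_to_linear applies the standard sRGB transfer function
to the three color channels, while the vec4 reassembly above passes
alpha through untouched. The per-channel math, sketched:

#include <math.h>

/* Standard sRGB-to-linear EOTF; applied to r/g/b only, alpha stays
 * linear in the lowering above. */
static float srgb_to_linear(float c)
{
   return (c <= 0.04045f) ? c / 12.92f
                          : powf((c + 0.055f) / 1.055f, 2.4f);
}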
lower_tex_packing(nir_builder *b, nir_tex_instr *tex,
const nir_lower_tex_options *options)
{
- nir_def *color = &tex->dest.ssa;
+ nir_def *color = &tex->def;
b->cursor = nir_after_instr(&tex->instr);
break;
}
- nir_def_rewrite_uses_after(&tex->dest.ssa, color,
+ nir_def_rewrite_uses_after(&tex->def, color,
color->parent_instr);
return true;
}
nir_tex_src src = nir_tex_src_for_ssa(nir_tex_src_offset, offset);
tex_copy->src[tex_copy->num_srcs - 1] = src;
- nir_def_init(&tex_copy->instr, &tex_copy->dest.ssa,
+ nir_def_init(&tex_copy->instr, &tex_copy->def,
nir_tex_instr_dest_size(tex), 32);
nir_builder_instr_insert(b, &tex_copy->instr);
- dest[i] = nir_get_ssa_scalar(&tex_copy->dest.ssa, 3);
+ dest[i] = nir_get_ssa_scalar(&tex_copy->def, 3);
if (tex->is_sparse) {
- nir_def *code = nir_channel(b, &tex_copy->dest.ssa, 4);
+ nir_def *code = nir_channel(b, &tex_copy->def, 4);
if (residency)
residency = nir_sparse_residency_code_and(b, residency, code);
else
}
dest[4] = nir_get_ssa_scalar(residency, 0);
- nir_def *res = nir_vec_scalars(b, dest, tex->dest.ssa.num_components);
- nir_def_rewrite_uses(&tex->dest.ssa, res);
+ nir_def *res = nir_vec_scalars(b, dest, tex->def.num_components);
+ nir_def_rewrite_uses(&tex->def, res);
nir_instr_remove(&tex->instr);
return true;
* which should return 0, not 1.
*/
b->cursor = nir_after_instr(&tex->instr);
- nir_def *minified = nir_imin(b, &tex->dest.ssa,
- nir_imax(b, nir_ushr(b, &tex->dest.ssa, lod),
+ nir_def *minified = nir_imin(b, &tex->def,
+ nir_imax(b, nir_ushr(b, &tex->def, lod),
nir_imm_int(b, 1)));
/* Make sure the component encoding the array size (if any) is not
for (unsigned i = 0; i < dest_size - 1; i++)
comp[i] = nir_channel(b, minified, i);
- comp[dest_size - 1] = nir_channel(b, &tex->dest.ssa, dest_size - 1);
+ comp[dest_size - 1] = nir_channel(b, &tex->def, dest_size - 1);
minified = nir_vec(b, comp, dest_size);
}
- nir_def_rewrite_uses_after(&tex->dest.ssa, minified,
+ nir_def_rewrite_uses_after(&tex->def, minified,
minified->parent_instr);
return true;
}
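The imin/imax/ushr sequence above computes a mip level's size with two
edge cases intact: a level never reports less than 1, yet a zero-sized
(e.g. unbound) texture must keep reporting 0, which is what the outer
imin against the unminified size restores. As plain integer math:

/* Mip minification: size >> lod clamped to at least 1, except that a
 * zero-sized texture stays 0 thanks to the outer min. */
static unsigned minify(unsigned size, unsigned lod)
{
   unsigned shifted = size >> lod;
   unsigned clamped = shifted > 1 ? shifted : 1;
   return size < clamped ? size : clamped;
}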
b->cursor = nir_after_instr(&tex->instr);
- assert(tex->dest.ssa.num_components == 3);
- nir_def *size = &tex->dest.ssa;
+ assert(tex->def.num_components == 3);
+ nir_def *size = &tex->def;
size = nir_vec3(b, nir_channel(b, size, 1),
nir_channel(b, size, 1),
nir_idiv(b, nir_channel(b, size, 2),
nir_imm_int(b, 6)));
- nir_def_rewrite_uses_after(&tex->dest.ssa, size, size->parent_instr);
+ nir_def_rewrite_uses_after(&tex->def, size, size->parent_instr);
}
/* Adjust the sample index according to AMD FMASK (fragment mask).
fmask_fetch->is_array = tex->is_array;
fmask_fetch->texture_non_uniform = tex->texture_non_uniform;
fmask_fetch->dest_type = nir_type_uint32;
- nir_def_init(&fmask_fetch->instr, &fmask_fetch->dest.ssa, 1, 32);
+ nir_def_init(&fmask_fetch->instr, &fmask_fetch->def, 1, 32);
fmask_fetch->num_srcs = 0;
for (unsigned i = 0; i < tex->num_srcs; i++) {
int ms_index = nir_tex_instr_src_index(tex, nir_tex_src_ms_index);
assert(ms_index >= 0);
nir_src sample = tex->src[ms_index].src;
- nir_def *new_sample = nir_ubfe(b, &fmask_fetch->dest.ssa,
+ nir_def *new_sample = nir_ubfe(b, &fmask_fetch->def,
nir_ishl_imm(b, sample.ssa, 2), nir_imm_int(b, 3));
/* Update instruction. */
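The ubfe above remaps a logical sample index through the fragment
mask: each sample owns a 4-bit slot in the FMASK word (hence the shift
by sample << 2), and the low 3 bits of the slot name the physical
fragment to fetch. Assuming the usual ubfe semantics (extract `bits`
bits starting at bit `offset`), the scalar equivalent:

#include <stdint.h>

/* AMD FMASK remap: (fmask >> (sample * 4)) & 0x7, i.e.
 * ubfe(fmask, sample << 2, 3). */
static uint32_t fmask_remap_sample(uint32_t fmask, uint32_t sample)
{
   return (fmask >> (sample * 4)) & 0x7;
}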
nir_tex_instr *fmask_fetch = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
fmask_fetch->op = nir_texop_fragment_mask_fetch_amd;
fmask_fetch->dest_type = nir_type_uint32;
- nir_def_init(&fmask_fetch->instr, &fmask_fetch->dest.ssa, 1, 32);
+ nir_def_init(&fmask_fetch->instr, &fmask_fetch->def, 1, 32);
nir_builder_instr_insert(b, &fmask_fetch->instr);
- nir_def_rewrite_uses(&tex->dest.ssa, nir_ieq_imm(b, &fmask_fetch->dest.ssa, 0));
+ nir_def_rewrite_uses(&tex->def, nir_ieq_imm(b, &fmask_fetch->def, 0));
nir_instr_remove_v(&tex->instr);
}
/* Replace the raw LOD by -FLT_MAX if the sum is 0 for all coordinates. */
nir_def *adjusted_lod =
nir_bcsel(b, is_zero, nir_imm_float(b, -FLT_MAX),
- nir_channel(b, &tex->dest.ssa, 1));
+ nir_channel(b, &tex->def, 1));
nir_def *def =
- nir_vec2(b, nir_channel(b, &tex->dest.ssa, 0), adjusted_lod);
+ nir_vec2(b, nir_channel(b, &tex->def, 0), adjusted_lod);
- nir_def_rewrite_uses_after(&tex->dest.ssa, def, def->parent_instr);
+ nir_def_rewrite_uses_after(&tex->def, def, def->parent_instr);
}
static bool
}
/* NIR expects a vec4 result from the above texture instructions */
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
- nir_def *tex_r = nir_channel(b, &tex->dest.ssa, 0);
+ nir_def *tex_r = nir_channel(b, &tex->def, 0);
nir_def *cmp = tex->src[comp_index].src.ssa;
int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
nir_def *cond = nir_test_mask(&b, mask, coord_replace);
nir_def *result = nir_bcsel(&b, cond, new_coord,
- &intrin->dest.ssa);
+ &intrin->def);
- nir_def_rewrite_uses_after(&intrin->dest.ssa,
+ nir_def_rewrite_uses_after(&intrin->def,
result,
result->parent_instr);
}
sem.location = VARYING_SLOT_PNTC;
nir_instr_rewrite_src_ssa(instr, offset, nir_imm_int(b, 0));
nir_intrinsic_set_io_semantics(intr, sem);
- nir_def *raw = &intr->dest.ssa;
+ nir_def *raw = &intr->def;
b->cursor = nir_after_instr(instr);
channels[0] = nir_channel_or_undef(b, raw, 0 - component);
}
nir_def *res = nir_vec(b, &channels[component], intr->num_components);
- nir_def_rewrite_uses_after(&intr->dest.ssa, res,
+ nir_def_rewrite_uses_after(&intr->def, res,
res->parent_instr);
return true;
}
}
nir_def *color = nir_bcsel(b, face, front, back);
- nir_def_rewrite_uses(&intr->dest.ssa, color);
+ nir_def_rewrite_uses(&intr->def, color);
return true;
}
unsigned align_mul = nir_intrinsic_align_mul(intr);
unsigned align_offset = nir_intrinsic_align_offset(intr);
- int chan_size_bytes = intr->dest.ssa.bit_size / 8;
+ int chan_size_bytes = intr->def.bit_size / 8;
int chans_per_vec4 = 16 / chan_size_bytes;
/* We don't care if someone figured out that things are aligned beyond
num_components = chans_per_vec4;
nir_intrinsic_instr *load = create_load(b, intr->src[0].ssa, vec4_offset,
- intr->dest.ssa.bit_size,
+ intr->def.bit_size,
num_components);
nir_intrinsic_set_access(load, nir_intrinsic_access(intr));
- nir_def *result = &load->dest.ssa;
+ nir_def *result = &load->def;
int align_chan_offset = align_offset / chan_size_bytes;
if (aligned_mul) {
*/
nir_def *next_vec4_offset = nir_iadd_imm(b, vec4_offset, 1);
nir_intrinsic_instr *next_load = create_load(b, intr->src[0].ssa, next_vec4_offset,
- intr->dest.ssa.bit_size,
+ intr->def.bit_size,
num_components);
nir_def *channels[NIR_MAX_VEC_COMPONENTS];
nir_ieq(b,
chan_vec4_offset,
vec4_offset),
- &load->dest.ssa,
- &next_load->dest.ssa),
+ &load->def,
+ &next_load->def),
component);
}
nir_def *ubo_idx = nir_imm_int(b, 0);
nir_def *uniform_offset = nir_ssa_for_src(b, intr->src[0], 1);
- assert(intr->dest.ssa.bit_size >= 8);
+ assert(intr->def.bit_size >= 8);
nir_def *load_result;
if (state->load_vec4) {
/* Don't ask us to generate load_vec4 when you've packed your uniforms
* as dwords instead of vec4s.
*/
assert(!state->dword_packed);
- load_result = nir_load_ubo_vec4(b, intr->num_components, intr->dest.ssa.bit_size,
+ load_result = nir_load_ubo_vec4(b, intr->num_components, intr->def.bit_size,
ubo_idx, uniform_offset, .base = nir_intrinsic_base(intr));
} else {
/* For PIPE_CAP_PACKED_UNIFORMS, the uniforms are packed with the
* base/offset in dword units instead of vec4 units.
*/
int multiplier = state->dword_packed ? 4 : 16;
- load_result = nir_load_ubo(b, intr->num_components, intr->dest.ssa.bit_size,
+ load_result = nir_load_ubo(b, intr->num_components, intr->def.bit_size,
ubo_idx,
nir_iadd_imm(b, nir_imul_imm(b, uniform_offset, multiplier),
nir_intrinsic_base(intr) * multiplier));
nir_intrinsic_base(intr) * multiplier) %
NIR_ALIGN_MUL_MAX);
} else {
- nir_intrinsic_set_align(load, MAX2(multiplier, intr->dest.ssa.bit_size / 8), 0);
+ nir_intrinsic_set_align(load, MAX2(multiplier, intr->def.bit_size / 8), 0);
}
nir_intrinsic_set_range_base(load, nir_intrinsic_base(intr) * multiplier);
nir_intrinsic_set_range(load, nir_intrinsic_range(intr) * multiplier);
}
- nir_def_rewrite_uses(&intr->dest.ssa, load_result);
+ nir_def_rewrite_uses(&intr->def, load_result);
nir_instr_remove(&intr->instr);
return true;
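With dword packing the incoming base and offset are counted in dwords
(multiplier 4), otherwise in vec4s (multiplier 16); both are scaled by
the same multiplier to produce a byte address, as in the
nir_imul_imm/nir_iadd_imm chain above. A worked sketch:

#include <stdint.h>
#include <stdbool.h>

/* Byte offset of a packed-uniform load: units are dwords or vec4s
 * depending on how the uniforms were packed. */
static uint32_t uniform_byte_offset(uint32_t base, uint32_t offset,
                                    bool dword_packed)
{
   uint32_t multiplier = dword_packed ? 4 : 16;
   return offset * multiplier + base * multiplier;
}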
nir_deref_instr *dst_deref = nir_build_deref_var(b, var);
/* Note that this stores a pointer to src into dst */
- nir_store_deref(b, dst_deref, &src_deref->dest.ssa, ~0);
+ nir_store_deref(b, dst_deref, &src_deref->def, ~0);
progress = true;
var->pointer_initializer = NULL;
nir_undef_instr *undef =
nir_undef_instr_create(state->shader,
load_instr->num_components,
- load_instr->dest.ssa.bit_size);
+ load_instr->def.bit_size);
nir_instr_insert_before(&load_instr->instr, &undef->instr);
nir_instr_remove(&load_instr->instr);
- nir_def_rewrite_uses(&load_instr->dest.ssa, &undef->def);
+ nir_def_rewrite_uses(&load_instr->def, &undef->def);
return true;
}
mov->src[0].swizzle[i] = 0;
nir_def_init(&mov->instr, &mov->def,
- intrin->num_components, intrin->dest.ssa.bit_size);
+ intrin->num_components, intrin->def.bit_size);
nir_instr_insert_before(&intrin->instr, &mov->instr);
nir_instr_remove(&intrin->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
&mov->def);
break;
}
break;
intrin->num_components = 4;
- intrin->dest.ssa.num_components = 4;
+ intrin->def.num_components = 4;
b->cursor = nir_after_instr(&intrin->instr);
- nir_def *vec3 = nir_trim_vector(b, &intrin->dest.ssa, 3);
- nir_def_rewrite_uses_after(&intrin->dest.ssa,
+ nir_def *vec3 = nir_trim_vector(b, &intrin->def, 3);
+ nir_def_rewrite_uses_after(&intrin->def,
vec3,
vec3->parent_instr);
return true;
static void
update_fragcoord(nir_builder *b, nir_intrinsic_instr *intr)
{
- nir_def *wpos = &intr->dest.ssa;
+ nir_def *wpos = &intr->def;
b->cursor = nir_after_instr(&intr->instr);
nir_imm_float(b, 0.0f),
nir_imm_float(b, 0.0f)));
- nir_def_rewrite_uses_after(&intr->dest.ssa, wpos,
+ nir_def_rewrite_uses_after(&intr->def, wpos,
wpos->parent_instr);
}
nir_builder *b = &state->b;
nir_def *wpostrans, *wpos_temp, *wpos_temp_y, *wpos_input;
- wpos_input = &intr->dest.ssa;
+ wpos_input = &intr->def;
b->cursor = nir_after_instr(&intr->instr);
nir_channel(b, wpos_temp, 2),
nir_channel(b, wpos_temp, 3));
- nir_def_rewrite_uses_after(&intr->dest.ssa,
+ nir_def_rewrite_uses_after(&intr->def,
wpos_temp,
wpos_temp->parent_instr);
}
nir_builder *b = &state->b;
b->cursor = nir_after_instr(&intr->instr);
- nir_def *pos = &intr->dest.ssa;
+ nir_def *pos = &intr->def;
nir_def *scale = nir_channel(b, get_transform(state), 0);
nir_def *neg_scale = nir_channel(b, get_transform(state), 2);
/* Either y or 1-y for scale equal to 1 or -1 respectively. */
nir_fmul(b, nir_channel(b, pos, 1), scale));
nir_def *flipped_pos = nir_vec2(b, nir_channel(b, pos, 0), flipped_y);
- nir_def_rewrite_uses_after(&intr->dest.ssa, flipped_pos,
+ nir_def_rewrite_uses_after(&intr->def, flipped_pos,
flipped_pos->parent_instr);
}
if (store->num_components == 1) {
store->num_components = num_components;
nir_instr_rewrite_src(&store->instr, &store->src[0],
- nir_src_for_ssa(&combo->dst->dest.ssa));
+ nir_src_for_ssa(&combo->dst->def));
}
assert(store->num_components == num_components);
nir_const_value *v = const_value_for_deref(deref);
if (v) {
b->cursor = nir_before_instr(&intrin->instr);
- nir_def *val = nir_build_imm(b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size, v);
- nir_def_rewrite_uses(&intrin->dest.ssa, val);
+ nir_def *val = nir_build_imm(b, intrin->def.num_components,
+ intrin->def.bit_size, v);
+ nir_def_rewrite_uses(&intrin->def, val);
nir_instr_remove(&intrin->instr);
return true;
}
b->cursor = nir_before_instr(&intrin->instr);
nir_def *val;
if (offset >= range) {
- val = nir_undef(b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ val = nir_undef(b, intrin->def.num_components,
+ intrin->def.bit_size);
} else {
nir_const_value imm[NIR_MAX_VEC_COMPONENTS];
memset(imm, 0, sizeof(imm));
uint8_t *data = (uint8_t *)b->shader->constant_data + base;
for (unsigned i = 0; i < intrin->num_components; i++) {
- unsigned bytes = intrin->dest.ssa.bit_size / 8;
+ unsigned bytes = intrin->def.bit_size / 8;
bytes = MIN2(bytes, range - offset);
memcpy(&imm[i].u64, data + offset, bytes);
offset += bytes;
}
- val = nir_build_imm(b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size, imm);
+ val = nir_build_imm(b, intrin->def.num_components,
+ intrin->def.bit_size, imm);
}
- nir_def_rewrite_uses(&intrin->dest.ssa, val);
+ nir_def_rewrite_uses(&intrin->def, val);
nir_instr_remove(&intrin->instr);
return true;
}
* the data is constant.
*/
if (nir_src_is_const(intrin->src[0])) {
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
intrin->src[0].ssa);
nir_instr_remove(&intrin->instr);
return true;
case nir_intrinsic_vote_ieq:
if (nir_src_is_const(intrin->src[0])) {
b->cursor = nir_before_instr(&intrin->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
nir_imm_true(b));
nir_instr_remove(&intrin->instr);
return true;
if (available != (1 << num_components) - 1 &&
intrin->intrinsic == nir_intrinsic_load_deref &&
- (available & nir_def_components_read(&intrin->dest.ssa)) == 0) {
+ (available & nir_def_components_read(&intrin->def)) == 0) {
/* If none of the components read are available as SSA values, then we
* should just bail. Otherwise, we would end up replacing the uses of
* the load_deref with a vecN() that just gathers up its components.
b->cursor = nir_after_instr(&intrin->instr);
nir_def *load_def =
- intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
+ intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->def : NULL;
bool keep_intrin = false;
nir_scalar comps[NIR_MAX_VEC_COMPONENTS];
/* Loading from an invalid index yields an undef */
if (vec_index >= vec_comps) {
b->cursor = nir_instr_remove(instr);
- nir_def *u = nir_undef(b, 1, intrin->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa, u);
+ nir_def *u = nir_undef(b, 1, intrin->def.bit_size);
+ nir_def_rewrite_uses(&intrin->def, u);
state->progress = true;
break;
}
* We need to be careful when rewriting uses so we don't
* rewrite the vecN itself.
*/
- nir_def_rewrite_uses_after(&intrin->dest.ssa,
+ nir_def_rewrite_uses_after(&intrin->def,
value.ssa.def[0],
value.ssa.def[0]->parent_instr);
} else {
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
value.ssa.def[0]);
}
} else {
/* We're turning it into a load of a different variable */
- intrin->src[0] = nir_src_for_ssa(&value.deref.instr->dest.ssa);
+ intrin->src[0] = nir_src_for_ssa(&value.deref.instr->def);
/* Put it back in again. */
nir_builder_instr_insert(b, instr);
- value_set_ssa_components(&value, &intrin->dest.ssa,
+ value_set_ssa_components(&value, &intrin->def,
intrin->num_components);
}
state->progress = true;
} else {
- value_set_ssa_components(&value, &intrin->dest.ssa,
+ value_set_ssa_components(&value, &intrin->def,
intrin->num_components);
}
continue;
/* Just turn it into a copy of a different deref */
- intrin->src[1] = nir_src_for_ssa(&value.deref.instr->dest.ssa);
+ intrin->src[1] = nir_src_for_ssa(&value.deref.instr->def);
/* Put it back in again. */
nir_builder_instr_insert(b, instr);
}
case nir_instr_type_deref: {
nir_deref_instr *deref = nir_instr_as_deref(instr);
- return is_def_live(&deref->dest.ssa, defs_live);
+ return is_def_live(&deref->def, defs_live);
}
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
const nir_intrinsic_info *info = &nir_intrinsic_infos[intrin->intrinsic];
return !(info->flags & NIR_INTRINSIC_CAN_ELIMINATE) ||
- (info->has_dest && is_def_live(&intrin->dest.ssa, defs_live));
+ (info->has_dest && is_def_live(&intrin->def, defs_live));
}
case nir_instr_type_tex: {
nir_tex_instr *tex = nir_instr_as_tex(instr);
- return is_def_live(&tex->dest.ssa, defs_live);
+ return is_def_live(&tex->def, defs_live);
}
case nir_instr_type_phi: {
nir_phi_instr *phi = nir_instr_as_phi(instr);
- return is_def_live(&phi->dest.ssa, defs_live);
+ return is_def_live(&phi->def, defs_live);
}
case nir_instr_type_load_const: {
nir_load_const_instr *lc = nir_instr_as_load_const(instr);
case nir_instr_type_parallel_copy: {
nir_parallel_copy_instr *pc = nir_instr_as_parallel_copy(instr);
nir_foreach_parallel_copy_entry(entry, pc) {
- if (entry->dest_is_reg || is_def_live(&entry->dest.dest.ssa, defs_live))
+ if (entry->dest_is_reg || is_def_live(&entry->dest.def, defs_live))
return true;
}
return false;
}
assert(def);
- nir_def_rewrite_uses(&phi->dest.ssa, def);
+ nir_def_rewrite_uses(&phi->def, def);
nir_instr_remove(&phi->instr);
}
}
nir_phi_instr_add_src(phi, prev_block, nir_src_for_ssa(prev_value));
nir_phi_instr_add_src(phi, continue_block, nir_src_for_ssa(alu_copy));
- nir_def_init(&phi->instr, &phi->dest.ssa, alu_copy->num_components,
+ nir_def_init(&phi->instr, &phi->def, alu_copy->num_components,
alu_copy->bit_size);
b->cursor = nir_after_phis(header_block);
* result of the phi.
*/
nir_def_rewrite_uses(&alu->def,
- &phi->dest.ssa);
+ &phi->def);
/* Since the original ALU instruction no longer has any readers, just
* remove it.
continue_block)
->src);
- nir_def_init(&phi->instr, &phi->dest.ssa,
+ nir_def_init(&phi->instr, &phi->def,
bcsel->def.num_components,
bcsel->def.bit_size);
* the phi.
*/
nir_def_rewrite_uses(&bcsel->def,
- &phi->dest.ssa);
+ &phi->def);
/* Since the original bcsel instruction no longer has any readers,
* just remove it.
nir_block *after_if_block = nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));
nir_foreach_phi_safe(phi, after_if_block) {
- if (phi->dest.ssa.bit_size != cond->bit_size ||
- phi->dest.ssa.num_components != 1)
+ if (phi->def.bit_size != cond->bit_size ||
+ phi->def.num_components != 1)
continue;
enum opt_bool {
break;
}
if (then_val == T && else_val == F) {
- nir_def_rewrite_uses(&phi->dest.ssa, cond);
+ nir_def_rewrite_uses(&phi->def, cond);
progress = true;
} else if (then_val == F && else_val == T) {
b->cursor = nir_before_cf_node(&nif->cf_node);
- nir_def_rewrite_uses(&phi->dest.ssa, nir_inot(b, cond));
+ nir_def_rewrite_uses(&phi->def, nir_inot(b, cond));
progress = true;
}
}
* uses is reasonable. If we ever want to use this from an if statement,
* we can change it then.
*/
- if (!list_is_singular(&shuffle->dest.ssa.uses))
+ if (!list_is_singular(&shuffle->def.uses))
return false;
- if (nir_def_used_by_if(&shuffle->dest.ssa))
+ if (nir_def_used_by_if(&shuffle->def))
return false;
*data = shuffle->src[0].ssa;
static bool
try_opt_exclusive_scan_to_inclusive(nir_intrinsic_instr *intrin)
{
- if (intrin->dest.ssa.num_components != 1)
+ if (intrin->def.num_components != 1)
return false;
- nir_foreach_use_including_if(src, &intrin->dest.ssa) {
+ nir_foreach_use_including_if(src, &intrin->def) {
if (src->is_if || src->parent_instr->type != nir_instr_type_alu)
return false;
/* Convert to inclusive scan. */
intrin->intrinsic = nir_intrinsic_inclusive_scan;
- nir_foreach_use_including_if_safe(src, &intrin->dest.ssa) {
+ nir_foreach_use_including_if_safe(src, &intrin->def) {
/* Remove alu. */
nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);
- nir_def_rewrite_uses(&alu->def, &intrin->dest.ssa);
+ nir_def_rewrite_uses(&alu->def, &intrin->def);
nir_instr_remove(&alu->instr);
}
return false;
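The conversion above is justified by the scan identity
inclusive_scan(op, x)[i] == op(exclusive_scan(op, x)[i], x[i]): when
every use of the exclusive scan immediately applies op with the scan's
own operand, the ALU collapses into the scan itself. A standalone
check of the identity for prefix sums:

#include <assert.h>

/* For every lane i, the inclusive prefix equals the exclusive prefix
 * combined with the lane's own input. */
static void check_scan_identity(const int *x, unsigned n)
{
   int excl = 0, incl = 0;
   for (unsigned i = 0; i < n; i++) {
      excl = incl;  /* sum of x[0..i-1] */
      incl += x[i]; /* sum of x[0..i]   */
      assert(incl == excl + x[i]);
   }
}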
bool progress = false;
- nir_foreach_use_safe(use_src, &intrin->dest.ssa) {
+ nir_foreach_use_safe(use_src, &intrin->def) {
if (use_src->parent_instr->type == nir_instr_type_alu) {
nir_alu_instr *alu = nir_instr_as_alu(use_src->parent_instr);
if (info->is_constant) {
b.cursor = nir_after_instr(&intrin->instr);
nir_def *val = build_constant_load(&b, deref, size_align);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
val);
nir_instr_remove(&intrin->instr);
nir_deref_instr_remove_if_unused(deref);
static unsigned
get_bit_size(struct entry *entry)
{
- unsigned size = entry->is_store ? entry->intrin->src[entry->info->value_src].ssa->bit_size : entry->intrin->dest.ssa.bit_size;
+ unsigned size = entry->is_store ? entry->intrin->src[entry->info->value_src].ssa->bit_size : entry->intrin->def.bit_size;
return size == 1 ? 32u : size;
}
if (deref->type == type)
return deref;
- return nir_build_deref_cast(b, &deref->dest.ssa, deref->modes, type, 0);
+ return nir_build_deref_cast(b, &deref->def, deref->modes, type, 0);
}
/* Return true if "new_bit_size" is a usable bit size for a vectorized load/store
offset % nir_deref_instr_array_stride(deref) == 0) {
unsigned stride = nir_deref_instr_array_stride(deref);
nir_def *index = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index) - offset / stride,
- deref->dest.ssa.bit_size);
+ deref->def.bit_size);
return nir_build_deref_ptr_as_array(b, nir_deref_instr_parent(deref), index);
}
b, parent, nir_src_as_int(deref->arr.index) - offset / stride);
}
- deref = nir_build_deref_cast(b, &deref->dest.ssa, deref->modes,
+ deref = nir_build_deref_cast(b, &deref->def, deref->modes,
glsl_scalar_type(GLSL_TYPE_UINT8), 1);
return nir_build_deref_ptr_as_array(
- b, deref, nir_imm_intN_t(b, -offset, deref->dest.ssa.bit_size));
+ b, deref, nir_imm_intN_t(b, -offset, deref->def.bit_size));
}
static void
{
unsigned low_bit_size = get_bit_size(low);
unsigned high_bit_size = get_bit_size(high);
- bool low_bool = low->intrin->dest.ssa.bit_size == 1;
- bool high_bool = high->intrin->dest.ssa.bit_size == 1;
- nir_def *data = &first->intrin->dest.ssa;
+ bool low_bool = low->intrin->def.bit_size == 1;
+ bool high_bool = high->intrin->def.bit_size == 1;
+ nir_def *data = &first->intrin->def;
b->cursor = nir_after_instr(first->instr);
/* update uses */
if (first == low) {
- nir_def_rewrite_uses_after(&low->intrin->dest.ssa, low_def,
+ nir_def_rewrite_uses_after(&low->intrin->def, low_def,
high_def->parent_instr);
- nir_def_rewrite_uses(&high->intrin->dest.ssa, high_def);
+ nir_def_rewrite_uses(&high->intrin->def, high_def);
} else {
- nir_def_rewrite_uses(&low->intrin->dest.ssa, low_def);
- nir_def_rewrite_uses_after(&high->intrin->dest.ssa, high_def,
+ nir_def_rewrite_uses(&low->intrin->def, low_def);
+ nir_def_rewrite_uses_after(&high->intrin->def, high_def,
high_def->parent_instr);
}
first->deref = cast_deref(b, new_num_components, new_bit_size, deref);
nir_instr_rewrite_src(first->instr, &first->intrin->src[info->deref_src],
- nir_src_for_ssa(&first->deref->dest.ssa));
+ nir_src_for_ssa(&first->deref->def));
}
/* update align */
second->deref = cast_deref(b, new_num_components, new_bit_size,
nir_src_as_deref(low->intrin->src[info->deref_src]));
nir_instr_rewrite_src(second->instr, &second->intrin->src[info->deref_src],
- nir_src_for_ssa(&second->deref->dest.ssa));
+ nir_src_for_ssa(&second->deref->def));
}
/* update base/align */
} else {
nir_def *new_def = nir_load_shared2_amd(&b, low_size * 8u, offset, .offset1 = diff / stride,
.st64 = st64);
- nir_def_rewrite_uses(&low->intrin->dest.ssa,
+ nir_def_rewrite_uses(&low->intrin->def,
nir_bitcast_vector(&b, nir_channel(&b, new_def, 0), low_bit_size));
- nir_def_rewrite_uses(&high->intrin->dest.ssa,
+ nir_def_rewrite_uses(&high->intrin->def,
nir_bitcast_vector(&b, nir_channel(&b, new_def, 1), high_bit_size));
}
trip_count)) {
if (intrin->intrinsic == nir_intrinsic_load_deref) {
nir_def *undef =
- nir_undef(&b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_undef(&b, intrin->def.num_components,
+ intrin->def.bit_size);
+ nir_def_rewrite_uses(&intrin->def,
undef);
} else {
nir_instr_remove(instr);
if (cast->type == glsl_int8_t_type() ||
cast->type == glsl_uint8_t_type()) {
nir_instr_rewrite_src(&cpy->instr, deref_src,
- nir_src_for_ssa(&parent->dest.ssa));
+ nir_src_for_ssa(&parent->def));
return true;
}
return false;
nir_instr_rewrite_src(&cpy->instr, deref_src,
- nir_src_for_ssa(&parent->dest.ssa));
+ nir_src_for_ssa(&parent->def));
return true;
}
type_is_tightly_packed(dst->type, &type_size) &&
type_size == size) {
b->cursor = nir_instr_remove(&cpy->instr);
- src = nir_build_deref_cast(b, &src->dest.ssa,
+ src = nir_build_deref_cast(b, &src->def,
src->modes, dst->type, 0);
nir_copy_deref_with_access(b, dst, src,
nir_intrinsic_dst_access(cpy),
_mesa_set_search(complex_vars, dst->var) == NULL &&
glsl_get_explicit_size(dst->type, false) <= size) {
b->cursor = nir_instr_remove(&cpy->instr);
- src = nir_build_deref_cast(b, &src->dest.ssa,
+ src = nir_build_deref_cast(b, &src->def,
src->modes, dst->type, 0);
nir_copy_deref_with_access(b, dst, src,
nir_intrinsic_dst_access(cpy),
type_is_tightly_packed(src->type, &type_size) &&
type_size == size) {
b->cursor = nir_instr_remove(&cpy->instr);
- dst = nir_build_deref_cast(b, &dst->dest.ssa,
+ dst = nir_build_deref_cast(b, &dst->def,
dst->modes, src->type, 0);
nir_copy_deref_with_access(b, dst, src,
nir_intrinsic_dst_access(cpy),
opt_offsets_state *state,
unsigned offset_src_idx)
{
- unsigned comp_size = (intrin->intrinsic == nir_intrinsic_load_shared2_amd ? intrin->dest.ssa.bit_size : intrin->src[0].ssa->bit_size) / 8;
+ unsigned comp_size = (intrin->intrinsic == nir_intrinsic_load_shared2_amd ? intrin->def.bit_size : intrin->src[0].ssa->bit_size) / 8;
unsigned stride = (nir_intrinsic_st64(intrin) ? 64 : 1) * comp_size;
unsigned offset0 = nir_intrinsic_offset0(intrin) * stride;
unsigned offset1 = nir_intrinsic_offset1(intrin) * stride;
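load/store_shared2_amd address two LDS slots relative to one base:
each offsetN field is scaled by the per-component byte size, times 64
when the st64 bit is set, exactly the stride computed above. Sketch:

#include <stdint.h>
#include <stdbool.h>

/* Effective byte offsets of the two halves of a *_shared2_amd access;
 * offsetN are in units of (st64 ? 64 : 1) * component bytes. */
static void shared2_byte_offsets(uint32_t bit_size, bool st64,
                                 uint32_t offset0, uint32_t offset1,
                                 uint32_t *byte0, uint32_t *byte1)
{
   uint32_t stride = (st64 ? 64 : 1) * (bit_size / 8);
   *byte0 = offset0 * stride;
   *byte1 = offset1 * stride;
}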
nir_phi_src *else_src =
nir_phi_get_src_from_block(phi, nir_if_first_else_block(if_stmt));
- nir_foreach_use(src, &phi->dest.ssa) {
+ nir_foreach_use(src, &phi->def) {
assert(src->parent_instr->type == nir_instr_type_phi);
nir_phi_src *phi_src =
nir_phi_get_src_from_block(nir_instr_as_phi(src->parent_instr),
nir_phi_instr *phi = nir_instr_as_phi(instr);
nir_phi_src *else_src =
nir_phi_get_src_from_block(phi, nir_if_first_else_block(if_stmt));
- nir_foreach_use_safe(src, &phi->dest.ssa) {
+ nir_foreach_use_safe(src, &phi->def) {
nir_phi_src *phi_src =
nir_phi_get_src_from_block(nir_instr_as_phi(src->parent_instr),
nir_if_first_else_block(parent_if));
if (phi_src->src.ssa == else_src->src.ssa)
nir_instr_rewrite_src(src->parent_instr, &phi_src->src,
- nir_src_for_ssa(&phi->dest.ssa));
+ nir_src_for_ssa(&phi->def));
}
}
}
nir_def_init(&sel->instr, &sel->def,
- phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
+ phi->def.num_components, phi->def.bit_size);
- nir_def_rewrite_uses(&phi->dest.ssa,
+ nir_def_rewrite_uses(&phi->def,
&sel->def);
nir_instr_insert_before(&phi->instr, &sel->instr);
nir_op op = INVALID_OP;
/* If the phi has already been narrowed, nothing more to do: */
- if (phi->dest.ssa.bit_size != 32)
+ if (phi->def.bit_size != 32)
return false;
/* Are the only uses of the phi conversion instructions, and
* are they all the same conversion?
*/
- nir_foreach_use_including_if(use, &phi->dest.ssa) {
+ nir_foreach_use_including_if(use, &phi->def) {
/* an if use means the phi is used directly in a conditional, ie.
* without a conversion
*/
/* construct replacement phi instruction: */
nir_phi_instr *new_phi = nir_phi_instr_create(b->shader);
- nir_def_init(&new_phi->instr, &new_phi->dest.ssa,
- phi->dest.ssa.num_components,
+ nir_def_init(&new_phi->instr, &new_phi->def,
+ phi->def.num_components,
nir_alu_type_get_type_size(nir_op_infos[op].output_type));
/* Push the conversion into the new phi sources: */
* directly use the new phi, skipping the conversion out of the orig
* phi
*/
- nir_foreach_use(use, &phi->dest.ssa) {
+ nir_foreach_use(use, &phi->def) {
/* We've previously established that all the uses were alu
* conversion ops. Turn them into movs instead.
*/
nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
alu->op = nir_op_mov;
}
- nir_def_rewrite_uses(&phi->dest.ssa, &new_phi->dest.ssa);
+ nir_def_rewrite_uses(&phi->def, &new_phi->def);
/* And finally insert the new phi after all sources are in place: */
b->cursor = nir_after_instr(&phi->instr);
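Pushing the conversions into the phi sources is sound because a phi
merely forwards one of its inputs, so converting its result is the
same as merging pre-converted inputs: conv(phi(a, b)) == phi(conv(a),
conv(b)). The same fact for a scalar select, as a standalone check:

#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

/* Truncating the result of a select equals selecting between
 * pre-truncated values; the pass uses the phi analogue of this. */
static void check_narrowing(bool cond, uint32_t a, uint32_t b)
{
   uint16_t after = (uint16_t)(cond ? a : b);
   uint16_t before = cond ? (uint16_t)a : (uint16_t)b;
   assert(after == before);
}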
try_move_widening_src(nir_builder *b, nir_phi_instr *phi)
{
/* If the phi has already been narrowed, nothing more to do: */
- if (phi->dest.ssa.bit_size != 32)
+ if (phi->def.bit_size != 32)
return false;
unsigned bit_size;
/* construct replacement phi instruction: */
nir_phi_instr *new_phi = nir_phi_instr_create(b->shader);
- nir_def_init(&new_phi->instr, &new_phi->dest.ssa,
- phi->dest.ssa.num_components, bit_size);
+ nir_def_init(&new_phi->instr, &new_phi->def,
+ phi->def.num_components, bit_size);
/* Remove the widening conversions from the phi sources: */
nir_foreach_phi_src(src, phi) {
* and re-write the original phi's uses
*/
b->cursor = nir_after_instr_and_phis(&new_phi->instr);
- nir_def *def = nir_build_alu(b, op, &new_phi->dest.ssa, NULL, NULL, NULL);
+ nir_def *def = nir_build_alu(b, op, &new_phi->def, NULL, NULL, NULL);
- nir_def_rewrite_uses(&phi->dest.ssa, def);
+ nir_def_rewrite_uses(&phi->def, def);
return true;
}
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
switch (intrin->intrinsic) {
case nir_intrinsic_rq_proceed:
- if (!list_is_empty(&intrin->dest.ssa.uses))
+ if (!list_is_empty(&intrin->def.uses))
mark_query_read(queries, intrin);
break;
case nir_intrinsic_rq_load:
return false;
if (intrin->intrinsic == nir_intrinsic_rq_load)
- assert(list_is_empty(&intrin->dest.ssa.uses));
+ assert(list_is_empty(&intrin->def.uses));
nir_instr_remove(instr);
* still dominate the phi node, and the phi will still always take
* the value of that definition.
*/
- if (src->src.ssa == &phi->dest.ssa)
+ if (src->src.ssa == &phi->def)
continue;
if (def == NULL) {
/* In this case, the phi had no sources. So turn it into an undef. */
b->cursor = nir_after_phis(block);
- def = nir_undef(b, phi->dest.ssa.num_components,
- phi->dest.ssa.bit_size);
+ def = nir_undef(b, phi->def.num_components,
+ phi->def.bit_size);
} else if (mov) {
/* If the sources were all movs from the same source with the same
* swizzle, then we can't just pick a random move because it may not
def = nir_mov_alu(b, mov->src[0], def->num_components);
}
- nir_def_rewrite_uses(&phi->dest.ssa, def);
+ nir_def_rewrite_uses(&phi->def, def);
nir_instr_remove(&phi->instr);
progress = true;
static bool
shrink_intrinsic_to_non_sparse(nir_intrinsic_instr *instr)
{
- unsigned mask = nir_def_components_read(&instr->dest.ssa);
+ unsigned mask = nir_def_components_read(&instr->def);
int last_bit = util_last_bit(mask);
/* If the sparse component is used, do nothing. */
- if (last_bit == instr->dest.ssa.num_components)
+ if (last_bit == instr->def.num_components)
return false;
- instr->dest.ssa.num_components -= 1;
- instr->num_components = instr->dest.ssa.num_components;
+ instr->def.num_components -= 1;
+ instr->num_components = instr->def.num_components;
/* Switch to the non-sparse intrinsic. */
switch (instr->intrinsic) {
assert(instr->num_components != 0);
/* Trim the dest to the used channels */
- if (!shrink_dest_to_read_mask(&instr->dest.ssa))
+ if (!shrink_dest_to_read_mask(&instr->def))
return false;
- instr->num_components = instr->dest.ssa.num_components;
+ instr->num_components = instr->def.num_components;
return true;
}
case nir_intrinsic_image_sparse_load:
if (!tex->is_sparse)
return false;
- unsigned mask = nir_def_components_read(&tex->dest.ssa);
+ unsigned mask = nir_def_components_read(&tex->def);
int last_bit = util_last_bit(mask);
/* If the sparse component is used, do nothing. */
- if (last_bit == tex->dest.ssa.num_components)
+ if (last_bit == tex->def.num_components)
return false;
- tex->dest.ssa.num_components -= 1;
+ tex->def.num_components -= 1;
tex->is_sparse = false;
return true;
static bool
opt_shrink_vectors_phi(nir_builder *b, nir_phi_instr *instr)
{
- nir_def *def = &instr->dest.ssa;
+ nir_def *def = &instr->def;
/* early out if there's nothing to do. */
if (def->num_components == 1)
if (return_prev) {
nir_push_else(b, nif);
- nir_def *undef = nir_undef(b, 1, intrin->dest.ssa.bit_size);
+ nir_def *undef = nir_undef(b, 1, intrin->def.bit_size);
nir_pop_if(b, nif);
- nir_def *result = nir_if_phi(b, &intrin->dest.ssa, undef);
+ nir_def *result = nir_if_phi(b, &intrin->def, undef);
result = nir_read_first_invocation(b, result);
if (!combined_scan_reduce)
helper_nif = nir_push_if(b, nir_inot(b, helper));
}
- ASSERTED bool original_result_divergent = intrin->dest.ssa.divergent;
- bool return_prev = !nir_def_is_unused(&intrin->dest.ssa);
+ ASSERTED bool original_result_divergent = intrin->def.divergent;
+ bool return_prev = !nir_def_is_unused(&intrin->def);
- nir_def old_result = intrin->dest.ssa;
- list_replace(&intrin->dest.ssa.uses, &old_result.uses);
- nir_def_init(&intrin->instr, &intrin->dest.ssa, 1,
- intrin->dest.ssa.bit_size);
+ nir_def old_result = intrin->def;
+ list_replace(&intrin->def.uses, &old_result.uses);
+ nir_def_init(&intrin->instr, &intrin->def, 1,
+ intrin->def.bit_size);
nir_def *result = optimize_atomic(b, intrin, return_prev);
* be used.
*/
nir_phi_instr *phi = nir_phi_instr_create(val->builder->shader);
- nir_def_init(&phi->instr, &phi->dest.ssa, val->num_components,
+ nir_def_init(&phi->instr, &phi->def, val->num_components,
val->bit_size);
phi->instr.block = dom;
exec_list_push_tail(&val->phis, &phi->instr.node);
- def = &phi->dest.ssa;
+ def = &phi->def;
he->data = def;
} else {
/* In this case, we have an actual SSA def. It's either the result of a
{
FILE *fp = state->fp;
- print_def(&instr->dest.ssa, state);
+ print_def(&instr->def, state);
switch (instr->deref_type) {
case nir_deref_type_var:
FILE *fp = state->fp;
if (info->has_dest) {
- print_def(&instr->dest.ssa, state);
+ print_def(&instr->def, state);
fprintf(fp, " = ");
} else {
print_no_dest_padding(state);
{
FILE *fp = state->fp;
- print_def(&instr->dest.ssa, state);
+ print_def(&instr->def, state);
fprintf(fp, " = (");
print_alu_type(instr->dest_type, state);
print_phi_instr(nir_phi_instr *instr, print_state *state)
{
FILE *fp = state->fp;
- print_def(&instr->dest.ssa, state);
+ print_def(&instr->def, state);
fprintf(fp, " = phi ");
nir_foreach_phi_src(src, instr) {
if (&src->node != exec_list_get_head(&instr->srcs))
fprintf(fp, "*");
print_src(&entry->dest.reg, state, nir_type_invalid);
} else {
- print_def(&entry->dest.dest.ssa, state);
+ print_def(&entry->dest.def, state);
}
fprintf(fp, " = ");
case nir_instr_type_tex: {
nir_tex_instr *tex = nir_instr_as_tex(instr);
- if (def_is_invariant(&tex->dest.ssa, invariants))
+ if (def_is_invariant(&tex->def, invariants))
nir_foreach_src(instr, add_src_cb, invariants);
break;
}
break;
case nir_intrinsic_load_deref:
- if (def_is_invariant(&intrin->dest.ssa, invariants))
+ if (def_is_invariant(&intrin->def, invariants))
add_var(nir_intrinsic_get_var(intrin, 0), invariants);
break;
case nir_instr_type_phi: {
nir_phi_instr *phi = nir_instr_as_phi(instr);
- if (!def_is_invariant(&phi->dest.ssa, invariants))
+ if (!def_is_invariant(&phi->def, invariants))
break;
nir_foreach_phi_src(src, phi) {
case nir_intrinsic_quad_swap_vertical:
case nir_intrinsic_quad_swap_diagonal:
if (src_idx == 0) {
- bits_used |= ssa_def_bits_used(&use_intrin->dest.ssa, recur);
+ bits_used |= ssa_def_bits_used(&use_intrin->def, recur);
} else {
if (use_intrin->intrinsic == nir_intrinsic_quad_broadcast) {
bits_used |= 3;
case nir_op_ior:
case nir_op_iand:
case nir_op_ixor:
- bits_used |= ssa_def_bits_used(&use_intrin->dest.ssa, recur);
+ bits_used |= ssa_def_bits_used(&use_intrin->def, recur);
break;
default:
case nir_instr_type_phi: {
nir_phi_instr *use_phi = nir_instr_as_phi(src->parent_instr);
- bits_used |= ssa_def_bits_used(&use_phi->dest.ssa, recur);
+ bits_used |= ssa_def_bits_used(&use_phi->def, recur);
break;
}
static bool
deref_used_for_not_store(nir_deref_instr *deref)
{
- nir_foreach_use(src, &deref->dest.ssa) {
+ nir_foreach_use(src, &deref->def) {
switch (src->parent_instr->type) {
case nir_instr_type_deref:
if (deref_used_for_not_store(nir_instr_as_deref(src->parent_instr)))
cast->parent = nir_src_for_ssa(block_def);
cast->cast.ptr_stride = nir_deref_instr_array_stride(deref);
- nir_def_init(&cast->instr, &cast->dest.ssa, def->num_components,
+ nir_def_init(&cast->instr, &cast->def, def->num_components,
def->bit_size);
nir_instr_insert(nir_before_instr(src->parent_instr),
&cast->instr);
- block_def = &cast->dest.ssa;
+ block_def = &cast->def;
}
if (src->is_if)
state->regs_freed += nir_schedule_reg_pressure(reg);
}
- nir_schedule_regs_freed_def_cb(&load->dest.ssa, state);
+ nir_schedule_regs_freed_def_cb(&load->def, state);
}
static void
nir_schedule_mark_use(scoreboard, reg, &load->instr,
nir_schedule_reg_pressure(reg));
- nir_schedule_mark_def_scheduled(&load->dest.ssa, scoreboard);
+ nir_schedule_mark_def_scheduled(&load->def, scoreboard);
}
static void
header.deref.in_bounds = deref->arr.in_bounds;
}
- write_def(ctx, &deref->dest.ssa, header, deref->instr.type);
+ write_def(ctx, &deref->def, header, deref->instr.type);
switch (deref->deref_type) {
case nir_deref_type_var:
nir_deref_type deref_type = header.deref.deref_type;
nir_deref_instr *deref = nir_deref_instr_create(ctx->nir, deref_type);
- read_def(ctx, &deref->dest.ssa, &deref->instr, header);
+ read_def(ctx, &deref->def, &deref->instr, header);
nir_deref_instr *parent;
}
if (nir_intrinsic_infos[intrin->intrinsic].has_dest)
- write_def(ctx, &intrin->dest.ssa, header, intrin->instr.type);
+ write_def(ctx, &intrin->def, header, intrin->instr.type);
else
blob_write_uint32(ctx->blob, header.u32);
unsigned num_indices = nir_intrinsic_infos[op].num_indices;
if (nir_intrinsic_infos[op].has_dest)
- read_def(ctx, &intrin->dest.ssa, &intrin->instr, header);
+ read_def(ctx, &intrin->def, &intrin->instr, header);
for (unsigned i = 0; i < num_srcs; i++)
read_src(ctx, &intrin->src[i]);
*/
if (nir_intrinsic_infos[op].has_dest &&
nir_intrinsic_infos[op].dest_components == 0) {
- intrin->num_components = intrin->dest.ssa.num_components;
+ intrin->num_components = intrin->def.num_components;
} else {
for (unsigned i = 0; i < num_srcs; i++) {
if (nir_intrinsic_infos[op].src_components[i] == 0) {
header.tex.num_srcs = tex->num_srcs;
header.tex.op = tex->op;
- write_def(ctx, &tex->dest.ssa, header, tex->instr.type);
+ write_def(ctx, &tex->def, header, tex->instr.type);
blob_write_uint32(ctx->blob, tex->texture_index);
blob_write_uint32(ctx->blob, tex->sampler_index);
{
nir_tex_instr *tex = nir_tex_instr_create(ctx->nir, header.tex.num_srcs);
- read_def(ctx, &tex->dest.ssa, &tex->instr, header);
+ read_def(ctx, &tex->def, &tex->instr, header);
tex->op = header.tex.op;
tex->texture_index = blob_read_uint32(ctx->blob);
* and then store enough information so that a later fixup pass can fill
* them in correctly.
*/
- write_def(ctx, &phi->dest.ssa, header, phi->instr.type);
+ write_def(ctx, &phi->def, header, phi->instr.type);
nir_foreach_phi_src(src, phi) {
size_t blob_offset = blob_reserve_uint32(ctx->blob);
{
nir_phi_instr *phi = nir_phi_instr_create(ctx->nir);
- read_def(ctx, &phi->dest.ssa, &phi->instr, header);
+ read_def(ctx, &phi->def, &phi->instr, header);
/* For similar reasons as before, we just store the index directly into the
* pointer, and let a later pass resolve the phi sources.
switch (intr->intrinsic) {
case nir_intrinsic_load_deref: {
- if (intr->dest.ssa.bit_size != 64)
+ if (intr->def.bit_size != 64)
return false;
nir_variable *var = nir_intrinsic_get_var(intr, 0);
if (var->data.mode != nir_var_function_temp)
return false;
- return intr->dest.ssa.num_components >= 3;
+ return intr->def.num_components >= 3;
}
case nir_intrinsic_store_deref: {
if (nir_src_bit_size(intr->src[1]) != 64)
}
case nir_instr_type_phi: {
nir_phi_instr *phi = nir_instr_as_phi(instr);
- if (phi->dest.ssa.bit_size != 64)
+ if (phi->def.bit_size != 64)
return false;
- return phi->dest.ssa.num_components >= 3;
+ return phi->def.num_components >= 3;
}
default:
nir_deref_path path;
nir_deref_path_init(&path, deref, NULL);
- nir_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size);
+ nir_def *offset = nir_imm_intN_t(b, 0, deref->def.bit_size);
for (nir_deref_instr **p = &path.path[1]; *p; p++) {
switch ((*p)->deref_type) {
case nir_deref_type_array: {
deref2 = nir_build_deref_array(b, deref2, offset);
}
- nir_def *load1 = nir_build_load_deref(b, 2, 64, &deref1->dest.ssa, 0);
- nir_def *load2 = nir_build_load_deref(b, old_components - 2, 64, &deref2->dest.ssa, 0);
+ nir_def *load1 = nir_build_load_deref(b, 2, 64, &deref1->def, 0);
+ nir_def *load2 = nir_build_load_deref(b, old_components - 2, 64, &deref2->def, 0);
return merge_to_vec3_or_vec4(b, load1, load2);
}
int write_mask_xy = nir_intrinsic_write_mask(intr) & 3;
if (write_mask_xy) {
nir_def *src_xy = nir_trim_vector(b, intr->src[1].ssa, 2);
- nir_build_store_deref(b, &deref_xy->dest.ssa, src_xy, write_mask_xy);
+ nir_build_store_deref(b, &deref_xy->def, src_xy, write_mask_xy);
}
int write_mask_zw = nir_intrinsic_write_mask(intr) & 0xc;
if (write_mask_zw) {
nir_def *src_zw = nir_channels(b, intr->src[1].ssa,
nir_component_mask(intr->src[1].ssa->num_components) & 0xc);
- nir_build_store_deref(b, &deref_zw->dest.ssa, src_zw, write_mask_zw >> 2);
+ nir_build_store_deref(b, &deref_zw->def, src_zw, write_mask_zw >> 2);
}
return NIR_LOWER_INSTR_PROGRESS_REPLACE;
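Splitting the 64-bit vec4 store into two vec2 stores just partitions
the write mask: bits 0-1 address the xy half unchanged, while bits 2-3
shift down to bits 0-1 of the zw half (a mask of 0xc, write z and w,
becomes 0x3 on the second store). Sketch of the mask math:

#include <stdint.h>

/* Partition a vec4 write mask across two vec2 stores. */
static void split_write_mask(uint8_t mask, uint8_t *xy, uint8_t *zw)
{
   *xy = mask & 0x3;        /* components x, y */
   *zw = (mask & 0xc) >> 2; /* components z, w -> bits 0, 1 */
}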
static nir_def *
split_phi(nir_builder *b, nir_phi_instr *phi)
{
- nir_op vec_op = nir_op_vec(phi->dest.ssa.num_components);
+ nir_op vec_op = nir_op_vec(phi->def.num_components);
nir_alu_instr *vec = nir_alu_instr_create(b->shader, vec_op);
nir_def_init(&vec->instr, &vec->def,
- phi->dest.ssa.num_components, 64);
+ phi->def.num_components, 64);
- int num_comp[2] = { 2, phi->dest.ssa.num_components - 2 };
+ int num_comp[2] = { 2, phi->def.num_components - 2 };
nir_phi_instr *new_phi[2];
for (unsigned i = 0; i < 2; i++) {
new_phi[i] = nir_phi_instr_create(b->shader);
- nir_def_init(&new_phi[i]->instr, &new_phi[i]->dest.ssa, num_comp[i],
- phi->dest.ssa.bit_size);
+ nir_def_init(&new_phi[i]->instr, &new_phi[i]->def, num_comp[i],
+ phi->def.bit_size);
nir_foreach_phi_src(src, phi) {
/* Insert at the end of the predecessor but before the jump
}
b->cursor = nir_after_instr(&phi->instr);
- return merge_to_vec3_or_vec4(b, &new_phi[0]->dest.ssa, &new_phi[1]->dest.ssa);
+ return merge_to_vec3_or_vec4(b, &new_phi[0]->def, &new_phi[1]->def);
};
static nir_def *
b->cursor = nir_before_instr(&deref->instr);
nir_deref_instr *member_deref =
build_member_deref(b, nir_deref_instr_parent(deref), member);
- nir_def_rewrite_uses(&deref->dest.ssa,
- &member_deref->dest.ssa);
+ nir_def_rewrite_uses(&deref->def,
+ &member_deref->def);
/* The referenced variable is no longer valid, clean up the deref */
nir_deref_instr_remove_if_unused(deref);
}
assert(new_deref->type == deref->type);
- nir_def_rewrite_uses(&deref->dest.ssa,
- &new_deref->dest.ssa);
+ nir_def_rewrite_uses(&deref->def,
+ &new_deref->def);
nir_deref_instr_remove_if_unused(deref);
}
}
*/
if (intrin->intrinsic == nir_intrinsic_load_deref) {
nir_def *u =
- nir_undef(&b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_undef(&b, intrin->def.num_components,
+ intrin->def.bit_size);
+ nir_def_rewrite_uses(&intrin->def,
u);
}
nir_instr_remove(&intrin->instr);
/* Rewrite the deref source to point to the split one */
nir_instr_rewrite_src(&intrin->instr, &intrin->src[d],
- nir_src_for_ssa(&new_deref->dest.ssa));
+ nir_src_for_ssa(&new_deref->def));
nir_deref_instr_remove_if_unused(deref);
}
}
switch (intrin->intrinsic) {
case nir_intrinsic_load_deref:
mark_deref_used(nir_src_as_deref(intrin->src[0]),
- nir_def_components_read(&intrin->dest.ssa), 0,
+ nir_def_components_read(&intrin->def), 0,
NULL, var_usage_map, modes, mem_ctx);
break;
if (usage->comps_kept == 0 || vec_deref_is_oob(deref, usage)) {
if (intrin->intrinsic == nir_intrinsic_load_deref) {
nir_def *u =
- nir_undef(&b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_undef(&b, intrin->def.num_components,
+ intrin->def.bit_size);
+ nir_def_rewrite_uses(&intrin->def,
u);
}
nir_instr_remove(&intrin->instr);
b.cursor = nir_after_instr(&intrin->instr);
nir_def *undef =
- nir_undef(&b, 1, intrin->dest.ssa.bit_size);
+ nir_undef(&b, 1, intrin->def.bit_size);
nir_def *vec_srcs[NIR_MAX_VEC_COMPONENTS];
unsigned c = 0;
for (unsigned i = 0; i < intrin->num_components; i++) {
if (usage->comps_kept & (1u << i))
- vec_srcs[i] = nir_channel(&b, &intrin->dest.ssa, c++);
+ vec_srcs[i] = nir_channel(&b, &intrin->def, c++);
else
vec_srcs[i] = undef;
}
nir_def *vec = nir_vec(&b, vec_srcs, intrin->num_components);
- nir_def_rewrite_uses_after(&intrin->dest.ssa,
+ nir_def_rewrite_uses_after(&intrin->def,
vec,
vec->parent_instr);
/* The SSA def is now only used by the swizzle. It's safe to
* shrink the number of components.
*/
- assert(list_length(&intrin->dest.ssa.uses) == c);
+ assert(list_length(&intrin->def.uses) == c);
intrin->num_components = c;
- intrin->dest.ssa.num_components = c;
+ intrin->def.num_components = c;
} else {
nir_component_mask_t write_mask =
nir_intrinsic_write_mask(intrin);
/* Initialize a phi-instruction */
nir_phi_instr *phi = nir_phi_instr_create(state->shader);
- nir_def_init(&phi->instr, &phi->dest.ssa, def->num_components,
+ nir_def_init(&phi->instr, &phi->def, def->num_components,
def->bit_size);
/* Create a phi node with as many sources pointing to the same ssa_def as
}
nir_instr_insert_before_block(state->block_after_loop, &phi->instr);
- nir_def *dest = &phi->dest.ssa;
+ nir_def *dest = &phi->def;
/* deref instructions need a cast after the phi */
if (def->parent_instr->type == nir_instr_type_deref) {
nir_deref_instr *instr = nir_instr_as_deref(def->parent_instr);
cast->modes = instr->modes;
cast->type = instr->type;
- cast->parent = nir_src_for_ssa(&phi->dest.ssa);
+ cast->parent = nir_src_for_ssa(&phi->def);
cast->cast.ptr_stride = nir_deref_instr_array_stride(instr);
- nir_def_init(&cast->instr, &cast->dest.ssa,
- phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
+ nir_def_init(&cast->instr, &cast->def,
+ phi->def.num_components, phi->def.bit_size);
nir_instr_insert(nir_after_phis(state->block_after_loop), &cast->instr);
- dest = &cast->dest.ssa;
+ dest = &cast->def;
}
/* Run through all uses and rewrite those outside the loop to point to
assert(nir_is_load_reg(load));
nir_builder b = nir_builder_at(nir_after_instr(&load->instr));
- nir_def *copy = nir_mov(&b, &load->dest.ssa);
- copy->divergent = load->dest.ssa.divergent;
- nir_def_rewrite_uses_after(&load->dest.ssa, copy, copy->parent_instr);
+ nir_def *copy = nir_mov(&b, &load->def);
+ copy->divergent = load->def.divergent;
+ nir_def_rewrite_uses_after(&load->def, copy, copy->parent_instr);
- assert(list_is_singular(&load->dest.ssa.uses));
+ assert(list_is_singular(&load->def.uses));
}
struct trivialize_src_state {
{
assert(nir_is_load_reg(load));
- unsigned nr = load->dest.ssa.num_components;
+ unsigned nr = load->def.num_components;
trivialize_reg_stores(load->src[0].ssa, nir_component_mask(nr),
possibly_trivial_stores);
}
/* The parent pointer value must have the same number of components
* as the destination.
*/
- validate_src(&instr->parent, state, instr->dest.ssa.bit_size,
- instr->dest.ssa.num_components);
+ validate_src(&instr->parent, state, instr->def.bit_size,
+ instr->def.num_components);
nir_instr *parent_instr = instr->parent.ssa->parent_instr;
if (instr->deref_type == nir_deref_type_array) {
validate_src(&instr->arr.index, state,
- instr->dest.ssa.bit_size, 1);
+ instr->def.bit_size, 1);
}
break;
parent->deref_type == nir_deref_type_ptr_as_array ||
parent->deref_type == nir_deref_type_cast);
validate_src(&instr->arr.index, state,
- instr->dest.ssa.bit_size, 1);
+ instr->def.bit_size, 1);
break;
default:
* want to let other compiler components such as SPIR-V decide how big
* pointers should be.
*/
- validate_def(&instr->dest.ssa, state, 0, 0);
+ validate_def(&instr->def, state, 0, 0);
/* Certain modes cannot be used as sources for phi instructions because
* way too many passes assume that they can always chase deref chains.
*/
- nir_foreach_use_including_if(use, &instr->dest.ssa) {
+ nir_foreach_use_including_if(use, &instr->def) {
/* Deref instructions as if-conditions don't make sense because
* if-conditions expect well-formed Booleans. If you want to compare
* with NULL, use an explicit comparison operation.
case nir_intrinsic_load_reg:
case nir_intrinsic_load_reg_indirect:
validate_register_handle(instr->src[0],
- instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size, state);
+ instr->def.num_components,
+ instr->def.bit_size, state);
break;
case nir_intrinsic_store_reg:
}
case nir_intrinsic_load_ubo_vec4: {
- int bit_size = instr->dest.ssa.bit_size;
+ int bit_size = instr->def.bit_size;
validate_assert(state, bit_size >= 8);
validate_assert(state, (nir_intrinsic_component(instr) +
instr->num_components) *
case nir_intrinsic_load_per_primitive_output:
case nir_intrinsic_load_push_constant:
/* All memory load operations must load at least a byte */
- validate_assert(state, instr->dest.ssa.bit_size >= 8);
+ validate_assert(state, instr->def.bit_size >= 8);
break;
case nir_intrinsic_store_ssbo:
}
validate_assert(state, allowed);
- validate_assert(state, instr->dest.ssa.bit_size ==
+ validate_assert(state, instr->def.bit_size ==
util_format_get_blocksizebits(format));
}
break;
else
dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;
- validate_def(&instr->dest.ssa, state, dest_bit_size, components_written);
+ validate_def(&instr->def, state, dest_bit_size, components_written);
}
if (!vectorized_intrinsic(instr))
if (instr->is_gather_implicit_lod)
validate_assert(state, instr->op == nir_texop_tg4);
- validate_def(&instr->dest.ssa, state, 0, nir_tex_instr_dest_size(instr));
+ validate_def(&instr->def, state, 0, nir_tex_instr_dest_size(instr));
unsigned bit_size = nir_alu_type_get_type_size(instr->dest_type);
validate_assert(state,
(bit_size ? bit_size : 32) ==
- instr->dest.ssa.bit_size);
+ instr->def.bit_size);
}
static void
* basic blocks, to avoid validating an SSA use before its definition.
*/
- validate_def(&instr->dest.ssa, state, 0, 0);
+ validate_def(&instr->def, state, 0, 0);
exec_list_validate(&instr->srcs);
validate_assert(state, exec_list_length(&instr->srcs) ==
exec_list_validate(&instr->srcs);
nir_foreach_phi_src(src, instr) {
if (src->pred == pred) {
- validate_src(&src->src, state, instr->dest.ssa.bit_size,
- instr->dest.ssa.num_components);
+ validate_src(&src->src, state, instr->def.bit_size,
+ instr->def.num_components);
state->instr = NULL;
return;
}
void algebraic_test_base::test_op(nir_op op, nir_def *src0, nir_def *src1,
nir_def *src2, nir_def *src3, const char *desc)
{
- nir_def *res_deref = &nir_build_deref_var(b, res_var)->dest.ssa;
+ nir_def *res_deref = &nir_build_deref_var(b, res_var)->def;
/* create optimized expression */
nir_intrinsic_instr *optimized_instr = nir_build_store_deref(
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_deref);
store->num_components = val->num_components;
- store->src[0] = nir_src_for_ssa(&nir_build_deref_var(b, var)->dest.ssa);
+ store->src[0] = nir_src_for_ssa(&nir_build_deref_var(b, var)->def);
store->src[1] = nir_src_for_ssa(val);
nir_intrinsic_set_write_mask(store, ((1 << val->num_components) - 1));
nir_builder_instr_insert(b, &store->instr);
{
nir_phi_instr *phi = nir_phi_instr_create(shader);
nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(def));
- nir_def_init(&phi->instr, &phi->dest.ssa, def->num_components,
+ nir_def_init(&phi->instr, &phi->def, def->num_components,
def->bit_size);
return phi;
nir_phi_instr *phi = create_one_source_phi(b->shader, one->parent_instr->block, one);
nir_instr_insert_before_block(one->parent_instr->block, &phi->instr);
- nir_store_var(b, var, &phi->dest.ssa, 0x1);
+ nir_store_var(b, var, &phi->def, 0x1);
nir_pop_loop(b, loop);
/* This is a macro so you get good line numbers */
#define EXPECT_INSTR_SWIZZLES(instr, load, expected_swizzle) \
- EXPECT_EQ((instr)->src[0].src.ssa, &(load)->dest.ssa); \
+ EXPECT_EQ((instr)->src[0].src.ssa, &(load)->def); \
EXPECT_EQ(swizzle(instr, 0), expected_swizzle);
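EXPECT_INSTR_SWIZZLES stays a macro so the expansion happens at each
call site and a failing expectation reports the test's own __LINE__
rather than the line of a shared helper. The same trick in plain C,
with a hypothetical CHECK_EQ that is not part of these tests:

#include <stdio.h>

/* A helper function would always report its own line; a macro expands
 * at the call site, so diagnostics point at the offending test line. */
#define CHECK_EQ(a, b)                                  \
   do {                                                 \
      if ((a) != (b))                                   \
         fprintf(stderr, "%s:%d: %s != %s\n",           \
                 __FILE__, __LINE__, #a, #b);           \
   } while (0)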
namespace {
nir_intrinsic_instr *res = nir_intrinsic_instr_create(
b->shader, nir_intrinsic_vulkan_resource_index);
- nir_def_init(&res->instr, &res->dest.ssa, 1, 32);
+ nir_def_init(&res->instr, &res->def, 1, 32);
res->num_components = 1;
res->src[0] = nir_src_for_ssa(nir_imm_zero(b, 1, 32));
nir_intrinsic_set_desc_type(
nir_intrinsic_set_desc_set(res, 0);
nir_intrinsic_set_binding(res, binding);
nir_builder_instr_insert(b, &res->instr);
- res_map[binding] = &res->dest.ssa;
- return &res->dest.ssa;
+ res_map[binding] = &res->def;
+ return &res->def;
}
nir_intrinsic_instr *
return NULL;
}
nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, intrinsic);
- nir_def_init(&load->instr, &load->dest.ssa, components, bit_size);
+ nir_def_init(&load->instr, &load->def, components, bit_size);
load->num_components = components;
if (res) {
load->src[0] = nir_src_for_ssa(res);
}
nir_builder_instr_insert(b, &load->instr);
- nir_alu_instr *mov = nir_instr_as_alu(nir_mov(b, &load->dest.ssa)->parent_instr);
+ nir_alu_instr *mov = nir_instr_as_alu(nir_mov(b, &load->def)->parent_instr);
movs[id] = mov;
loads[id] = &mov->src[0];
return;
}
nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, intrinsic);
- nir_def_init(&store->instr, &store->dest.ssa, components, bit_size);
+ nir_def_init(&store->instr, &store->def, components, bit_size);
store->num_components = components;
if (res) {
store->src[0] = nir_src_for_ssa(value);
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ubo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ubo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
ASSERT_EQ(nir_intrinsic_range_base(load), 0);
ASSERT_EQ(nir_intrinsic_range(load), 8);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ubo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ubo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 3);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 3);
ASSERT_EQ(nir_intrinsic_range_base(load), 0);
ASSERT_EQ(nir_intrinsic_range(load), 12);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ubo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ubo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 4);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 4);
ASSERT_EQ(nir_intrinsic_range_base(load), 0);
ASSERT_EQ(nir_intrinsic_range(load), 16);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
- ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
- ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
+ ASSERT_EQ(loads[0x1]->src.ssa, &load->def);
+ ASSERT_EQ(loads[0x2]->src.ssa, &load->def);
ASSERT_EQ(loads[0x1]->swizzle[0], 0);
ASSERT_EQ(loads[0x1]->swizzle[1], 1);
ASSERT_EQ(loads[0x1]->swizzle[2], 2);
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ubo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ubo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 1);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 1);
ASSERT_EQ(nir_intrinsic_range_base(load), 0);
ASSERT_EQ(nir_intrinsic_range(load), 4);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
- ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
- ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
+ ASSERT_EQ(loads[0x1]->src.ssa, &load->def);
+ ASSERT_EQ(loads[0x2]->src.ssa, &load->def);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "x");
}
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_push_constant), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_push_constant, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
ASSERT_EQ(nir_src_as_uint(load->src[0]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_push_constant), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_push_constant, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
ASSERT_EQ(nir_src_as_uint(load->src[0]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
ASSERT_EQ(load->src[1].ssa, index_base);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
ASSERT_EQ(load->src[1].ssa, index_base_prev);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 1);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 1);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
ASSERT_EQ(count_intrinsics(nir_intrinsic_store_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x3], load, "y");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 8);
- ASSERT_EQ(load->dest.ssa.num_components, 4);
+ ASSERT_EQ(load->def.bit_size, 8);
+ ASSERT_EQ(load->def.num_components, 4);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
high = nir_instr_as_alu(high->parent_instr)->src[0].src.ssa;
ASSERT_TRUE(test_alu(low->parent_instr, nir_op_u2u16));
ASSERT_TRUE(test_alu(high->parent_instr, nir_op_u2u16));
- ASSERT_TRUE(test_alu_def(low->parent_instr, 0, &load->dest.ssa, 2));
- ASSERT_TRUE(test_alu_def(high->parent_instr, 0, &load->dest.ssa, 3));
+ ASSERT_TRUE(test_alu_def(low->parent_instr, 0, &load->def, 2));
+ ASSERT_TRUE(test_alu_def(high->parent_instr, 0, &load->def, 3));
}
TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64)
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 4);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 4);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 64);
- ASSERT_EQ(load->dest.ssa.num_components, 3);
+ ASSERT_EQ(load->def.bit_size, 64);
+ ASSERT_EQ(load->def.num_components, 3);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x3], load, "z");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 3);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 3);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 4);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
deref = nir_src_as_deref(load->src[0]);
ASSERT_EQ(deref->deref_type, nir_deref_type_cast);
{
nir_variable *var = nir_variable_create(b->shader, nir_var_mem_shared, glsl_array_type(glsl_uint_type(), 4, 0), "var");
nir_deref_instr *deref = nir_build_deref_var(b, var);
- nir_def_init(&deref->instr, &deref->dest.ssa, 1, 64);
+ nir_def_init(&deref->instr, &deref->def, 1, 64);
create_shared_load(nir_build_deref_array_imm(b, deref, 0x100000000), 0x1);
create_shared_load(nir_build_deref_array_imm(b, deref, 0x200000001), 0x2);
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
deref = nir_src_as_deref(load->src[0]);
ASSERT_EQ(deref->deref_type, nir_deref_type_cast);
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
deref = nir_src_as_deref(load->src[0]);
ASSERT_EQ(deref->deref_type, nir_deref_type_cast);
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
deref = nir_src_as_deref(load->src[0]);
ASSERT_EQ(deref->deref_type, nir_deref_type_cast);
ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 1);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 1);
deref = nir_src_as_deref(load->src[0]);
ASSERT_EQ(deref->deref_type, nir_deref_type_array);
ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
deref = nir_src_as_deref(load->src[0]);
ASSERT_EQ(deref->deref_type, nir_deref_type_cast);
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
deref = nir_src_as_deref(load->src[0]);
ASSERT_EQ(deref->deref_type, nir_deref_type_cast);
/* The loaded value is converted to Boolean by (loaded != 0). */
ASSERT_TRUE(test_alu(loads[0x1]->src.ssa->parent_instr, nir_op_ine));
ASSERT_TRUE(test_alu(loads[0x2]->src.ssa->parent_instr, nir_op_ine));
- ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->dest.ssa, 0));
- ASSERT_TRUE(test_alu_def(loads[0x2]->src.ssa->parent_instr, 0, &load->dest.ssa, 1));
+ ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->def, 0));
+ ASSERT_TRUE(test_alu_def(loads[0x2]->src.ssa->parent_instr, 0, &load->def, 1));
}
TEST_F(nir_load_store_vectorize_test, shared_load_bool_mixed)
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
deref = nir_src_as_deref(load->src[0]);
ASSERT_EQ(deref->deref_type, nir_deref_type_cast);
/* The loaded value is converted to Boolean by (loaded != 0). */
ASSERT_TRUE(test_alu(loads[0x1]->src.ssa->parent_instr, nir_op_ine));
- ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->dest.ssa, 0));
+ ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->def, 0));
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
}
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_push_constant), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_push_constant, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
ASSERT_EQ(load->src[0].ssa, low);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 1);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 1);
ASSERT_EQ(load->src[1].ssa, offset);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 1);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 1);
ASSERT_EQ(load->src[1].ssa, offset);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 1);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 1);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 1);
- ASSERT_EQ(load->src[0].ssa, &load_deref->dest.ssa);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 1);
+ ASSERT_EQ(load->src[0].ssa, &load_deref->def);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
}
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 1);
- ASSERT_EQ(load->src[0].ssa, &load_deref->dest.ssa);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 1);
+ ASSERT_EQ(load->src[0].ssa, &load_deref->def);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x3], load, "x");
}
ASSERT_EQ(count_intrinsics(nir_intrinsic_load_ssbo), 2);
nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_ssbo, 0);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 1);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 1);
ASSERT_EQ(load->src[1].ssa, offset);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "x");
load = get_intrinsic(nir_intrinsic_load_ssbo, 1);
- ASSERT_EQ(load->dest.ssa.bit_size, 32);
- ASSERT_EQ(load->dest.ssa.num_components, 2);
+ ASSERT_EQ(load->def.bit_size, 32);
+ ASSERT_EQ(load->def.num_components, 2);
ASSERT_EQ(load->src[1].ssa, offset_4);
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "x");
EXPECT_INSTR_SWIZZLES(movs[0x3], load, "y");
nir_loop *loop = nir_push_loop(b);
{
- nir_def_init(&phi->instr, &phi->dest.ssa, ssa_0->num_components,
+ nir_def_init(&phi->instr, &phi->def, ssa_0->num_components,
ssa_0->bit_size);
nir_phi_instr_add_src(phi, ssa_0->parent_instr->block,
nir_src_for_ssa(ssa_0));
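/* Only the pre-loop source is known here; the back-edge source is added after the loop body is built. */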
- nir_def *ssa_5 = &phi->dest.ssa;
+ nir_def *ssa_5 = &phi->def;
nir_def *ssa_3 = p.cond_instr(b, ssa_5, ssa_1);
nir_if *nif = nir_push_if(b, ssa_3);
nir_loop *loop = nir_push_loop(b);
{
- nir_def_init(&phi->instr, &phi->dest.ssa, ssa_0->num_components,
+ nir_def_init(&phi->instr, &phi->def, ssa_0->num_components,
ssa_0->bit_size);
nir_phi_instr_add_src(phi, ssa_0->parent_instr->block,
nir_src_for_ssa(ssa_0));
- nir_def *ssa_5 = &phi->dest.ssa;
+ nir_def *ssa_5 = &phi->def;
nir_def *ssa_3 = p.incr_instr(b, ssa_5, ssa_1);
nir_block *head_block = nir_loop_first_block(loop);
nir_phi_instr *phi = nir_phi_instr_create(bld->shader);
- nir_def_init(&phi->instr, &phi->dest.ssa, 1, 32);
+ nir_def_init(&phi->instr, &phi->def, 1, 32);
nir_phi_instr_add_src(phi, top_block, nir_src_for_ssa(init));
nir_def *cond = cond_instr(bld,
- (reverse ? limit : &phi->dest.ssa),
- (reverse ? &phi->dest.ssa : limit));
+ (reverse ? limit : &phi->def),
+ (reverse ? &phi->def : limit));
nir_if *nif = nir_push_if(bld, cond);
nir_jump(bld, nir_jump_break);
nir_pop_if(bld, nif);
- nir_def *var = incr_instr(bld, &phi->dest.ssa, step);
+ nir_def *var = incr_instr(bld, &phi->def, step);
nir_phi_instr_add_src(phi, nir_cursor_current_block(bld->cursor),
nir_src_for_ssa(var));
b, nir_imm_vec3(b, 1.7014118346046923e+38, 1.7014118346046923e+38, 8.507059173023462e+37),
nir_imm_vec3(b, -0.5, 1.5, 1.0));
nir_intrinsic_instr *store =
- nir_build_store_deref(b, &nir_build_deref_var(b, res_var)->dest.ssa, val);
+ nir_build_store_deref(b, &nir_build_deref_var(b, res_var)->def, val);
nir_lower_alu_width(b->shader, NULL, NULL);
nir_opt_constant_folding(b->shader);
nir_phi_instr_add_src(phi, then_block, nir_src_for_ssa(one));
- nir_def_init(&phi->instr, &phi->dest.ssa,
+ nir_def_init(&phi->instr, &phi->def,
one->num_components, one->bit_size);
nir_builder_instr_insert(b, &phi->instr);
nir_loop *loop = nir_push_loop(b);
{
- nir_def_init(&phi->instr, &phi->dest.ssa,
+ nir_def_init(&phi->instr, &phi->def,
x->num_components, x->bit_size);
nir_phi_instr_add_src(phi, x->parent_instr->block, nir_src_for_ssa(x));
- nir_def *y = nir_iadd(b, &phi->dest.ssa, two);
+ nir_def *y = nir_iadd(b, &phi->def, two);
nir_store_var(b, out_var,
- nir_imul(b, &phi->dest.ssa, two), 1);
+ nir_imul(b, &phi->def, two), 1);
nir_phi_instr_add_src(phi, nir_cursor_current_block(b->cursor), nir_src_for_ssa(y));
}
nir_def *loop_max = nir_imm_float(b, 3.0);
nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
- nir_def *phi_def = &phi->dest.ssa;
+ nir_def *phi_def = &phi->def;
nir_loop *loop = nir_push_loop(b);
- nir_def_init(&phi->instr, &phi->dest.ssa, v->num_components, v->bit_size);
+ nir_def_init(&phi->instr, &phi->def, v->num_components, v->bit_size);
nir_phi_instr_add_src(phi, v->parent_instr->block,
nir_src_for_ssa(v));
nir_def *loop_max = nir_imm_float(b, 3.0);
nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
- nir_def *phi_def = &phi->dest.ssa;
+ nir_def *phi_def = &phi->def;
nir_loop *loop = nir_push_loop(b);
- nir_def_init(&phi->instr, &phi->dest.ssa, v->num_components, v->bit_size);
+ nir_def_init(&phi->instr, &phi->def, v->num_components, v->bit_size);
nir_phi_instr_add_src(phi, v->parent_instr->block,
nir_src_for_ssa(v));
nir_def *loop_max = nir_imm_float(b, 3.0);
nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
- nir_def *phi_def = &phi->dest.ssa;
+ nir_def *phi_def = &phi->def;
nir_loop *loop = nir_push_loop(b);
- nir_def_init(&phi->instr, &phi->dest.ssa, v->num_components, v->bit_size);
+ nir_def_init(&phi->instr, &phi->def, v->num_components, v->bit_size);
nir_phi_instr_add_src(phi, v->parent_instr->block,
nir_src_for_ssa(v));
nir_def *cond = nir_imm_false(b);
nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
- nir_def_init(&phi->instr, &phi->dest.ssa, 1, 32);
+ nir_def_init(&phi->instr, &phi->def, 1, 32);
nir_push_loop(b);
- nir_def *sel = nir_bcsel(b, cond, &phi->dest.ssa, two);
+ nir_def *sel = nir_bcsel(b, cond, &phi->def, two);
nir_pop_loop(b, NULL);
nir_phi_instr_add_src(phi, zero->parent_instr->block,
nir_validate_shader(b->shader, NULL);
struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);
- nir_scalar scalar = nir_get_ssa_scalar(&phi->dest.ssa, 0);
+ nir_scalar scalar = nir_get_ssa_scalar(&phi->def, 0);
EXPECT_EQ(nir_unsigned_upper_bound(b->shader, range_ht, scalar, NULL), 2);
_mesa_hash_table_destroy(range_ht, NULL);
}
nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
for (int i = 0; i < 4; i++)
- nir_store_deref(b, nir_build_deref_array(b, temp_deref, &ind_deref->dest.ssa), nir_load_var(b, in[i]), 1);
+ nir_store_deref(b, nir_build_deref_array(b, temp_deref, &ind_deref->def), nir_load_var(b, in[i]), 1);
nir_validate_shader(b->shader, NULL);
ASSERT_EQ(count_derefs(nir_deref_type_array), 4);
nir_deref_instr *temp_deref = nir_build_deref_var(b, temp);
for (int i = 0; i < 4; i++) {
- nir_deref_instr *level0 = nir_build_deref_array(b, temp_deref, &ind_deref->dest.ssa);
+ nir_deref_instr *level0 = nir_build_deref_array(b, temp_deref, &ind_deref->def);
for (int j = 0; j < 6; j++) {
nir_deref_instr *level1 = nir_build_deref_array_imm(b, level0, j);
nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
nir_deref_instr *level0 = nir_build_deref_array_imm(b, temp_deref, i);
for (int j = 0; j < 6; j++) {
/* just add the inner index to get some different derefs */
- nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd_imm(b, &ind_deref->dest.ssa, j));
+ nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd_imm(b, &ind_deref->def, j));
nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
}
}
{
struct vtn_type *type = vtn_get_value_type(b, value_id);
vtn_assert(type->base_type == vtn_base_type_image);
- struct vtn_value *value = vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
+ struct vtn_value *value = vtn_push_nir_ssa(b, value_id, &deref->def);
value->propagated_non_uniform = propagate_non_uniform;
}
vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
struct vtn_sampled_image si)
{
- return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
+ return nir_vec2(&b->nb, &si.image->def, &si.sampler->def);
}
static void
nir_tex_src srcs[10]; /* 10 should be enough */
nir_tex_src *p = srcs;
- p->src = nir_src_for_ssa(&image->dest.ssa);
+ p->src = nir_src_for_ssa(&image->def);
p->src_type = nir_tex_src_texture_deref;
p++;
vtn_fail_if(sampler == NULL,
"%s requires an image of type OpTypeSampledImage",
spirv_op_to_string(opcode));
- p->src = nir_src_for_ssa(&sampler->dest.ssa);
+ p->src = nir_src_for_ssa(&sampler->def);
p->src_type = nir_tex_src_sampler_deref;
p++;
break;
instr->dest_type = dest_type;
- nir_def_init(&instr->instr, &instr->dest.ssa,
+ nir_def_init(&instr->instr, &instr->def,
nir_tex_instr_dest_size(instr), 32);
vtn_assert(glsl_get_vector_elements(ret_type->type) ==
if (is_sparse) {
struct vtn_ssa_value *dest = vtn_create_ssa_value(b, struct_type->type);
unsigned result_size = glsl_get_vector_elements(ret_type->type);
- dest->elems[0]->def = nir_channel(&b->nb, &instr->dest.ssa, result_size);
- dest->elems[1]->def = nir_trim_vector(&b->nb, &instr->dest.ssa,
+ dest->elems[0]->def = nir_channel(&b->nb, &instr->def, result_size);
+ dest->elems[1]->def = nir_trim_vector(&b->nb, &instr->def,
result_size);
vtn_push_ssa_value(b, w[2], dest);
} else {
- vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &instr->def);
}
}
if (nir_intrinsic_has_atomic_op(intrin))
nir_intrinsic_set_atomic_op(intrin, translate_atomic_op(opcode));
- intrin->src[0] = nir_src_for_ssa(&image.image->dest.ssa);
+ intrin->src[0] = nir_src_for_ssa(&image.image->def);
nir_intrinsic_set_image_dim(intrin, glsl_get_sampler_dim(image.image->type));
nir_intrinsic_set_image_array(intrin,
glsl_sampler_type_is_array(image.image->type));
opcode == SpvOpImageQuerySizeLod)
bit_size = MIN2(bit_size, 32);
- nir_def_init(&intrin->instr, &intrin->dest.ssa,
+ nir_def_init(&intrin->instr, &intrin->def,
nir_intrinsic_dest_components(intrin), bit_size);
nir_builder_instr_insert(&b->nb, &intrin->instr);
- nir_def *result = nir_trim_vector(&b->nb, &intrin->dest.ssa,
+ nir_def *result = nir_trim_vector(&b->nb, &intrin->def,
dest_components);
if (opcode == SpvOpImageQuerySize ||
struct vtn_ssa_value *dest = vtn_create_ssa_value(b, struct_type->type);
unsigned res_type_size = glsl_get_vector_elements(type->type);
dest->elems[0]->def = nir_channel(&b->nb, result, res_type_size);
- if (intrin->dest.ssa.bit_size != 32)
+ if (intrin->def.bit_size != 32)
dest->elems[0]->def = nir_u2u32(&b->nb, dest->elems[0]->def);
dest->elems[1]->def = nir_trim_vector(&b->nb, result, res_type_size);
vtn_push_ssa_value(b, w[2], dest);
nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
- atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ atomic->src[0] = nir_src_for_ssa(&deref->def);
/* SSBO needs to initialize index/offset. In this case we don't need to,
* as that info is already stored on the ptr->var->var nir_variable (see
const struct glsl_type *deref_type = deref->type;
nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
- atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ atomic->src[0] = nir_src_for_ssa(&deref->def);
if (nir_intrinsic_has_atomic_op(atomic))
nir_intrinsic_set_atomic_op(atomic, translate_atomic_op(opcode));
if (opcode == SpvOpAtomicFlagTestAndSet) {
/* map atomic flag to a 32-bit atomic integer. */
- nir_def_init(&atomic->instr, &atomic->dest.ssa, 1, 32);
+ nir_def_init(&atomic->instr, &atomic->def, 1, 32);
} else {
- nir_def_init(&atomic->instr, &atomic->dest.ssa,
+ nir_def_init(&atomic->instr, &atomic->def,
glsl_get_vector_elements(type->type),
glsl_get_bit_size(type->type));
- vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &atomic->def);
}
}
nir_builder_instr_insert(&b->nb, &atomic->instr);
if (opcode == SpvOpAtomicFlagTestAndSet) {
- vtn_push_nir_ssa(b, w[2], nir_i2b(&b->nb, &atomic->dest.ssa));
+ vtn_push_nir_ssa(b, w[2], nir_i2b(&b->nb, &atomic->def));
}
if (after_semantics)
vtn_emit_memory_barrier(b, scope, after_semantics);
payload = vtn_get_call_payload_for_location(b, w[11]);
else
payload = vtn_nir_deref(b, w[11]);
- intrin->src[10] = nir_src_for_ssa(&payload->dest.ssa);
+ intrin->src[10] = nir_src_for_ssa(&payload->def);
nir_builder_instr_insert(&b->nb, &intrin->instr);
break;
}
nir_intrinsic_report_ray_intersection);
intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
intrin->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
- nir_def_init(&intrin->instr, &intrin->dest.ssa, 1, 1);
+ nir_def_init(&intrin->instr, &intrin->def, 1, 1);
nir_builder_instr_insert(&b->nb, &intrin->instr);
- vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &intrin->def);
break;
}
payload = vtn_get_call_payload_for_location(b, w[2]);
else
payload = vtn_nir_deref(b, w[2]);
- intrin->src[1] = nir_src_for_ssa(&payload->dest.ssa);
+ intrin->src[1] = nir_src_for_ssa(&payload->def);
nir_builder_instr_insert(&b->nb, &intrin->instr);
break;
}
nir_local_variable_create(impl, in_var->type, "copy_in");
nir_copy_var(&b->nb, copy_var, in_var);
call->params[i] =
- nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
+ nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->def);
} else if (param_type->base_type == vtn_base_type_image ||
param_type->base_type == vtn_base_type_sampler) {
/* Don't load the var, just pass a deref of it */
- call->params[i] = nir_src_for_ssa(&nir_build_deref_var(&b->nb, in_var)->dest.ssa);
+ call->params[i] = nir_src_for_ssa(&nir_build_deref_var(&b->nb, in_var)->def);
} else {
call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
}
const struct glsl_type *dest_type = vtn_get_type(b, w[1])->type;
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);
- nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa, dest_type);
+ nir_def_init_for_type(&intrin->instr, &intrin->def, dest_type);
if (nir_intrinsic_infos[op].src_components[0] == 0)
- intrin->num_components = intrin->dest.ssa.num_components;
+ intrin->num_components = intrin->def.num_components;
for (unsigned i = 0; i < num_args; i++)
intrin->src[i] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[i + 5]));
}
nir_builder_instr_insert(&b->nb, &intrin->instr);
- vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &intrin->def);
return true;
}
vec_deref = deref;
deref = nir_deref_instr_parent(deref);
}
- intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ intrin->src[0] = nir_src_for_ssa(&deref->def);
intrin->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
intrin->num_components = glsl_get_vector_elements(deref->type);
- nir_def_init(&intrin->instr, &intrin->dest.ssa,
+ nir_def_init(&intrin->instr, &intrin->def,
glsl_get_vector_elements(deref->type),
glsl_get_bit_size(deref->type));
nir_def *def;
if (vec_array_deref) {
assert(vec_deref);
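/* The intrinsic produced the whole vector; extract the dynamically indexed component from its result. */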
- def = nir_vector_extract(&b->nb, &intrin->dest.ssa,
+ def = nir_vector_extract(&b->nb, &intrin->def,
vec_deref->arr.index.ssa);
} else {
- def = &intrin->dest.ssa;
+ def = &intrin->def;
}
vtn_push_nir_ssa(b, w[2], def);
glsl_get_bare_type(ret_type->type),
"return_tmp");
ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
- call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
+ call->params[param_idx++] = nir_src_for_ssa(&ret_deref->def);
}
for (unsigned i = 0; i < vtn_callee->type->length; i++) {
vec_deref = deref;
deref = nir_deref_instr_parent(deref);
}
- intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ intrin->src[0] = nir_src_for_ssa(&deref->def);
switch (opcode) {
case GLSLstd450InterpolateAtCentroid:
}
intrin->num_components = glsl_get_vector_elements(deref->type);
- nir_def_init(&intrin->instr, &intrin->dest.ssa,
+ nir_def_init(&intrin->instr, &intrin->def,
glsl_get_vector_elements(deref->type),
glsl_get_bit_size(deref->type));
nir_builder_instr_insert(&b->nb, &intrin->instr);
- nir_def *def = &intrin->dest.ssa;
+ nir_def *def = &intrin->def;
if (vec_array_deref)
def = nir_vector_extract(&b->nb, def, vec_deref->arr.index.ssa);
glsl_get_bare_type(dest_type->type),
"return_tmp");
ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
- call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
+ call->params[param_idx++] = nir_src_for_ssa(&ret_deref->def);
}
for (unsigned i = 0; i < num_srcs; i++)
/* Lastly, the actual intrinsic */
nir_def *fmt_idx = nir_imm_int(&b->nb, info_idx);
- nir_def *ret = nir_printf(&b->nb, fmt_idx, &deref_var->dest.ssa);
+ nir_def *ret = nir_printf(&b->nb, fmt_idx, &deref_var->def);
vtn_push_nir_ssa(b, w_dest[1], ret);
}
nir_intrinsic_instr *intrin =
nir_intrinsic_instr_create(b->nb.shader, nir_op);
- nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa, dst->type);
- intrin->num_components = intrin->dest.ssa.num_components;
+ nir_def_init_for_type(&intrin->instr, &intrin->def, dst->type);
+ intrin->num_components = intrin->def.num_components;
intrin->src[0] = nir_src_for_ssa(src0->def);
if (index)
nir_builder_instr_insert(&b->nb, &intrin->instr);
- dst->def = &intrin->dest.ssa;
+ dst->def = &intrin->def;
return dst;
}
"OpGroupNonUniformElect must return a Bool");
nir_intrinsic_instr *elect =
nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_elect);
- nir_def_init_for_type(&elect->instr, &elect->dest.ssa, dest_type->type);
+ nir_def_init_for_type(&elect->instr, &elect->def, dest_type->type);
nir_builder_instr_insert(&b->nb, &elect->instr);
- vtn_push_nir_ssa(b, w[2], &elect->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &elect->def);
break;
}
nir_intrinsic_instr *ballot =
nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_ballot);
ballot->src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[3 + has_scope]));
- nir_def_init(&ballot->instr, &ballot->dest.ssa, 4, 32);
+ nir_def_init(&ballot->instr, &ballot->def, 4, 32);
ballot->num_components = 4;
nir_builder_instr_insert(&b->nb, &ballot->instr);
- vtn_push_nir_ssa(b, w[2], &ballot->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &ballot->def);
break;
}
intrin->src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
intrin->src[1] = nir_src_for_ssa(nir_load_subgroup_invocation(&b->nb));
- nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa,
+ nir_def_init_for_type(&intrin->instr, &intrin->def,
dest_type->type);
nir_builder_instr_insert(&b->nb, &intrin->instr);
- vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &intrin->def);
break;
}
if (src1)
intrin->src[1] = nir_src_for_ssa(src1);
- nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa,
+ nir_def_init_for_type(&intrin->instr, &intrin->def,
dest_type->type);
nir_builder_instr_insert(&b->nb, &intrin->instr);
- vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &intrin->def);
break;
}
if (nir_intrinsic_infos[op].src_components[0] == 0)
intrin->num_components = src0->num_components;
intrin->src[0] = nir_src_for_ssa(src0);
- nir_def_init_for_type(&intrin->instr, &intrin->dest.ssa,
+ nir_def_init_for_type(&intrin->instr, &intrin->def,
dest_type->type);
nir_builder_instr_insert(&b->nb, &intrin->instr);
- vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &intrin->def);
break;
}
nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));
nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
- nir_def_init(&instr->instr, &instr->dest.ssa,
+ nir_def_init(&instr->instr, &instr->def,
nir_address_format_num_components(addr_format),
nir_address_format_bit_size(addr_format));
- instr->num_components = instr->dest.ssa.num_components;
+ instr->num_components = instr->def.num_components;
nir_builder_instr_insert(&b->nb, &instr->instr);
- return &instr->dest.ssa;
+ return &instr->def;
}
static nir_def *
nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));
nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
- nir_def_init(&instr->instr, &instr->dest.ssa,
+ nir_def_init(&instr->instr, &instr->def,
nir_address_format_num_components(addr_format),
nir_address_format_bit_size(addr_format));
- instr->num_components = instr->dest.ssa.num_components;
+ instr->num_components = instr->def.num_components;
nir_builder_instr_insert(&b->nb, &instr->instr);
- return &instr->dest.ssa;
+ return &instr->def;
}
static nir_def *
nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));
nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
- nir_def_init(&desc_load->instr, &desc_load->dest.ssa,
+ nir_def_init(&desc_load->instr, &desc_load->def,
nir_address_format_num_components(addr_format),
nir_address_format_bit_size(addr_format));
- desc_load->num_components = desc_load->dest.ssa.num_components;
+ desc_load->num_components = desc_load->def.num_components;
nir_builder_instr_insert(&b->nb, &desc_load->instr);
- return &desc_load->dest.ssa;
+ return &desc_load->def;
}
static struct vtn_pointer *
assert(base->var && base->var->var);
tail = nir_build_deref_var(&b->nb, base->var->var);
if (base->ptr_type && base->ptr_type->type) {
- tail->dest.ssa.num_components =
+ tail->def.num_components =
glsl_get_vector_elements(base->ptr_type->type);
- tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
+ tail->def.bit_size = glsl_get_bit_size(base->ptr_type->type);
}
}
/* We start with a deref cast to get the stride. Hopefully, we'll be
* able to delete that cast eventually.
*/
- tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->modes,
+ tail = nir_build_deref_cast(&b->nb, &tail->def, tail->modes,
tail->type, base->ptr_type->stride);
nir_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
- tail->dest.ssa.bit_size);
+ tail->def.bit_size);
tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
idx++;
}
} else {
nir_def *arr_index =
vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
- tail->dest.ssa.bit_size);
+ tail->def.bit_size);
tail = nir_build_deref_array(&b->nb, tail, arr_index);
type = type->array_element;
}
return ptr->block_index;
} else {
- return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
+ return &vtn_pointer_to_deref(b, ptr)->def;
}
}
*/
ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
deref_type, ptr_type->stride);
- ptr->deref->dest.ssa.num_components =
+ ptr->deref->def.num_components =
glsl_get_vector_elements(ptr_type->type);
- ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
+ ptr->deref->def.bit_size = glsl_get_bit_size(ptr_type->type);
}
return ptr;
nir_address_format_bit_size(addr_format),
nir_address_format_null_value(addr_format));
- nir_def *valid = nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa, nir_mode);
+ nir_def *valid = nir_build_deref_mode_is(&b->nb, 1, &src_deref->def, nir_mode);
vtn_push_nir_ssa(b, w[2], nir_bcsel(&b->nb, valid,
- &src_deref->dest.ssa,
+ &src_deref->def,
null_value));
break;
}
nir_deref_instr *src_deref = vtn_nir_deref(b, w[3]);
nir_def *global_bit =
- nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa,
+ nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->def,
nir_var_mem_global),
nir_imm_int(&b->nb, SpvMemorySemanticsCrossWorkgroupMemoryMask),
nir_imm_int(&b->nb, 0));
nir_def *shared_bit =
- nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->dest.ssa,
+ nir_bcsel(&b->nb, nir_build_deref_mode_is(&b->nb, 1, &src_deref->def,
nir_var_mem_shared),
nir_imm_int(&b->nb, SpvMemorySemanticsWorkgroupMemoryMask),
nir_imm_int(&b->nb, 0));
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(b->nb.shader,
nir_intrinsic_load_deref_block_intel);
- load->src[0] = nir_src_for_ssa(&src->dest.ssa);
- nir_def_init_for_type(&load->instr, &load->dest.ssa, res_type->type);
- load->num_components = load->dest.ssa.num_components;
+ load->src[0] = nir_src_for_ssa(&src->def);
+ nir_def_init_for_type(&load->instr, &load->def, res_type->type);
+ load->num_components = load->def.num_components;
nir_builder_instr_insert(&b->nb, &load->instr);
- vtn_push_nir_ssa(b, w[2], &load->dest.ssa);
+ vtn_push_nir_ssa(b, w[2], &load->def);
break;
}
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(b->nb.shader,
nir_intrinsic_store_deref_block_intel);
- store->src[0] = nir_src_for_ssa(&dest->dest.ssa);
+ store->src[0] = nir_src_for_ssa(&dest->def);
store->src[1] = nir_src_for_ssa(data);
store->num_components = data->num_components;
nir_builder_instr_insert(&b->nb, &store->instr);
ldib->dsts[0]->wrmask = MASK(intr->num_components);
ldib->cat6.iim_val = intr->num_components;
ldib->cat6.d = 1;
- ldib->cat6.type = intr->dest.ssa.bit_size == 16 ? TYPE_U16 : TYPE_U32;
+ ldib->cat6.type = intr->def.bit_size == 16 ? TYPE_U16 : TYPE_U32;
ldib->barrier_class = IR3_BARRIER_BUFFER_R;
ldib->barrier_conflict = IR3_BARRIER_BUFFER_W;
ir3_handle_bindless_cat6(ldib, intr->src[0]);
create_immed(b, 0), 0, create_immed(b, dest_components), 0);
}
- load->cat6.type = type_uint_size(intr->dest.ssa.bit_size);
+ load->cat6.type = type_uint_size(intr->def.bit_size);
load->dsts[0]->wrmask = MASK(dest_components);
load->barrier_class = IR3_BARRIER_BUFFER_R;
ldc->dsts[0]->wrmask = MASK(ncomp);
ldc->cat6.iim_val = ncomp;
ldc->cat6.d = nir_intrinsic_component(intr);
- ldc->cat6.type = utype_def(&intr->dest.ssa);
+ ldc->cat6.type = utype_def(&intr->def);
ir3_handle_bindless_cat6(ldc, intr->src[0]);
if (ldc->flags & IR3_INSTR_B)
ldl = ir3_LDL(b, offset, 0, create_immed(b, base), 0,
create_immed(b, intr->num_components), 0);
- ldl->cat6.type = utype_def(&intr->dest.ssa);
+ ldl->cat6.type = utype_def(&intr->def);
ldl->dsts[0]->wrmask = MASK(intr->num_components);
ldl->barrier_class = IR3_BARRIER_SHARED_R;
if (ctx->so->type == MESA_SHADER_TESS_CTRL && ctx->compiler->tess_use_shared)
load->opc = OPC_LDL;
- load->cat6.type = utype_def(&intr->dest.ssa);
+ load->cat6.type = utype_def(&intr->def);
load->dsts[0]->wrmask = MASK(intr->num_components);
load->barrier_class = IR3_BARRIER_SHARED_R;
ldp = ir3_LDP(b, offset, 0, create_immed(b, base), 0,
create_immed(b, intr->num_components), 0);
- ldp->cat6.type = utype_def(&intr->dest.ssa);
+ ldp->cat6.type = utype_def(&intr->def);
ldp->dsts[0]->wrmask = MASK(intr->num_components);
ldp->barrier_class = IR3_BARRIER_PRIVATE_R;
struct tex_src_info info = get_image_ssbo_samp_tex_src(ctx, &intr->src[0], true);
struct ir3_instruction *sam, *lod;
unsigned flags, ncoords = ir3_get_image_coords(intr, &flags);
- type_t dst_type = intr->dest.ssa.bit_size == 16 ? TYPE_U16 : TYPE_U32;
+ type_t dst_type = intr->def.bit_size == 16 ? TYPE_U16 : TYPE_U32;
info.flags |= flags;
assert(nir_src_as_uint(intr->src[1]) == 0);
{
/* Note: isam currently can't handle vectorized loads/stores */
if (!(nir_intrinsic_access(intr) & ACCESS_CAN_REORDER) ||
- intr->dest.ssa.num_components > 1) {
+ intr->def.num_components > 1) {
ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
return;
}
struct ir3_instruction *coords = ir3_collect(b, offset, create_immed(b, 0));
struct tex_src_info info = get_image_ssbo_samp_tex_src(ctx, &intr->src[0], false);
- unsigned num_components = intr->dest.ssa.num_components;
+ unsigned num_components = intr->def.num_components;
struct ir3_instruction *sam =
- emit_sam(ctx, OPC_ISAM, info, utype_for_size(intr->dest.ssa.bit_size),
+ emit_sam(ctx, OPC_ISAM, info, utype_for_size(intr->def.bit_size),
MASK(num_components), coords, NULL);
ir3_handle_nonuniform(sam, intr);
ctx->frag_coord = ir3_create_collect(b, xyzw, 4);
}
- ctx->so->fragcoord_compmask |= nir_def_components_read(&intr->dest.ssa);
+ ctx->so->fragcoord_compmask |= nir_def_components_read(&intr->def);
return ctx->frag_coord;
}
struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
nir_op nir_reduce_op = (nir_op) nir_intrinsic_reduction_op(intr);
reduce_op_t reduce_op = get_reduce_op(nir_reduce_op);
- unsigned dst_size = intr->dest.ssa.bit_size;
+ unsigned dst_size = intr->def.bit_size;
unsigned flags = (ir3_bitsize(ctx, dst_size) == 16) ? IR3_REG_HALF : 0;
/* Note: the shared reg is initialized to the identity, so we need it to
int idx;
if (info->has_dest) {
- dst = ir3_get_def(ctx, &intr->dest.ssa, dest_components);
+ dst = ir3_get_def(ctx, &intr->def, dest_components);
} else {
dst = NULL;
}
for (int i = 0; i < dest_components; i++) {
dst[i] = create_uniform_typed(
b, idx + i,
- intr->dest.ssa.bit_size == 16 ? TYPE_F16 : TYPE_F32);
+ intr->def.bit_size == 16 ? TYPE_F16 : TYPE_F32);
}
} else {
src = ir3_get_src(ctx, &intr->src[0]);
for (int i = 0; i < dest_components; i++) {
dst[i] = create_uniform_indirect(
b, idx + i,
- intr->dest.ssa.bit_size == 16 ? TYPE_F16 : TYPE_F32,
+ intr->def.bit_size == 16 ? TYPE_F16 : TYPE_F32,
ir3_get_addr0(ctx, src[0], 1));
}
/* NOTE: if relative addressing is used, we set
case nir_intrinsic_ballot: {
struct ir3_instruction *ballot;
- unsigned components = intr->dest.ssa.num_components;
+ unsigned components = intr->def.num_components;
if (nir_src_is_const(intr->src[0]) && nir_src_as_bool(intr->src[0])) {
/* ballot(true) is just MOVMSK */
ballot = ir3_MOVMSK(ctx->block, components);
struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
struct ir3_instruction *idx = ir3_get_src(ctx, &intr->src[1])[0];
- type_t dst_type = type_uint_size(intr->dest.ssa.bit_size);
+ type_t dst_type = type_uint_size(intr->def.bit_size);
if (dst_type != TYPE_U32)
idx = ir3_COV(ctx->block, idx, TYPE_U32, dst_type);
case nir_intrinsic_quad_swap_horizontal: {
struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
dst[0] = ir3_QUAD_SHUFFLE_HORIZ(ctx->block, src, 0);
- dst[0]->cat5.type = type_uint_size(intr->dest.ssa.bit_size);
+ dst[0]->cat5.type = type_uint_size(intr->def.bit_size);
break;
}
case nir_intrinsic_quad_swap_vertical: {
struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
dst[0] = ir3_QUAD_SHUFFLE_VERT(ctx->block, src, 0);
- dst[0]->cat5.type = type_uint_size(intr->dest.ssa.bit_size);
+ dst[0]->cat5.type = type_uint_size(intr->def.bit_size);
break;
}
case nir_intrinsic_quad_swap_diagonal: {
struct ir3_instruction *src = ir3_get_src(ctx, &intr->src[0])[0];
dst[0] = ir3_QUAD_SHUFFLE_DIAG(ctx->block, src, 0);
- dst[0]->cat5.type = type_uint_size(intr->dest.ssa.bit_size);
+ dst[0]->cat5.type = type_uint_size(intr->def.bit_size);
break;
}
}
if (info->has_dest)
- ir3_put_def(ctx, &intr->dest.ssa);
+ ir3_put_def(ctx, &intr->def);
}
static void
type_t type;
opc_t opc = 0;
- ncomp = tex->dest.ssa.num_components;
+ ncomp = tex->def.num_components;
coord = off = ddx = ddy = NULL;
lod = proj = compare = sample_index = NULL;
- dst = ir3_get_def(ctx, &tex->dest.ssa, ncomp);
+ dst = ir3_get_def(ctx, &tex->def, ncomp);
for (unsigned i = 0; i < tex->num_srcs; i++) {
switch (tex->src[i].src_type) {
type_float(type) ? fui(swizzle - 4) : (swizzle - 4));
for (int i = 0; i < 4; i++)
dst[i] = imm;
- ir3_put_def(ctx, &tex->dest.ssa);
+ ir3_put_def(ctx, &tex->def);
return;
}
opc = OPC_GATHER4R + swizzle;
/* GETLOD returns results in 4.8 fixed point */
if (opc == OPC_GETLOD) {
- bool half = tex->dest.ssa.bit_size == 16;
+ bool half = tex->def.bit_size == 16;
struct ir3_instruction *factor =
half ? create_immed_typed(b, _mesa_float_to_half(1.0 / 256), TYPE_F16)
: create_immed(b, fui(1.0 / 256));
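/* 4.8 fixed point has 8 fractional bits, so scaling by 1.0/256 (2^-8) recovers the float value, e.g. raw 0x180 -> 384/256 = 1.5. */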
}
}
- ir3_put_def(ctx, &tex->dest.ssa);
+ ir3_put_def(ctx, &tex->def);
}
static void
type_t dst_type = get_tex_dest_type(tex);
struct tex_src_info info = get_tex_samp_tex_src(ctx, tex);
- dst = ir3_get_def(ctx, &tex->dest.ssa, 1);
+ dst = ir3_get_def(ctx, &tex->def, 1);
sam = emit_sam(ctx, OPC_GETINFO, info, dst_type, 1 << idx, NULL, NULL);
if (ctx->compiler->levels_add_one)
dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);
- ir3_put_def(ctx, &tex->dest.ssa);
+ ir3_put_def(ctx, &tex->def);
}
static void
if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
coords = 2;
- dst = ir3_get_def(ctx, &tex->dest.ssa, 4);
+ dst = ir3_get_def(ctx, &tex->def, 4);
int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
compile_assert(ctx, lod_idx >= 0);
}
}
- ir3_put_def(ctx, &tex->dest.ssa);
+ ir3_put_def(ctx, &tex->def);
}
/* phi instructions are left partially constructed. We don't resolve
struct ir3_instruction *phi, **dst;
/* NOTE: phi's should be lowered to scalar at this point */
- compile_assert(ctx, nphi->dest.ssa.num_components == 1);
+ compile_assert(ctx, nphi->def.num_components == 1);
- dst = ir3_get_def(ctx, &nphi->dest.ssa, 1);
+ dst = ir3_get_def(ctx, &nphi->def, 1);
phi = ir3_instr_create(ctx->block, OPC_META_PHI, 1,
exec_list_length(&nphi->srcs));
dst[0] = phi;
- ir3_put_def(ctx, &nphi->dest.ssa);
+ ir3_put_def(ctx, &nphi->def);
}
static struct ir3_block *get_block(struct ir3_context *ctx,
MAX2(1, nir_intrinsic_num_array_elems(decl));
compile_assert(ctx, arr->length > 0);
- arr->r = &decl->dest.ssa;
+ arr->r = &decl->def;
arr->half = ir3_bitsize(ctx, nir_intrinsic_bit_size(decl)) <= 16;
list_addtail(&arr->node, &ctx->ir->array_list);
}
ir3_get_type_for_image_intrinsic(const nir_intrinsic_instr *instr)
{
const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
- int bit_size = info->has_dest ? instr->dest.ssa.bit_size : nir_src_bit_size(instr->src[3]);
+ int bit_size = info->has_dest ? instr->def.bit_size : nir_src_bit_size(instr->src[3]);
nir_alu_type type = nir_type_uint;
switch (instr->intrinsic) {
{
uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- return nir_ishl_imm(b, &intr->dest.ssa, ssbo_size_to_bytes_shift);
+ return nir_ishl_imm(b, &intr->def, ssbo_size_to_bytes_shift);
}
static bool
}
nir_def *uniform =
- nir_load_uniform(b, instr->num_components, instr->dest.ssa.bit_size,
+ nir_load_uniform(b, instr->num_components, instr->def.bit_size,
uniform_offset, .base = const_offset);
- nir_def_rewrite_uses(&instr->dest.ssa, uniform);
+ nir_def_rewrite_uses(&instr->def, uniform);
nir_instr_remove(&instr->instr);
}
unsigned num_components = instr->num_components;
- if (instr->dest.ssa.bit_size == 16) {
+ if (instr->def.bit_size == 16) {
/* We can't do 16b loads -- either from LDC (32-bit only in any of our
* traces, and disasm that doesn't look like it really supports it) or
* from the constant file (where CONSTANT_DEMOTION_ENABLE means we get
.align_offset = nir_intrinsic_align_offset(instr),
.range_base = base, .range = nir_intrinsic_range(instr));
- if (instr->dest.ssa.bit_size == 16) {
+ if (instr->def.bit_size == 16) {
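/* The load itself was done at 32 bits; bitcast the result back to 16-bit lanes and trim to the requested component count. */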
result = nir_bitcast_vector(b, result, 16);
result = nir_trim_vector(b, result, instr->num_components);
}
if (nir_intrinsic_dest_components(intr) == 0)
return false;
- return intr->dest.ssa.bit_size == 64;
+ return intr->def.bit_size == 64;
}
static nir_def *
unsigned num_comp = nir_intrinsic_dest_components(intr);
- nir_def *def = &intr->dest.ssa;
+ nir_def *def = &intr->def;
def->bit_size = 32;
/* load_kernel_input is handled specially, lowering to two 32b inputs:
load->num_components = 2;
load->src[offset_src_idx] = nir_src_for_ssa(off);
- nir_def_init(&load->instr, &load->dest.ssa, 2, 32);
+ nir_def_init(&load->instr, &load->def, 2, 32);
nir_builder_instr_insert(b, &load->instr);
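/* Component 0 of the 2x32 load is the low half of the 64-bit value, e.g. lo=0x89abcdef, hi=0x01234567 -> 0x0123456789abcdef. */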
- components[i] = nir_pack_64_2x32(b, &load->dest.ssa);
+ components[i] = nir_pack_64_2x32(b, &load->def);
off = nir_iadd_imm(b, off, 8);
}
if (intr->intrinsic == nir_intrinsic_global_atomic) {
return nir_global_atomic_ir3(
- b, intr->dest.ssa.bit_size, addr,
+ b, intr->def.bit_size, addr,
nir_ssa_for_src(b, intr->src[1], 1),
.atomic_op = nir_intrinsic_atomic_op(intr));
} else if (intr->intrinsic == nir_intrinsic_global_atomic_swap) {
return nir_global_atomic_swap_ir3(
- b, intr->dest.ssa.bit_size, addr,
+ b, intr->def.bit_size, addr,
nir_ssa_for_src(b, intr->src[1], 1),
nir_ssa_for_src(b, intr->src[2], 1),
.atomic_op = nir_intrinsic_atomic_op(intr));
for (unsigned off = 0; off < num_comp;) {
unsigned c = MIN2(num_comp - off, 4);
nir_def *val = nir_load_global_ir3(
- b, c, intr->dest.ssa.bit_size,
+ b, c, intr->def.bit_size,
addr, nir_imm_int(b, off));
for (unsigned i = 0; i < c; i++) {
components[off++] = nir_channel(b, val, i);
nir_def *descriptor = intrinsic->src[0].ssa;
nir_def *offset = intrinsic->src[1].ssa;
nir_def *new_offset = intrinsic->src[2].ssa;
- unsigned comp_size = intrinsic->dest.ssa.bit_size / 8;
- for (unsigned i = 0; i < intrinsic->dest.ssa.num_components; i++) {
+ unsigned comp_size = intrinsic->def.bit_size / 8;
+ for (unsigned i = 0; i < intrinsic->def.num_components; i++) {
results[i] =
- nir_load_ssbo_ir3(b, 1, intrinsic->dest.ssa.bit_size, descriptor,
+ nir_load_ssbo_ir3(b, 1, intrinsic->def.bit_size, descriptor,
nir_iadd_imm(b, offset, i * comp_size),
nir_iadd_imm(b, new_offset, i),
.access = nir_intrinsic_access(intrinsic),
.align_offset = nir_intrinsic_align_offset(intrinsic));
}
- nir_def *result = nir_vec(b, results, intrinsic->dest.ssa.num_components);
+ nir_def *result = nir_vec(b, results, intrinsic->def.num_components);
- nir_def_rewrite_uses(&intrinsic->dest.ssa, result);
+ nir_def_rewrite_uses(&intrinsic->def, result);
nir_instr_remove(&intrinsic->instr);
}
nir_def *new_dest = NULL;
/* for 16-bit ssbo access, offset is in 16-bit words instead of dwords */
- if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
+ if ((has_dest && intrinsic->def.bit_size == 16) ||
(!has_dest && intrinsic->src[0].ssa->bit_size == 16))
shift = 1;
*target_src = nir_src_for_ssa(offset);
if (has_dest) {
- nir_def *dest = &intrinsic->dest.ssa;
- nir_def_init(&new_intrinsic->instr, &new_intrinsic->dest.ssa,
+ nir_def *dest = &intrinsic->def;
+ nir_def_init(&new_intrinsic->instr, &new_intrinsic->def,
dest->num_components, dest->bit_size);
- new_dest = &new_intrinsic->dest.ssa;
+ new_dest = &new_intrinsic->def;
}
for (unsigned i = 0; i < num_srcs; i++)
/* Replace the uses of the original destination by that
* of the new intrinsic.
*/
- nir_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
+ nir_def_rewrite_uses(&intrinsic->def, new_dest);
}
/* Finally remove the original intrinsic. */
.num_slots = 1,
};
nir_intrinsic_set_io_semantics(load_input, semantics);
- nir_def_init(&load_input->instr, &load_input->dest.ssa, 1, 32);
+ nir_def_init(&load_input->instr, &load_input->def, 1, 32);
nir_builder_instr_insert(b, &load_input->instr);
- nir_def_rewrite_uses(&intr->dest.ssa, &load_input->dest.ssa);
+ nir_def_rewrite_uses(&intr->def, &load_input->def);
return true;
}
new_intr->num_components = intr->num_components;
if (nir_intrinsic_infos[op].has_dest)
- nir_def_init(&new_intr->instr, &new_intr->dest.ssa,
- intr->num_components, intr->dest.ssa.bit_size);
+ nir_def_init(&new_intr->instr, &new_intr->def,
+ intr->num_components, intr->def.bit_size);
nir_builder_instr_insert(b, &new_intr->instr);
if (nir_intrinsic_infos[op].has_dest)
- nir_def_rewrite_uses(&intr->dest.ssa, &new_intr->dest.ssa);
+ nir_def_rewrite_uses(&intr->def, &new_intr->def);
nir_instr_remove(&intr->instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *iid = build_invocation_id(b, state);
- nir_def_rewrite_uses(&intr->dest.ssa, iid);
+ nir_def_rewrite_uses(&intr->def, iid);
nir_instr_remove(&intr->instr);
break;
}
*/
gl_varying_slot location = nir_intrinsic_io_semantics(intr).location;
if (is_tess_levels(location)) {
- assert(intr->dest.ssa.num_components == 1);
+ assert(intr->def.num_components == 1);
address = nir_load_tess_factor_base_ir3(b);
offset = build_tessfactor_base(
b, location, nir_intrinsic_component(intr), state);
*/
gl_varying_slot location = nir_intrinsic_io_semantics(intr).location;
if (is_tess_levels(location)) {
- assert(intr->dest.ssa.num_components == 1);
+ assert(intr->def.num_components == 1);
address = nir_load_tess_factor_base_ir3(b);
offset = build_tessfactor_base(
b, location, nir_intrinsic_component(intr), state);
return NIR_LOWER_INSTR_PROGRESS_REPLACE;
} else {
unsigned num_comp = nir_intrinsic_dest_components(intr);
- unsigned bit_size = intr->dest.ssa.bit_size;
+ unsigned bit_size = intr->def.bit_size;
nir_def *addr = nir_ssa_for_src(b, intr->src[0], 1);
nir_def *components[num_comp];
load->num_components = c;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, nir_intrinsic_align(intr), 0);
- nir_def_init(&load->instr, &load->dest.ssa, c, bit_size);
+ nir_def_init(&load->instr, &load->def, c, bit_size);
nir_builder_instr_insert(b, &load->instr);
addr = nir_iadd(b,
addr);
for (unsigned i = 0; i < c; i++) {
- components[off++] = nir_channel(b, &load->dest.ssa, i);
+ components[off++] = nir_channel(b, &load->def, i);
}
}
if (intrin->intrinsic != nir_intrinsic_load_preamble)
continue;
- nir_def *dest = &intrin->dest.ssa;
+ nir_def *dest = &intrin->def;
unsigned offset = preamble_base + nir_intrinsic_base(intrin);
b->cursor = nir_before_instr(instr);
nir_load_var(b, in_coords));
tex->coord_components = coord_components;
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- nir_store_var(b, out_color, &tex->dest.ssa, 0xf);
+ nir_store_var(b, out_color, &tex->def, 0xf);
return b->shader;
}
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_ms_index,
nir_load_sample_id(b));
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- nir_store_var(b, out_color, &tex->dest.ssa, 0xf);
+ nir_store_var(b, out_color, &tex->def, 0xf);
return b->shader;
}
nir_def *load =
nir_load_uniform(b, instr->num_components,
- instr->dest.ssa.bit_size,
+ instr->def.bit_size,
nir_ushr_imm(b, instr->src[0].ssa, 2),
.base = base);
- nir_def_rewrite_uses(&instr->dest.ssa, load);
+ nir_def_rewrite_uses(&instr->def, load);
nir_instr_remove(&instr->instr);
}
nir_ishl(b, vulkan_idx, shift)),
shift);
- nir_def_rewrite_uses(&instr->dest.ssa, def);
+ nir_def_rewrite_uses(&instr->def, def);
nir_instr_remove(&instr->instr);
}
nir_ishl(b, delta, shift)),
shift);
- nir_def_rewrite_uses(&instr->dest.ssa, new_index);
+ nir_def_rewrite_uses(&instr->def, new_index);
nir_instr_remove(&instr->instr);
}
nir_vec3(b, nir_channel(b, old_index, 0),
nir_channel(b, old_index, 1),
nir_imm_int(b, 0));
- nir_def_rewrite_uses(&intrin->dest.ssa, new_index);
+ nir_def_rewrite_uses(&intrin->def, new_index);
nir_instr_remove(&intrin->instr);
}
if (dev->physical_device->info->a6xx.storage_16bit &&
intrin->intrinsic == nir_intrinsic_load_ssbo &&
(nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER) &&
- intrin->dest.ssa.bit_size > 16) {
+ intrin->def.bit_size > 16) {
descriptor_idx = nir_iadd_imm(b, descriptor_idx, 1);
}
}
if (info->has_dest) {
- nir_def_init(©->instr, ©->dest.ssa,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
- results[i] = ©->dest.ssa;
+ nir_def_init(©->instr, ©->def,
+ intrin->def.num_components,
+ intrin->def.bit_size);
+ results[i] = ©->def;
}
nir_builder_instr_insert(b, ©->instr);
}
nir_def *result =
- nir_undef(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+ nir_undef(b, intrin->def.num_components, intrin->def.bit_size);
for (int i = MAX_SETS; i >= 0; i--) {
nir_pop_if(b, NULL);
if (info->has_dest)
}
if (info->has_dest)
- nir_def_rewrite_uses(&intrin->dest.ssa, result);
+ nir_def_rewrite_uses(&intrin->def, result);
nir_instr_remove(&intrin->instr);
}
nir_def *result = nir_convert_ycbcr_to_rgb(builder,
ycbcr_sampler->ycbcr_model,
ycbcr_sampler->ycbcr_range,
- &tex->dest.ssa,
+ &tex->def,
bpcs);
- nir_def_rewrite_uses_after(&tex->dest.ssa, result,
+ nir_def_rewrite_uses_after(&tex->def, result,
result->parent_instr);
builder->cursor = nir_before_instr(&tex->instr);
/* Assume we're loading out-of-bounds from a 0-sized inline uniform
* filtered out below.
*/
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
nir_undef(b, intrin->num_components,
- intrin->dest.ssa.bit_size));
+ intrin->def.bit_size));
return true;
}
nir_def *base_addr =
nir_load_uniform(b, 2, 32, nir_imm_int(b, 0), .base = base);
val = nir_load_global_ir3(b, intrin->num_components,
- intrin->dest.ssa.bit_size,
+ intrin->def.bit_size,
base_addr, nir_ishr_imm(b, offset, 2));
} else {
val = nir_load_uniform(b, intrin->num_components,
- intrin->dest.ssa.bit_size,
+ intrin->def.bit_size,
nir_ishr_imm(b, offset, 2), .base = base);
}
- nir_def_rewrite_uses(&intrin->dest.ssa, val);
+ nir_def_rewrite_uses(&intrin->def, val);
nir_instr_remove(instr);
return true;
}
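(The shift by 2 above is the usual byte-to-dword conversion: these uniform
and global offsets are evidently counted in 32-bit words, so a byte offset
is divided by 4 before the load, i.e.

    nir_def *dword_off = nir_ishr_imm(b, byte_off, 2);   /* byte_off / 4 */

with byte_off an illustrative name.)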
var.data.driver_location = nir_intrinsic_base(instr);
var.data.location_frac = nir_intrinsic_component(instr);
- unsigned nc = instr->dest.ssa.num_components;
- unsigned bit_size = instr->dest.ssa.bit_size;
+ unsigned nc = instr->def.num_components;
+ unsigned bit_size = instr->def.bit_size;
nir_src offset = *nir_get_io_offset_src(instr);
bool indirect = !nir_src_is_const(offset);
LLVMValueRef val = bld_base->load_reg(bld_base, reg_bld, decl, base, indir_src, reg_storage);
- if (!is_aos(bld_base) && instr->dest.ssa.num_components > 1) {
- for (unsigned i = 0; i < instr->dest.ssa.num_components; i++)
+ if (!is_aos(bld_base) && instr->def.num_components > 1) {
+ for (unsigned i = 0; i < instr->def.num_components; i++)
result[i] = LLVMBuildExtractValue(builder, val, i, "");
} else {
result[0] = val;
LLVMValueRef indir_index;
LLVMValueRef indir_vertex_index = NULL;
unsigned vertex_index = 0;
- unsigned nc = instr->dest.ssa.num_components;
- unsigned bit_size = instr->dest.ssa.bit_size;
+ unsigned nc = instr->def.num_components;
+ unsigned bit_size = instr->def.bit_size;
if (var) {
bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
var->data.mode == nir_var_shader_in;
*/
if (var->data.compact && compact_array_index_oob(bld_base, var, const_index)) {
struct lp_build_context *undef_bld = get_int_bld(bld_base, true,
- instr->dest.ssa.bit_size);
- for (int i = 0; i < instr->dest.ssa.num_components; i++)
+ instr->def.bit_size);
+ for (int i = 0; i < instr->def.num_components; i++)
result[i] = LLVMGetUndef(undef_bld->vec_type);
return;
}
if (nir_src_num_components(instr->src[0]) == 1)
idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
- bld_base->load_ubo(bld_base, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size,
+ bld_base->load_ubo(bld_base, instr->def.num_components,
+ instr->def.bit_size,
offset_is_uniform, idx, offset, result);
}
LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
- bld_base->load_ubo(bld_base, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size,
+ bld_base->load_ubo(bld_base, instr->def.num_components,
+ instr->def.bit_size,
offset_is_uniform, idx, offset, result);
}
bool index_and_offset_are_uniform =
nir_src_is_always_uniform(instr->src[0]) &&
nir_src_is_always_uniform(instr->src[1]);
- bld_base->load_mem(bld_base, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size,
+ bld_base->load_mem(bld_base, instr->def.num_components,
+ instr->def.bit_size,
index_and_offset_are_uniform, false, idx, offset, result);
}
{
LLVMValueRef offset = get_src(bld_base, instr->src[0]);
bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
- bld_base->load_mem(bld_base, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size,
+ bld_base->load_mem(bld_base, instr->def.num_components,
+ instr->def.bit_size,
offset_is_uniform, false, NULL, offset, result);
}
LLVMValueRef offset = get_src(bld_base, instr->src[0]);
bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
- bld_base->load_kernel_arg(bld_base, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size,
+ bld_base->load_kernel_arg(bld_base, instr->def.num_components,
+ instr->def.bit_size,
nir_src_bit_size(instr->src[0]),
offset_is_uniform, offset, result);
}
{
LLVMValueRef addr = get_src(bld_base, instr->src[0]);
bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
- bld_base->load_global(bld_base, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size,
+ bld_base->load_global(bld_base, instr->def.num_components,
+ instr->def.bit_size,
nir_src_bit_size(instr->src[0]),
offset_is_uniform, addr, result);
}
struct gallivm_state *gallivm = bld_base->base.gallivm;
LLVMBuilderRef builder = gallivm->builder;
nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
- unsigned num_components = instr->dest.ssa.num_components;
+ unsigned num_components = instr->def.num_components;
nir_variable *var = nir_deref_instr_get_variable(deref);
unsigned const_index;
LLVMValueRef indir_index;
{
LLVMValueRef offset = get_src(bld_base, instr->src[0]);
- bld_base->load_scratch(bld_base, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size, offset, result);
+ bld_base->load_scratch(bld_base, instr->def.num_components,
+ instr->def.bit_size, offset, result);
}
{
LLVMValueRef offset = get_src(bld_base, instr->src[0]);
bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
- bld_base->load_mem(bld_base, instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size,
+ bld_base->load_mem(bld_base, instr->def.num_components,
+ instr->def.bit_size,
offset_is_uniform, true, NULL, offset, result);
}
break;
}
if (result[0]) {
- assign_ssa_dest(bld_base, &instr->dest.ssa, result);
+ assign_ssa_dest(bld_base, &instr->def, result);
}
}
params.resource = resource;
bld_base->tex_size(bld_base, ¶ms);
- assign_ssa_dest(bld_base, &instr->dest.ssa,
+ assign_ssa_dest(bld_base, &instr->def,
&sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}
params.sampler_resource = sampler_resource;
bld_base->tex(bld_base, ¶ms);
- if (instr->dest.ssa.bit_size != 32) {
- assert(instr->dest.ssa.bit_size == 16);
+ if (instr->def.bit_size != 32) {
+ assert(instr->def.bit_size == 16);
LLVMTypeRef vec_type = NULL;
bool is_float = false;
switch (nir_alu_type_get_base_type(instr->dest_type)) {
default:
unreachable("unexpected alu type");
}
- for (int i = 0; i < instr->dest.ssa.num_components; ++i) {
+ for (int i = 0; i < instr->def.num_components; ++i) {
if (is_float) {
texel[i] = lp_build_float_to_half(gallivm, texel[i]);
} else {
}
}
- assign_ssa_dest(bld_base, &instr->dest.ssa, texel);
+ assign_ssa_dest(bld_base, &instr->def, texel);
}
unreachable("Unhandled deref_instr deref type");
}
- assign_ssa(bld_base, instr->dest.ssa.index, result);
+ assign_ssa(bld_base, instr->def.index, result);
}
{
struct lp_build_nir_soa_context *bld = (struct lp_build_nir_soa_context *)bld_base;
struct gallivm_state *gallivm = bld_base->base.gallivm;
- struct lp_build_context *bld_broad = get_int_bld(bld_base, true, instr->dest.ssa.bit_size);
+ struct lp_build_context *bld_broad = get_int_bld(bld_base, true, instr->def.bit_size);
switch (instr->intrinsic) {
case nir_intrinsic_load_instance_id:
result[0] = lp_build_broadcast_scalar(&bld_base->uint_bld, bld->system_values.instance_id);
LLVMValueRef tmp[3];
for (unsigned i = 0; i < 3; i++) {
tmp[i] = bld->system_values.block_id[i];
- if (instr->dest.ssa.bit_size == 64)
+ if (instr->def.bit_size == 64)
tmp[i] = LLVMBuildZExt(gallivm->builder, tmp[i], bld_base->uint64_bld.elem_type, "");
result[i] = lp_build_broadcast_scalar(bld_broad, tmp[i]);
}
LLVMValueRef tmp[3];
for (unsigned i = 0; i < 3; i++) {
tmp[i] = bld->system_values.grid_size[i];
- if (instr->dest.ssa.bit_size == 64)
+ if (instr->def.bit_size == 64)
tmp[i] = LLVMBuildZExt(gallivm->builder, tmp[i], bld_base->uint64_bld.elem_type, "");
result[i] = lp_build_broadcast_scalar(bld_broad, tmp[i]);
}
tex->texture_index = state->stip_tex->data.binding;
tex->sampler_index = state->stip_tex->data.binding;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, texcoord);
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
switch (state->bool_type) {
case nir_type_bool1:
- condition = nir_fneu_imm(b, nir_channel(b, &tex->dest.ssa, 3), 0.0);
+ condition = nir_fneu_imm(b, nir_channel(b, &tex->def, 3), 0.0);
break;
case nir_type_bool32:
- condition = nir_fneu32(b, nir_channel(b, &tex->dest.ssa, 3),
- nir_imm_floatN_t(b, 0.0, tex->dest.ssa.bit_size));
+ condition = nir_fneu32(b, nir_channel(b, &tex->def, 3),
+ nir_imm_floatN_t(b, 0.0, tex->def.bit_size));
break;
default:
unreachable("Invalid Boolean type.");
nir_src *use = NULL;
nir_foreach_reg_load(src, reg_decl) {
nir_intrinsic_instr *load = nir_instr_as_intrinsic(src->parent_instr);
- nir_foreach_use_including_if(load_use, &load->dest.ssa) {
+ nir_foreach_use_including_if(load_use, &load->def) {
/* We can only have one use */
if (use != NULL)
return false;
nir_foreach_reg_decl_safe(nir_reg, nir_shader_get_entrypoint(c->s)) {
/* Permanently allocate all the array regs at the start. */
unsigned num_array_elems = nir_intrinsic_num_array_elems(nir_reg);
- unsigned index = nir_reg->dest.ssa.index;
+ unsigned index = nir_reg->def.index;
if (num_array_elems != 0) {
struct ureg_dst decl = ureg_DECL_array_temporary(c->ureg, num_array_elems, true);
unsigned num_array_elems = nir_intrinsic_num_array_elems(nir_reg);
unsigned num_components = nir_intrinsic_num_components(nir_reg);
unsigned bit_size = nir_intrinsic_bit_size(nir_reg);
- unsigned index = nir_reg->dest.ssa.index;
+ unsigned index = nir_reg->def.index;
/* We already handled arrays */
if (num_array_elems == 0) {
static void
ntt_emit_load_ubo(struct ntt_compile *c, nir_intrinsic_instr *instr)
{
- int bit_size = instr->dest.ssa.bit_size;
+ int bit_size = instr->def.bit_size;
assert(bit_size == 32 || instr->num_components <= 2);
struct ureg_src src = ureg_src_register(TGSI_FILE_CONSTANT, 0);
src = ntt_shift_by_frac(src, start_component,
instr->num_components * bit_size / 32);
- ntt_store(c, &instr->dest.ssa, src);
+ ntt_store(c, &instr->def, src);
} else {
/* PIPE_CAP_LOAD_CONSTBUF: Not necessarily vec4 aligned, emit a
* TGSI_OPCODE_LOAD instruction from the const file.
*/
struct ntt_insn *insn =
ntt_insn(c, TGSI_OPCODE_LOAD,
- ntt_get_dest(c, &instr->dest.ssa),
+ ntt_get_dest(c, &instr->def),
src, ntt_get_src(c, instr->src[1]),
ureg_src_undef(), ureg_src_undef());
insn->is_mem = true;
write_mask = ntt_64bit_write_mask(write_mask);
dst = ureg_writemask(dst, write_mask);
} else {
- dst = ntt_get_dest(c, &instr->dest.ssa);
+ dst = ntt_get_dest(c, &instr->def);
}
struct ntt_insn *insn = ntt_insn(c, opcode, dst, src[0], src[1], src[2], src[3]);
dst = ureg_dst(resource);
} else {
srcs[num_src++] = resource;
- dst = ntt_get_dest(c, &instr->dest.ssa);
+ dst = ntt_get_dest(c, &instr->def);
}
struct ureg_dst opcode_dst = dst;
unsigned base = nir_intrinsic_base(instr);
struct ureg_src input;
nir_io_semantics semantics = nir_intrinsic_io_semantics(instr);
- bool is_64 = instr->dest.ssa.bit_size == 64;
+ bool is_64 = instr->def.bit_size == 64;
if (c->s->info.stage == MESA_SHADER_VERTEX) {
input = ureg_DECL_vs_input(c->ureg, base);
switch (instr->intrinsic) {
case nir_intrinsic_load_input:
input = ntt_ureg_src_indirect(c, input, instr->src[0], 0);
- ntt_store(c, &instr->dest.ssa, input);
+ ntt_store(c, &instr->def, input);
break;
case nir_intrinsic_load_per_vertex_input:
input = ntt_ureg_src_indirect(c, input, instr->src[1], 0);
input = ntt_ureg_src_dimension_indirect(c, input, instr->src[0]);
- ntt_store(c, &instr->dest.ssa, input);
+ ntt_store(c, &instr->def, input);
break;
case nir_intrinsic_load_interpolated_input: {
/* For these, we know that the barycentric load matches the
* interpolation on the input declaration, so we can use it directly.
*/
- ntt_store(c, &instr->dest.ssa, input);
+ ntt_store(c, &instr->def, input);
break;
case nir_intrinsic_load_barycentric_centroid:
* input.
*/
if (c->centroid_inputs & (1ull << nir_intrinsic_base(instr))) {
- ntt_store(c, &instr->dest.ssa, input);
+ ntt_store(c, &instr->def, input);
} else {
- ntt_INTERP_CENTROID(c, ntt_get_dest(c, &instr->dest.ssa), input);
+ ntt_INTERP_CENTROID(c, ntt_get_dest(c, &instr->def), input);
}
break;
case nir_intrinsic_load_barycentric_at_sample:
/* We stored the sample in the fake "bary" dest. */
- ntt_INTERP_SAMPLE(c, ntt_get_dest(c, &instr->dest.ssa), input,
+ ntt_INTERP_SAMPLE(c, ntt_get_dest(c, &instr->def), input,
ntt_get_src(c, instr->src[0]));
break;
case nir_intrinsic_load_barycentric_at_offset:
/* We stored the offset in the fake "bary" dest. */
- ntt_INTERP_OFFSET(c, ntt_get_dest(c, &instr->dest.ssa), input,
+ ntt_INTERP_OFFSET(c, ntt_get_dest(c, &instr->def), input,
ntt_get_src(c, instr->src[0]));
break;
out = ntt_ureg_dst_indirect(c, out, instr->src[0]);
}
- struct ureg_dst dst = ntt_get_dest(c, &instr->dest.ssa);
+ struct ureg_dst dst = ntt_get_dest(c, &instr->def);
struct ureg_src out_src = ureg_src(out);
/* Don't swizzle unavailable channels of the output in the writemasked-out
* aren't defined, even if they aren't really read. (GLSL compile fails on
* gl_NumWorkGroups.w, for example).
*/
- uint32_t write_mask = BITSET_MASK(instr->dest.ssa.num_components);
+ uint32_t write_mask = BITSET_MASK(instr->def.num_components);
sv = ntt_swizzle_for_write_mask(sv, write_mask);
/* TGSI and NIR define these intrinsics as always loading ints, but they can
switch (instr->intrinsic) {
case nir_intrinsic_load_vertex_id:
case nir_intrinsic_load_instance_id:
- ntt_U2F(c, ntt_get_dest(c, &instr->dest.ssa), sv);
+ ntt_U2F(c, ntt_get_dest(c, &instr->def), sv);
return;
default:
}
}
- ntt_store(c, &instr->dest.ssa, sv);
+ ntt_store(c, &instr->def, sv);
}
static void
}
case nir_intrinsic_is_helper_invocation:
- ntt_READ_HELPER(c, ntt_get_dest(c, &instr->dest.ssa));
+ ntt_READ_HELPER(c, ntt_get_dest(c, &instr->def));
break;
case nir_intrinsic_vote_all:
- ntt_VOTE_ALL(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c,instr->src[0]));
+ ntt_VOTE_ALL(c, ntt_get_dest(c, &instr->def), ntt_get_src(c,instr->src[0]));
return;
case nir_intrinsic_vote_any:
- ntt_VOTE_ANY(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]));
+ ntt_VOTE_ANY(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0]));
return;
case nir_intrinsic_vote_ieq:
- ntt_VOTE_EQ(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]));
+ ntt_VOTE_EQ(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0]));
return;
case nir_intrinsic_ballot:
- ntt_BALLOT(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]));
+ ntt_BALLOT(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0]));
return;
case nir_intrinsic_read_first_invocation:
- ntt_READ_FIRST(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]));
+ ntt_READ_FIRST(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0]));
return;
case nir_intrinsic_read_invocation:
- ntt_READ_INVOC(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]), ntt_get_src(c, instr->src[1]));
+ ntt_READ_INVOC(c, ntt_get_dest(c, &instr->def), ntt_get_src(c, instr->src[0]), ntt_get_src(c, instr->src[1]));
return;
case nir_intrinsic_load_ssbo:
break;
case nir_intrinsic_load_barycentric_at_sample:
case nir_intrinsic_load_barycentric_at_offset:
- ntt_store(c, &instr->dest.ssa, ntt_get_src(c, instr->src[0]));
+ ntt_store(c, &instr->def, ntt_get_src(c, instr->src[0]));
break;
case nir_intrinsic_shader_clock:
- ntt_CLOCK(c, ntt_get_dest(c, &instr->dest.ssa));
+ ntt_CLOCK(c, ntt_get_dest(c, &instr->def));
break;
case nir_intrinsic_decl_reg:
static void
ntt_emit_texture(struct ntt_compile *c, nir_tex_instr *instr)
{
- struct ureg_dst dst = ntt_get_dest(c, &instr->dest.ssa);
+ struct ureg_dst dst = ntt_get_dest(c, &instr->def);
enum tgsi_texture_type target = tgsi_texture_type_from_sampler_dim(instr->sampler_dim, instr->is_array, instr->is_shadow);
unsigned tex_opcode;
bool has_dest = nir_intrinsic_infos[instr->intrinsic].has_dest;
if (has_dest) {
- if (instr->dest.ssa.bit_size != 64)
+ if (instr->def.bit_size != 64)
return false;
} else {
if (nir_src_bit_size(instr->src[0]) != 64)
first->num_components = 2;
second->num_components -= 2;
if (has_dest) {
- first->dest.ssa.num_components = 2;
- second->dest.ssa.num_components -= 2;
+ first->def.num_components = 2;
+ second->def.num_components -= 2;
}
nir_builder_instr_insert(b, &first->instr);
if (has_dest) {
/* Merge the two loads' results back into a vector. */
nir_scalar channels[4] = {
- nir_get_ssa_scalar(&first->dest.ssa, 0),
- nir_get_ssa_scalar(&first->dest.ssa, 1),
- nir_get_ssa_scalar(&second->dest.ssa, 0),
- nir_get_ssa_scalar(&second->dest.ssa, second->num_components > 1 ? 1 : 0),
+ nir_get_ssa_scalar(&first->def, 0),
+ nir_get_ssa_scalar(&first->def, 1),
+ nir_get_ssa_scalar(&second->def, 0),
+ nir_get_ssa_scalar(&second->def, second->num_components > 1 ? 1 : 0),
};
nir_def *new = nir_vec_scalars(b, channels, instr->num_components);
- nir_def_rewrite_uses(&instr->dest.ssa, new);
+ nir_def_rewrite_uses(&instr->def, new);
} else {
/* Split the src value across the two stores. */
b->cursor = nir_before_instr(&instr->instr);
{
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- nir_def *old_result = &intr->dest.ssa;
+ nir_def *old_result = &intr->def;
intr->intrinsic = nir_intrinsic_atomic_counter_post_dec;
return nir_iadd_imm(b, old_result, -1);
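(The rewrite above relies on the usual decrement identity: post_dec returns
the counter value before the decrement and pre_dec the value after it, so
pre_dec(ctr) == post_dec(ctr) - 1, hence the trailing nir_iadd_imm of -1.)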
assert(var && var->data.mode == nir_var_shader_in);
if (nir->info.stage == MESA_SHADER_FRAGMENT)
- gather_usage(deref, nir_def_components_read(&instr->dest.ssa),
+ gather_usage(deref, nir_def_components_read(&instr->def),
info->input_usage_mask);
switch (nir->info.stage) {
}
load->src[srcn++] = nir_src_for_ssa(offset);
- nir_def_init(&load->instr, &load->dest.ssa, 4, 32);
+ nir_def_init(&load->instr, &load->def, 4, 32);
nir_builder_instr_insert(b, &load->instr);
- src = nir_src_for_ssa(&load->dest.ssa);
+ src = nir_src_for_ssa(&load->def);
break;
}
unsigned src_number = 0;
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &deref->dest.ssa);
+ &deref->def);
src_number++;
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &deref->dest.ssa);
+ &deref->def);
src_number++;
instr->src[src_number] =
assert(src_number == num_srcs);
assert(src_number == instr->num_srcs);
- nir_def_init(&instr->instr, &instr->dest.ssa,
+ nir_def_init(&instr->instr, &instr->def,
nir_tex_instr_dest_size(instr), 32);
nir_builder_instr_insert(b, &instr->instr);
- return nir_pad_vector_imm_int(b, &instr->dest.ssa, 0, 4);
+ return nir_pad_vector_imm_int(b, &instr->def, 0, 4);
}
/* TGSI_OPCODE_TXQ is actually two distinct operations:
nir_deref_instr *deref = nir_build_deref_var(b, var);
txs->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &deref->dest.ssa);
+ &deref->def);
qlv->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &deref->dest.ssa);
+ &deref->def);
/* lod: */
txs->src[1] = nir_tex_src_for_ssa(nir_tex_src_lod,
ttn_channel(b, src[0], X));
- nir_def_init(&txs->instr, &txs->dest.ssa, nir_tex_instr_dest_size(txs), 32);
+ nir_def_init(&txs->instr, &txs->def, nir_tex_instr_dest_size(txs), 32);
nir_builder_instr_insert(b, &txs->instr);
- nir_def_init(&qlv->instr, &qlv->dest.ssa, 1, 32);
+ nir_def_init(&qlv->instr, &qlv->def, 1, 32);
nir_builder_instr_insert(b, &qlv->instr);
return nir_vector_insert_imm(b,
- nir_pad_vector_imm_int(b, &txs->dest.ssa, 0, 4),
- &qlv->dest.ssa, 3);
+ nir_pad_vector_imm_int(b, &txs->def, 0, 4),
+ &qlv->def, 3);
}
static enum glsl_base_type
nir_intrinsic_set_access(instr, image_deref->var->data.access);
- instr->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
+ instr->src[0] = nir_src_for_ssa(&image_deref->def);
instr->src[1] = nir_src_for_ssa(src[addr_src_index]);
/* Set the sample argument, which is undefined for single-sample images. */
if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_LOAD) {
- nir_def_init(&instr->instr, &instr->dest.ssa, instr->num_components, 32);
+ nir_def_init(&instr->instr, &instr->def, instr->num_components, 32);
nir_builder_instr_insert(b, &instr->instr);
- return nir_pad_vector_imm_int(b, &instr->dest.ssa, 0, 4);
+ return nir_pad_vector_imm_int(b, &instr->def, 0, 4);
} else {
nir_builder_instr_insert(b, &instr->instr);
return NULL;
if (instr->type == nir_instr_type_intrinsic) {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- old = &intr->dest.ssa;
+ old = &intr->def;
replacement = lower_intrinsic(b, intr);
} else if (instr->type == nir_instr_type_tex) {
nir_tex_instr *tex = nir_instr_as_tex(instr);
- old = &tex->dest.ssa;
+ old = &tex->def;
if (tex->op != nir_texop_lod_bias_agx)
return false;
if (intr->intrinsic != nir_intrinsic_load_preamble)
return false;
- assert(intr->dest.ssa.bit_size >= 16 && "no 8-bit sysvals");
- unsigned dim = intr->dest.ssa.num_components;
- unsigned element_size = intr->dest.ssa.bit_size / 16;
+ assert(intr->def.bit_size >= 16 && "no 8-bit sysvals");
+ unsigned dim = intr->def.num_components;
+ unsigned element_size = intr->def.bit_size / 16;
unsigned length = dim * element_size;
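(Worked example for the sizing above: a 32-bit vec3 sysval gives dim = 3 and
element_size = 32 / 16 = 2, so it occupies length = 6 of the 16-bit preamble
slots.)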
struct state *state = data;
/* XXX: Rename to "xfb index" to avoid the clash */
case nir_intrinsic_load_vertex_id_zero_base: {
nir_def *id = nir_load_vertex_id(b);
- nir_def_rewrite_uses(&intr->dest.ssa, id);
+ nir_def_rewrite_uses(&intr->def, id);
return true;
}
id = nir_u2uN(b, index, id->bit_size);
}
- nir_def_rewrite_uses(&intr->dest.ssa, id);
+ nir_def_rewrite_uses(&intr->def, id);
return true;
}
case nir_intrinsic_load_base_workgroup_id: {
/* GL doesn't have a concept of base workgroup */
b.cursor = nir_instr_remove(&intrin->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
nir_imm_zero(&b, 3, 32));
continue;
}
nir_intrinsic_set_align(load_ubo, 4, 0);
nir_intrinsic_set_range_base(load_ubo, 0);
nir_intrinsic_set_range(load_ubo, ~0);
- nir_def_init(&load_ubo->instr, &load_ubo->dest.ssa,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ nir_def_init(&load_ubo->instr, &load_ubo->def,
+ intrin->def.num_components,
+ intrin->def.bit_size);
nir_builder_instr_insert(&b, &load_ubo->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa,
- &load_ubo->dest.ssa);
+ nir_def_rewrite_uses(&intrin->def,
+ &load_ubo->def);
nir_instr_remove(&intrin->instr);
continue;
}
nir_intrinsic_set_align(load, 4, 0);
nir_intrinsic_set_range_base(load, 0);
nir_intrinsic_set_range(load, ~0);
- nir_def_init(&load->instr, &load->dest.ssa, comps, 32);
+ nir_def_init(&load->instr, &load->def, comps, 32);
nir_builder_instr_insert(&b, &load->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa,
- &load->dest.ssa);
+ nir_def_rewrite_uses(&intrin->def,
+ &load->def);
nir_instr_remove(instr);
}
}
enum gfx6_gather_sampler_wa wa = key->gfx6_gather_wa[tex->texture_index];
int width = (wa & WA_8BIT) ? 8 : 16;
- nir_def *val = nir_fmul_imm(&b, &tex->dest.ssa, (1 << width) - 1);
+ nir_def *val = nir_fmul_imm(&b, &tex->def, (1 << width) - 1);
val = nir_f2u32(&b, val);
if (wa & WA_SIGN) {
val = nir_ishl_imm(&b, val, 32 - width);
val = nir_ishr_imm(&b, val, 32 - width);
}
- nir_def_rewrite_uses_after(&tex->dest.ssa, val, val->parent_instr);
+ nir_def_rewrite_uses_after(&tex->def, val, val->parent_instr);
}
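(The WA_SIGN shift pair above is the classic shift-based sign extension; in
plain C, assuming an arithmetic right shift of signed values:

    /* Replicate bit (width - 1) of val across the high bits. */
    static int32_t
    sign_extend(uint32_t val, unsigned width)
    {
       return (int32_t)(val << (32 - width)) >> (32 - width);
    }
)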
tex->texture_index =
sampler->data.binding = 0;
sampler->data.explicit_binding = true;
- nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->dest.ssa;
+ nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->def;
nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
glsl_vec4_type(), "pos");
txs->is_array = false;
txs->dest_type = nir_type_int;
- nir_def_init(&txs->instr, &txs->dest.ssa, 2, 32);
+ nir_def_init(&txs->instr, &txs->def, 2, 32);
nir_builder_instr_insert(&b, &txs->instr);
pos_src = nir_vec4(&b,
/*Height - pos_dest.y - 1*/
nir_fsub(&b,
nir_fsub(&b,
- nir_channel(&b, nir_i2f32(&b, &txs->dest.ssa), 1),
+ nir_channel(&b, nir_i2f32(&b, &txs->def), 1),
nir_channel(&b, pos, 1)),
nir_imm_float(&b, 1.0)),
nir_channel(&b, pos, 2),
tex->is_array = false;
tex->coord_components = 2;
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
- nir_store_var(&b, stencil_out, nir_channel(&b, &tex->dest.ssa, 1), 0x1);
+ nir_store_var(&b, stencil_out, nir_channel(&b, &tex->def, 1), 0x1);
struct pipe_shader_state state = {};
state.type = PIPE_SHADER_IR_NIR;
const struct util_format_description *from_desc, *to_desc;
if (intr->intrinsic == nir_intrinsic_image_deref_load) {
b->cursor = nir_after_instr(instr);
- value = &intr->dest.ssa;
+ value = &intr->def;
from_desc = util_format_description(emulation_format);
to_desc = util_format_description(real_format);
} else {
pos = nir_vector_insert_imm(b, pos, depth, 2);
- nir_def_rewrite_uses_after(&intr->dest.ssa, pos,
+ nir_def_rewrite_uses_after(&intr->def, pos,
pos->parent_instr);
}
return false;
}
- nir_def_rewrite_uses(&intr->dest.ssa, result);
+ nir_def_rewrite_uses(&intr->def, result);
nir_instr_remove(instr);
return true;
}
unsigned channel = intr->intrinsic == nir_intrinsic_load_first_vertex ? 0 :
intr->intrinsic == nir_intrinsic_load_base_instance ? 1 :
intr->intrinsic == nir_intrinsic_load_draw_id ? 2 : 3;
- nir_def_rewrite_uses(&intr->dest.ssa, nir_channel(b, load, channel));
+ nir_def_rewrite_uses(&intr->def, nir_channel(b, load, channel));
nir_instr_remove(instr);
return true;
nir_def *load = b->shader->info.stage == MESA_SHADER_TESS_CTRL ?
d3d12_get_state_var(b, D3D12_STATE_VAR_PATCH_VERTICES_IN, "d3d12_FirstVertex", glsl_uint_type(), _state) :
nir_imm_int(b, b->shader->info.tess.tcs_vertices_out);
- nir_def_rewrite_uses(&intr->dest.ssa, load);
+ nir_def_rewrite_uses(&intr->def, load);
nir_instr_remove(instr);
return true;
}
nir_def *ubo_idx = nir_imm_int(b, binding);
nir_def *ubo_offset = nir_imm_int(b, get_state_var_offset(shader, var) * 4);
nir_def *load =
- nir_load_ubo(b, instr->num_components, instr->dest.ssa.bit_size,
+ nir_load_ubo(b, instr->num_components, instr->def.bit_size,
ubo_idx, ubo_offset,
.align_mul = 16,
.align_offset = 0,
.range = ~0,
);
- nir_def_rewrite_uses(&instr->dest.ssa, load);
+ nir_def_rewrite_uses(&instr->def, load);
/* Remove the old load_* instruction and any parent derefs */
nir_instr_remove(&instr->instr);
for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
/* If anyone is using this deref, leave it alone */
- if (!list_is_empty(&d->dest.ssa.uses))
+ if (!list_is_empty(&d->def.uses))
break;
nir_instr_remove(&d->instr);
first_channel += var_state->subvars[subvar].num_components;
unsigned new_write_mask = (orig_write_mask >> first_channel) & mask_num_channels;
- nir_build_store_deref(b, &new_path->dest.ssa, sub_value, new_write_mask, nir_intrinsic_access(intr));
+ nir_build_store_deref(b, &new_path->def, sub_value, new_write_mask, nir_intrinsic_access(intr));
}
nir_deref_path_finish(&path);
emit_tex(struct etna_compile *c, nir_tex_instr * tex)
{
unsigned dst_swiz;
- hw_dst dst = ra_def(c, &tex->dest.ssa, &dst_swiz);
+ hw_dst dst = ra_def(c, &tex->def, &dst_swiz);
nir_src *coord = NULL, *src1 = NULL, *src2 = NULL;
for (unsigned i = 0; i < tex->num_srcs; i++) {
break;
case nir_intrinsic_load_uniform: {
unsigned dst_swiz;
- struct etna_inst_dst dst = ra_def(c, &intr->dest.ssa, &dst_swiz);
+ struct etna_inst_dst dst = ra_def(c, &intr->def, &dst_swiz);
/* TODO: rework so extra MOV isn't required, load up to 4 addresses at once */
emit_inst(c, &(struct etna_inst) {
emit_inst(c, &(struct etna_inst) {
.opcode = INST_OPCODE_LOAD,
.type = INST_TYPE_U32,
- .dst = ra_def(c, &intr->dest.ssa, &dst_swiz),
+ .dst = ra_def(c, &intr->def, &dst_swiz),
.src[0] = get_src(c, &intr->src[1]),
.src[1] = const_src(c, &CONST_VAL(ETNA_UNIFORM_UBO0_ADDR + idx, 0), 1),
});
break;
case nir_instr_type_intrinsic:
if (nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_input) {
- need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->dest.ssa);
+ need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->def);
break;
}
FALLTHROUGH;
base += off[0].u32;
nir_const_value value[4];
- for (unsigned i = 0; i < intr->dest.ssa.num_components; i++)
+ for (unsigned i = 0; i < intr->def.num_components; i++)
value[i] = UNIFORM(base * 4 + i);
b.cursor = nir_after_instr(instr);
- nir_def *def = nir_build_imm(&b, intr->dest.ssa.num_components, 32, value);
+ nir_def *def = nir_build_imm(&b, intr->def.num_components, 32, value);
- nir_def_rewrite_uses(&intr->dest.ssa, def);
+ nir_def_rewrite_uses(&intr->def, def);
nir_instr_remove(instr);
} break;
default:
def = &nir_instr_as_alu(instr)->def;
break;
case nir_instr_type_tex:
- def = &nir_instr_as_tex(instr)->dest.ssa;
+ def = &nir_instr_as_tex(instr)->def;
break;
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
intr->intrinsic == nir_intrinsic_load_instance_id ||
intr->intrinsic == nir_intrinsic_load_texture_scale ||
intr->intrinsic == nir_intrinsic_load_texture_size_etna)
- def = &intr->dest.ssa;
+ def = &intr->def;
} break;
case nir_instr_type_deref:
return NULL;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
/* can't have dst swizzle or sparse writemask on UBO loads */
if (intr->intrinsic == nir_intrinsic_load_ubo) {
- assert(def == &intr->dest.ssa);
+ assert(def == &intr->def);
if (def->num_components == 2)
comp = REG_CLASS_VIRT_VEC2C;
if (def->num_components == 3)
/* HW front_face is 0.0/1.0, not 0/~0u for bool
* lower with a comparison with 0
*/
- intr->dest.ssa.bit_size = 32;
+ intr->def.bit_size = 32;
b.cursor = nir_after_instr(instr);
- nir_def *ssa = nir_ine_imm(&b, &intr->dest.ssa, 0);
+ nir_def *ssa = nir_ine_imm(&b, &intr->def, 0);
if (v->key.front_ccw)
nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;
- nir_def_rewrite_uses_after(&intr->dest.ssa,
+ nir_def_rewrite_uses_after(&intr->def,
ssa,
ssa->parent_instr);
} break;
nir_def *idx = nir_imm_int(b, tex->texture_index);
nir_def *sizes = nir_load_texture_size_etna(b, 32, idx);
- nir_def_rewrite_uses(&tex->dest.ssa, sizes);
+ nir_def_rewrite_uses(&tex->def, sizes);
return true;
}
nir_ushr_imm(b, nir_isub(b, ubo_offset, range_base), 4);
nir_def *uniform =
- nir_load_uniform(b, intr->num_components, intr->dest.ssa.bit_size, uniform_offset,
+ nir_load_uniform(b, intr->num_components, intr->def.bit_size, uniform_offset,
.base = nir_intrinsic_range_base(intr) / 16,
.range = nir_intrinsic_range(intr) / 16,
.dest_type = nir_type_float32);
- nir_def_rewrite_uses(&intr->dest.ssa, uniform);
+ nir_def_rewrite_uses(&intr->def, uniform);
return uniform;
}
break;
case nir_intrinsic_load_input:
- load_input(ctx, &intr->dest.ssa, nir_intrinsic_base(intr));
+ load_input(ctx, &intr->def, nir_intrinsic_base(intr));
break;
case nir_intrinsic_store_output:
store_output(ctx, intr->src[0], output_slot(ctx, intr),
assert(const_offset); /* TODO can be false in ES2? */
idx = nir_intrinsic_base(intr);
idx += (uint32_t)const_offset[0].f32;
- instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->dest.ssa);
+ instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->def);
instr->src[0] = ir2_src(idx, 0, IR2_SRC_CONST);
break;
case nir_intrinsic_discard:
struct ir2_instr *tmp = instr_create_alu(ctx, nir_op_frcp, 1);
tmp->src[0] = ir2_src(ctx->f->inputs_count, 0, IR2_SRC_INPUT);
- instr = instr_create_alu_dest(ctx, nir_op_sge, &intr->dest.ssa);
+ instr = instr_create_alu_dest(ctx, nir_op_sge, &intr->def);
instr->src[0] = ir2_src(tmp->idx, 0, IR2_SRC_SSA);
instr->src[1] = ir2_zero(ctx);
break;
/* param.zw (note: abs might be needed like fragcoord in param.xy?) */
ctx->so->need_param = true;
- instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->dest.ssa);
+ instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->def);
instr->src[0] =
ir2_src(ctx->f->inputs_count, IR2_SWIZZLE_ZW, IR2_SRC_INPUT);
break;
/* TODO: lod/bias transformed by src_coord.z ? */
}
- instr = ir2_instr_create_fetch(ctx, &tex->dest.ssa, TEX_FETCH);
+ instr = ir2_instr_create_fetch(ctx, &tex->def, TEX_FETCH);
instr->src[0] = src_coord;
instr->src[0].swizzle = is_cube ? IR2_SWIZZLE_YXW : 0;
instr->fetch.tex.is_cube = is_cube;
nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->nir);
nir_foreach_reg_decl (decl, fxn) {
- assert(decl->dest.ssa.index < ARRAY_SIZE(ctx->reg));
- ctx->reg[decl->dest.ssa.index].ncomp = nir_intrinsic_num_components(decl);
- ctx->reg_count = MAX2(ctx->reg_count, decl->dest.ssa.index + 1);
+ assert(decl->def.index < ARRAY_SIZE(ctx->reg));
+ ctx->reg[decl->def.index].ncomp = nir_intrinsic_num_components(decl);
+ ctx->reg_count = MAX2(ctx->reg_count, decl->def.index + 1);
}
nir_metadata_require(fxn, nir_metadata_block_index);
case nir_intrinsic_load_base_workgroup_id: {
/* GL doesn't have a concept of base workgroup */
b.cursor = nir_instr_remove(&intrin->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
nir_imm_zero(&b, 3, 32));
continue;
}
case nir_intrinsic_load_constant: {
- unsigned load_size = intrin->dest.ssa.num_components *
- intrin->dest.ssa.bit_size / 8;
- unsigned load_align = intrin->dest.ssa.bit_size / 8;
+ unsigned load_size = intrin->def.num_components *
+ intrin->def.bit_size / 8;
+ unsigned load_align = intrin->def.bit_size / 8;
/* This one is special because it reads from the shader constant
* data and not cbuf0 which gallium uploads for us.
nir_def *data =
nir_load_global_constant(&b, nir_u2u64(&b, const_data_addr),
load_align,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ intrin->def.num_components,
+ intrin->def.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
data);
continue;
}
}
nir_def *load =
- nir_load_ubo(&b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size,
+ nir_load_ubo(&b, intrin->def.num_components, intrin->def.bit_size,
temp_ubo_name, offset,
.align_mul = 4,
.align_offset = 0,
.range_base = 0,
.range = ~0);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
load);
nir_instr_remove(instr);
}
case nir_intrinsic_decl_reg:
{
gpir_reg *reg = gpir_create_reg(block->comp);
- block->comp->reg_for_ssa[instr->dest.ssa.index] = reg;
+ block->comp->reg_for_ssa[instr->def.index] = reg;
return true;
}
case nir_intrinsic_load_reg:
{
gpir_node *node = gpir_node_find(block, &instr->src[0], 0);
assert(node);
- block->comp->node_for_ssa[instr->dest.ssa.index] = node;
+ block->comp->node_for_ssa[instr->def.index] = node;
return true;
}
case nir_intrinsic_store_reg:
return true;
}
case nir_intrinsic_load_input:
- return gpir_create_load(block, &instr->dest.ssa,
+ return gpir_create_load(block, &instr->def,
gpir_op_load_attribute,
nir_intrinsic_base(instr),
nir_intrinsic_component(instr)) != NULL;
int offset = nir_intrinsic_base(instr);
offset += (int)nir_src_as_float(instr->src[0]);
- return gpir_create_load(block, &instr->dest.ssa,
+ return gpir_create_load(block, &instr->def,
gpir_op_load_uniform,
offset / 4, offset % 4) != NULL;
}
case nir_intrinsic_load_viewport_scale:
- return gpir_create_vector_load(block, &instr->dest.ssa, GPIR_VECTOR_SSA_VIEWPORT_SCALE);
+ return gpir_create_vector_load(block, &instr->def, GPIR_VECTOR_SSA_VIEWPORT_SCALE);
case nir_intrinsic_load_viewport_offset:
- return gpir_create_vector_load(block, &instr->dest.ssa, GPIR_VECTOR_SSA_VIEWPORT_OFFSET);
+ return gpir_create_vector_load(block, &instr->def, GPIR_VECTOR_SSA_VIEWPORT_OFFSET);
case nir_intrinsic_store_output:
{
gpir_store_node *store = gpir_node_create(block, gpir_op_store_varying);
nir_intrinsic_instr *last_dupl = NULL;
nir_instr *last_parent_instr = NULL;
- nir_foreach_use_safe(use_src, &itr->dest.ssa) {
+ nir_foreach_use_safe(use_src, &itr->def) {
nir_intrinsic_instr *dupl;
if (last_parent_instr != use_src->parent_instr) {
memcpy(dupl->const_index, itr->const_index, sizeof(itr->const_index));
dupl->src[0].ssa = itr->src[0].ssa;
- nir_def_init(&dupl->instr, &dupl->dest.ssa, dupl->num_components,
- itr->dest.ssa.bit_size);
+ nir_def_init(&dupl->instr, &dupl->def, dupl->num_components,
+ itr->def.bit_size);
dupl->instr.pass_flags = 1;
nir_builder_instr_insert(b, &dupl->instr);
dupl = last_dupl;
}
- nir_instr_rewrite_src(use_src->parent_instr, use_src, nir_src_for_ssa(&dupl->dest.ssa));
+ nir_instr_rewrite_src(use_src->parent_instr, use_src, nir_src_for_ssa(&dupl->def));
last_parent_instr = use_src->parent_instr;
last_dupl = dupl;
}
last_dupl = NULL;
last_parent_instr = NULL;
- nir_foreach_if_use_safe(use_src, &itr->dest.ssa) {
+ nir_foreach_if_use_safe(use_src, &itr->def) {
nir_intrinsic_instr *dupl;
if (last_parent_instr != use_src->parent_instr) {
memcpy(dupl->const_index, itr->const_index, sizeof(itr->const_index));
dupl->src[0].ssa = itr->src[0].ssa;
- nir_def_init(&dupl->instr, &dupl->dest.ssa, dupl->num_components,
- itr->dest.ssa.bit_size);
+ nir_def_init(&dupl->instr, &dupl->def, dupl->num_components,
+ itr->def.bit_size);
dupl->instr.pass_flags = 1;
nir_builder_instr_insert(b, &dupl->instr);
dupl = last_dupl;
}
- nir_if_rewrite_condition(use_src->parent_if, nir_src_for_ssa(&dupl->dest.ssa));
+ nir_if_rewrite_condition(use_src->parent_if, nir_src_for_ssa(&dupl->def));
last_parent_instr = use_src->parent_instr;
last_dupl = dupl;
}
if (intrin->intrinsic != nir_intrinsic_load_input)
return NULL;
- if (intrin->dest.ssa.num_components != 4)
+ if (intrin->def.num_components != 4)
return NULL;
/* Coords must be in .xyz */
for (unsigned i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1,
- intr->dest.ssa.bit_size);
+ nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
+ intr->def.bit_size);
chan_intr->num_components = 1;
nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr) * 4 + i);
nir_builder_instr_insert(b, &chan_intr->instr);
- loads[i] = &chan_intr->dest.ssa;
+ loads[i] = &chan_intr->def;
}
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def_rewrite_uses(&intr->def,
nir_vec(b, loads, intr->num_components));
nir_instr_remove(&intr->instr);
}
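(The loop above is the standard scalarization recipe: one single-component
clone of the intrinsic per channel, with the base adjusted per component,
then a final nir_vec to rebuild the vector before rewriting uses.)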
nir_intrinsic_instr *new_intrin = nir_intrinsic_instr_create(
b->shader,
intrin->intrinsic);
- nir_def_init(&new_intrin->instr, &new_intrin->dest.ssa,
+ nir_def_init(&new_intrin->instr, &new_intrin->def,
alu->def.num_components, ssa->bit_size);
new_intrin->num_components = alu->def.num_components;
nir_intrinsic_set_base(new_intrin, nir_intrinsic_base(intrin));
nir_builder_instr_insert(b, &new_intrin->instr);
nir_def_rewrite_uses(&alu->def,
- &new_intrin->dest.ssa);
+ &new_intrin->def);
nir_instr_remove(&alu->instr);
return true;
}
nir_builder_instr_insert(b, &new_intrin->instr);
- return &new_intrin->dest.ssa;
+ return &new_intrin->def;
}
static bool
struct hash_table *visited_instrs = _mesa_pointer_hash_table_create(NULL);
- nir_foreach_use_safe(src, &intrin->dest.ssa) {
+ nir_foreach_use_safe(src, &intrin->def) {
struct hash_entry *entry =
_mesa_hash_table_search(visited_instrs, src->parent_instr);
if (entry && (src->parent_instr->type != nir_instr_type_phi)) {
nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(new));
_mesa_hash_table_insert(visited_instrs, src->parent_instr, new);
}
- nir_foreach_if_use_safe(src, &intrin->dest.ssa) {
+ nir_foreach_if_use_safe(src, &intrin->def) {
b->cursor = nir_before_src(src);
nir_if_rewrite_condition(src->parent_if,
nir_src_for_ssa(clone_intrinsic(b, intrin)));
/* Skip folded fabs/fneg since we do not have dead code elimination */
if ((instr->op == nir_op_fabs || instr->op == nir_op_fneg) &&
nir_legacy_float_mod_folds(instr)) {
- /* Add parent node as a the folded dest ssa node to keep
+ /* Add the parent node as the folded def node to keep
* the dependency chain */
nir_alu_src *ns = &instr->src[0];
ppir_node *parent = block->comp->var_nodes[ns->src.ssa->index];
return true;
case nir_intrinsic_load_reg: {
- nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
+ nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def);
lnode = ppir_node_create_dest(block, ppir_op_dummy, &legacy_dest, mask);
return true;
}
case nir_intrinsic_load_input: {
mask = u_bit_consecutive(0, instr->num_components);
- nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
+ nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def);
lnode = ppir_node_create_dest(block, ppir_op_load_varying, &legacy_dest, mask);
if (!lnode)
return false;
break;
}
- nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
+ nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def);
lnode = ppir_node_create_dest(block, op, &legacy_dest, mask);
if (!lnode)
return false;
case nir_intrinsic_load_uniform: {
mask = u_bit_consecutive(0, instr->num_components);
- nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
+ nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def);
lnode = ppir_node_create_dest(block, ppir_op_load_uniform, &legacy_dest, mask);
if (!lnode)
return false;
unsigned mask = 0;
mask = u_bit_consecutive(0, nir_tex_instr_dest_size(instr));
- nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
+ nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->def);
node = ppir_node_create_dest(block, ppir_op_load_texture, &legacy_dest, mask);
if (!node)
return false;
if (!r)
return false;
- r->index = decl->dest.ssa.index;
+ r->index = decl->def.index;
r->num_components = nir_intrinsic_num_components(decl);
r->is_head = false;
list_addtail(&r->list, &comp->reg_list);
if (instr->type == nir_instr_type_intrinsic) {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- old = &intr->dest.ssa;
+ old = &intr->def;
sysval = sysval_for_intrinsic(intr, &offset);
if (sysval == ~0)
return false;
} else if (instr->type == nir_instr_type_tex) {
nir_tex_instr *tex = nir_instr_as_tex(instr);
- old = &tex->dest.ssa;
+ old = &tex->def;
if (tex->op != nir_texop_txs)
return false;
GDSInstr::emit_atomic_op2(nir_intrinsic_instr *instr, Shader& shader)
{
auto& vf = shader.value_factory();
- bool read_result = !list_is_empty(&instr->dest.ssa.uses);
+ bool read_result = !list_is_empty(&instr->def.uses);
ESDOp op =
read_result ? get_opcode(instr->intrinsic) : get_opcode_wo(instr->intrinsic);
}
offset += nir_intrinsic_base(instr);
- auto dest = read_result ? vf.dest(instr->dest.ssa, 0, pin_free) : nullptr;
+ auto dest = read_result ? vf.dest(instr->def, 0, pin_free) : nullptr;
PRegister src_as_register = nullptr;
auto src_val = vf.src(instr->src[1], 0);
ir = new GDSInstr(op, dest, src, offset, uav_id);
} else {
- auto dest = vf.dest(instr->dest.ssa, 0, pin_free);
+ auto dest = vf.dest(instr->def, 0, pin_free);
auto tmp = vf.temp_vec4(pin_group, {0, 1, 7, 7});
if (uav_id)
shader.emit_instruction(new AluInstr(op3_muladd_uint24,
}
offset += shader.remap_atomic_base(nir_intrinsic_base(instr));
- auto dest = vf.dest(instr->dest.ssa, 0, pin_free);
+ auto dest = vf.dest(instr->def, 0, pin_free);
GDSInstr *ir = nullptr;
GDSInstr::emit_atomic_inc(nir_intrinsic_instr *instr, Shader& shader)
{
auto& vf = shader.value_factory();
- bool read_result = !list_is_empty(&instr->dest.ssa.uses);
+ bool read_result = !list_is_empty(&instr->def.uses);
auto [offset, uav_id] = shader.evaluate_resource_offset(instr, 0);
{
offset += shader.remap_atomic_base(nir_intrinsic_base(instr));
GDSInstr *ir = nullptr;
- auto dest = read_result ? vf.dest(instr->dest.ssa, 0, pin_free) : nullptr;
+ auto dest = read_result ? vf.dest(instr->def, 0, pin_free) : nullptr;
if (shader.chip_class() < ISA_CC_CAYMAN) {
RegisterVec4 src(nullptr, shader.atomic_update(), nullptr, nullptr, pin_chan);
{
auto& vf = shader.value_factory();
- bool read_result = !list_is_empty(&instr->dest.ssa.uses);
+ bool read_result = !list_is_empty(&instr->def.uses);
auto opcode = read_result ? DS_OP_SUB_RET : DS_OP_SUB;
shader.emit_instruction(ir);
if (read_result)
shader.emit_instruction(new AluInstr(op2_sub_int,
- vf.dest(instr->dest.ssa, 0, pin_free),
+ vf.dest(instr->def, 0, pin_free),
tmp_dest,
vf.one_i(),
AluInstr::last_write));
RatInstr::emit_ssbo_load(nir_intrinsic_instr *intr, Shader& shader)
{
auto& vf = shader.value_factory();
- auto dest = vf.dest_vec4(intr->dest.ssa, pin_group);
+ auto dest = vf.dest_vec4(intr->def, pin_group);
/** src0 not used, should be some offset */
auto addr = vf.src(intr->src[1], 0);
{0, 1, 2, 3}
};
- int comp_idx = intr->dest.ssa.num_components - 1;
+ int comp_idx = intr->def.num_components - 1;
auto [offset, res_offset] = shader.evaluate_resource_offset(intr, 0);
{
{
}
- bool read_result = !list_is_empty(&intr->dest.ssa.uses);
+ bool read_result = !list_is_empty(&intr->def.uses);
auto opcode = read_result ? get_rat_opcode(nir_intrinsic_atomic_op(intr))
: get_rat_opcode_wo(nir_intrinsic_atomic_op(intr));
atomic->set_ack();
if (read_result) {
atomic->set_instr_flag(ack_rat_return_write);
- auto dest = vf.dest_vec4(intr->dest.ssa, pin_group);
+ auto dest = vf.dest_vec4(intr->def, pin_group);
auto fetch = new FetchInstr(vc_fetch,
dest,
RatInstr::emit_ssbo_size(nir_intrinsic_instr *intr, Shader& shader)
{
auto& vf = shader.value_factory();
- auto dest = vf.dest_vec4(intr->dest.ssa, pin_group);
+ auto dest = vf.dest_vec4(intr->def, pin_group);
auto const_offset = nir_src_as_const_value(intr->src[0]);
int res_id = R600_IMAGE_REAL_RESOURCE_OFFSET;
{
}
- bool read_result = !list_is_empty(&intrin->dest.ssa.uses);
+ bool read_result = !list_is_empty(&intrin->def.uses);
bool image_load = (intrin->intrinsic == nir_intrinsic_image_load);
auto opcode = image_load ? RatInstr::NOP_RTN :
read_result ? get_rat_opcode(nir_intrinsic_atomic_op(intrin))
atomic->set_ack();
if (read_result) {
atomic->set_instr_flag(ack_rat_return_write);
- auto dest = vf.dest_vec4(intrin->dest.ssa, pin_group);
+ auto dest = vf.dest_vec4(intrin->def, pin_group);
pipe_format format = nir_intrinsic_format(intrin);
unsigned fmt = fmt_32;
dyn_offset = shader.emit_load_to_register(vf.src(intrin->src[0], 0));
if (nir_intrinsic_image_dim(intrin) == GLSL_SAMPLER_DIM_BUF) {
- auto dest = vf.dest_vec4(intrin->dest.ssa, pin_group);
+ auto dest = vf.dest_vec4(intrin->def, pin_group);
shader.emit_instruction(new QueryBufferSizeInstr(dest, {0, 1, 2, 3}, res_id));
return true;
} else {
if (nir_intrinsic_image_dim(intrin) == GLSL_SAMPLER_DIM_CUBE &&
nir_intrinsic_image_array(intrin) &&
- intrin->dest.ssa.num_components > 2) {
+ intrin->def.num_components > 2) {
/* Need to load the layers from a const buffer */
- auto dest = vf.dest_vec4(intrin->dest.ssa, pin_group);
+ auto dest = vf.dest_vec4(intrin->def, pin_group);
shader.emit_instruction(new TexInstr(TexInstr::get_resinfo,
dest,
{0, 1, 7, 3},
op3_cnde_int, dest[2], low_bit, comp1, comp2, AluInstr::last_write));
}
} else {
- auto dest = vf.dest_vec4(intrin->dest.ssa, pin_group);
+ auto dest = vf.dest_vec4(intrin->def, pin_group);
shader.emit_instruction(new TexInstr(TexInstr::get_resinfo,
dest,
{0, 1, 2, 3},
auto src = RegisterVec4(0, true, {4, 4, 4, 4});
auto tmp = shader.value_factory().temp_vec4(pin_group);
- auto dest = shader.value_factory().dest(intrin->dest.ssa, 0, pin_free);
+ auto dest = shader.value_factory().dest(intrin->def, 0, pin_free);
auto const_offset = nir_src_as_const_value(intrin->src[0]);
PRegister dyn_offset = nullptr;
int32_t inst_mode = params[2].i32;
uint32_t dst_swz_packed = params[3].u32;
- auto dst = vf.dest_vec4(tex->dest.ssa, pin_group);
+ auto dst = vf.dest_vec4(tex->def, pin_group);
RegisterVec4::Swizzle src_swizzle = {0};
for (int i = 0; i < 4; ++i)
TexInstr::emit_buf_txf(nir_tex_instr *tex, Inputs& src, Shader& shader)
{
auto& vf = shader.value_factory();
- auto dst = vf.dest_vec4(tex->dest.ssa, pin_group);
+ auto dst = vf.dest_vec4(tex->def, pin_group);
PRegister tex_offset = nullptr;
if (src.resource_offset)
bool
TexInstr::emit_tex_texture_samples(nir_tex_instr *instr, Inputs& src, Shader& shader)
{
- RegisterVec4 dest = shader.value_factory().dest_vec4(instr->dest.ssa, pin_chan);
+ RegisterVec4 dest = shader.value_factory().dest_vec4(instr->def, pin_chan);
RegisterVec4 help{
0, true, {4, 4, 4, 4}
};
{
auto& vf = shader.value_factory();
- auto dest = vf.dest_vec4(tex->dest.ssa, pin_group);
+ auto dest = vf.dest_vec4(tex->def, pin_group);
if (tex->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
if (shader.chip_class() >= ISA_CC_EVERGREEN) {
auto sampler = get_sampler_id(tex->sampler_index, src.sampler_deref);
assert(!sampler.indirect && "Indirect sampler selection not yet supported");
- auto dst = shader.value_factory().dest_vec4(tex->dest.ssa, pin_group);
+ auto dst = shader.value_factory().dest_vec4(tex->def, pin_group);
auto swizzle = src.swizzle_from_ncomps(tex->coord_components);
}
auto fetch_sample = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
- nir_def_init(&fetch_sample->instr, &fetch_sample->dest.ssa, 4, 32);
+ nir_def_init(&fetch_sample->instr, &fetch_sample->def, 4, 32);
int used_coord_mask = 0;
nir_def *backend1 = prep_src(new_coord, used_coord_mask);
new_coord[3] = nir_iand_imm(b,
nir_ushr(b,
- nir_channel(b, &fetch_sample->dest.ssa, 0),
+ nir_channel(b, &fetch_sample->def, 0),
nir_ishl_imm(b, new_coord[3], 2)),
15);
align = instr->src[0].ssa->num_components;
address_index = 1;
} else {
- align = instr->dest.ssa.num_components;
+ align = instr->def.num_components;
}
nir_def *address = instr->src[address_index].ssa;
nir_intrinsic_set_base(intr, new_base);
nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(new_bufid->ssa));
- return &intr->dest.ssa;
+ return &intr->def;
}
};
if (op->intrinsic == nir_intrinsic_load_shared) {
nir_def *addr = op->src[0].ssa;
- switch (op->dest.ssa.num_components) {
+ switch (op->def.num_components) {
case 2: {
auto addr2 = nir_iadd_imm(&b, addr, 4);
addr = nir_vec2(&b, addr, addr2);
auto load =
nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_local_shared_r600);
- load->num_components = op->dest.ssa.num_components;
+ load->num_components = op->def.num_components;
load->src[0] = nir_src_for_ssa(addr);
- nir_def_init(&load->instr, &load->dest.ssa, load->num_components,
+ nir_def_init(&load->instr, &load->def, load->num_components,
32);
- nir_def_rewrite_uses(&op->dest.ssa, &load->dest.ssa);
+ nir_def_rewrite_uses(&op->def, &load->def);
nir_builder_instr_insert(&b, &load->instr);
} else {
nir_def *addr = op->src[1].ssa;
(void)_options;
auto old_ir = nir_instr_as_intrinsic(instr);
auto load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_input);
- nir_def_init(&load->instr, &load->dest.ssa,
- old_ir->dest.ssa.num_components, old_ir->dest.ssa.bit_size);
+ nir_def_init(&load->instr, &load->def,
+ old_ir->def.num_components, old_ir->def.bit_size);
nir_intrinsic_set_io_semantics(load, nir_intrinsic_io_semantics(old_ir));
nir_intrinsic_set_base(load, nir_intrinsic_base(old_ir));
load->num_components = old_ir->num_components;
load->src[0] = old_ir->src[1];
nir_builder_instr_insert(b, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
bool
if (load_value)
default_value =
- nir_imm_zero(b, ir->dest.ssa.num_components, ir->dest.ssa.bit_size);
+ nir_imm_zero(b, ir->def.num_components, ir->def.bit_size);
auto image_exists =
nir_ult_imm(b, ir->src[0].ssa, b->shader->info.num_images);
nir_builder_instr_insert(b, new_load);
if (load_value)
- result = &new_load_ir->dest.ssa;
+ result = &new_load_ir->def;
if (ir->intrinsic != nir_intrinsic_image_size) {
/* Access is out of range start */
intr->intrinsic != nir_intrinsic_load_ubo_vec4)
return false;
- return intr->dest.ssa.bit_size == 64;
+ return intr->def.bit_size == 64;
}
nir_def *
LowerLoad64Uniform::lower(nir_instr *instr)
{
auto intr = nir_instr_as_intrinsic(instr);
- int old_components = intr->dest.ssa.num_components;
+ int old_components = intr->def.num_components;
assert(old_components <= 2);
- intr->dest.ssa.num_components *= 2;
- intr->dest.ssa.bit_size = 32;
+ intr->def.num_components *= 2;
+ intr->def.bit_size = 32;
intr->num_components *= 2;
if (intr->intrinsic == nir_intrinsic_load_ubo ||
for (int i = 0; i < old_components; ++i) {
result_vec[i] = nir_pack_64_2x32_split(b,
- nir_channel(b, &intr->dest.ssa, 2 * i),
- nir_channel(b, &intr->dest.ssa, 2 * i + 1));
+ nir_channel(b, &intr->def, 2 * i),
+ nir_channel(b, &intr->def, 2 * i + 1));
}
if (old_components == 1)
return result_vec[0];
}
case nir_instr_type_phi: {
auto phi = nir_instr_as_phi(instr);
- return phi->dest.ssa.num_components == 64;
+ return phi->def.num_components == 64;
}
default:
return false;
auto phi_lo = nir_phi_instr_create(b->shader);
auto phi_hi = nir_phi_instr_create(b->shader);
nir_def_init(
- &phi_lo->instr, &phi_lo->dest.ssa, phi->dest.ssa.num_components * 2, 32);
+ &phi_lo->instr, &phi_lo->def, phi->def.num_components * 2, 32);
nir_def_init(
- &phi_hi->instr, &phi_hi->dest.ssa, phi->dest.ssa.num_components * 2, 32);
+ &phi_hi->instr, &phi_hi->def, phi->def.num_components * 2, 32);
nir_foreach_phi_src(s, phi)
{
auto lo = nir_unpack_32_2x16_split_x(b, nir_ssa_for_src(b, s->src, 1));
nir_phi_instr_add_src(phi_lo, s->pred, nir_src_for_ssa(lo));
nir_phi_instr_add_src(phi_hi, s->pred, nir_src_for_ssa(hi));
}
- return nir_pack_64_2x32_split(b, &phi_lo->dest.ssa, &phi_hi->dest.ssa);
+ return nir_pack_64_2x32_split(b, &phi_lo->def, &phi_hi->def);
}
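(Schematically, the lowering above replaces one wide phi with two 32-bit
phis, one per half: each incoming value is unpacked into low and high halves
that feed phi_lo and phi_hi, and the original def is reconstituted with
nir_pack_64_2x32_split(&phi_lo->def, &phi_hi->def).)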
default:
unreachable("Trying to lower instruction that was not in filter");
case nir_intrinsic_load_input:
case nir_intrinsic_load_ubo:
case nir_intrinsic_load_ssbo:
- if (intr->dest.ssa.bit_size != 64)
+ if (intr->def.bit_size != 64)
return false;
- return intr->dest.ssa.num_components >= 3;
+ return intr->def.num_components >= 3;
case nir_intrinsic_store_output:
if (nir_src_bit_size(intr->src[0]) != 64)
return false;
auto deref1 = nir_build_deref_var(b, vars.first);
auto deref_array1 = nir_build_deref_array(b, deref1, nir_ssa_for_src(b, index, 1));
auto load1 =
- nir_build_load_deref(b, 2, 64, &deref_array1->dest.ssa, (enum gl_access_qualifier)0);
+ nir_build_load_deref(b, 2, 64, &deref_array1->def, (enum gl_access_qualifier)0);
auto deref2 = nir_build_deref_var(b, vars.second);
auto deref_array2 = nir_build_deref_array(b, deref2, nir_ssa_for_src(b, index, 1));
auto load2 = nir_build_load_deref(
- b, old_components - 2, 64, &deref_array2->dest.ssa, (enum gl_access_qualifier)0);
+ b, old_components - 2, 64, &deref_array2->def, (enum gl_access_qualifier)0);
return merge_64bit_loads(load1, load2, old_components == 3);
}
auto deref_array1 =
nir_build_deref_array(b, deref1, nir_ssa_for_src(b, deref->arr.index, 1));
- nir_build_store_deref(b, &deref_array1->dest.ssa, src_xy, 3);
+ nir_build_store_deref(b, &deref_array1->def, src_xy, 3);
auto deref2 = nir_build_deref_var(b, vars.second);
auto deref_array2 =
if (old_components == 3)
nir_build_store_deref(b,
- &deref_array2->dest.ssa,
+ &deref_array2->def,
nir_channel(b, intr->src[1].ssa, 2),
1);
else
nir_build_store_deref(b,
- &deref_array2->dest.ssa,
+ &deref_array2->def,
nir_channels(b, intr->src[1].ssa, 0xc),
3);
auto vars = get_var_pair(old_var);
auto deref1 = nir_build_deref_var(b, vars.first);
- nir_build_store_deref(b, &deref1->dest.ssa, src_xy, 3);
+ nir_build_store_deref(b, &deref1->def, src_xy, 3);
auto deref2 = nir_build_deref_var(b, vars.second);
if (old_components == 3)
- nir_build_store_deref(b, &deref2->dest.ssa, nir_channel(b, intr->src[1].ssa, 2), 1);
+ nir_build_store_deref(b, &deref2->def, nir_channel(b, intr->src[1].ssa, 2), 1);
else
nir_build_store_deref(b,
- &deref2->dest.ssa,
+ &deref2->def,
nir_channels(b, intr->src[1].ssa, 0xc),
3);
nir_def *
LowerSplit64BitVar::split_double_load(nir_intrinsic_instr *load1)
{
- unsigned old_components = load1->dest.ssa.num_components;
+ unsigned old_components = load1->def.num_components;
auto load2 = nir_instr_as_intrinsic(nir_instr_clone(b->shader, &load1->instr));
nir_io_semantics sem = nir_intrinsic_io_semantics(load1);
- load1->dest.ssa.num_components = 2;
+ load1->def.num_components = 2;
sem.num_slots = 1;
nir_intrinsic_set_io_semantics(load1, sem);
- load2->dest.ssa.num_components = old_components - 2;
+ load2->def.num_components = old_components - 2;
sem.location += 1;
nir_intrinsic_set_io_semantics(load2, sem);
nir_intrinsic_set_base(load2, nir_intrinsic_base(load1) + 1);
nir_builder_instr_insert(b, &load2->instr);
- return merge_64bit_loads(&load1->dest.ssa, &load2->dest.ssa, old_components == 3);
+ return merge_64bit_loads(&load1->def, &load2->def, old_components == 3);
}
nir_def *
nir_def *
LowerSplit64BitVar::split_double_load_uniform(nir_intrinsic_instr *intr)
{
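   /* The original load keeps components x/y; a second load_uniform at
    * offset + 1 supplies z (and w), and the results are recombined below. */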
- unsigned second_components = intr->dest.ssa.num_components - 2;
+ unsigned second_components = intr->def.num_components - 2;
nir_intrinsic_instr *load2 =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
load2->src[0] = nir_src_for_ssa(nir_iadd_imm(b, intr->src[0].ssa, 1));
nir_intrinsic_set_range(load2, nir_intrinsic_range(intr));
load2->num_components = second_components;
- nir_def_init(&load2->instr, &load2->dest.ssa, second_components, 64);
+ nir_def_init(&load2->instr, &load2->def, second_components, 64);
nir_builder_instr_insert(b, &load2->instr);
- intr->dest.ssa.num_components = intr->num_components = 2;
+ intr->def.num_components = intr->num_components = 2;
if (second_components == 1)
return nir_vec3(b,
- nir_channel(b, &intr->dest.ssa, 0),
- nir_channel(b, &intr->dest.ssa, 1),
- nir_channel(b, &load2->dest.ssa, 0));
+ nir_channel(b, &intr->def, 0),
+ nir_channel(b, &intr->def, 1),
+ nir_channel(b, &load2->def, 0));
else
return nir_vec4(b,
- nir_channel(b, &intr->dest.ssa, 0),
- nir_channel(b, &intr->dest.ssa, 1),
- nir_channel(b, &load2->dest.ssa, 0),
- nir_channel(b, &load2->dest.ssa, 1));
+ nir_channel(b, &intr->def, 0),
+ nir_channel(b, &intr->def, 1),
+ nir_channel(b, &load2->def, 0),
+ nir_channel(b, &load2->def, 1));
}
nir_def *
LowerSplit64BitVar::split_double_load_ssbo(nir_intrinsic_instr *intr)
{
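   /* Clone the SSBO load for the remaining components, shrink the original
    * to a vec2, and merge both results. */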
- unsigned second_components = intr->dest.ssa.num_components - 2;
+ unsigned second_components = intr->def.num_components - 2;
nir_intrinsic_instr *load2 =
nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
auto new_src0 = nir_src_for_ssa(nir_iadd_imm(b, intr->src[0].ssa, 1));
nir_instr_rewrite_src(&load2->instr, &load2->src[0], new_src0);
load2->num_components = second_components;
- nir_def_init(&load2->instr, &load2->dest.ssa, second_components, 64);
+ nir_def_init(&load2->instr, &load2->def, second_components, 64);
nir_intrinsic_set_dest_type(load2, nir_intrinsic_dest_type(intr));
nir_builder_instr_insert(b, &load2->instr);
- intr->dest.ssa.num_components = intr->num_components = 2;
+ intr->def.num_components = intr->num_components = 2;
- return merge_64bit_loads(&intr->dest.ssa, &load2->dest.ssa, second_components == 1);
+ return merge_64bit_loads(&intr->def, &load2->def, second_components == 1);
}
nir_def *
LowerSplit64BitVar::split_double_load_ubo(nir_intrinsic_instr *intr)
{
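   /* Same pattern for UBO loads: a cloned load fetches the remaining
    * components before the two results are merged. */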
- unsigned second_components = intr->dest.ssa.num_components - 2;
+ unsigned second_components = intr->def.num_components - 2;
nir_intrinsic_instr *load2 =
nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
load2->src[0] = intr->src[0];
load2->num_components = second_components;
- nir_def_init(&load2->instr, &load2->dest.ssa, second_components, 64);
+ nir_def_init(&load2->instr, &load2->def, second_components, 64);
nir_builder_instr_insert(b, &load2->instr);
- intr->dest.ssa.num_components = intr->num_components = 2;
+ intr->def.num_components = intr->num_components = 2;
- return merge_64bit_loads(&intr->dest.ssa, &load2->dest.ssa, second_components == 1);
+ return merge_64bit_loads(&intr->def, &load2->def, second_components == 1);
}
nir_def *
case nir_intrinsic_load_global:
case nir_intrinsic_load_ubo_vec4:
case nir_intrinsic_load_ssbo:
- return intr->dest.ssa.bit_size == 64;
+ return intr->def.bit_size == 64;
case nir_intrinsic_store_deref: {
if (nir_src_bit_size(intr->src[1]) == 64)
return true;
}
case nir_instr_type_phi: {
auto phi = nir_instr_as_phi(instr);
- return phi->dest.ssa.bit_size == 64;
+ return phi->def.bit_size == 64;
}
case nir_instr_type_load_const: {
auto lc = nir_instr_as_load_const(instr);
}
case nir_instr_type_phi: {
auto phi = nir_instr_as_phi(instr);
- phi->dest.ssa.bit_size = 32;
- phi->dest.ssa.num_components = 2;
+ phi->def.bit_size = 32;
+ phi->def.num_components = 2;
return NIR_LOWER_INSTR_PROGRESS;
}
case nir_instr_type_load_const: {
}
intr->num_components = components;
- intr->dest.ssa.bit_size = 32;
- intr->dest.ssa.num_components = components;
+ intr->def.bit_size = 32;
+ intr->def.num_components = components;
return NIR_LOWER_INSTR_PROGRESS;
}
Lower64BitToVec2::load_uniform_64_to_vec2(nir_intrinsic_instr *intr)
{
intr->num_components *= 2;
- intr->dest.ssa.bit_size = 32;
- intr->dest.ssa.num_components *= 2;
+ intr->def.bit_size = 32;
+ intr->def.num_components *= 2;
nir_intrinsic_set_dest_type(intr, nir_type_float32);
return NIR_LOWER_INSTR_PROGRESS;
}
Lower64BitToVec2::load_64_to_vec2(nir_intrinsic_instr *intr)
{
intr->num_components *= 2;
- intr->dest.ssa.bit_size = 32;
- intr->dest.ssa.num_components *= 2;
+ intr->def.bit_size = 32;
+ intr->def.num_components *= 2;
nir_intrinsic_set_component(intr, nir_intrinsic_component(intr) * 2);
return NIR_LOWER_INSTR_PROGRESS;
}
Lower64BitToVec2::load_ssbo_64_to_vec2(nir_intrinsic_instr *intr)
{
intr->num_components *= 2;
- intr->dest.ssa.bit_size = 32;
- intr->dest.ssa.num_components *= 2;
+ intr->def.bit_size = 32;
+ intr->def.num_components *= 2;
return NIR_LOWER_INSTR_PROGRESS;
}
bool has_dest = nir_intrinsic_infos[instr->intrinsic].has_dest;
if (has_dest) {
- if (instr->dest.ssa.bit_size != 64)
+ if (instr->def.bit_size != 64)
return false;
} else {
if (nir_src_bit_size(instr->src[0]) != 64)
first->num_components = 2;
second->num_components -= 2;
if (has_dest) {
- first->dest.ssa.num_components = 2;
- second->dest.ssa.num_components -= 2;
+ first->def.num_components = 2;
+ second->def.num_components -= 2;
}
nir_builder_instr_insert(b, &first->instr);
if (has_dest) {
/* Merge the two loads' results back into a vector. */
nir_scalar channels[4] = {
- nir_get_ssa_scalar(&first->dest.ssa, 0),
- nir_get_ssa_scalar(&first->dest.ssa, 1),
- nir_get_ssa_scalar(&second->dest.ssa, 0),
- nir_get_ssa_scalar(&second->dest.ssa, second->num_components > 1 ? 1 : 0),
+ nir_get_ssa_scalar(&first->def, 0),
+ nir_get_ssa_scalar(&first->def, 1),
+ nir_get_ssa_scalar(&second->def, 0),
+ nir_get_ssa_scalar(&second->def, second->num_components > 1 ? 1 : 0),
};
nir_def *new_ir = nir_vec_scalars(b, channels, instr->num_components);
- nir_def_rewrite_uses(&instr->dest.ssa, new_ir);
+ nir_def_rewrite_uses(&instr->def, new_ir);
} else {
/* Split the src value across the two stores. */
b->cursor = nir_before_instr(&instr->instr);
nir_deref_instr *deref = nir_build_deref_var(b, var);
deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));
- new_intr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ new_intr->src[0] = nir_src_for_ssa(&deref->def);
new_intr->src[1] =
nir_src_for_ssa(create_combined_vector(b, srcs, first_comp, num_comps));
emit_load_param_base(nir_builder *b, nir_intrinsic_op op)
{
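   /* Emit the requested r600 param-base intrinsic and return its vec4 result. */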
nir_intrinsic_instr *result = nir_intrinsic_instr_create(b->shader, op);
- nir_def_init(&result->instr, &result->dest.ssa, 4, 32);
+ nir_def_init(&result->instr, &result->def, 4, 32);
nir_builder_instr_insert(b, &result->instr);
- return &result->dest.ssa;
+ return &result->def;
}
static int
get_dest_usee_mask(nir_intrinsic_instr *op)
{
MaskQuery mq = {0};
- mq.full_mask = (1 << op->dest.ssa.num_components) - 1;
+ mq.full_mask = (1 << op->def.num_components) - 1;
- nir_foreach_use(use_src, &op->dest.ssa)
+ nir_foreach_use(use_src, &op->def)
{
auto use_instr = use_src->parent_instr;
mq.ssa_index = use_src->ssa->index;
auto new_load = nir_load_local_shared_r600(b, 32, addr_outer);
auto undef = nir_undef(b, 1, 32);
- int comps = op->dest.ssa.num_components;
+ int comps = op->def.num_components;
nir_def *remix[4] = {undef, undef, undef, undef};
int chan = 0;
}
}
auto new_load_remixed = nir_vec(b, remix, comps);
- nir_def_rewrite_uses(&op->dest.ssa, new_load_remixed);
+ nir_def_rewrite_uses(&op->def, new_load_remixed);
}
nir_instr_remove(&op->instr);
}
{
auto patch_id =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_tcs_rel_patch_id_r600);
- nir_def_init(&patch_id->instr, &patch_id->dest.ssa, 1, 32);
+ nir_def_init(&patch_id->instr, &patch_id->def, 1, 32);
nir_builder_instr_insert(b, &patch_id->instr);
- return &patch_id->dest.ssa;
+ return &patch_id->def;
}
static void
auto base = emit_load_param_base(b, nir_intrinsic_load_tcs_in_param_base_r600);
vertices_in = nir_channel(b, base, 2);
}
- nir_def_rewrite_uses(&op->dest.ssa, vertices_in);
+ nir_def_rewrite_uses(&op->def, vertices_in);
nir_instr_remove(&op->instr);
return true;
}
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_local_shared_r600);
tf->num_components = ncomps;
tf->src[0] = nir_src_for_ssa(addr_outer);
- nir_def_init(&tf->instr, &tf->dest.ssa, tf->num_components, 32);
+ nir_def_init(&tf->instr, &tf->def, tf->num_components, 32);
nir_builder_instr_insert(b, &tf->instr);
if (ncomps < 4 && b->shader->info.stage != MESA_SHADER_TESS_EVAL) {
auto undef = nir_undef(b, 1, 32);
nir_def *srcs[4] = {undef, undef, undef, undef};
for (unsigned i = 0; i < ncomps; ++i)
- srcs[i] = nir_channel(b, &tf->dest.ssa, i);
+ srcs[i] = nir_channel(b, &tf->def, i);
auto help = nir_vec(b, srcs, 4);
- nir_def_rewrite_uses(&op->dest.ssa, help);
+ nir_def_rewrite_uses(&op->def, help);
} else {
- nir_def_rewrite_uses(&op->dest.ssa, &tf->dest.ssa);
+ nir_def_rewrite_uses(&op->def, &tf->def);
}
nir_instr_remove(instr);
return true;
auto invocation_id =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_invocation_id);
- nir_def_init(&invocation_id->instr, &invocation_id->dest.ssa, 1, 32);
+ nir_def_init(&invocation_id->instr, &invocation_id->def, 1, 32);
nir_builder_instr_insert(b, &invocation_id->instr);
- nir_push_if(b, nir_ieq_imm(b, &invocation_id->dest.ssa, 0));
+ nir_push_if(b, nir_ieq_imm(b, &invocation_id->def, 0));
auto base = emit_load_param_base(b, nir_intrinsic_load_tcs_out_param_base_r600);
auto rel_patch_id = r600_load_rel_patch_id(b);
tf_outer->num_components = outer_comps;
tf_outer->src[0] = nir_src_for_ssa(addr_outer);
nir_def_init(
- &tf_outer->instr, &tf_outer->dest.ssa, tf_outer->num_components, 32);
+ &tf_outer->instr, &tf_outer->def, tf_outer->num_components, 32);
nir_builder_instr_insert(b, &tf_outer->instr);
std::vector<nir_def *> tf_out;
auto tf_out_base =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_tcs_tess_factor_base_r600);
- nir_def_init(&tf_out_base->instr, &tf_out_base->dest.ssa, 1, 32);
+ nir_def_init(&tf_out_base->instr, &tf_out_base->def, 1, 32);
nir_builder_instr_insert(b, &tf_out_base->instr);
auto out_addr0 = nir_build_alu(b,
nir_op_umad24,
rel_patch_id,
nir_imm_int(b, stride),
- &tf_out_base->dest.ssa,
+ &tf_out_base->def,
NULL);
int chanx = 0;
int chany = 1;
tf_out.push_back(nir_vec2(b,
out_addr0,
- nir_channel(b, &tf_outer->dest.ssa, chanx)));
+ nir_channel(b, &tf_outer->def, chanx)));
tf_out.push_back(nir_vec2(b, nir_iadd_imm(b, out_addr0, 4),
- nir_channel(b, &tf_outer->dest.ssa, chany)));
+ nir_channel(b, &tf_outer->def, chany)));
if (outer_comps > 2) {
tf_out.push_back(nir_vec2(b,
nir_iadd_imm(b, out_addr0, 8),
- nir_channel(b, &tf_outer->dest.ssa, 2)));
+ nir_channel(b, &tf_outer->def, 2)));
}
if (outer_comps > 3) {
tf_out.push_back(nir_vec2(b,
nir_iadd_imm(b, out_addr0, 12),
- nir_channel(b, &tf_outer->dest.ssa, 3)));
+ nir_channel(b, &tf_outer->def, 3)));
inner_base = 16;
}
tf_inner->num_components = inner_comps;
tf_inner->src[0] = nir_src_for_ssa(addr1);
nir_def_init(
- &tf_inner->instr, &tf_inner->dest.ssa, tf_inner->num_components, 32);
+ &tf_inner->instr, &tf_inner->def, tf_inner->num_components, 32);
nir_builder_instr_insert(b, &tf_inner->instr);
tf_out.push_back(nir_vec2(b,
nir_iadd_imm(b, out_addr0, inner_base),
- nir_channel(b, &tf_inner->dest.ssa, 0)));
+ nir_channel(b, &tf_inner->def, 0)));
if (inner_comps > 1) {
tf_out.push_back(nir_vec2(b,
nir_iadd_imm(b, out_addr0, inner_base + 4),
- nir_channel(b, &tf_inner->dest.ssa, 1)));
+ nir_channel(b, &tf_inner->def, 1)));
}
}
b->cursor = nir_before_instr(&intr->instr);
nir_intrinsic_instr *new_intr = nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_def_init(&new_intr->instr, &new_intr->dest.ssa, num_comps,
- intr->dest.ssa.bit_size);
+ nir_def_init(&new_intr->instr, &new_intr->def, num_comps,
+ intr->def.bit_size);
new_intr->num_components = num_comps;
nir_deref_instr *deref = nir_build_deref_var(b, var);
deref = r600_clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));
- new_intr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ new_intr->src[0] = nir_src_for_ssa(&deref->def);
if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
for (unsigned i = 0; i < old_num_comps; ++i)
channels[i] = comp - var->data.location_frac + i;
- nir_def *load = nir_swizzle(b, &new_intr->dest.ssa, channels, old_num_comps);
- nir_def_rewrite_uses(&intr->dest.ssa, load);
+ nir_def *load = nir_swizzle(b, &new_intr->def, channels, old_num_comps);
+ nir_def_rewrite_uses(&intr->def, load);
/* Remove the old load intrinsic */
nir_instr_remove(&intr->instr);
void RegisterReadHandler::visit(LocalArray& array)
{
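   /* A 64-bit component occupies two 32-bit slots, so emit one MOV per slot
    * and channel. */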
- int slots = ir->dest.ssa.bit_size / 32;
- auto pin = ir->dest.ssa.num_components > 1 ? pin_none : pin_free;
- for (int i = 0; i < ir->dest.ssa.num_components; ++i) {
+ int slots = ir->def.bit_size / 32;
+ auto pin = ir->def.num_components > 1 ? pin_none : pin_free;
+ for (int i = 0; i < ir->def.num_components; ++i) {
for (int s = 0; s < slots; ++s) {
int chan = i * slots + s;
- auto dest = sh.value_factory().dest(ir->dest.ssa, chan, pin);
+ auto dest = sh.value_factory().dest(ir->def, chan, pin);
auto src = array.element(nir_intrinsic_base(ir), addr, chan);
sh.emit_instruction(new AluInstr(op1_mov, dest, src, AluInstr::write));
}
void RegisterReadHandler::visit(Register& reg)
{
- auto dest = sh.value_factory().dest(ir->dest.ssa, 0, pin_free);
+ auto dest = sh.value_factory().dest(ir->def, 0, pin_free);
sh.emit_instruction(new AluInstr(op1_mov, dest, &reg, AluInstr::write));
}
bool
Shader::emit_atomic_local_shared(nir_intrinsic_instr *instr)
{
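   /* Only allocate a destination register when the atomic's result is
    * actually read. */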
- bool uses_retval = !list_is_empty(&instr->dest.ssa.uses);
+ bool uses_retval = !list_is_empty(&instr->def.uses);
auto& vf = value_factory();
- auto dest_value = uses_retval ? vf.dest(instr->dest.ssa, 0, pin_free) : nullptr;
+ auto dest_value = uses_retval ? vf.dest(instr->def, 0, pin_free) : nullptr;
auto op = lds_op_from_intrinsic(nir_intrinsic_atomic_op(instr), uses_retval);
* value from read queue. */
if (!uses_retval &&
(op == LDS_XCHG_RET || op == LDS_CMP_XCHG_RET)) {
- dest_value = vf.dest(instr->dest.ssa, 0, pin_free);
+ dest_value = vf.dest(instr->def, 0, pin_free);
}
auto address = vf.src(instr->src[0], 0);
Shader::emit_load_scratch(nir_intrinsic_instr *intr)
{
auto addr = value_factory().src(intr->src[0], 0);
- auto dest = value_factory().dest_vec4(intr->dest.ssa, pin_group);
+ auto dest = value_factory().dest_vec4(intr->def, pin_group);
if (chip_class() >= ISA_CC_R700) {
RegisterVec4::Swizzle dest_swz = {7, 7, 7, 7};
bool Shader::emit_load_global(nir_intrinsic_instr *intr)
{
- auto dest = value_factory().dest_vec4(intr->dest.ssa, pin_group);
+ auto dest = value_factory().dest_vec4(intr->def, pin_group);
auto src_value = value_factory().src(intr->src[0], 0);
auto src = src_value->as_register();
Shader::emit_local_load(nir_intrinsic_instr *instr)
{
auto address = value_factory().src_vec(instr->src[0], instr->num_components);
- auto dest_value = value_factory().dest_vec(instr->dest.ssa, instr->num_components);
+ auto dest_value = value_factory().dest_vec(instr->def, instr->num_components);
emit_instruction(new LDSReadInstr(dest_value, address));
return true;
}
emit_instruction(
new AluInstr(op1_mov, src, value_factory().zero(), AluInstr::last_write));
- auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+ auto dest = value_factory().dest_vec4(instr->def, pin_group);
auto fetch = new LoadFromBuffer(dest,
{0, 1, 2, 3},
src,
auto& vf = value_factory();
auto group = new AluGroup();
group->add_instruction(new AluInstr(op1_mov,
- vf.dest(instr->dest.ssa, 0, pin_chan),
+ vf.dest(instr->def, 0, pin_chan),
vf.inline_const(ALU_SRC_TIME_LO, 0),
AluInstr::write));
group->add_instruction(new AluInstr(op1_mov,
- vf.dest(instr->dest.ssa, 1, pin_chan),
+ vf.dest(instr->def, 1, pin_chan),
vf.inline_const(ALU_SRC_TIME_HI, 0),
AluInstr::last_write));
emit_instruction(group);
auto addr = value_factory().src(instr->src[1], 0)->as_register();
RegisterVec4::Swizzle dest_swz{7, 7, 7, 7};
- auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+ auto dest = value_factory().dest_vec4(instr->def, pin_group);
- for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < instr->def.num_components; ++i) {
dest_swz[i] = i + nir_intrinsic_component(instr);
}
int buf_cmp = nir_intrinsic_component(instr);
AluInstr *ir = nullptr;
- auto pin = instr->dest.ssa.num_components == 1
+ auto pin = instr->def.num_components == 1
? pin_free
: pin_none;
- for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < instr->def.num_components; ++i) {
- sfn_log << SfnLog::io << "UBO[" << bufid << "] " << instr->dest.ssa.index
+ sfn_log << SfnLog::io << "UBO[" << bufid << "] " << instr->def.index
<< " const[" << i << "]: " << instr->const_index[i] << "\n";
auto uniform =
value_factory().uniform(512 + buf_offset->u32, i + buf_cmp, bufid->u32);
ir = new AluInstr(op1_mov,
- value_factory().dest(instr->dest.ssa, i, pin),
+ value_factory().dest(instr->def, i, pin),
uniform,
{alu_write});
emit_instruction(ir);
AluInstr *ir = nullptr;
auto kc_id = value_factory().src(instr->src[0], 0);
- for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < instr->def.num_components; ++i) {
int cmp = buf_cmp + i;
auto u =
new UniformValue(512 + buf_offset->u32, cmp, kc_id, nir_intrinsic_base(instr));
- auto dest = value_factory().dest(instr->dest.ssa, i, pin_none);
+ auto dest = value_factory().dest(instr->def, i, pin_none);
ir = new AluInstr(op1_mov, dest, u, AluInstr::write);
emit_instruction(ir);
}
AluInstr::last_write));
}
- auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+ auto dest = value_factory().dest_vec4(instr->def, pin_group);
auto ir = new LoadFromBuffer(dest,
{0, 1, 2, 7},
auto& vf = value_factory();
for (int i = 0; i < 3; ++i) {
- auto dest = vf.dest(instr->dest.ssa, i, pin_none);
+ auto dest = vf.dest(instr->def, i, pin_none);
emit_instruction(new AluInstr(
op1_mov, dest, src[i], i == 2 ? AluInstr::last_write : AluInstr::write));
}
auto location = nir_intrinsic_io_semantics(intr).location;
if (location == VARYING_SLOT_POS) {
AluInstr *ir = nullptr;
- for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < intr->def.num_components; ++i) {
ir = new AluInstr(op1_mov,
- vf.dest(intr->dest.ssa, i, pin_none),
+ vf.dest(intr->def, i, pin_none),
m_pos_input[i],
AluInstr::write);
emit_instruction(ir);
if (location == VARYING_SLOT_FACE) {
auto ir = new AluInstr(op2_setgt_dx10,
- vf.dest(intr->dest.ssa, 0, pin_none),
+ vf.dest(intr->def, 0, pin_none),
m_face_input,
vf.inline_const(ALU_SRC_0, 0),
AluInstr::last_write);
if (m_apply_sample_mask) {
return emit_load_sample_mask_in(intr);
} else
- return emit_simple_mov(intr->dest.ssa, 0, m_sample_mask_reg);
+ return emit_simple_mov(intr->def, 0, m_sample_mask_reg);
case nir_intrinsic_load_sample_id:
- return emit_simple_mov(intr->dest.ssa, 0, m_sample_id_reg);
+ return emit_simple_mov(intr->def, 0, m_sample_id_reg);
case nir_intrinsic_load_helper_invocation:
return emit_load_helper_invocation(intr);
case nir_intrinsic_load_sample_pos:
unsigned loc = nir_intrinsic_io_semantics(intr).location;
switch (loc) {
case VARYING_SLOT_POS:
- for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i)
- vf.inject_value(intr->dest.ssa, i, m_pos_input[i]);
+ for (unsigned i = 0; i < intr->def.num_components; ++i)
+ vf.inject_value(intr->def, i, m_pos_input[i]);
return true;
case VARYING_SLOT_FACE:
return false;
FragmentShader::emit_load_sample_mask_in(nir_intrinsic_instr *instr)
{
auto& vf = value_factory();
- auto dest = vf.dest(instr->dest.ssa, 0, pin_free);
+ auto dest = vf.dest(instr->def, 0, pin_free);
auto tmp = vf.temp_register();
assert(m_sample_id_reg);
assert(m_sample_mask_reg);
vtx->set_fetch_flag(FetchInstr::vpm);
vtx->set_fetch_flag(FetchInstr::use_tc);
vtx->set_always_keep();
- auto dst = value_factory().dest(instr->dest.ssa, 0, pin_free);
+ auto dst = value_factory().dest(instr->def, 0, pin_free);
auto ir = new AluInstr(op1_mov, dst, m_helper_invocation, AluInstr::last_write);
ir->add_required_instr(vtx);
emit_instruction(vtx);
bool
FragmentShader::emit_load_sample_pos(nir_intrinsic_instr *instr)
{
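   /* Sample positions are fetched via a buffer load. */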
- auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+ auto dest = value_factory().dest_vec4(instr->def, pin_group);
auto fetch = new LoadFromBuffer(dest,
{0, 1, 2, 3},
{
auto& vf = value_factory();
AluInstr *ir = nullptr;
- for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < intr->def.num_components; ++i) {
sfn_log << SfnLog::io << "Inject register "
<< *m_interpolated_inputs[nir_intrinsic_base(intr)][i] << "\n";
unsigned index = nir_intrinsic_component(intr) + i;
assert(index < 4);
- vf.inject_value(intr->dest.ssa,
+ vf.inject_value(intr->def,
i,
m_interpolated_inputs[nir_intrinsic_base(intr)][index]);
}
bool need_temp = comp > 0;
AluInstr *ir = nullptr;
- for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < intr->def.num_components; ++i) {
if (need_temp) {
auto tmp = vf.temp_register(comp + i);
ir =
AluInstr::last_write);
emit_instruction(ir);
emit_instruction(new AluInstr(
- op1_mov, vf.dest(intr->dest.ssa, i, pin_chan), tmp, AluInstr::last_write));
+ op1_mov, vf.dest(intr->def, i, pin_chan), tmp, AluInstr::last_write));
} else {
ir = new AluInstr(op1_interp_load_p0,
- vf.dest(intr->dest.ssa, i, pin_chan),
+ vf.dest(intr->def, i, pin_chan),
new InlineConstant(ALU_SRC_PARAM_BASE + io.lds_pos(), i),
AluInstr::write);
emit_instruction(ir);
case nir_intrinsic_load_barycentric_pixel:
case nir_intrinsic_load_barycentric_sample: {
unsigned ij = barycentric_ij_index(intr);
- vf.inject_value(intr->dest.ssa, 0, m_interpolator[ij].i);
- vf.inject_value(intr->dest.ssa, 1, m_interpolator[ij].j);
+ vf.inject_value(intr->def, 0, m_interpolator[ij].i);
+ vf.inject_value(intr->def, 1, m_interpolator[ij].j);
return true;
}
case nir_intrinsic_load_barycentric_at_offset:
ASSERTED auto param = nir_src_as_const_value(intr->src[1]);
assert(param && "Indirect PS inputs not (yet) supported");
- int dest_num_comp = intr->dest.ssa.num_components;
+ int dest_num_comp = intr->def.num_components;
int start_comp = nir_intrinsic_component(intr);
bool need_temp = start_comp > 0;
- auto dst = need_temp ? vf.temp_vec4(pin_chan) : vf.dest_vec4(intr->dest.ssa, pin_chan);
+ auto dst = need_temp ? vf.temp_vec4(pin_chan) : vf.dest_vec4(intr->def, pin_chan);
InterpolateParams params;
if (need_temp) {
AluInstr *ir = nullptr;
- for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
- auto real_dst = vf.dest(intr->dest.ssa, i, pin_chan);
+ for (unsigned i = 0; i < intr->def.num_components; ++i) {
+ auto real_dst = vf.dest(intr->def, i, pin_chan);
ir = new AluInstr(op1_mov, real_dst, dst[i + start_comp], AluInstr::write);
emit_instruction(ir);
}
op3_muladd, tmp1, grad[1], slope[2], interpolator.i, {alu_write, alu_last_instr}));
emit_instruction(new AluInstr(op3_muladd,
- vf.dest(instr->dest.ssa, 0, pin_none),
+ vf.dest(instr->def, 0, pin_none),
grad[3],
slope[3],
tmp1,
{alu_write}));
emit_instruction(new AluInstr(op3_muladd,
- vf.dest(instr->dest.ssa, 1, pin_none),
+ vf.dest(instr->def, 1, pin_none),
grad[2],
slope[3],
tmp0,
emit_instruction(new AluInstr(
op3_muladd, tmp1, help[1], ofs_x, interpolator.i, {alu_write, alu_last_instr}));
emit_instruction(new AluInstr(
- op3_muladd, vf.dest(instr->dest.ssa, 0, pin_none), help[3], ofs_y, tmp1, {alu_write}));
+ op3_muladd, vf.dest(instr->def, 0, pin_none), help[3], ofs_y, tmp1, {alu_write}));
emit_instruction(new AluInstr(op3_muladd,
- vf.dest(instr->dest.ssa, 1, pin_none),
+ vf.dest(instr->def, 1, pin_none),
help[2],
ofs_y,
tmp0,
case nir_intrinsic_end_primitive:
return emit_vertex(intr, true);
case nir_intrinsic_load_primitive_id:
- return emit_simple_mov(intr->dest.ssa, 0, m_primitive_id);
+ return emit_simple_mov(intr->def, 0, m_primitive_id);
case nir_intrinsic_load_invocation_id:
- return emit_simple_mov(intr->dest.ssa, 0, m_invocation_id);
+ return emit_simple_mov(intr->def, 0, m_invocation_id);
case nir_intrinsic_load_per_vertex_input:
return emit_load_per_vertex_input(intr);
default:;
bool
GeometryShader::emit_load_per_vertex_input(nir_intrinsic_instr *instr)
{
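   /* Fetch the per-vertex input as a vec4; channels left at 7 in the swizzle
    * are unused. */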
- auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
+ auto dest = value_factory().dest_vec4(instr->def, pin_group);
RegisterVec4::Swizzle dest_swz{7, 7, 7, 7};
- for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < instr->def.num_components; ++i) {
dest_swz[i] = i + nir_intrinsic_component(instr);
}
{
switch (instr->intrinsic) {
case nir_intrinsic_load_tcs_rel_patch_id_r600:
- return emit_simple_mov(instr->dest.ssa, 0, m_rel_patch_id);
+ return emit_simple_mov(instr->def, 0, m_rel_patch_id);
case nir_intrinsic_load_invocation_id:
- return emit_simple_mov(instr->dest.ssa, 0, m_invocation_id);
+ return emit_simple_mov(instr->def, 0, m_invocation_id);
case nir_intrinsic_load_primitive_id:
- return emit_simple_mov(instr->dest.ssa, 0, m_primitive_id);
+ return emit_simple_mov(instr->def, 0, m_primitive_id);
case nir_intrinsic_load_tcs_tess_factor_base_r600:
- return emit_simple_mov(instr->dest.ssa, 0, m_tess_factor_base);
+ return emit_simple_mov(instr->def, 0, m_tess_factor_base);
case nir_intrinsic_store_tf_r600:
return store_tess_factor(instr);
default:
{
switch (intr->intrinsic) {
case nir_intrinsic_load_tess_coord_xy:
- return emit_simple_mov(intr->dest.ssa, 0, m_tess_coord[0], pin_none) &&
- emit_simple_mov(intr->dest.ssa, 1, m_tess_coord[1], pin_none);
+ return emit_simple_mov(intr->def, 0, m_tess_coord[0], pin_none) &&
+ emit_simple_mov(intr->def, 1, m_tess_coord[1], pin_none);
case nir_intrinsic_load_primitive_id:
- return emit_simple_mov(intr->dest.ssa, 0, m_primitive_id);
+ return emit_simple_mov(intr->def, 0, m_primitive_id);
case nir_intrinsic_load_tcs_rel_patch_id_r600:
- return emit_simple_mov(intr->dest.ssa, 0, m_rel_patch_id);
+ return emit_simple_mov(intr->def, 0, m_rel_patch_id);
case nir_intrinsic_store_output:
return m_export_processor->store_output(*intr);
default:
AluInstr *ir = nullptr;
if (location < VERT_ATTRIB_MAX) {
- for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < intr->def.num_components; ++i) {
auto src = vf.allocate_pinned_register(driver_location + 1, i);
src->set_flag(Register::ssa);
- vf.inject_value(intr->dest.ssa, i, src);
+ vf.inject_value(intr->def, i, src);
}
if (ir)
ir->set_alu_flag(alu_last_instr);
{
switch (intr->intrinsic) {
case nir_intrinsic_load_vertex_id:
- return emit_simple_mov(intr->dest.ssa, 0, m_vertex_id);
+ return emit_simple_mov(intr->def, 0, m_vertex_id);
case nir_intrinsic_load_instance_id:
- return emit_simple_mov(intr->dest.ssa, 0, m_instance_id);
+ return emit_simple_mov(intr->def, 0, m_instance_id);
case nir_intrinsic_load_primitive_id:
- return emit_simple_mov(intr->dest.ssa, 0, primitive_id());
+ return emit_simple_mov(intr->def, 0, primitive_id());
case nir_intrinsic_load_tcs_rel_patch_id_r600:
- return emit_simple_mov(intr->dest.ssa, 0, m_rel_vertex_id);
+ return emit_simple_mov(intr->def, 0, m_rel_vertex_id);
default:
return false;
}
if (num_elms > 0 || num_comp > 1 || bit_size > 32) {
array_entry ae = {
- intr->dest.ssa.index,
+ intr->def.index,
num_elms ? num_elms : 1,
bit_size / 32 * num_comp};
arrays.push(ae);
} else {
- non_array.push_back(intr->dest.ssa.index);
+ non_array.push_back(intr->def.index);
}
}
case nir_intrinsic_load_tess_level_outer_default:
case nir_intrinsic_load_tess_level_inner_default: {
nir_def *buf = si_nir_load_internal_binding(b, args, SI_HS_CONST_DEFAULT_TESS_LEVELS, 4);
- unsigned num_components = intrin->dest.ssa.num_components;
+ unsigned num_components = intrin->def.num_components;
unsigned offset =
intrin->intrinsic == nir_intrinsic_load_tess_level_inner_default ? 16 : 0;
replacement = nir_load_ubo(b, num_components, 32, buf, nir_imm_int(b, offset),
}
if (replacement)
- nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+ nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(instr);
nir_instr_free(instr);
nir_def *desc = load_ssbo_desc(b, &intrin->src[0], s);
nir_def *size = nir_channel(b, desc, 2);
- nir_def_rewrite_uses(&intrin->dest.ssa, size);
+ nir_def_rewrite_uses(&intrin->def, size);
nir_instr_remove(&intrin->instr);
break;
}
nir_def *desc = load_deref_image_desc(b, deref, desc_type, is_load, s);
if (intrin->intrinsic == nir_intrinsic_image_deref_descriptor_amd) {
- nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+ nir_def_rewrite_uses(&intrin->def, desc);
nir_instr_remove(&intrin->instr);
} else {
nir_intrinsic_set_image_dim(intrin, glsl_get_sampler_dim(deref->type));
nir_def *desc = load_bindless_image_desc(b, index, desc_type, is_load, s);
if (intrin->intrinsic == nir_intrinsic_bindless_image_descriptor_amd) {
- nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+ nir_def_rewrite_uses(&intrin->def, desc);
nir_instr_remove(&intrin->instr);
} else {
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0], nir_src_for_ssa(desc));
image = load_deref_sampler_desc(b, texture_deref, desc_type, s, true);
else
image = load_bindless_sampler_desc(b, texture_handle, desc_type, s);
- nir_def_rewrite_uses(&tex->dest.ssa, image);
+ nir_def_rewrite_uses(&tex->def, image);
nir_instr_remove(&tex->instr);
return true;
}
sampler = load_deref_sampler_desc(b, sampler_deref, AC_DESC_SAMPLER, s, true);
else
sampler = load_bindless_sampler_desc(b, sampler_handle, AC_DESC_SAMPLER, s);
- nir_def_rewrite_uses(&tex->dest.ssa, sampler);
+ nir_def_rewrite_uses(&tex->def, sampler);
nir_instr_remove(&tex->instr);
return true;
}
unsigned input_index = nir_intrinsic_base(intrin);
unsigned component = nir_intrinsic_component(intrin);
- unsigned num_components = intrin->dest.ssa.num_components;
+ unsigned num_components = intrin->def.num_components;
nir_def *comp[4];
if (s->shader->selector->info.base.vs.blit_sgprs_amd)
load_vs_input_from_blit_sgpr(b, input_index, s, comp);
else
- load_vs_input_from_vertex_buffer(b, input_index, s, intrin->dest.ssa.bit_size, comp);
+ load_vs_input_from_vertex_buffer(b, input_index, s, intrin->def.bit_size, comp);
nir_def *replacement = nir_vec(b, &comp[component], num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+ nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(instr);
nir_instr_free(instr);
b->cursor = nir_instr_remove(&tex->instr);
nir_def *imm = nir_imm_vec4(b, p->value[0], p->value[1], p->value[2], p->value[3]);
- nir_def_rewrite_uses(&tex->dest.ssa, imm);
+ nir_def_rewrite_uses(&tex->def, imm);
return true;
}
unsigned index = intrin->intrinsic == nir_intrinsic_load_color0 ? 0 : 1;
assert(colors[index]);
- nir_def_rewrite_uses(&intrin->dest.ssa, colors[index]);
+ nir_def_rewrite_uses(&intrin->def, colors[index]);
nir_instr_remove(&intrin->instr);
return true;
bit_size = nir_src_bit_size(intr->src[0]);
is_output_load = false;
} else {
- mask = nir_def_components_read(&intr->dest.ssa); /* load */
- bit_size = intr->dest.ssa.bit_size;
+ mask = nir_def_components_read(&intr->def); /* load */
+ bit_size = intr->def.bit_size;
is_output_load = !is_input;
}
assert(bit_size != 64 && !(mask & ~0xf) && "64-bit IO should have been lowered");
break;
case nir_intrinsic_load_local_invocation_id:
case nir_intrinsic_load_workgroup_id: {
- unsigned mask = nir_def_components_read(&intr->dest.ssa);
+ unsigned mask = nir_def_components_read(&intr->def);
while (mask) {
unsigned i = u_bit_scan(&mask);
case nir_intrinsic_load_color0:
case nir_intrinsic_load_color1: {
unsigned index = intr->intrinsic == nir_intrinsic_load_color1;
- uint8_t mask = nir_def_components_read(&intr->dest.ssa);
+ uint8_t mask = nir_def_components_read(&intr->def);
info->colors_read |= mask << (index * 4);
switch (info->color_interpolate[index]) {
info->uses_interp_at_sample = true;
break;
case nir_intrinsic_load_frag_coord:
- info->reads_frag_coord_mask |= nir_def_components_read(&intr->dest.ssa);
+ info->reads_frag_coord_mask |= nir_def_components_read(&intr->def);
break;
case nir_intrinsic_load_sample_pos:
- info->reads_sample_pos_mask |= nir_def_components_read(&intr->dest.ssa);
+ info->reads_sample_pos_mask |= nir_def_components_read(&intr->def);
break;
case nir_intrinsic_load_input:
case nir_intrinsic_load_per_vertex_input:
}
/* If dest is already divergent, divergence won't change. */
- divergence_changed |= !tex->dest.ssa.divergent &&
+ divergence_changed |= !tex->def.divergent &&
(tex->texture_non_uniform || tex->sampler_non_uniform);
}
}
static nir_def *
deref_ssa(nir_builder *b, nir_variable *var)
{
- return &nir_build_deref_var(b, var)->dest.ssa;
+ return &nir_build_deref_var(b, var)->def;
}
/* Create a NIR compute shader implementing copy_image.
/* Replace the old intrinsic with a reference to our reconstructed
* vector.
*/
- nir_def_rewrite_uses(&intr->dest.ssa, vec);
+ nir_def_rewrite_uses(&intr->def, vec);
nir_instr_remove(&intr->instr);
}
c->fs_key->point_sprite_mask)) {
assert(intr->num_components == 1);
- nir_def *result = &intr->dest.ssa;
+ nir_def *result = &intr->def;
switch (comp) {
case 0:
if (c->fs_key->point_coord_upper_left && comp == 1)
result = nir_fsub_imm(b, 1.0, result);
- if (result != &intr->dest.ssa) {
- nir_def_rewrite_uses_after(&intr->dest.ssa,
+ if (result != &intr->def) {
+ nir_def_rewrite_uses_after(&intr->def,
result,
result->parent_instr);
}
nir_intrinsic_instr *intr_comp =
nir_intrinsic_instr_create(c->s, intr->intrinsic);
intr_comp->num_components = 1;
- nir_def_init(&intr_comp->instr, &intr_comp->dest.ssa, 1,
- intr->dest.ssa.bit_size);
+ nir_def_init(&intr_comp->instr, &intr_comp->def, 1,
+ intr->def.bit_size);
/* Convert the uniform offset to bytes. If it happens
* to be a constant, constant-folding will clean up
intr_comp->src[0] =
nir_src_for_ssa(nir_ishl_imm(b, intr->src[0].ssa, 4));
- dests[i] = &intr_comp->dest.ssa;
+ dests[i] = &intr_comp->def;
nir_builder_instr_insert(b, &intr_comp->instr);
}
txf->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord,
nir_vec2(b, addr, nir_imm_int(b, 0)));
- nir_def_init(&txf->instr, &txf->dest.ssa, 4, 32);
+ nir_def_init(&txf->instr, &txf->def, 4, 32);
nir_builder_instr_insert(b, &txf->instr);
- return &txf->dest.ssa;
+ return &txf->def;
}
static bool
if (util_format_is_depth_or_stencil(format)) {
struct qreg scaled = ntq_scale_depth_texture(c, tex);
for (int i = 0; i < 4; i++)
- ntq_store_def(c, &instr->dest.ssa, i, qir_MOV(c, scaled));
+ ntq_store_def(c, &instr->def, i, qir_MOV(c, scaled));
} else {
for (int i = 0; i < 4; i++)
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
qir_UNPACK_8_F(c, tex, i));
}
}
}
for (int i = 0; i < 4; i++)
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
qir_MOV(c, depth_output));
} else {
for (int i = 0; i < 4; i++)
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
qir_UNPACK_8_F(c, tex, i));
}
}
struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
array_len * num_components);
- nir_def *nir_reg = &decl->dest.ssa;
+ nir_def *nir_reg = &decl->def;
_mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
for (int i = 0; i < array_len * num_components; i++)
qir_TLB_COLOR_READ(c);
}
}
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
qir_MOV(c, c->color_reads[sample_index]));
}
uint32_t offset = nir_intrinsic_base(instr) +
nir_src_as_uint(instr->src[0]);
int comp = nir_intrinsic_component(instr);
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
qir_MOV(c, c->inputs[offset * 4 + comp]));
}
assert(offset % 4 == 0);
/* We need dwords */
offset = offset / 4;
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
qir_uniform(c, QUNIFORM_UNIFORM,
offset));
} else {
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
indirect_uniform_load(c, instr));
}
break;
case nir_intrinsic_load_ubo:
assert(instr->num_components == 1);
- ntq_store_def(c, &instr->dest.ssa, 0, vc4_ubo_load(c, instr));
+ ntq_store_def(c, &instr->def, 0, vc4_ubo_load(c, instr));
break;
case nir_intrinsic_load_user_clip_plane:
for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) {
- ntq_store_def(c, &instr->dest.ssa, i,
+ ntq_store_def(c, &instr->def, i,
qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
nir_intrinsic_ucp_id(instr) *
4 + i));
case nir_intrinsic_load_blend_const_color_g_float:
case nir_intrinsic_load_blend_const_color_b_float:
case nir_intrinsic_load_blend_const_color_a_float:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X +
(instr->intrinsic -
nir_intrinsic_load_blend_const_color_r_float),
break;
case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA,
0));
break;
case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA,
0));
break;
case nir_intrinsic_load_sample_mask_in:
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
break;
/* The register contains 0 (front) or 1 (back), and we need to
* turn it into a NIR bool where true means front.
*/
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
qir_ADD(c,
qir_uniform_ui(c, -1),
qir_reg(QFILE_FRAG_REV_FLAG, 0)));
assert(nir_src_is_const(instr->src[0]));
int sampler = nir_src_as_int(instr->src[0]);
- ntq_store_def(c, &instr->dest.ssa, 0,
+ ntq_store_def(c, &instr->def, 0,
qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, sampler));
- ntq_store_def(c, &instr->dest.ssa, 1,
+ ntq_store_def(c, &instr->def, 1,
qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, sampler));
break;
}
static void
init_reg(struct ntv_context *ctx, nir_intrinsic_instr *decl, nir_alu_type atype)
{
- unsigned index = decl->dest.ssa.index;
+ unsigned index = decl->def.index;
unsigned num_components = nir_intrinsic_num_components(decl);
unsigned bit_size = nir_intrinsic_bit_size(decl);
result = emit_atomic(ctx, SpvOpAtomicLoad, type, ptr, 0, 0);
else
result = spirv_builder_emit_load(&ctx->builder, type, ptr);
- store_def(ctx, &intr->dest.ssa, result, atype);
+ store_def(ctx, &intr->def, result, atype);
}
static void
static void
emit_load_shared(struct ntv_context *ctx, nir_intrinsic_instr *intr)
{
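   /* Shared memory is read scalar-by-scalar and the pieces are assembled
    * into the destination vector. */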
- SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
- unsigned num_components = intr->dest.ssa.num_components;
- unsigned bit_size = intr->dest.ssa.bit_size;
+ SpvId dest_type = get_def_type(ctx, &intr->def, nir_type_uint);
+ unsigned num_components = intr->def.num_components;
+ unsigned bit_size = intr->def.bit_size;
SpvId uint_type = get_uvec_type(ctx, bit_size, 1);
SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder,
SpvStorageClassWorkgroup,
result = spirv_builder_emit_composite_construct(&ctx->builder, dest_type, constituents, num_components);
else
result = constituents[0];
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
static void
emit_load_scratch(struct ntv_context *ctx, nir_intrinsic_instr *intr)
{
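   /* Scratch loads mirror the shared-memory path, but use the Private
    * storage class. */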
- SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
- unsigned num_components = intr->dest.ssa.num_components;
- unsigned bit_size = intr->dest.ssa.bit_size;
+ SpvId dest_type = get_def_type(ctx, &intr->def, nir_type_uint);
+ unsigned num_components = intr->def.num_components;
+ unsigned bit_size = intr->def.bit_size;
SpvId uint_type = get_uvec_type(ctx, bit_size, 1);
SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder,
SpvStorageClassPrivate,
result = spirv_builder_emit_composite_construct(&ctx->builder, dest_type, constituents, num_components);
else
result = constituents[0];
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
SpvId load_type = get_uvec_type(ctx, 32, 1);
/* number of components being loaded */
- unsigned num_components = intr->dest.ssa.num_components;
+ unsigned num_components = intr->def.num_components;
SpvId constituents[NIR_MAX_VEC_COMPONENTS * 2];
SpvId result;
/* destination type for the load */
- SpvId type = get_def_uvec_type(ctx, &intr->dest.ssa);
+ SpvId type = get_def_uvec_type(ctx, &intr->def);
SpvId one = emit_uint_const(ctx, 32, 1);
/* we grab a single array member at a time, so it's a pointer to a uint */
} else
result = constituents[0];
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
emit_load_global(struct ntv_context *ctx, nir_intrinsic_instr *intr)
{
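   /* Bitcast the address to a PhysicalStorageBuffer pointer and load through
    * it directly. */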
spirv_builder_emit_cap(&ctx->builder, SpvCapabilityPhysicalStorageBufferAddresses);
- SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
+ SpvId dest_type = get_def_type(ctx, &intr->def, nir_type_uint);
SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
SpvStorageClassPhysicalStorageBuffer,
dest_type);
nir_alu_type atype;
SpvId ptr = emit_bitcast(ctx, pointer_type, get_src(ctx, &intr->src[0], &atype));
SpvId result = spirv_builder_emit_load(&ctx->builder, dest_type, ptr);
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
nir_intrinsic_instr *decl = nir_reg_get_decl(intr->src[0].ssa);
unsigned num_components = nir_intrinsic_num_components(decl);
unsigned bit_size = nir_intrinsic_bit_size(decl);
- unsigned index = decl->dest.ssa.index;
+ unsigned index = decl->def.index;
assert(index < ctx->num_defs);
init_reg(ctx, decl, nir_type_uint);
SpvId var = ctx->defs[index];
SpvId type = get_alu_type(ctx, atype, num_components, bit_size);
SpvId result = spirv_builder_emit_load(&ctx->builder, type, var);
- store_def(ctx, &intr->dest.ssa, result, atype);
+ store_def(ctx, &intr->def, result, atype);
}
static void
SpvId param = get_src(ctx, &intr->src[0], &atype);
nir_intrinsic_instr *decl = nir_reg_get_decl(intr->src[1].ssa);
- unsigned index = decl->dest.ssa.index;
+ unsigned index = decl->def.index;
unsigned num_components = nir_intrinsic_num_components(decl);
unsigned bit_size = nir_intrinsic_bit_size(decl);
SpvId result = spirv_builder_emit_load(&ctx->builder, var_type,
ctx->front_face_var);
- assert(1 == intr->dest.ssa.num_components);
- store_def(ctx, &intr->dest.ssa, result, nir_type_bool);
+ assert(1 == intr->def.num_components);
+ store_def(ctx, &intr->def, result, nir_type_bool);
}
static void
}
SpvId result = spirv_builder_emit_load(&ctx->builder, var_type, load_var);
- assert(1 == intr->dest.ssa.num_components);
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ assert(1 == intr->def.num_components);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
switch (type) {
case nir_type_bool:
- var_type = get_bvec_type(ctx, intr->dest.ssa.num_components);
+ var_type = get_bvec_type(ctx, intr->def.num_components);
break;
case nir_type_int:
- var_type = get_ivec_type(ctx, intr->dest.ssa.bit_size,
- intr->dest.ssa.num_components);
+ var_type = get_ivec_type(ctx, intr->def.bit_size,
+ intr->def.num_components);
break;
case nir_type_uint:
- var_type = get_uvec_type(ctx, intr->dest.ssa.bit_size,
- intr->dest.ssa.num_components);
+ var_type = get_uvec_type(ctx, intr->def.bit_size,
+ intr->def.num_components);
break;
case nir_type_float:
- var_type = get_fvec_type(ctx, intr->dest.ssa.bit_size,
- intr->dest.ssa.num_components);
+ var_type = get_fvec_type(ctx, intr->def.bit_size,
+ intr->def.num_components);
break;
default:
unreachable("unknown type passed");
builtin);
SpvId result = spirv_builder_emit_load(&ctx->builder, var_type, *var_id);
- store_def(ctx, &intr->dest.ssa, result, type);
+ store_def(ctx, &intr->def, result, type);
}
static void
result = emit_builtin_unop(ctx, op, get_glsl_type(ctx, gtype), ptr);
else
result = emit_builtin_binop(ctx, op, get_glsl_type(ctx, gtype), ptr, src1);
- store_def(ctx, &intr->dest.ssa, result, ptype);
+ store_def(ctx, &intr->def, result, ptype);
}
static void
handle_atomic_op(struct ntv_context *ctx, nir_intrinsic_instr *intr, SpvId ptr, SpvId param, SpvId param2, nir_alu_type type)
{
- SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, type);
+ SpvId dest_type = get_def_type(ctx, &intr->def, type);
SpvId result = emit_atomic(ctx,
- get_atomic_op(ctx, intr->dest.ssa.bit_size, nir_intrinsic_atomic_op(intr)),
+ get_atomic_op(ctx, intr->def.bit_size, nir_intrinsic_atomic_op(intr)),
dest_type, ptr, param, param2);
assert(result);
- store_def(ctx, &intr->dest.ssa, result, type);
+ store_def(ctx, &intr->def, result, type);
}
static void
emit_shared_atomic_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr)
{
unsigned bit_size = nir_src_bit_size(intr->src[1]);
- SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
+ SpvId dest_type = get_def_type(ctx, &intr->def, nir_type_uint);
nir_alu_type atype;
nir_alu_type ret_type = nir_atomic_op_type(nir_intrinsic_atomic_op(intr)) == nir_type_float ? nir_type_float : nir_type_uint;
SpvId param = get_src(ctx, &intr->src[1], &atype);
result = emit_binop(ctx, SpvOpIAdd, uint_type, result,
emit_uint_const(ctx, 32,
glsl_get_struct_field_offset(bare_type, last_member_idx)));
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static SpvId
}
static SpvId
-extract_sparse_load(struct ntv_context *ctx, SpvId result, SpvId dest_type, nir_def *dest_ssa)
+extract_sparse_load(struct ntv_context *ctx, SpvId result, SpvId dest_type, nir_def *def)
{
/* Result Type must be an OpTypeStruct with two members.
* The first member’s type must be an integer type scalar.
SpvId resident = spirv_builder_emit_composite_extract(&ctx->builder, spirv_builder_type_uint(&ctx->builder, 32), result, &idx, 1);
idx = 1;
/* normal vec4 return */
- if (dest_ssa->num_components == 4)
+ if (def->num_components == 4)
result = spirv_builder_emit_composite_extract(&ctx->builder, dest_type, result, &idx, 1);
else {
/* shadow */
- assert(dest_ssa->num_components == 1);
- SpvId type = spirv_builder_type_float(&ctx->builder, dest_ssa->bit_size);
+ assert(def->num_components == 1);
+ SpvId type = spirv_builder_type_float(&ctx->builder, def->bit_size);
SpvId val[2];
/* pad to 2 components: the upcoming is_sparse_texels_resident instr will always use the
* separate residency value, but the shader still expects this return to be a vec2,
* so give it a vec2
*/
val[0] = spirv_builder_emit_composite_extract(&ctx->builder, type, result, &idx, 1);
- val[1] = emit_float_const(ctx, dest_ssa->bit_size, 0);
- result = spirv_builder_emit_composite_construct(&ctx->builder, get_fvec_type(ctx, dest_ssa->bit_size, 2), val, 2);
+ val[1] = emit_float_const(ctx, def->bit_size, 0);
+ result = spirv_builder_emit_composite_construct(&ctx->builder, get_fvec_type(ctx, def->bit_size, 2), val, 2);
}
assert(resident != 0);
- assert(dest_ssa->index < ctx->num_defs);
- ctx->resident_defs[dest_ssa->index] = resident;
+ assert(def->index < ctx->num_defs);
+ ctx->resident_defs[def->index] = resident;
return result;
}
glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_SUBPASS_MS;
SpvId sample = use_sample ? get_src(ctx, &intr->src[2], &atype) : 0;
SpvId dest_type = spirv_builder_type_vector(&ctx->builder, base_type,
- intr->dest.ssa.num_components);
+ intr->def.num_components);
SpvId result = spirv_builder_emit_image_read(&ctx->builder,
dest_type,
img, coord, 0, sample, 0, sparse);
if (sparse)
- result = extract_sparse_load(ctx, result, dest_type, &intr->dest.ssa);
+ result = extract_sparse_load(ctx, result, dest_type, &intr->def);
if (!sparse && mediump) {
spirv_builder_emit_decoration(&ctx->builder, result,
SpvDecorationRelaxedPrecision);
}
- store_def(ctx, &intr->dest.ssa, result, nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(type)));
+ store_def(ctx, &intr->def, result, nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(type)));
}
static void
spirv_builder_emit_cap(&ctx->builder, SpvCapabilityImageQuery);
SpvId result = spirv_builder_emit_image_query_size(&ctx->builder, get_uvec_type(ctx, 32, num_components), img, 0);
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
SpvId img = spirv_builder_emit_load(&ctx->builder, img_type, img_var);
spirv_builder_emit_cap(&ctx->builder, SpvCapabilityImageQuery);
- SpvId result = spirv_builder_emit_unop(&ctx->builder, SpvOpImageQuerySamples, get_def_type(ctx, &intr->dest.ssa, nir_type_uint), img);
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ SpvId result = spirv_builder_emit_unop(&ctx->builder, SpvOpImageQuerySamples, get_def_type(ctx, &intr->def, nir_type_uint), img);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
*/
nir_alu_type ntype = nir_get_nir_type_for_glsl_base_type(glsl_type);
if (ptype != ntype) {
- SpvId cast_type = get_def_type(ctx, &intr->dest.ssa, ntype);
+ SpvId cast_type = get_def_type(ctx, &intr->def, ntype);
param = emit_bitcast(ctx, cast_type, param);
}
if (intr->intrinsic == nir_intrinsic_image_deref_atomic_swap) {
param2 = get_src(ctx, &intr->src[4], &ptype);
if (ptype != ntype) {
- SpvId cast_type = get_def_type(ctx, &intr->dest.ssa, ntype);
+ SpvId cast_type = get_def_type(ctx, &intr->def, ntype);
param2 = emit_bitcast(ctx, cast_type, param2);
}
}
{
spirv_builder_emit_cap(&ctx->builder, SpvCapabilitySubgroupBallotKHR);
spirv_builder_emit_extension(&ctx->builder, "SPV_KHR_shader_ballot");
- SpvId type = get_def_uvec_type(ctx, &intr->dest.ssa);
+ SpvId type = get_def_uvec_type(ctx, &intr->def);
nir_alu_type atype;
SpvId result = emit_unop(ctx, SpvOpSubgroupBallotKHR, type, get_src(ctx, &intr->src[0], &atype));
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
spirv_builder_emit_extension(&ctx->builder, "SPV_KHR_shader_ballot");
nir_alu_type atype;
SpvId src = get_src(ctx, &intr->src[0], &atype);
- SpvId type = get_def_type(ctx, &intr->dest.ssa, atype);
+ SpvId type = get_def_type(ctx, &intr->def, atype);
SpvId result = emit_unop(ctx, SpvOpSubgroupFirstInvocationKHR, type, src);
- store_def(ctx, &intr->dest.ssa, result, atype);
+ store_def(ctx, &intr->def, result, atype);
}
static void
spirv_builder_emit_extension(&ctx->builder, "SPV_KHR_shader_ballot");
nir_alu_type atype, itype;
SpvId src = get_src(ctx, &intr->src[0], &atype);
- SpvId type = get_def_type(ctx, &intr->dest.ssa, atype);
+ SpvId type = get_def_type(ctx, &intr->def, atype);
SpvId result = emit_binop(ctx, SpvOpSubgroupReadInvocationKHR, type,
src,
get_src(ctx, &intr->src[1], &itype));
- store_def(ctx, &intr->dest.ssa, result, atype);
+ store_def(ctx, &intr->def, result, atype);
}
static void
spirv_builder_emit_extension(&ctx->builder, "SPV_KHR_shader_clock");
SpvScope scope = get_scope(nir_intrinsic_memory_scope(intr));
- SpvId type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
+ SpvId type = get_def_type(ctx, &intr->def, nir_type_uint);
SpvId result = spirv_builder_emit_unop_const(&ctx->builder, SpvOpReadClockKHR, type, scope);
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
{
spirv_builder_emit_cap(&ctx->builder, SpvCapabilitySparseResidency);
- SpvId type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
+ SpvId type = get_def_type(ctx, &intr->def, nir_type_uint);
/* this will always be stored with the ssa index of the parent instr */
nir_def *ssa = intr->src[0].ssa;
SpvId resident = ctx->resident_defs[index];
SpvId result = spirv_builder_emit_unop(&ctx->builder, SpvOpImageSparseTexelsResident, type, resident);
- store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
+ store_def(ctx, &intr->def, result, nir_type_uint);
}
static void
spirv_builder_emit_cap(&ctx->builder, SpvCapabilityGroupNonUniformVote);
nir_alu_type atype;
SpvId result = spirv_builder_emit_vote(&ctx->builder, op, get_src(ctx, &intr->src[0], &atype));
- store_def_raw(ctx, &intr->dest.ssa, result, nir_type_bool);
+ store_def_raw(ctx, &intr->def, result, nir_type_bool);
}
static void
spirv_builder_emit_extension(&ctx->builder,
"SPV_EXT_demote_to_helper_invocation");
SpvId result = spirv_is_helper_invocation(&ctx->builder);
- store_def(ctx, &intr->dest.ssa, result, nir_type_bool);
+ store_def(ctx, &intr->def, result, nir_type_bool);
}
static void
case nir_intrinsic_load_workgroup_size:
assert(ctx->local_group_size_var);
- store_def(ctx, &intr->dest.ssa, ctx->local_group_size_var, nir_type_uint);
+ store_def(ctx, &intr->def, ctx->local_group_size_var, nir_type_uint);
break;
case nir_intrinsic_load_shared:
}
if (tex->is_sparse)
- tex->dest.ssa.num_components--;
- SpvId dest_type = get_def_type(ctx, &tex->dest.ssa, tex->dest_type);
+ tex->def.num_components--;
+ SpvId dest_type = get_def_type(ctx, &tex->def, tex->dest_type);
if (nir_tex_instr_is_query(tex))
spirv_builder_emit_cap(&ctx->builder, SpvCapabilityImageQuery);
SpvId result = spirv_builder_emit_image_query_size(&ctx->builder,
dest_type, image,
lod);
- store_def(ctx, &tex->dest.ssa, result, tex->dest_type);
+ store_def(ctx, &tex->def, result, tex->dest_type);
return;
}
if (tex->op == nir_texop_query_levels) {
spirv_builder_emit_image(&ctx->builder, image_type, load);
SpvId result = spirv_builder_emit_image_query_levels(&ctx->builder,
dest_type, image);
- store_def(ctx, &tex->dest.ssa, result, tex->dest_type);
+ store_def(ctx, &tex->def, result, tex->dest_type);
return;
}
if (tex->op == nir_texop_texture_samples) {
spirv_builder_emit_image(&ctx->builder, image_type, load);
SpvId result = spirv_builder_emit_unop(&ctx->builder, SpvOpImageQuerySamples,
dest_type, image);
- store_def(ctx, &tex->dest.ssa, result, tex->dest_type);
+ store_def(ctx, &tex->def, result, tex->dest_type);
return;
}
SpvId result = spirv_builder_emit_image_query_lod(&ctx->builder,
dest_type, load,
coord);
- store_def(ctx, &tex->dest.ssa, result, tex->dest_type);
+ store_def(ctx, &tex->def, result, tex->dest_type);
return;
}
SpvId actual_dest_type;
- unsigned num_components = tex->dest.ssa.num_components;
+ unsigned num_components = tex->def.num_components;
switch (nir_alu_type_get_base_type(tex->dest_type)) {
case nir_type_int:
actual_dest_type = get_ivec_type(ctx, 32, num_components);
}
if (tex->is_sparse)
- result = extract_sparse_load(ctx, result, actual_dest_type, &tex->dest.ssa);
+ result = extract_sparse_load(ctx, result, actual_dest_type, &tex->def);
- if (tex->dest.ssa.bit_size != 32) {
+ if (tex->def.bit_size != 32) {
/* convert FP32 to FP16 */
result = emit_unop(ctx, SpvOpFConvert, dest_type, result);
}
if (tex->is_sparse && tex->is_shadow)
- tex->dest.ssa.num_components++;
- store_def(ctx, &tex->dest.ssa, result, tex->dest_type);
+ tex->def.num_components++;
+ store_def(ctx, &tex->def, result, tex->dest_type);
if (tex->is_sparse && !tex->is_shadow)
- tex->dest.ssa.num_components++;
+ tex->def.num_components++;
}
static void
struct hash_entry *he = _mesa_hash_table_search(ctx->vars, deref->var);
assert(he);
SpvId result = (SpvId)(intptr_t)he->data;
- store_def_raw(ctx, &deref->dest.ssa, result, get_nir_alu_type(deref->type));
+ store_def_raw(ctx, &deref->def, result, get_nir_alu_type(deref->type));
}
static void
base,
&index, 1);
/* uint is a bit of a lie here, it's really just an opaque type */
- store_def(ctx, &deref->dest.ssa, result, get_nir_alu_type(deref->type));
+ store_def(ctx, &deref->def, result, get_nir_alu_type(deref->type));
}
static void
get_src(ctx, &deref->parent, &atype),
&index, 1);
/* uint is a bit of a lie here, it's really just an opaque type */
- store_def(ctx, &deref->dest.ssa, result, get_nir_alu_type(deref->type));
+ store_def(ctx, &deref->def, result, get_nir_alu_type(deref->type));
}
static void
def[3] = nir_vector_extract(b, load2, nir_imm_int(b, 1));
nir_def *new_vec = nir_vec(b, def, total_num_components);
/* use the assembled dvec3/4 for all other uses of the load */
- nir_def_rewrite_uses_after(&intr->dest.ssa, new_vec,
+ nir_def_rewrite_uses_after(&intr->def, new_vec,
new_vec->parent_instr);
/* remove the original instr and its deref chain */
nir_def *casted[2];
for (unsigned i = 0; i < num_components; i++)
casted[i] = nir_pack_64_2x32(b, nir_channels(b, load, BITFIELD_RANGE(i * 2, 2)));
- nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, casted, num_components));
+ nir_def_rewrite_uses(&intr->def, nir_vec(b, casted, num_components));
/* remove the original instr and its deref chain */
nir_instr *parent = intr->src[0].ssa->parent_instr;
nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant_zink);
load->src[0] = nir_src_for_ssa(nir_imm_int(b, ZINK_GFX_PUSHCONST_DRAW_MODE_IS_INDEXED));
load->num_components = 1;
- nir_def_init(&load->instr, &load->dest.ssa, 1, 32);
+ nir_def_init(&load->instr, &load->def, 1, 32);
nir_builder_instr_insert(b, &load->instr);
nir_def *composite = nir_build_alu(b, nir_op_bcsel,
- nir_build_alu(b, nir_op_ieq, &load->dest.ssa, nir_imm_int(b, 1), NULL, NULL),
- &instr->dest.ssa,
+ nir_build_alu(b, nir_op_ieq, &load->def, nir_imm_int(b, 1), NULL, NULL),
+ &instr->def,
nir_imm_int(b, 0),
NULL);
- nir_def_rewrite_uses_after(&instr->dest.ssa, composite,
+ nir_def_rewrite_uses_after(&instr->def, composite,
composite->parent_instr);
return true;
}
nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant_zink);
load->src[0] = nir_src_for_ssa(nir_imm_int(b, ZINK_GFX_PUSHCONST_DRAW_ID));
load->num_components = 1;
- nir_def_init(&load->instr, &load->dest.ssa, 1, 32);
+ nir_def_init(&load->instr, &load->def, 1, 32);
nir_builder_instr_insert(b, &load->instr);
- nir_def_rewrite_uses(&instr->dest.ssa, &load->dest.ssa);
+ nir_def_rewrite_uses(&instr->def, &load->def);
return true;
}
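When an intrinsic is assembled by hand, as in the two push-constant loads above, the only change is which member nir_def_init() initializes. A hedged sketch of the recurring create/init/insert idiom; the wrapper itself is hypothetical:

   /* Build a def-producing scalar intrinsic at the cursor and return
    * its embedded def (formerly &in->dest.ssa). */
   static nir_def *
   build_scalar_intrinsic(nir_builder *b, nir_intrinsic_op op, nir_def *src0)
   {
      nir_intrinsic_instr *in = nir_intrinsic_instr_create(b->shader, op);
      in->src[0] = nir_src_for_ssa(src0);
      in->num_components = 1;
      nir_def_init(&in->instr, &in->def, 1, 32);
      nir_builder_instr_insert(b, &in->instr);
      return &in->def;
   }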
nir_def *stipple_pos =
nir_interp_deref_at_sample(&b, 1, 32,
- &nir_build_deref_var(&b, stipple)->dest.ssa, index);
+ &nir_build_deref_var(&b, stipple)->def, index);
stipple_pos = nir_fmod(&b, nir_fdiv(&b, stipple_pos, factor),
nir_imm_float(&b, 16.0));
stipple_pos = nir_f2i32(&b, stipple_pos);
nir_imm_int(b, inlined_uniform_offset),
.align_mul = 4, .align_offset = 0,
.range_base = 0, .range = ~0);
- nir_def_rewrite_uses(&intrin->dest.ssa, new_dest_def);
+ nir_def_rewrite_uses(&intrin->def, new_dest_def);
nir_instr_remove(instr);
return true;
}
switch (intr->intrinsic) {
case nir_intrinsic_store_ssbo:
- var = bo->ssbo[intr->dest.ssa.bit_size >> 4];
+ var = bo->ssbo[intr->def.bit_size >> 4];
offset = intr->src[2].ssa;
is_load = false;
break;
case nir_intrinsic_load_ssbo:
- var = bo->ssbo[intr->dest.ssa.bit_size >> 4];
+ var = bo->ssbo[intr->def.bit_size >> 4];
offset = intr->src[1].ssa;
break;
case nir_intrinsic_load_ubo:
if (nir_src_is_const(intr->src[0]) && nir_src_as_const_value(intr->src[0])->u32 == 0)
- var = bo->uniforms[intr->dest.ssa.bit_size >> 4];
+ var = bo->uniforms[intr->def.bit_size >> 4];
else
- var = bo->ubo[intr->dest.ssa.bit_size >> 4];
+ var = bo->ubo[intr->def.bit_size >> 4];
offset = intr->src[1].ssa;
break;
default:
if (offset_bytes + i >= size) {
rewrites++;
if (is_load)
- result[i] = nir_imm_zero(b, 1, intr->dest.ssa.bit_size);
+ result[i] = nir_imm_zero(b, 1, intr->def.bit_size);
}
}
assert(rewrites == intr->num_components);
if (is_load) {
nir_def *load = nir_vec(b, result, intr->num_components);
- nir_def_rewrite_uses(&intr->dest.ssa, load);
+ nir_def_rewrite_uses(&intr->def, load);
}
nir_instr_remove(instr);
return true;
enum glsl_sampler_dim dim = ms ? GLSL_SAMPLER_DIM_SUBPASS_MS : GLSL_SAMPLER_DIM_SUBPASS;
fbfetch->type = glsl_image_type(dim, false, GLSL_TYPE_FLOAT);
nir_shader_add_variable(b->shader, fbfetch);
- nir_def *deref = &nir_build_deref_var(b, fbfetch)->dest.ssa;
+ nir_def *deref = &nir_build_deref_var(b, fbfetch)->def;
nir_def *sample = ms ? nir_load_sample_id(b) : nir_undef(b, 1, 32);
nir_def *load = nir_image_deref_load(b, 4, 32, deref, nir_imm_vec4(b, 0, 0, 0, 1), sample, nir_imm_int(b, 0));
- nir_def_rewrite_uses(&intr->dest.ssa, load);
+ nir_def_rewrite_uses(&intr->def, load);
return true;
}
levels->src[!!(offset_idx >= 0)].src_type = nir_tex_src_texture_handle;
nir_src_copy(&levels->src[!!(offset_idx >= 0)].src, &txf->src[handle_idx].src, &levels->instr);
}
- nir_def_init(&levels->instr, &levels->dest.ssa,
+ nir_def_init(&levels->instr, &levels->def,
nir_tex_instr_dest_size(levels), 32);
nir_builder_instr_insert(b, &levels->instr);
- nir_if *lod_oob_if = nir_push_if(b, nir_ilt(b, lod, &levels->dest.ssa));
+ nir_if *lod_oob_if = nir_push_if(b, nir_ilt(b, lod, &levels->def));
nir_tex_instr *new_txf = nir_instr_as_tex(nir_instr_clone(b->shader, in));
nir_builder_instr_insert(b, &new_txf->instr);
nir_def *oob_val = nir_build_imm(b, nir_tex_instr_dest_size(txf), bit_size, oob_values);
nir_pop_if(b, lod_oob_else);
- nir_def *robust_txf = nir_if_phi(b, &new_txf->dest.ssa, oob_val);
+ nir_def *robust_txf = nir_if_phi(b, &new_txf->def, oob_val);
- nir_def_rewrite_uses(&txf->dest.ssa, robust_txf);
+ nir_def_rewrite_uses(&txf->def, robust_txf);
nir_instr_remove_v(in);
return true;
}
loads[0] = nir_channel(b, loads[0], 0);
}
nir_def *new_load = nir_vec(b, loads, num_components);
- nir_def_rewrite_uses(&intr->dest.ssa, new_load);
+ nir_def_rewrite_uses(&intr->def, new_load);
nir_instr_remove_v(instr);
return true;
}
case nir_intrinsic_ssbo_atomic:
case nir_intrinsic_ssbo_atomic_swap: {
/* convert offset to uintN_t[idx] */
- nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, intr->dest.ssa.bit_size / 8);
+ nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, intr->def.bit_size / 8);
nir_instr_rewrite_src_ssa(instr, &intr->src[1], offset);
return true;
}
bool force_2x32 = intr->intrinsic == nir_intrinsic_load_ubo &&
nir_src_is_const(intr->src[0]) &&
nir_src_as_uint(intr->src[0]) == 0 &&
- intr->dest.ssa.bit_size == 64 &&
+ intr->def.bit_size == 64 &&
nir_intrinsic_align_offset(intr) % 8 != 0;
- force_2x32 |= intr->dest.ssa.bit_size == 64 && !has_int64;
- nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 32 : intr->dest.ssa.bit_size) / 8);
+ force_2x32 |= intr->def.bit_size == 64 && !has_int64;
+ nir_def *offset = nir_udiv_imm(b, intr->src[1].ssa, (force_2x32 ? 32 : intr->def.bit_size) / 8);
nir_instr_rewrite_src_ssa(instr, &intr->src[1], offset);
/* if 64bit isn't supported, 64bit loads definitely aren't supported, so rewrite as 2x32 with cast and pray */
if (force_2x32) {
/* this is always scalarized */
- assert(intr->dest.ssa.num_components == 1);
+ assert(intr->def.num_components == 1);
/* rewrite as 2x32 */
nir_def *load[2];
for (unsigned i = 0; i < 2; i++) {
}
/* cast back to 64bit */
nir_def *casted = nir_pack_64_2x32_split(b, load[0], load[1]);
- nir_def_rewrite_uses(&intr->dest.ssa, casted);
+ nir_def_rewrite_uses(&intr->def, casted);
nir_instr_remove(instr);
}
return true;
}
case nir_intrinsic_load_shared:
b->cursor = nir_before_instr(instr);
- bool force_2x32 = intr->dest.ssa.bit_size == 64 && !has_int64;
- nir_def *offset = nir_udiv_imm(b, intr->src[0].ssa, (force_2x32 ? 32 : intr->dest.ssa.bit_size) / 8);
+ bool force_2x32 = intr->def.bit_size == 64 && !has_int64;
+ nir_def *offset = nir_udiv_imm(b, intr->src[0].ssa, (force_2x32 ? 32 : intr->def.bit_size) / 8);
nir_instr_rewrite_src_ssa(instr, &intr->src[0], offset);
/* if 64bit isn't supported, 64bit loads definitely aren't supported, so rewrite as 2x32 with cast and pray */
if (force_2x32) {
/* this is always scalarized */
- assert(intr->dest.ssa.num_components == 1);
+ assert(intr->def.num_components == 1);
/* rewrite as 2x32 */
nir_def *load[2];
for (unsigned i = 0; i < 2; i++)
load[i] = nir_load_shared(b, 1, 32, nir_iadd_imm(b, intr->src[0].ssa, i), .align_mul = 4, .align_offset = 0);
/* cast back to 64bit */
nir_def *casted = nir_pack_64_2x32_split(b, load[0], load[1]);
- nir_def_rewrite_uses(&intr->dest.ssa, casted);
+ nir_def_rewrite_uses(&intr->def, casted);
nir_instr_remove(instr);
return true;
}
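Both force_2x32 paths above use the same fallback: when 64-bit values are unavailable, load two 32-bit halves and fuse them with nir_pack_64_2x32_split. An isolated sketch of the shared-memory case, with intr and offset standing in for the hunk's locals and the load assumed scalar, as the asserts above require:

   /* offset is already divided down to 32-bit units at this point */
   nir_def *lo = nir_load_shared(b, 1, 32, offset,
                                 .align_mul = 4, .align_offset = 0);
   nir_def *hi = nir_load_shared(b, 1, 32, nir_iadd_imm(b, offset, 1),
                                 .align_mul = 4, .align_offset = 0);
   nir_def *casted = nir_pack_64_2x32_split(b, lo, hi);
   nir_def_rewrite_uses(&intr->def, casted);
   nir_instr_remove(&intr->instr);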
nir_def *offset = intr->src[1].ssa;
nir_src *src = &intr->src[0];
nir_variable *var = get_bo_var(b->shader, bo, true, src,
- intr->dest.ssa.bit_size);
+ intr->def.bit_size);
nir_deref_instr *deref_var = nir_build_deref_var(b, var);
nir_def *idx = src->ssa;
if (bo->first_ssbo)
/* generate new atomic deref ops for every component */
nir_def *result[4];
- unsigned num_components = intr->dest.ssa.num_components;
+ unsigned num_components = intr->def.num_components;
for (unsigned i = 0; i < num_components; i++) {
nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, offset);
nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(b->shader, op);
- nir_def_init(&new_instr->instr, &new_instr->dest.ssa, 1,
- intr->dest.ssa.bit_size);
+ nir_def_init(&new_instr->instr, &new_instr->def, 1,
+ intr->def.bit_size);
nir_intrinsic_set_atomic_op(new_instr, nir_intrinsic_atomic_op(intr));
- new_instr->src[0] = nir_src_for_ssa(&deref_arr->dest.ssa);
+ new_instr->src[0] = nir_src_for_ssa(&deref_arr->def);
/* deref ops have no offset src, so copy the srcs after it */
for (unsigned i = 2; i < nir_intrinsic_infos[intr->intrinsic].num_srcs; i++)
nir_src_copy(&new_instr->src[i - 1], &intr->src[i], &new_instr->instr);
nir_builder_instr_insert(b, &new_instr->instr);
- result[i] = &new_instr->dest.ssa;
+ result[i] = &new_instr->def;
offset = nir_iadd_imm(b, offset, 1);
}
nir_def *load = nir_vec(b, result, num_components);
- nir_def_rewrite_uses(&intr->dest.ssa, load);
+ nir_def_rewrite_uses(&intr->def, load);
nir_instr_remove(instr);
}
break;
case nir_intrinsic_load_ssbo:
src = &intr->src[0];
- var = get_bo_var(b->shader, bo, true, src, intr->dest.ssa.bit_size);
+ var = get_bo_var(b->shader, bo, true, src, intr->def.bit_size);
offset = intr->src[1].ssa;
break;
case nir_intrinsic_load_ubo:
src = &intr->src[0];
- var = get_bo_var(b->shader, bo, false, src, intr->dest.ssa.bit_size);
+ var = get_bo_var(b->shader, bo, false, src, intr->def.bit_size);
offset = intr->src[1].ssa;
ssbo = false;
break;
else if (ssbo && bo->first_ssbo)
idx = nir_iadd_imm(b, idx, -bo->first_ssbo);
nir_deref_instr *deref_array = nir_build_deref_array(b, deref_var,
- nir_i2iN(b, idx, deref_var->dest.ssa.bit_size));
+ nir_i2iN(b, idx, deref_var->def.bit_size));
nir_deref_instr *deref_struct = nir_build_deref_struct(b, deref_array, 0);
assert(intr->num_components <= 2);
if (is_load) {
nir_def *result[2];
for (unsigned i = 0; i < intr->num_components; i++) {
nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct,
- nir_i2iN(b, offset, deref_struct->dest.ssa.bit_size));
+ nir_i2iN(b, offset, deref_struct->def.bit_size));
result[i] = nir_load_deref(b, deref_arr);
if (intr->intrinsic == nir_intrinsic_load_ssbo)
nir_intrinsic_set_access(nir_instr_as_intrinsic(result[i]->parent_instr), nir_intrinsic_access(intr));
offset = nir_iadd_imm(b, offset, 1);
}
nir_def *load = nir_vec(b, result, intr->num_components);
- nir_def_rewrite_uses(&intr->dest.ssa, load);
+ nir_def_rewrite_uses(&intr->def, load);
} else {
nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct,
- nir_i2iN(b, offset, deref_struct->dest.ssa.bit_size));
- nir_build_store_deref(b, &deref_arr->dest.ssa, intr->src[0].ssa, BITFIELD_MASK(intr->num_components), nir_intrinsic_access(intr));
+ nir_i2iN(b, offset, deref_struct->def.bit_size));
+ nir_build_store_deref(b, &deref_arr->def, intr->src[0].ssa, BITFIELD_MASK(intr->num_components), nir_intrinsic_access(intr));
}
nir_instr_remove(instr);
return true;
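The variable-indexed rewrite above relies on derefs carrying embedded defs too: every link in a deref chain is consumed through &deref->def, and array indices are cast to the parent deref's bit size first. A compact sketch of the chain, with var, idx, and offset as stand-ins for the hunk's locals:

   nir_deref_instr *d_var    = nir_build_deref_var(b, var);
   nir_deref_instr *d_array  = nir_build_deref_array(b, d_var,
                                  nir_i2iN(b, idx, d_var->def.bit_size));
   nir_deref_instr *d_struct = nir_build_deref_struct(b, d_array, 0);
   nir_deref_instr *d_elem   = nir_build_deref_array(b, d_struct,
                                  nir_i2iN(b, offset, d_struct->def.bit_size));
   nir_def *value = nir_load_deref(b, d_elem);  /* store side uses &d_elem->def */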
if (deref_var != var)
return false;
b->cursor = nir_before_instr(instr);
- nir_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components,
- intr->dest.ssa.bit_size);
+ nir_def *zero = nir_imm_zero(b, intr->def.num_components,
+ intr->def.bit_size);
if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
switch (var->data.location) {
case VARYING_SLOT_COL0:
case VARYING_SLOT_BFC0:
case VARYING_SLOT_BFC1:
/* default color is 0,0,0,1 */
- if (intr->dest.ssa.num_components == 4)
+ if (intr->def.num_components == 4)
zero = nir_vector_insert_imm(b, zero, nir_imm_float(b, 1.0), 3);
break;
default:
break;
}
}
- nir_def_rewrite_uses(&intr->dest.ssa, zero);
+ nir_def_rewrite_uses(&intr->def, zero);
nir_instr_remove(instr);
return true;
}
if (nir_intrinsic_get_var(intr, 0) != var)
break;
if ((intr->intrinsic == nir_intrinsic_store_deref && intr->src[1].ssa->bit_size != 64) ||
- (intr->intrinsic == nir_intrinsic_load_deref && intr->dest.ssa.bit_size != 64))
+ (intr->intrinsic == nir_intrinsic_load_deref && intr->def.bit_size != 64))
break;
b.cursor = nir_before_instr(instr);
nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
}
dest = nir_vec(&b, comp, intr->num_components);
}
- nir_def_rewrite_uses_after(&intr->dest.ssa, dest, instr);
+ nir_def_rewrite_uses_after(&intr->def, dest, instr);
}
_mesa_set_add(deletes, instr);
break;
deref->modes = nir_var_shader_temp;
parent->modes = nir_var_shader_temp;
b.cursor = nir_before_instr(instr);
- nir_def *dest = &nir_build_deref_var(&b, members[deref->strct.index])->dest.ssa;
- nir_def_rewrite_uses_after(&deref->dest.ssa, dest, &deref->instr);
+ nir_def *dest = &nir_build_deref_var(&b, members[deref->strct.index])->def;
+ nir_def_rewrite_uses_after(&deref->def, dest, &deref->instr);
nir_instr_remove(&deref->instr);
func_progress = true;
break;
enum glsl_base_type ret_type = glsl_get_sampler_result_type(type);
bool is_int = glsl_base_type_is_integer(ret_type);
unsigned bit_size = glsl_base_type_get_bit_size(ret_type);
- unsigned dest_size = tex->dest.ssa.bit_size;
+ unsigned dest_size = tex->def.bit_size;
b->cursor = nir_after_instr(&tex->instr);
- unsigned num_components = tex->dest.ssa.num_components;
+ unsigned num_components = tex->def.num_components;
bool rewrite_depth = tex->is_shadow && num_components > 1 && tex->op != nir_texop_tg4 && !tex->is_sparse;
if (bit_size == dest_size && !rewrite_depth)
return NULL;
- nir_def *dest = &tex->dest.ssa;
+ nir_def *dest = &tex->def;
if (rewrite_depth && zs) {
/* If only .x is used in the NIR, then it's effectively not a legacy depth
* sample anyway and we don't want to ask for shader recompiles. This is
return NULL;
}
if (bit_size != dest_size) {
- tex->dest.ssa.bit_size = bit_size;
+ tex->def.bit_size = bit_size;
tex->dest_type = nir_get_nir_type_for_glsl_base_type(ret_type);
if (is_int) {
if (glsl_unsigned_base_type_of(ret_type) == ret_type)
- dest = nir_u2uN(b, &tex->dest.ssa, dest_size);
+ dest = nir_u2uN(b, &tex->def, dest_size);
else
- dest = nir_i2iN(b, &tex->dest.ssa, dest_size);
+ dest = nir_i2iN(b, &tex->def, dest_size);
} else {
- dest = nir_f2fN(b, &tex->dest.ssa, dest_size);
+ dest = nir_f2fN(b, &tex->def, dest_size);
}
if (rewrite_depth)
return dest;
- nir_def_rewrite_uses_after(&tex->dest.ssa, dest, dest->parent_instr);
+ nir_def_rewrite_uses_after(&tex->def, dest, dest->parent_instr);
} else if (rewrite_depth) {
return dest;
}
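rewrite_tex_dest also shows why the _after variant of the rewrite helper matters: the conversion instruction itself reads &tex->def, so an unrestricted rewrite would feed the conversion its own result. nir_def_rewrite_uses_after leaves uses at or before the given instruction alone. A sketch, assuming a texture whose def is narrowed to 16 bits and widened back for consumers:

   b->cursor = nir_after_instr(&tex->instr);
   tex->def.bit_size = 16;                       /* instr now produces fp16 */
   nir_def *wide = nir_f2fN(b, &tex->def, 32);   /* reads the narrowed def */
   /* redirect only the uses after the conversion; its own src is kept */
   nir_def_rewrite_uses_after(&tex->def, wide, wide->parent_instr);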
const struct glsl_type *type = glsl_without_array(var->type);
enum glsl_base_type ret_type = glsl_get_sampler_result_type(type);
bool is_int = glsl_base_type_is_integer(ret_type);
- unsigned num_components = tex->dest.ssa.num_components;
+ unsigned num_components = tex->def.num_components;
if (tex->is_shadow)
tex->is_new_style_shadow = true;
nir_def *dest = rewrite_tex_dest(b, tex, var, NULL);
if (!dest && !(swizzle_key->mask & BITFIELD_BIT(sampler_id)))
return false;
else if (!dest)
- dest = &tex->dest.ssa;
+ dest = &tex->def;
else
- tex->dest.ssa.num_components = 1;
+ tex->def.num_components = 1;
if (swizzle_key && (swizzle_key->mask & BITFIELD_BIT(sampler_id))) {
/* these require manual swizzles */
if (tex->op == nir_texop_tg4) {
nir_def *swizzle;
switch (swizzle_key->swizzle[sampler_id].s[tex->component]) {
case PIPE_SWIZZLE_0:
- swizzle = nir_imm_zero(b, 4, tex->dest.ssa.bit_size);
+ swizzle = nir_imm_zero(b, 4, tex->def.bit_size);
break;
case PIPE_SWIZZLE_1:
if (is_int)
- swizzle = nir_imm_intN_t(b, 4, tex->dest.ssa.bit_size);
+ swizzle = nir_imm_intN_t(b, 4, tex->def.bit_size);
else
- swizzle = nir_imm_floatN_t(b, 4, tex->dest.ssa.bit_size);
+ swizzle = nir_imm_floatN_t(b, 4, tex->def.bit_size);
break;
default:
if (!tex->component)
for (unsigned i = 0; i < ARRAY_SIZE(vec); i++) {
switch (swizzle_key->swizzle[sampler_id].s[i]) {
case PIPE_SWIZZLE_0:
- vec[i] = nir_imm_zero(b, 1, tex->dest.ssa.bit_size);
+ vec[i] = nir_imm_zero(b, 1, tex->def.bit_size);
break;
case PIPE_SWIZZLE_1:
if (is_int)
- vec[i] = nir_imm_intN_t(b, 1, tex->dest.ssa.bit_size);
+ vec[i] = nir_imm_intN_t(b, 1, tex->def.bit_size);
else
- vec[i] = nir_imm_floatN_t(b, 1, tex->dest.ssa.bit_size);
+ vec[i] = nir_imm_floatN_t(b, 1, tex->def.bit_size);
break;
default:
vec[i] = dest->num_components == 1 ? dest : nir_channel(b, dest, i);
if (intr->intrinsic != nir_intrinsic_load_point_coord)
return false;
b->cursor = nir_after_instr(instr);
- nir_def *def = nir_vec2(b, nir_channel(b, &intr->dest.ssa, 0),
- nir_fsub_imm(b, 1.0, nir_channel(b, &intr->dest.ssa, 1)));
- nir_def_rewrite_uses_after(&intr->dest.ssa, def, def->parent_instr);
+ nir_def *def = nir_vec2(b, nir_channel(b, &intr->def, 0),
+ nir_fsub_imm(b, 1.0, nir_channel(b, &intr->def, 1)));
+ nir_def_rewrite_uses_after(&intr->def, def, def->parent_instr);
return true;
}
if (intr->intrinsic != nir_intrinsic_load_instance_id)
return false;
b->cursor = nir_after_instr(instr);
- nir_def *def = nir_isub(b, &intr->dest.ssa, nir_load_base_instance(b));
- nir_def_rewrite_uses_after(&intr->dest.ssa, def, def->parent_instr);
+ nir_def *def = nir_isub(b, &intr->def, nir_load_base_instance(b));
+ nir_def_rewrite_uses_after(&intr->def, def, def->parent_instr);
return true;
}
nir_deref_instr *deref = nir_build_deref_var(b, var);
if (glsl_type_is_array(var->type))
deref = nir_build_deref_array(b, deref, nir_u2uN(b, tex->src[idx].src.ssa, 32));
- nir_instr_rewrite_src_ssa(in, &tex->src[idx].src, &deref->dest.ssa);
+ nir_instr_rewrite_src_ssa(in, &tex->src[idx].src, &deref->def);
/* bindless sampling uses the variable type directly, which means the tex instr has to exactly
* match up with it in contrast to normal sampler ops where things are a bit more flexible;
nir_deref_instr *deref = nir_build_deref_var(b, var);
if (glsl_type_is_array(var->type))
deref = nir_build_deref_array(b, deref, nir_u2uN(b, instr->src[0].ssa, 32));
- nir_instr_rewrite_src_ssa(in, &instr->src[0], &deref->dest.ssa);
+ nir_instr_rewrite_src_ssa(in, &instr->src[0], &deref->def);
return true;
}
if (instr->intrinsic == nir_intrinsic_load_deref) {
nir_def *def = nir_load_deref(b, deref);
nir_instr_rewrite_src_ssa(in, &instr->src[0], def);
- nir_def_rewrite_uses(&instr->dest.ssa, def);
+ nir_def_rewrite_uses(&instr->def, def);
} else {
nir_store_deref(b, deref, instr->src[1].ssa, nir_intrinsic_write_mask(instr));
}
}
b->cursor = nir_after_instr(instr);
unsigned needed_components = nir_tex_instr_dest_size(tex);
- unsigned num_components = tex->dest.ssa.num_components;
+ unsigned num_components = tex->def.num_components;
if (needed_components > num_components) {
- tex->dest.ssa.num_components = needed_components;
+ tex->def.num_components = needed_components;
assert(num_components < 3);
/* take either xz or just x since this is promoted to 2D from 1D */
uint32_t mask = num_components == 2 ? (1|4) : 1;
- nir_def *dst = nir_channels(b, &tex->dest.ssa, mask);
- nir_def_rewrite_uses_after(&tex->dest.ssa, dst, dst->parent_instr);
+ nir_def *dst = nir_channels(b, &tex->def, mask);
+ nir_def_rewrite_uses_after(&tex->def, dst, dst->parent_instr);
}
return true;
}
else
src1 = instr->src[1].ssa;
nir_def *def = nir_iand(b, src0, src1);
- nir_def_rewrite_uses_after(&instr->dest.ssa, def, in);
+ nir_def_rewrite_uses_after(&instr->def, def, in);
nir_instr_remove(in);
return true;
}
nir_alu_instr *alu = nir_instr_as_alu(parent);
src = alu->src[0].src.ssa;
}
- if (instr->dest.ssa.bit_size != 32) {
- if (instr->dest.ssa.bit_size == 1)
+ if (instr->def.bit_size != 32) {
+ if (instr->def.bit_size == 1)
src = nir_ieq_imm(b, src, 1);
else
- src = nir_u2uN(b, src, instr->dest.ssa.bit_size);
+ src = nir_u2uN(b, src, instr->def.bit_size);
}
- nir_def_rewrite_uses(&instr->dest.ssa, src);
+ nir_def_rewrite_uses(&instr->def, src);
nir_instr_remove(in);
}
return true;
s++;
}
- nir_def_init(&array_tex->instr, &array_tex->dest.ssa,
+ nir_def_init(&array_tex->instr, &array_tex->def,
nir_tex_instr_dest_size(array_tex),
- tex->dest.ssa.bit_size);
+ tex->def.bit_size);
nir_builder_instr_insert(b, &array_tex->instr);
- return &array_tex->dest.ssa;
+ return &array_tex->def;
}
static nir_def *
txl->src[s] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
b->cursor = nir_before_instr(&tex->instr);
- nir_def_init(&txl->instr, &txl->dest.ssa,
- tex->dest.ssa.num_components,
- tex->dest.ssa.bit_size);
+ nir_def_init(&txl->instr, &txl->def,
+ tex->def.num_components,
+ tex->def.bit_size);
nir_builder_instr_insert(b, &txl->instr);
- nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
+ nir_def_rewrite_uses(&tex->def, &txl->def);
return txl;
}
b->cursor = nir_after_instr(&tex->instr);
rewrite_cube_var_type(b, tex);
- unsigned num_components = tex->dest.ssa.num_components;
+ unsigned num_components = tex->def.num_components;
/* force max components to unbreak textureSize().xy */
- tex->dest.ssa.num_components = 3;
+ tex->def.num_components = 3;
tex->is_array = true;
- nir_def *array_dim = nir_channel(b, &tex->dest.ssa, 2);
+ nir_def *array_dim = nir_channel(b, &tex->def, 2);
nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
- nir_def *size = nir_vec3(b, nir_channel(b, &tex->dest.ssa, 0),
- nir_channel(b, &tex->dest.ssa, 1),
+ nir_def *size = nir_vec3(b, nir_channel(b, &tex->def, 0),
+ nir_channel(b, &tex->def, 1),
cube_array_dim);
return nir_trim_vector(b, size, num_components);
}
}
return nir_u2uN(b, nir_vec(b, loads, state->global_dims),
- intrinsic->dest.ssa.bit_size);
+ intrinsic->def.bit_size);
}
case nir_intrinsic_load_constant_base_ptr: {
return nir_load_var(b, state->constant_var);
nir_src_as_uint(intr->src[0]) == ubo &&
nir_src_is_const(intr->src[1]) &&
/* TODO: Can't handle other bit sizes for now. */
- intr->dest.ssa.bit_size == 32) {
- int num_components = intr->dest.ssa.num_components;
+ intr->def.bit_size == 32) {
+ int num_components = intr->def.num_components;
uint32_t offset = nir_src_as_uint(intr->src[1]);
const unsigned num_uniforms = shader->inlines.count[ubo];
const unsigned *uniform_dw_offsets = shader->inlines.uniform_offsets[ubo];
if (offset == uniform_dw_offsets[i]) {
b.cursor = nir_before_instr(&intr->instr);
nir_def *def = nir_imm_int(&b, uniform_values[i]);
- nir_def_rewrite_uses(&intr->dest.ssa, def);
+ nir_def_rewrite_uses(&intr->def, def);
nir_instr_remove(&intr->instr);
break;
}
for (unsigned i = 0; i < num_components; i++) {
if (!components[i]) {
uint32_t scalar_offset = (offset + i) * 4;
- components[i] = nir_load_ubo(&b, 1, intr->dest.ssa.bit_size,
+ components[i] = nir_load_ubo(&b, 1, intr->def.bit_size,
intr->src[0].ssa,
nir_imm_int(&b, scalar_offset));
nir_intrinsic_instr *load =
}
/* Replace the original uniform load. */
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def_rewrite_uses(&intr->def,
nir_vec(&b, components, num_components));
nir_instr_remove(&intr->instr);
}
case nir_intrinsic_load_work_dim:
assert(state->work_dim);
return nir_u2uN(b, nir_load_var(b, state->work_dim),
- intrins->dest.ssa.bit_size);
+ intrins->def.bit_size);
default:
return NULL;
}
nir_def *ubo_idx = nir_imm_int(b, 0);
nir_def *uniform_offset = nir_ssa_for_src(b, intrins->src[0], 1);
- assert(intrins->dest.ssa.bit_size >= 8);
+ assert(intrins->def.bit_size >= 8);
nir_def *load_result =
- nir_load_ubo(b, intrins->num_components, intrins->dest.ssa.bit_size,
+ nir_load_ubo(b, intrins->num_components, intrins->def.bit_size,
ubo_idx, nir_iadd_imm(b, uniform_offset, nir_intrinsic_base(intrins)));
nir_intrinsic_instr *load = nir_instr_as_intrinsic(load_result->parent_instr);
nir_imm_int(b, desc_set),
nir_imm_int(b, binding),
nir_imm_int(b, desc_type));
- nir_def_rewrite_uses(&intr->dest.ssa, def);
+ nir_def_rewrite_uses(&intr->def, def);
nir_instr_remove(&intr->instr);
}
for (uint8_t i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_def_init(&chan_intr->instr, &chan_intr->dest.ssa, 1,
- intr->dest.ssa.bit_size);
+ nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
+ intr->def.bit_size);
chan_intr->num_components = 1;
nir_intrinsic_set_access(chan_intr, nir_intrinsic_access(intr));
nir_builder_instr_insert(b, &chan_intr->instr);
- loads[i] = &chan_intr->dest.ssa;
+ loads[i] = &chan_intr->def;
}
- nir_def_rewrite_uses(&intr->dest.ssa,
+ nir_def_rewrite_uses(&intr->def,
nir_vec(b, loads, intr->num_components));
nir_instr_remove(&intr->instr);
}
{
struct rogue_fs_build_data *fs_data = &b->shader->ctx->stage_data.fs;
- unsigned load_size = intr->dest.ssa.num_components;
+ unsigned load_size = intr->def.num_components;
assert(load_size == 1); /* TODO: We can support larger load sizes. */
- rogue_reg *dst = rogue_ssa_reg(b->shader, intr->dest.ssa.index);
+ rogue_reg *dst = rogue_ssa_reg(b->shader, intr->def.index);
struct nir_io_semantics io_semantics = nir_intrinsic_io_semantics(intr);
unsigned component = nir_intrinsic_component(intr);
struct pvr_pipeline_layout *pipeline_layout =
b->shader->ctx->pipeline_layout;
- ASSERTED unsigned load_size = intr->dest.ssa.num_components;
+ ASSERTED unsigned load_size = intr->def.num_components;
assert(load_size == 1); /* TODO: We can support larger load sizes. */
- rogue_reg *dst = rogue_ssa_reg(b->shader, intr->dest.ssa.index);
+ rogue_reg *dst = rogue_ssa_reg(b->shader, intr->def.index);
struct nir_io_semantics io_semantics = nir_intrinsic_io_semantics(intr);
unsigned input = io_semantics.location - VERT_ATTRIB_GENERIC0;
rogue_ref_reg(desc_addr_offset_val_hi),
rogue_ref_io(ROGUE_IO_NONE));
- unsigned desc_addr_idx = intr->dest.ssa.index;
+ unsigned desc_addr_idx = intr->def.index;
rogue_regarray *desc_addr_64 =
rogue_ssa_vec_regarray(b->shader, 2, desc_addr_idx, 0);
instr = &rogue_LD(b,
rogue_regarray *src = rogue_ssa_vec_regarray(b->shader, 2, src_index, 0);
/*** TODO NEXT: this could be either a reg or regarray. ***/
- rogue_reg *dst = rogue_ssa_reg(b->shader, intr->dest.ssa.index);
+ rogue_reg *dst = rogue_ssa_reg(b->shader, intr->def.index);
/* TODO NEXT: src[1] should depend on the SSA vec size for burst loads */
rogue_instr *instr = &rogue_LD(b,
return false;
b->cursor = nir_instr_remove(&intrin->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_zero(b, 3, 32));
+ nir_def_rewrite_uses(&intrin->def, nir_imm_zero(b, 3, 32));
return true;
}
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, pos);
tex->coord_components = 3;
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
return tex;
}
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
static nir_def *
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
static nir_def *
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
static nir_def *
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
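Helper builders that used to end in `return &tex->dest.ssa;` now return the embedded def, as the four hunks above do. Fleshed out into a self-contained sketch (a hypothetical 2D texel fetch; the source setup is a minimal plausible one, not copied from this patch):

   static nir_def *
   build_txf_2d(nir_builder *b, nir_def *coord)
   {
      nir_tex_instr *tex = nir_tex_instr_create(b->shader, 2);
      tex->op = nir_texop_txf;
      tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
      tex->dest_type = nir_type_float32;
      tex->coord_components = 2;
      tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
      tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(b, 0));
      nir_def_init(&tex->instr, &tex->def, 4, 32);
      nir_builder_instr_insert(b, &tex->instr);
      return &tex->def;
   }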
/**
tex->texture_index = 0;
tex->sampler_index = 0;
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
static inline nir_def *
}
/* Ignore WPOS; it doesn't require interpolation. */
- if (!is_used_in_not_interp_frag_coord(&intrin->dest.ssa))
+ if (!is_used_in_not_interp_frag_coord(&intrin->def))
continue;
nir_intrinsic_op bary_op = intrin->intrinsic;
/* We don't handle indirects on locals */
assert(nir_intrinsic_base(load_reg) == 0);
assert(load_reg->intrinsic != nir_intrinsic_load_reg_indirect);
- reg = nir_ssa_values[decl_reg->dest.ssa.index];
+ reg = nir_ssa_values[decl_reg->def.index];
}
if (nir_src_bit_size(src) == 64 && devinfo->ver == 7) {
/* We don't handle indirects on locals */
assert(nir_intrinsic_base(store_reg) == 0);
assert(store_reg->intrinsic != nir_intrinsic_store_reg_indirect);
- return nir_ssa_values[decl_reg->dest.ssa.index];
+ return nir_ssa_values[decl_reg->def.index];
}
}
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
switch (instr->intrinsic) {
case nir_intrinsic_load_vertex_id:
unreachable("should be lowered by nir_lower_system_values()");
case nir_intrinsic_load_input: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
fs_reg src = fs_reg(ATTR, nir_intrinsic_base(instr) * 4, dest.type);
src = offset(src, bld, nir_intrinsic_component(instr));
src = offset(src, bld, nir_src_as_uint(instr->src[0]));
fs_reg dst;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dst = get_nir_def(instr->dest.ssa);
+ dst = get_nir_def(instr->def);
switch (instr->intrinsic) {
case nir_intrinsic_load_primitive_id:
break;
case nir_intrinsic_load_per_vertex_input: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
fs_reg indirect_offset = get_indirect_offset(instr);
unsigned imm_offset = nir_intrinsic_base(instr);
fs_inst *inst;
case nir_intrinsic_load_output:
case nir_intrinsic_load_per_vertex_output: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
fs_reg indirect_offset = get_indirect_offset(instr);
unsigned imm_offset = nir_intrinsic_base(instr);
unsigned first_component = nir_intrinsic_component(instr);
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
switch (instr->intrinsic) {
case nir_intrinsic_load_primitive_id:
case nir_intrinsic_load_input:
case nir_intrinsic_load_per_vertex_input: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
fs_reg indirect_offset = get_indirect_offset(instr);
unsigned imm_offset = nir_intrinsic_base(instr);
unsigned first_component = nir_intrinsic_component(instr);
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
switch (instr->intrinsic) {
case nir_intrinsic_load_primitive_id:
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
switch (instr->intrinsic) {
case nir_intrinsic_load_front_face:
/* In Fragment Shaders load_input is used either for flat inputs or
* per-primitive inputs.
*/
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
unsigned base = nir_intrinsic_base(instr);
unsigned comp = nir_intrinsic_component(instr);
unsigned num_components = instr->num_components;
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
switch (instr->intrinsic) {
case nir_intrinsic_barrier:
}
case nir_intrinsic_load_num_workgroups: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
cs_prog_data->uses_num_work_groups = true;
case nir_intrinsic_load_shared: {
assert(devinfo->ver >= 7);
- const unsigned bit_size = instr->dest.ssa.bit_size;
+ const unsigned bit_size = instr->def.bit_size;
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GFX7_BTI_SLM);
assert(nir_intrinsic_align(instr) > 0);
if (bit_size == 32 &&
nir_intrinsic_align(instr) >= 4) {
- assert(instr->dest.ssa.num_components <= 4);
+ assert(instr->def.num_components <= 4);
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
fs_inst *inst =
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
inst->size_written = instr->num_components * dispatch_width * 4;
} else {
- assert(instr->dest.ssa.num_components == 1);
+ assert(instr->def.num_components == 1);
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
switch (instr->intrinsic) {
case nir_intrinsic_load_btd_global_arg_addr_intel:
BRW_REGISTER_TYPE_F);
/* Re-use the destination's slot in the table for the register */
- nir_ssa_values[instr->dest.ssa.index] =
+ nir_ssa_values[instr->def.index] =
bld.vgrf(reg_type, num_components);
return;
}
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
switch (instr->intrinsic) {
case nir_intrinsic_resource_intel:
- nir_ssa_bind_infos[instr->dest.ssa.index].valid = true;
- nir_ssa_bind_infos[instr->dest.ssa.index].bindless =
+ nir_ssa_bind_infos[instr->def.index].valid = true;
+ nir_ssa_bind_infos[instr->def.index].bindless =
(nir_intrinsic_resource_access_intel(instr) &
nir_resource_intel_bindless) != 0;
- nir_ssa_bind_infos[instr->dest.ssa.index].block =
+ nir_ssa_bind_infos[instr->def.index].block =
nir_intrinsic_resource_block_intel(instr);
- nir_ssa_bind_infos[instr->dest.ssa.index].set =
+ nir_ssa_bind_infos[instr->def.index].set =
nir_intrinsic_desc_set(instr);
- nir_ssa_bind_infos[instr->dest.ssa.index].binding =
+ nir_ssa_bind_infos[instr->def.index].binding =
nir_intrinsic_binding(instr);
if (nir_intrinsic_resource_access_intel(instr) &
nir_resource_intel_non_uniform) {
- nir_resource_values[instr->dest.ssa.index] = fs_reg();
+ nir_resource_values[instr->def.index] = fs_reg();
} else {
- nir_resource_values[instr->dest.ssa.index] =
+ nir_resource_values[instr->def.index] =
try_rebuild_resource(bld, instr->src[1].ssa);
}
- nir_ssa_values[instr->dest.ssa.index] =
+ nir_ssa_values[instr->def.index] =
nir_ssa_values[instr->src[1].ssa->index];
break;
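The fs_visitor hunks key their side tables (nir_ssa_values, nir_ssa_bind_infos, nir_resource_values) by the def's index; only the spelling changes, since nir_def still carries a function-unique index suitable for dense arrays. A sketch of the keying, with value_t and table as hypothetical backend names; impl->ssa_alloc is NIR's def count for the function:

   value_t *table = calloc(impl->ssa_alloc, sizeof(*table));
   /* ... while walking instructions ... */
   table[intr->def.index] = lowered;   /* formerly intr->dest.ssa.index */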
tmp, srcs, ARRAY_SIZE(srcs));
inst->size_written = 4 * REG_SIZE;
- for (unsigned c = 0; c < instr->dest.ssa.num_components; ++c) {
+ for (unsigned c = 0; c < instr->def.num_components; ++c) {
bld.MOV(offset(retype(dest, tmp.type), bld, c),
component(offset(tmp, ubld, c), 0));
}
VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i),
surface, surface_handle,
base_offset, i * type_sz(dest.type),
- instr->dest.ssa.bit_size / 8);
+ instr->def.bit_size / 8);
prog_data->has_ubo_pull = true;
} else {
case nir_intrinsic_load_global_constant: {
assert(devinfo->ver >= 8);
- assert(instr->dest.ssa.bit_size <= 32);
+ assert(instr->def.bit_size <= 32);
assert(nir_intrinsic_align(instr) > 0);
fs_reg srcs[A64_LOGICAL_NUM_SRCS];
srcs[A64_LOGICAL_ADDRESS] = get_nir_src(instr->src[0]);
srcs[A64_LOGICAL_ENABLE_HELPERS] =
brw_imm_ud(nir_intrinsic_access(instr) & ACCESS_INCLUDE_HELPERS);
- if (instr->dest.ssa.bit_size == 32 &&
+ if (instr->def.bit_size == 32 &&
nir_intrinsic_align(instr) >= 4) {
- assert(instr->dest.ssa.num_components <= 4);
+ assert(instr->def.num_components <= 4);
srcs[A64_LOGICAL_ARG] = brw_imm_ud(instr->num_components);
inst->size_written = instr->num_components *
inst->dst.component_size(inst->exec_size);
} else {
- const unsigned bit_size = instr->dest.ssa.bit_size;
- assert(instr->dest.ssa.num_components == 1);
+ const unsigned bit_size = instr->def.bit_size;
+ assert(instr->def.num_components == 1);
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
srcs[A64_LOGICAL_ARG] = brw_imm_ud(bit_size);
break;
case nir_intrinsic_load_global_const_block_intel: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
assert(instr->num_components == 8 || instr->num_components == 16);
const fs_builder ubld = bld.exec_all().group(instr->num_components, 0);
case nir_intrinsic_load_ssbo: {
assert(devinfo->ver >= 7);
- const unsigned bit_size = instr->dest.ssa.bit_size;
+ const unsigned bit_size = instr->def.bit_size;
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
srcs[get_nir_src_bindless(instr->src[0]) ?
SURFACE_LOGICAL_SRC_SURFACE_HANDLE :
assert(nir_intrinsic_align(instr) > 0);
if (bit_size == 32 &&
nir_intrinsic_align(instr) >= 4) {
- assert(instr->dest.ssa.num_components <= 4);
+ assert(instr->def.num_components <= 4);
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
fs_inst *inst =
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
inst->size_written = instr->num_components * dispatch_width * 4;
} else {
- assert(instr->dest.ssa.num_components == 1);
+ assert(instr->def.num_components == 1);
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
case nir_intrinsic_load_scratch: {
assert(devinfo->ver >= 7);
- assert(instr->dest.ssa.num_components == 1);
- const unsigned bit_size = instr->dest.ssa.bit_size;
+ assert(instr->def.num_components == 1);
+ const unsigned bit_size = instr->def.bit_size;
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
if (devinfo->verx10 >= 125) {
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
/* Read the vector */
- assert(instr->dest.ssa.num_components == 1);
+ assert(instr->def.num_components == 1);
assert(bit_size <= 32);
assert(nir_intrinsic_align(instr) > 0);
if (bit_size == 32 &&
bld.exec_all().group(1, 0).MOV(flag, brw_imm_ud(0u));
bld.CMP(bld.null_reg_ud(), value, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
- if (instr->dest.ssa.bit_size > 32) {
+ if (instr->def.bit_size > 32) {
dest.type = BRW_REGISTER_TYPE_UQ;
} else {
dest.type = BRW_REGISTER_TYPE_UD;
}
case nir_intrinsic_load_global_block_intel: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
fs_reg address = bld.emit_uniformize(get_nir_src(instr->src[0]));
case nir_intrinsic_load_shared_block_intel:
case nir_intrinsic_load_ssbo_block_intel: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
const bool is_ssbo =
instr->intrinsic == nir_intrinsic_load_ssbo_block_intel;
*
* 16-bit float atomics are supported, however.
*/
- assert(instr->dest.ssa.bit_size == 32 ||
- (instr->dest.ssa.bit_size == 64 && devinfo->has_lsc) ||
- (instr->dest.ssa.bit_size == 16 &&
+ assert(instr->def.bit_size == 32 ||
+ (instr->def.bit_size == 64 && devinfo->has_lsc) ||
+ (instr->def.bit_size == 16 &&
(devinfo->has_lsc || lsc_opcode_is_atomic_float(op))));
- fs_reg dest = get_nir_def(instr->dest.ssa);
+ fs_reg dest = get_nir_def(instr->def);
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
srcs[bindless ?
/* Emit the actual atomic operation */
- switch (instr->dest.ssa.bit_size) {
+ switch (instr->def.bit_size) {
case 16: {
fs_reg dest32 = bld.vgrf(BRW_REGISTER_TYPE_UD);
bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
{
int op = lsc_aop_for_nir_intrinsic(instr);
- fs_reg dest = get_nir_def(instr->dest.ssa);
+ fs_reg dest = get_nir_def(instr->def);
fs_reg addr = get_nir_src(instr->src[0]);
srcs[A64_LOGICAL_ARG] = brw_imm_ud(op);
srcs[A64_LOGICAL_ENABLE_HELPERS] = brw_imm_ud(0);
- switch (instr->dest.ssa.bit_size) {
+ switch (instr->def.bit_size) {
case 16: {
fs_reg dest32 = bld.vgrf(BRW_REGISTER_TYPE_UD);
bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
opcode = SHADER_OPCODE_SAMPLEINFO_LOGICAL;
break;
case nir_texop_samples_identical: {
- fs_reg dst = retype(get_nir_def(instr->dest.ssa), BRW_REGISTER_TYPE_D);
+ fs_reg dst = retype(get_nir_def(instr->def), BRW_REGISTER_TYPE_D);
/* If mcs is an immediate value, it means there is no MCS. In that case
* just return false.
const unsigned dest_size = nir_tex_instr_dest_size(instr);
if (devinfo->ver >= 9 &&
instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
- unsigned write_mask = nir_def_components_read(&instr->dest.ssa);
+ unsigned write_mask = nir_def_components_read(&instr->def);
assert(write_mask != 0); /* dead code should have been eliminated */
if (instr->is_sparse) {
inst->size_written = (util_last_bit(write_mask) - 1) *
if (instr->is_sparse)
nir_dest[dest_size - 1] = component(offset(dst, bld, dest_size - 1), 0);
- bld.LOAD_PAYLOAD(get_nir_def(instr->dest.ssa), nir_dest, dest_size, 0);
+ bld.LOAD_PAYLOAD(get_nir_def(instr->def), nir_dest, dest_size, 0);
}
void
if (i == 0) {
/* The first source is our deref */
assert(nir_intrinsic_infos[op].src_components[i] == -1);
- src = &nir_build_deref_cast(&b, src, mode, data_type, 0)->dest.ssa;
+ src = &nir_build_deref_cast(&b, src, mode, data_type, 0)->def;
}
atomic->src[i] = nir_src_for_ssa(src);
}
- nir_def_init_for_type(&atomic->instr, &atomic->dest.ssa, data_type);
+ nir_def_init_for_type(&atomic->instr, &atomic->def, data_type);
nir_builder_instr_insert(&b, &atomic->instr);
- nir_store_deref(&b, ret, &atomic->dest.ssa, ~0);
+ nir_store_deref(&b, ret, &atomic->def, ~0);
}
static void
nir_intrinsic_instr_create(b.shader, nir_intrinsic_ballot);
ballot->src[0] = nir_src_for_ssa(cond);
ballot->num_components = 1;
- nir_def_init(&ballot->instr, &ballot->dest.ssa, 1, 32);
+ nir_def_init(&ballot->instr, &ballot->def, 1, 32);
nir_builder_instr_insert(&b, &ballot->instr);
- nir_store_deref(&b, ret, &ballot->dest.ssa, ~0);
+ nir_store_deref(&b, ret, &ballot->def, ~0);
}
static bool
load->src[0] = nir_src_for_ssa(nir_u2u32(&b, intrin->src[0].ssa));
nir_intrinsic_set_base(load, kernel_arg_start);
nir_intrinsic_set_range(load, nir->num_uniforms);
- nir_def_init(&load->instr, &load->dest.ssa,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ nir_def_init(&load->instr, &load->def,
+ intrin->def.num_components,
+ intrin->def.bit_size);
nir_builder_instr_insert(&b, &load->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa, &load->dest.ssa);
+ nir_def_rewrite_uses(&intrin->def, &load->def);
progress = true;
break;
}
nir_def *const_data_base_addr = nir_pack_64_2x32_split(&b,
nir_load_reloc_const_intel(&b, BRW_SHADER_RELOC_CONST_DATA_ADDR_LOW),
nir_load_reloc_const_intel(&b, BRW_SHADER_RELOC_CONST_DATA_ADDR_HIGH));
- nir_def_rewrite_uses(&intrin->dest.ssa, const_data_base_addr);
+ nir_def_rewrite_uses(&intrin->def, const_data_base_addr);
progress = true;
break;
}
nir_intrinsic_set_base(load, kernel_sysvals_start +
offsetof(struct brw_kernel_sysvals, num_work_groups));
nir_intrinsic_set_range(load, 3 * 4);
- nir_def_init(&load->instr, &load->dest.ssa, 3, 32);
+ nir_def_init(&load->instr, &load->def, 3, 32);
nir_builder_instr_insert(&b, &load->instr);
/* We may need to do a bit-size cast here */
nir_def *num_work_groups =
- nir_u2uN(&b, &load->dest.ssa, intrin->dest.ssa.bit_size);
+ nir_u2uN(&b, &load->def, intrin->def.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa, num_work_groups);
+ nir_def_rewrite_uses(&intrin->def, num_work_groups);
progress = true;
break;
}
/* Read the first few 32-bit scalars from InlineData. */
if (nir_src_is_const(intrin->src[0]) &&
- intrin->dest.ssa.bit_size == 32 &&
- intrin->dest.ssa.num_components == 1) {
+ intrin->def.bit_size == 32 &&
+ intrin->def.num_components == 1) {
unsigned off = nir_intrinsic_base(intrin) + nir_src_as_uint(intrin->src[0]);
unsigned off_dw = off / 4;
if (off % 4 == 0 && off_dw < BRW_TASK_MESH_PUSH_CONSTANTS_SIZE_DW) {
nir_ishl_imm(b, nir_channel(b, data_def, 2), 16));
}
- nir_build_store_deref(b, &new_array_deref->dest.ssa, new_data);
+ nir_build_store_deref(b, &new_array_deref->def, new_data);
nir_instr_remove(instr);
emit_urb_direct_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
const fs_reg &dest, fs_reg urb_handle)
{
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
- unsigned comps = instr->dest.ssa.num_components;
+ unsigned comps = instr->def.num_components;
if (comps == 0)
return;
emit_urb_indirect_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
const fs_reg &dest, const fs_reg &offset_src, fs_reg urb_handle)
{
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
- unsigned comps = instr->dest.ssa.num_components;
+ unsigned comps = instr->def.num_components;
if (comps == 0)
return;
fs_visitor::emit_task_mesh_load(const fs_builder &bld, nir_intrinsic_instr *instr,
const fs_reg &urb_handle)
{
- fs_reg dest = get_nir_def(instr->dest.ssa);
+ fs_reg dest = get_nir_def(instr->def);
nir_src *offset_nir_src = nir_get_io_offset_src(instr);
/* TODO(mesh): for per_vertex and per_primitive, if we could keep around
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
switch (instr->intrinsic) {
case nir_intrinsic_load_mesh_inline_data_intel: {
if (write) {
assert(intr->num_components == intr->src[0].ssa->num_components);
} else {
- assert(intr->num_components == intr->dest.ssa.num_components);
+ assert(intr->num_components == intr->def.num_components);
}
if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
src = nir_vec4(b, undef, undef, y, x);
mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2;
- } else if (intr->dest.ssa.num_components > 1) {
- assert(intr->dest.ssa.num_components == 2);
+ } else if (intr->def.num_components > 1) {
+ assert(intr->def.num_components == 2);
intr->num_components = 4;
- intr->dest.ssa.num_components = 4;
+ intr->def.num_components = 4;
unsigned wz[2] = { 3, 2 };
- dest = nir_swizzle(b, &intr->dest.ssa, wz, 2);
+ dest = nir_swizzle(b, &intr->def, wz, 2);
} else {
nir_intrinsic_set_component(intr, 3 - component);
}
/* Don't overwrite the inner factor at DWord 4 for triangles */
if (_primitive_mode == TESS_PRIMITIVE_TRIANGLES)
mask &= ~WRITEMASK_X;
- } else if (intr->dest.ssa.num_components > 1) {
- assert(intr->dest.ssa.num_components == 4);
+ } else if (intr->def.num_components > 1) {
+ assert(intr->def.num_components == 4);
unsigned wzyx[4] = { 3, 2, 1, 0 };
- dest = nir_swizzle(b, &intr->dest.ssa, wzyx, 4);
+ dest = nir_swizzle(b, &intr->def, wzyx, 4);
} else {
nir_intrinsic_set_component(intr, 3 - component);
out_of_bounds = component == 3 &&
if (out_of_bounds) {
if (!write)
- nir_def_rewrite_uses(&intr->dest.ssa, nir_undef(b, 1, 32));
+ nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, 32));
nir_instr_remove(&intr->instr);
} else if (write) {
nir_intrinsic_set_write_mask(intr, mask);
nir_src_for_ssa(src));
}
} else if (dest) {
- nir_def_rewrite_uses_after(&intr->dest.ssa, dest,
+ nir_def_rewrite_uses_after(&intr->def, dest,
dest->parent_instr);
}
}
load->num_components = 1;
- nir_def_init(&load->instr, &load->dest.ssa, 1, 32);
+ nir_def_init(&load->instr, &load->def, 1, 32);
nir_builder_instr_insert(&b, &load->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa,
- &load->dest.ssa);
+ nir_def_rewrite_uses(&intrin->def,
+ &load->def);
nir_instr_remove(&intrin->instr);
break;
}
nir_def *centroid =
nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
nir_intrinsic_interp_mode(intrin));
- nir_def_rewrite_uses(&intrin->dest.ssa, centroid);
+ nir_def_rewrite_uses(&intrin->def, centroid);
nir_instr_remove(instr);
return true;
}
* to do if we were trying to do it in native 8-bit types and the
* results are the same once we truncate to 8 bits at the end.
*/
- if (intrin->dest.ssa.bit_size == 8)
+ if (intrin->def.bit_size == 8)
return 16;
return 0;
case nir_instr_type_phi: {
nir_phi_instr *phi = nir_instr_as_phi(instr);
- if (phi->dest.ssa.bit_size == 8)
+ if (phi->def.bit_size == 8)
return 16;
return 0;
}
nir_def *zero = nir_imm_zero(b, 1, 32);
- nir_def_rewrite_uses(&intrin->dest.ssa, zero);
+ nir_def_rewrite_uses(&intrin->def, zero);
nir_instr_remove(instr);
{
assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
- unsigned bit_size = load_uniform->dest.ssa.bit_size;
+ unsigned bit_size = load_uniform->def.bit_size;
assert(bit_size >= 8 && bit_size % 8 == 0);
unsigned byte_size = bit_size / 8;
nir_def *sysval;
/* The value might span multiple 32-byte chunks. */
const int bytes = nir_intrinsic_dest_components(intrin) *
- (intrin->dest.ssa.bit_size / 8);
+ (intrin->def.bit_size / 8);
const int start = ROUND_DOWN_TO(byte_offset, 32);
const int end = ALIGN(byte_offset + bytes, 32);
const int chunks = (end - start) / 32;
b->cursor = nir_after_instr(instr);
- nir_def *val = &intrin->dest.ssa;
+ nir_def *val = &intrin->def;
/* Do GL_FIXED rescaling for GLES2.0. Our GL_FIXED attributes
* come in as floating point conversions of the integer values.
: nir_u2f32(b, val);
}
- nir_def_rewrite_uses_after(&intrin->dest.ssa, val,
+ nir_def_rewrite_uses_after(&intrin->def, val,
val->parent_instr);
return true;
if (nir_src_is_divergent(intrin->src[1]))
return false;
- if (intrin->dest.ssa.bit_size != 32)
+ if (intrin->def.bit_size != 32)
return false;
/* Without the LSC, we can only do block loads of at least 4 dwords (1
* oword).
*/
- if (!devinfo->has_lsc && intrin->dest.ssa.num_components < 4)
+ if (!devinfo->has_lsc && intrin->def.num_components < 4)
return false;
intrin->intrinsic =
if (nir_src_is_divergent(intrin->src[0]))
return false;
- if (intrin->dest.ssa.bit_size != 32)
+ if (intrin->def.bit_size != 32)
return false;
intrin->intrinsic = nir_intrinsic_load_shared_uniform_block_intel;
if (nir_src_is_divergent(intrin->src[0]))
return false;
- if (intrin->dest.ssa.bit_size != 32)
+ if (intrin->def.bit_size != 32)
return false;
/* Without the LSC, we can only do block loads of at least 4 dwords (1
* oword).
*/
- if (!devinfo->has_lsc && intrin->dest.ssa.num_components < 4)
+ if (!devinfo->has_lsc && intrin->def.num_components < 4)
return false;
intrin->intrinsic = nir_intrinsic_load_global_constant_uniform_block_intel;
if (!nir_intrinsic_image_array(intr))
break;
- image_size = &intr->dest.ssa;
+ image_size = &intr->def;
break;
case nir_intrinsic_image_deref_size: {
if (!glsl_sampler_type_is_array(deref->type))
break;
- image_size = &intr->dest.ssa;
+ image_size = &intr->def;
break;
}
if (!tex_instr->is_array)
break;
- image_size = &tex_instr->dest.ssa;
+ image_size = &tex_instr->def;
break;
}
b->cursor = nir_before_instr(instr);
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_int(b, *input_vertices));
+ nir_def_rewrite_uses(&intrin->def, nir_imm_int(b, *input_vertices));
return true;
}
case nir_intrinsic_load_workgroup_id:
case nir_intrinsic_load_num_workgroups:
/* Convert this to 32-bit if it's not */
- if (intrinsic->dest.ssa.bit_size == 64) {
- intrinsic->dest.ssa.bit_size = 32;
- sysval = nir_u2u64(b, &intrinsic->dest.ssa);
- nir_def_rewrite_uses_after(&intrinsic->dest.ssa,
+ if (intrinsic->def.bit_size == 64) {
+ intrinsic->def.bit_size = 32;
+ sysval = nir_u2u64(b, &intrinsic->def);
+ nir_def_rewrite_uses_after(&intrinsic->def,
sysval,
sysval->parent_instr);
}
continue;
}
- if (intrinsic->dest.ssa.bit_size == 64)
+ if (intrinsic->def.bit_size == 64)
sysval = nir_u2u64(b, sysval);
- nir_def_rewrite_uses(&intrinsic->dest.ssa, sysval);
+ nir_def_rewrite_uses(&intrinsic->def, sysval);
nir_instr_remove(&intrinsic->instr);
state->progress = true;
break;
case nir_intrinsic_load_ray_t_max:
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
hit_t);
nir_instr_remove(&intrin->instr);
break;
case nir_intrinsic_load_ray_hit_kind:
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
hit_kind);
nir_instr_remove(&intrin->instr);
break;
nir_push_if(b, nir_inot(b, nir_load_leaf_opaque_intel(b)));
{
nir_def *params[] = {
- &nir_build_deref_var(b, commit_tmp)->dest.ssa,
+ &nir_build_deref_var(b, commit_tmp)->def,
hit_t,
hit_kind,
};
nir_pop_if(b, NULL);
nir_def *accepted = nir_load_var(b, commit_tmp);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
accepted);
break;
}
nir_instr_as_intrinsic(new_instr);
nir_src_rewrite(&new_resource_intel->src[1], intrin->src[source].ssa);
- nir_src_rewrite(&intrin->src[source], &new_resource_intel->dest.ssa);
+ nir_src_rewrite(&intrin->src[source], &new_resource_intel->def);
return true;
}
nir_instr_as_intrinsic(new_instr);
nir_src_rewrite(&new_resource_intel->src[1], tex->src[s].src.ssa);
- nir_src_rewrite(&tex->src[s].src, &new_resource_intel->dest.ssa);
+ nir_src_rewrite(&tex->src[s].src, &new_resource_intel->def);
progress = true;
}
return false;
bool progress = false;
- nir_foreach_use_safe(src, &intrin->dest.ssa) {
+ nir_foreach_use_safe(src, &intrin->def) {
if (!src->is_if && skip_resource_intel_cleanup(src->parent_instr))
continue;
}
nir_pop_if(b, NULL);
not_done = nir_if_phi(b, not_done_then, not_done_else);
- nir_def_rewrite_uses(&intrin->dest.ssa, not_done);
+ nir_def_rewrite_uses(&intrin->def, not_done);
break;
}
}
assert(sysval);
- nir_def_rewrite_uses(&intrin->dest.ssa, sysval);
+ nir_def_rewrite_uses(&intrin->def, sysval);
break;
}
progress = true;
if (sysval) {
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
sysval);
nir_instr_remove(&intrin->instr);
}
nir_instr_rewrite_src(instr, &intrin->src[0],
nir_src_for_ssa(packed_fp16_xy));
} else {
- nir_def *packed_fp16_xy = &intrin->dest.ssa;
+ nir_def *packed_fp16_xy = &intrin->def;
nir_def *u32_x =
nir_i2i32(b, nir_unpack_32_2x16_split_x(b, packed_fp16_xy));
b->cursor = nir_instr_remove(&intrin->instr);
nir_def_rewrite_uses(
- &intrin->dest.ssa,
+ &intrin->def,
nir_i2b(b, nir_iand(b, intrin->src[0].ssa,
nir_ishl(b, nir_imm_int(b, 1),
nir_load_subgroup_invocation(b)))));
b->cursor = nir_instr_remove(&intrin->instr);
nir_def_rewrite_uses(
- &intrin->dest.ssa,
+ &intrin->def,
nir_iand(b, intrin->src[0].ssa, intrin->src[1].ssa));
}
if (intrin->intrinsic == nir_intrinsic_image_sparse_load) {
img_load = nir_image_load(b,
intrin->num_components - 1,
- intrin->dest.ssa.bit_size,
+ intrin->def.bit_size,
intrin->src[0].ssa,
intrin->src[1].ssa,
intrin->src[2].ssa,
} else {
img_load = nir_bindless_image_load(b,
intrin->num_components - 1,
- intrin->dest.ssa.bit_size,
+ intrin->def.bit_size,
intrin->src[0].ssa,
intrin->src[1].ssa,
intrin->src[2].ssa,
tex->src[2].src_type = nir_tex_src_lod;
tex->src[2].src = nir_src_for_ssa(nir_imm_int(b, 0));
- nir_def_init(&tex->instr, &tex->dest.ssa, 5,
- intrin->dest.ssa.bit_size);
+ nir_def_init(&tex->instr, &tex->def, 5,
+ intrin->def.bit_size);
nir_builder_instr_insert(b, &tex->instr);
- dests[intrin->num_components - 1] = nir_channel(b, &tex->dest.ssa, 4);
+ dests[intrin->num_components - 1] = nir_channel(b, &tex->def, 4);
nir_def_rewrite_uses(
- &intrin->dest.ssa,
+ &intrin->def,
nir_vec(b, dests, intrin->num_components));
}
/* Clone the original instruction */
nir_tex_instr *sparse_tex = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
- nir_def_init(&sparse_tex->instr, &sparse_tex->dest.ssa,
- tex->dest.ssa.num_components, tex->dest.ssa.bit_size);
+ nir_def_init(&sparse_tex->instr, &sparse_tex->def,
+ tex->def.num_components, tex->def.bit_size);
nir_builder_instr_insert(b, &sparse_tex->instr);
/* Drop the compare source on the cloned instruction */
/* Drop the residency query on the original tex instruction */
tex->is_sparse = false;
- tex->dest.ssa.num_components = tex->dest.ssa.num_components - 1;
+ tex->def.num_components = tex->def.num_components - 1;
nir_def *new_comps[NIR_MAX_VEC_COMPONENTS];
- for (unsigned i = 0; i < tex->dest.ssa.num_components; i++)
- new_comps[i] = nir_channel(b, &tex->dest.ssa, i);
- new_comps[tex->dest.ssa.num_components] =
- nir_channel(b, &sparse_tex->dest.ssa, tex->dest.ssa.num_components);
+ for (unsigned i = 0; i < tex->def.num_components; i++)
+ new_comps[i] = nir_channel(b, &tex->def, i);
+ new_comps[tex->def.num_components] =
+ nir_channel(b, &sparse_tex->def, tex->def.num_components);
- nir_def *new_vec = nir_vec(b, new_comps, sparse_tex->dest.ssa.num_components);
+ nir_def *new_vec = nir_vec(b, new_comps, sparse_tex->def.num_components);
- nir_def_rewrite_uses_after(&tex->dest.ssa, new_vec, new_vec->parent_instr);
+ nir_def_rewrite_uses_after(&tex->def, new_vec, new_vec->parent_instr);
}
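The residency split above leans on nir_instr_clone: the clone gets its own def, which the hunk re-initializes to the original's shape before insertion. The core of the idiom, with tex standing in for the instruction being cloned:

   nir_tex_instr *clone = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
   nir_def_init(&clone->instr, &clone->def,
                tex->def.num_components, tex->def.bit_size);
   nir_builder_instr_insert(b, &clone->instr);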
static bool
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(b->shader,
nir_intrinsic_image_deref_load_param_intel);
- load->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ load->src[0] = nir_src_for_ssa(&deref->def);
nir_intrinsic_set_base(load, offset / 4);
switch (offset) {
default:
unreachable("Invalid param offset");
}
- nir_def_init(&load->instr, &load->dest.ssa, load->num_components, 32);
+ nir_def_init(&load->instr, &load->def, load->num_components, 32);
nir_builder_instr_insert(b, &load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
#define load_image_param(b, d, o) \
* conversion.
*/
nir_def *placeholder = nir_undef(b, 4, 32);
- nir_def_rewrite_uses(&intrin->dest.ssa, placeholder);
+ nir_def_rewrite_uses(&intrin->def, placeholder);
intrin->num_components = isl_format_get_num_channels(lower_fmt);
- intrin->dest.ssa.num_components = intrin->num_components;
+ intrin->def.num_components = intrin->num_components;
b->cursor = nir_after_instr(&intrin->instr);
nir_def *color = convert_color_for_load(b, devinfo,
- &intrin->dest.ssa,
+ &intrin->def,
image_fmt, lower_fmt,
dest_components);
if (sparse) {
/* Put the sparse component back on the original instruction */
intrin->num_components++;
- intrin->dest.ssa.num_components = intrin->num_components;
+ intrin->def.num_components = intrin->num_components;
/* Carry over the sparse component without modifying it with the
* converted color.
for (unsigned i = 0; i < dest_components; i++)
sparse_color[i] = nir_channel(b, color, i);
sparse_color[dest_components] =
- nir_channel(b, &intrin->dest.ssa, intrin->num_components - 1);
+ nir_channel(b, &intrin->def, intrin->num_components - 1);
color = nir_vec(b, sparse_color, dest_components + 1);
}
nir_def *addr = image_address(b, devinfo, deref, coord);
nir_def *load =
nir_image_deref_load_raw_intel(b, image_fmtl->bpb / 32, 32,
- &deref->dest.ssa, addr);
+ &deref->def, addr);
nir_push_else(b, NULL);
image_fmt, raw_fmt,
dest_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, color);
+ nir_def_rewrite_uses(&intrin->def, color);
}
return true;
nir_intrinsic_instr *store =
nir_intrinsic_instr_create(b->shader,
nir_intrinsic_image_deref_store_raw_intel);
- store->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ store->src[0] = nir_src_for_ssa(&deref->def);
store->src[1] = nir_src_for_ssa(addr);
store->src[2] = nir_src_for_ssa(color);
store->num_components = image_fmtl->bpb / 32;
/* Use an undef to hold the uses of the load conversion. */
nir_def *placeholder = nir_undef(b, 4, 32);
- nir_def_rewrite_uses(&intrin->dest.ssa, placeholder);
+ nir_def_rewrite_uses(&intrin->def, placeholder);
/* Check the first component of the size field to find out if the
* image is bound. Necessary on IVB for typed atomics because
nir_pop_if(b, NULL);
- nir_def *result = nir_if_phi(b, &intrin->dest.ssa, zero);
+ nir_def *result = nir_if_phi(b, &intrin->def, zero);
nir_def_rewrite_uses(placeholder, result);
return true;
for (unsigned c = 0; c < coord_comps; c++)
comps[c] = nir_channel(b, size, c);
- for (unsigned c = coord_comps; c < intrin->dest.ssa.num_components; ++c)
+ for (unsigned c = coord_comps; c < intrin->def.num_components; ++c)
comps[c] = nir_imm_int(b, 1);
- nir_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, vec);
+ nir_def *vec = nir_vec(b, comps, intrin->def.num_components);
+ nir_def_rewrite_uses(&intrin->def, vec);
return true;
}
resize_deref(nir_builder *b, nir_deref_instr *deref,
unsigned num_components, unsigned bit_size)
{
- if (deref->dest.ssa.num_components == num_components &&
- deref->dest.ssa.bit_size == bit_size)
+ if (deref->def.num_components == num_components &&
+ deref->def.bit_size == bit_size)
return false;
/* NIR requires array indices to match the deref bit size */
- if (deref->dest.ssa.bit_size != bit_size &&
+ if (deref->def.bit_size != bit_size &&
(deref->deref_type == nir_deref_type_array ||
deref->deref_type == nir_deref_type_ptr_as_array)) {
b->cursor = nir_before_instr(&deref->instr);
nir_src_for_ssa(idx));
}
- deref->dest.ssa.num_components = num_components;
- deref->dest.ssa.bit_size = bit_size;
+ deref->def.num_components = num_components;
+ deref->def.bit_size = bit_size;
return true;
}
nir_build_deref_cast(&b, call_data_addr,
nir_var_function_temp,
deref->var->type, 0);
- nir_def_rewrite_uses(&deref->dest.ssa,
- &cast->dest.ssa);
+ nir_def_rewrite_uses(&deref->def,
+ &cast->def);
nir_instr_remove(&deref->instr);
progress = true;
}
nir_build_deref_cast(&b, hit_attrib_addr,
nir_var_function_temp,
deref->type, 0);
- nir_def_rewrite_uses(&deref->dest.ssa,
- &cast->dest.ssa);
+ nir_def_rewrite_uses(&deref->def,
+ &cast->def);
nir_instr_remove(&deref->instr);
progress = true;
}
b.cursor = nir_before_instr(&intrin->instr);
nir_def *global_arg_addr =
load_trampoline_param(&b, rt_disp_globals_addr, 1, 64);
- nir_def_rewrite_uses(&intrin->dest.ssa,
+ nir_def_rewrite_uses(&intrin->def,
global_arg_addr);
nir_instr_remove(instr);
}
switch (instr->intrinsic) {
case nir_intrinsic_load_per_vertex_input: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
/* The EmitNoIndirectInput flag guarantees our vertex index will
* be constant. We should handle indirects someday.
*/
type);
src.swizzle = BRW_SWZ_COMP_INPUT(nir_intrinsic_component(instr));
- dest = get_nir_def(instr->dest.ssa, src.type);
+ dest = get_nir_def(instr->def, src.type);
dest.writemask = brw_writemask_for_size(instr->num_components);
emit(MOV(dest, src));
break;
case nir_intrinsic_load_primitive_id:
assert(gs_prog_data->include_primitive_id);
- dest = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D);
+ dest = get_nir_def(instr->def, BRW_REGISTER_TYPE_D);
emit(MOV(dest, retype(brw_vec4_grf(1, 0), BRW_REGISTER_TYPE_D)));
break;
case nir_intrinsic_load_invocation_id: {
- dest = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D);
+ dest = get_nir_def(instr->def, BRW_REGISTER_TYPE_D);
if (gs_prog_data->invocations > 1)
emit(GS_OPCODE_GET_INSTANCE_ID, dest);
else
if (bit_size == 64)
reg.type = BRW_REGISTER_TYPE_DF;
- nir_ssa_values[instr->dest.ssa.index] = reg;
+ nir_ssa_values[instr->def.index] = reg;
break;
}
break;
case nir_intrinsic_load_input: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
/* We set EmitNoIndirectInput for VS */
unsigned load_offset = nir_src_as_uint(instr->src[0]);
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
src = src_reg(ATTR, nir_intrinsic_base(instr) + load_offset,
glsl_type::uvec4_type);
unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
nir_src_as_uint(instr->src[0]) : 0;
- dst_reg result_dst = get_nir_def(instr->dest.ssa);
+ dst_reg result_dst = get_nir_def(instr->def);
vec4_instruction *inst = new(mem_ctx)
vec4_instruction(SHADER_OPCODE_GET_BUFFER_SIZE, result_dst);
assert(devinfo->ver == 7);
/* brw_nir_lower_mem_access_bit_sizes takes care of this */
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
src_reg surf_index = get_nir_ssbo_intrinsic_index(instr);
src_reg offset_reg = retype(get_nir_src_imm(instr->src[1]),
src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
1 /* dims */, 4 /* size */,
BRW_PREDICATE_NONE);
- dst_reg dest = get_nir_def(instr->dest.ssa);
+ dst_reg dest = get_nir_def(instr->def);
read_result.type = dest.type;
read_result.swizzle = brw_swizzle_for_size(instr->num_components);
emit(MOV(dest, read_result));
/* Offsets are in bytes but they should always be multiples of 4 */
assert(nir_intrinsic_base(instr) % 4 == 0);
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
src = src_reg(dst_reg(UNIFORM, nir_intrinsic_base(instr) / 16));
src.type = dest.type;
case nir_intrinsic_load_ubo: {
src_reg surf_index;
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
if (nir_src_is_const(instr->src[0])) {
/* The block index is a constant, so just emit the binding table entry
src_reg packed_consts;
if (push_reg.file != BAD_FILE) {
packed_consts = push_reg;
- } else if (instr->dest.ssa.bit_size == 32) {
+ } else if (instr->def.bit_size == 32) {
packed_consts = src_reg(this, glsl_type::vec4_type);
emit_pull_constant_load_reg(dst_reg(packed_consts),
surf_index,
const src_reg shader_clock = get_timestamp();
const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);
- dest = get_nir_def(instr->dest.ssa, type);
+ dest = get_nir_def(instr->def, type);
emit(MOV(dest, shader_clock));
break;
}
{
dst_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
- dest = get_nir_def(instr->dest.ssa);
+ dest = get_nir_def(instr->def);
src_reg surface = get_nir_ssbo_intrinsic_index(instr);
src_reg offset = get_nir_src(instr->src[1], 1);
src_reg sample_index;
src_reg mcs;
- dst_reg dest = get_nir_def(instr->dest.ssa, instr->dest_type);
+ dst_reg dest = get_nir_def(instr->def, instr->dest_type);
/* The hardware requires a LOD for buffer textures */
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
{
switch (instr->intrinsic) {
case nir_intrinsic_load_invocation_id:
- emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_UD),
+ emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_UD),
invocation_id));
break;
case nir_intrinsic_load_primitive_id:
emit(TCS_OPCODE_GET_PRIMITIVE_ID,
- get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_UD));
+ get_nir_def(instr->def, BRW_REGISTER_TYPE_UD));
break;
case nir_intrinsic_load_patch_vertices_in:
- emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D),
+ emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_D),
brw_imm_d(key->input_vertices)));
break;
case nir_intrinsic_load_per_vertex_input: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
src_reg indirect_offset = get_indirect_offset(instr);
unsigned imm_offset = nir_intrinsic_base(instr);
BRW_REGISTER_TYPE_UD);
unsigned first_component = nir_intrinsic_component(instr);
- dst_reg dst = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D);
+ dst_reg dst = get_nir_def(instr->def, BRW_REGISTER_TYPE_D);
dst.writemask = brw_writemask_for_size(instr->num_components);
emit_input_urb_read(dst, vertex_index, imm_offset,
first_component, indirect_offset);
src_reg indirect_offset = get_indirect_offset(instr);
unsigned imm_offset = nir_intrinsic_base(instr);
- dst_reg dst = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D);
+ dst_reg dst = get_nir_def(instr->def, BRW_REGISTER_TYPE_D);
dst.writemask = brw_writemask_for_size(instr->num_components);
emit_output_urb_read(dst, imm_offset, nir_intrinsic_component(instr),
switch (instr->intrinsic) {
case nir_intrinsic_load_tess_coord:
/* gl_TessCoord is part of the payload in g1 channels 0-2 and 4-6. */
- emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F),
+ emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F),
src_reg(brw_vec8_grf(1, 0))));
break;
case nir_intrinsic_load_tess_level_outer:
if (tes_prog_data->domain == BRW_TESS_DOMAIN_ISOLINE) {
- emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F),
+ emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F),
swizzle(src_reg(ATTR, 1, glsl_type::vec4_type),
BRW_SWIZZLE_ZWZW)));
} else {
- emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F),
+ emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F),
swizzle(src_reg(ATTR, 1, glsl_type::vec4_type),
BRW_SWIZZLE_WZYX)));
}
break;
case nir_intrinsic_load_tess_level_inner:
if (tes_prog_data->domain == BRW_TESS_DOMAIN_QUAD) {
- emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F),
+ emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F),
swizzle(src_reg(ATTR, 0, glsl_type::vec4_type),
BRW_SWIZZLE_WZYX)));
} else {
- emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_F),
+ emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_F),
src_reg(ATTR, 1, glsl_type::float_type)));
}
break;
case nir_intrinsic_load_primitive_id:
emit(TES_OPCODE_GET_PRIMITIVE_ID,
- get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_UD));
+ get_nir_def(instr->def, BRW_REGISTER_TYPE_UD));
break;
case nir_intrinsic_load_input:
case nir_intrinsic_load_per_vertex_input: {
- assert(instr->dest.ssa.bit_size == 32);
+ assert(instr->def.bit_size == 32);
src_reg indirect_offset = get_indirect_offset(instr);
unsigned imm_offset = instr->const_index[0];
src_reg header = input_read_header;
src_reg src = src_reg(ATTR, imm_offset, glsl_type::ivec4_type);
src.swizzle = BRW_SWZ_COMP_INPUT(first_component);
- emit(MOV(get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D), src));
+ emit(MOV(get_nir_def(instr->def, BRW_REGISTER_TYPE_D), src));
prog_data->urb_read_length =
MAX2(prog_data->urb_read_length,
/* Copy to target. We might end up with some funky writemasks landing
* in here, but we really don't want them in the above pseudo-ops.
*/
- dst_reg dst = get_nir_def(instr->dest.ssa, BRW_REGISTER_TYPE_D);
+ dst_reg dst = get_nir_def(instr->def, BRW_REGISTER_TYPE_D);
dst.writemask = brw_writemask_for_size(instr->num_components);
emit(MOV(dst, src));
break;
nir_imm_int(b, 0));
}
- nir_def_rewrite_uses(&intrin->dest.ssa, desc_value);
+ nir_def_rewrite_uses(&intrin->def, desc_value);
return true;
}
return false;
b->cursor = nir_instr_remove(&intrin->instr);
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_zero(b, 3, 32));
+ nir_def_rewrite_uses(&intrin->def, nir_imm_zero(b, 3, 32));
return true;
}
b->cursor = nir_instr_remove(instr);
nir_def_rewrite_uses(
- &intrin->dest.ssa,
+ &intrin->def,
nir_load_uniform(b,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size,
+ intrin->def.num_components,
+ intrin->def.bit_size,
intrin->src[1].ssa,
.base = 0,
- .range = intrin->dest.ssa.num_components *
- intrin->dest.ssa.bit_size / 8));
+ .range = intrin->def.num_components *
+ intrin->def.bit_size / 8));
return true;
}
return false;
nir_instr_remove(&deref->instr);
- nir_def_rewrite_uses(&deref->dest.ssa, &new_derefs[location]->dest.ssa);
+ nir_def_rewrite_uses(&deref->def, &new_derefs[location]->def);
return true;
}
/* 64-bit atomics only support A64 messages so we can't lower them to
* the index+offset model.
*/
- if (is_atomic && intrin->dest.ssa.bit_size == 64 &&
+ if (is_atomic && intrin->def.bit_size == 64 &&
!state->pdevice->info.has_lsc)
return false;
/* Acceleration structure descriptors are always uint64_t */
nir_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state);
- assert(load_desc->dest.ssa.bit_size == 64);
- assert(load_desc->dest.ssa.num_components == 1);
- nir_def_rewrite_uses(&load_desc->dest.ssa, desc);
+ assert(load_desc->def.bit_size == 64);
+ assert(load_desc->def.num_components == 1);
+ nir_def_rewrite_uses(&load_desc->def, desc);
nir_instr_remove(&load_desc->instr);
return true;
intrin->src[0].ssa,
state);
- assert(intrin->dest.ssa.bit_size == index->bit_size);
- assert(intrin->dest.ssa.num_components == index->num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, index);
+ assert(intrin->def.bit_size == index->bit_size);
+ assert(intrin->def.num_components == index->num_components);
+ nir_def_rewrite_uses(&intrin->def, index);
nir_instr_remove(&intrin->instr);
return true;
build_res_reindex(b, intrin->src[0].ssa,
intrin->src[1].ssa);
- assert(intrin->dest.ssa.bit_size == index->bit_size);
- assert(intrin->dest.ssa.num_components == index->num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, index);
+ assert(intrin->def.bit_size == index->bit_size);
+ assert(intrin->def.num_components == index->num_components);
+ nir_def_rewrite_uses(&intrin->def, index);
nir_instr_remove(&intrin->instr);
return true;
desc_type, intrin->src[0].ssa,
addr_format, state);
- assert(intrin->dest.ssa.bit_size == desc->bit_size);
- assert(intrin->dest.ssa.num_components == desc->num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+ assert(intrin->def.bit_size == desc->bit_size);
+ assert(intrin->def.num_components == desc->num_components);
+ nir_def_rewrite_uses(&intrin->def, desc);
nir_instr_remove(&intrin->instr);
return true;
}
nir_def *size = nir_channel(b, desc_range, 2);
- nir_def_rewrite_uses(&intrin->dest.ssa, size);
+ nir_def_rewrite_uses(&intrin->def, size);
nir_instr_remove(&intrin->instr);
return true;
nir_def *image_depth =
build_load_storage_3d_image_depth(b, desc_addr,
- nir_channel(b, &intrin->dest.ssa, 2),
+ nir_channel(b, &intrin->def, 2),
state);
nir_def *comps[4] = {};
- for (unsigned c = 0; c < intrin->dest.ssa.num_components; c++)
- comps[c] = c == 2 ? image_depth : nir_channel(b, &intrin->dest.ssa, c);
+ for (unsigned c = 0; c < intrin->def.num_components; c++)
+ comps[c] = c == 2 ? image_depth : nir_channel(b, &intrin->def, c);
- nir_def *vec = nir_vec(b, comps, intrin->dest.ssa.num_components);
- nir_def_rewrite_uses_after(&intrin->dest.ssa, vec, vec->parent_instr);
+ nir_def *vec = nir_vec(b, comps, intrin->def.num_components);
+ nir_def_rewrite_uses_after(&intrin->def, vec, vec->parent_instr);
return true;
}
nir_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
nir_intrinsic_base(intrin));
- unsigned load_size = intrin->dest.ssa.num_components *
- intrin->dest.ssa.bit_size / 8;
- unsigned load_align = intrin->dest.ssa.bit_size / 8;
+ unsigned load_size = intrin->def.num_components *
+ intrin->def.bit_size / 8;
+ unsigned load_align = intrin->def.bit_size / 8;
assert(load_size < b->shader->constant_data_size);
unsigned max_offset = b->shader->constant_data_size - load_size;
nir_def *data =
nir_load_global_constant(b, const_data_addr,
load_align,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ intrin->def.num_components,
+ intrin->def.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa, data);
+ nir_def_rewrite_uses(&intrin->def, data);
return true;
}
nir_load_push_constant(b, 3, 32, nir_imm_int(b, 0),
.base = offsetof(struct anv_push_constants, cs.base_work_group_id),
.range = sizeof_field(struct anv_push_constants, cs.base_work_group_id));
- nir_def_rewrite_uses(&intrin->dest.ssa, base_workgroup_id);
+ nir_def_rewrite_uses(&intrin->def, base_workgroup_id);
return true;
}
nir_load_push_constant(b, 1, 64, nir_imm_int(b, 0),
.base = offsetof(struct anv_push_constants, ray_query_globals),
.range = sizeof_field(struct anv_push_constants, ray_query_globals));
- nir_def_rewrite_uses(&intrin->dest.ssa, rq_globals);
+ nir_def_rewrite_uses(&intrin->def, rq_globals);
return true;
}
b, pc_load,
nir_load_reloc_const_intel(
b, BRW_SHADER_RELOC_DESCRIPTORS_ADDR_HIGH));
- nir_def_rewrite_uses(&intrin->dest.ssa, desc_addr);
+ nir_def_rewrite_uses(&intrin->def, desc_addr);
break;
}
.dest_type = nir_type_uint32);
pc_load = nir_iand_imm(
b, pc_load, ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK);
- nir_def_rewrite_uses(&intrin->dest.ssa, pc_load);
+ nir_def_rewrite_uses(&intrin->def, pc_load);
break;
}
b->cursor = nir_before_instr(instr);
nir_def_rewrite_uses(
- &load->dest.ssa,
+ &load->def,
nir_load_push_constant(
b, 1, 32,
nir_imm_int(b, 0),
value = build_view_index(&state);
}
- nir_def_rewrite_uses(&load->dest.ssa, value);
+ nir_def_rewrite_uses(&load->def, value);
nir_instr_remove(&load->instr);
}
if (load->intrinsic == nir_intrinsic_load_global_constant_bounded)
bound = load->src[2].ssa;
- unsigned bit_size = load->dest.ssa.bit_size;
+ unsigned bit_size = load->def.bit_size;
assert(bit_size >= 8 && bit_size % 8 == 0);
unsigned byte_size = bit_size / 8;
nir_push_if(b, in_bounds);
nir_def *load_val =
- nir_build_load_global_constant(b, load->dest.ssa.num_components,
- load->dest.ssa.bit_size, addr,
+ nir_build_load_global_constant(b, load->def.num_components,
+ load->def.bit_size, addr,
.access = nir_intrinsic_access(load),
.align_mul = nir_intrinsic_align_mul(load),
.align_offset = nir_intrinsic_align_offset(load));
val = nir_if_phi(b, load_val, zero);
} else {
- val = nir_build_load_global_constant(b, load->dest.ssa.num_components,
- load->dest.ssa.bit_size, addr,
+ val = nir_build_load_global_constant(b, load->def.num_components,
+ load->def.bit_size, addr,
.access = nir_intrinsic_access(load),
.align_mul = nir_intrinsic_align_mul(load),
.align_offset = nir_intrinsic_align_offset(load));
}
}
- nir_def_rewrite_uses(&load->dest.ssa, val);
+ nir_def_rewrite_uses(&load->def, val);
nir_instr_remove(&load->instr);
return true;
/* Check if the load was promoted to a push constant. */
const unsigned load_offset = const_load_offset[0].u32;
const int load_bytes = nir_intrinsic_dest_components(intrin) *
- (intrin->dest.ssa.bit_size / 8);
+ (intrin->def.bit_size / 8);
for (unsigned i = 0; i < ARRAY_SIZE(bind_map->push_ranges); i++) {
if (bind_map->push_ranges[i].set == binding->set &&
/* Acceleration structure descriptors are always uint64_t */
nir_def *desc = build_load_descriptor_mem(b, desc_addr, 0, 1, 64, state);
- assert(load_desc->dest.ssa.bit_size == 64);
- assert(load_desc->dest.ssa.num_components == 1);
- nir_def_rewrite_uses(&load_desc->dest.ssa, desc);
+ assert(load_desc->def.bit_size == 64);
+ assert(load_desc->def.num_components == 1);
+ nir_def_rewrite_uses(&load_desc->def, desc);
nir_instr_remove(&load_desc->instr);
return true;
intrin->src[0].ssa,
addr_format, state);
- assert(intrin->dest.ssa.bit_size == index->bit_size);
- assert(intrin->dest.ssa.num_components == index->num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, index);
+ assert(intrin->def.bit_size == index->bit_size);
+ assert(intrin->def.num_components == index->num_components);
+ nir_def_rewrite_uses(&intrin->def, index);
nir_instr_remove(&intrin->instr);
return true;
intrin->src[1].ssa,
addr_format);
- assert(intrin->dest.ssa.bit_size == index->bit_size);
- assert(intrin->dest.ssa.num_components == index->num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, index);
+ assert(intrin->def.bit_size == index->bit_size);
+ assert(intrin->def.num_components == index->num_components);
+ nir_def_rewrite_uses(&intrin->def, index);
nir_instr_remove(&intrin->instr);
return true;
build_buffer_addr_for_res_index(b, desc_type, intrin->src[0].ssa,
addr_format, state);
- assert(intrin->dest.ssa.bit_size == desc->bit_size);
- assert(intrin->dest.ssa.num_components == desc->num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+ assert(intrin->def.bit_size == desc->bit_size);
+ assert(intrin->def.num_components == desc->num_components);
+ nir_def_rewrite_uses(&intrin->def, desc);
nir_instr_remove(&intrin->instr);
return true;
case nir_address_format_64bit_global_32bit_offset:
case nir_address_format_64bit_bounded_global: {
nir_def *size = nir_channel(b, desc, 2);
- nir_def_rewrite_uses(&intrin->dest.ssa, size);
+ nir_def_rewrite_uses(&intrin->def, size);
nir_instr_remove(&intrin->instr);
break;
}
nir_def *desc =
build_load_var_deref_descriptor_mem(b, deref, param * 16,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size, state);
+ intrin->def.num_components,
+ intrin->def.bit_size, state);
- nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+ nir_def_rewrite_uses(&intrin->def, desc);
} else {
nir_def *index = NULL;
if (deref->deref_type != nir_deref_type_var) {
nir_def *data;
if (!anv_use_relocations(state->pdevice)) {
- unsigned load_size = intrin->dest.ssa.num_components *
- intrin->dest.ssa.bit_size / 8;
- unsigned load_align = intrin->dest.ssa.bit_size / 8;
+ unsigned load_size = intrin->def.num_components *
+ intrin->def.bit_size / 8;
+ unsigned load_align = intrin->def.bit_size / 8;
assert(load_size < b->shader->constant_data_size);
unsigned max_offset = b->shader->constant_data_size - load_size;
data = nir_load_global_constant(b, nir_iadd(b, const_data_base_addr,
nir_u2u64(b, offset)),
load_align,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size);
+ intrin->def.num_components,
+ intrin->def.bit_size);
} else {
nir_def *index = nir_imm_int(b, state->constants_offset);
- data = nir_load_ubo(b, intrin->num_components, intrin->dest.ssa.bit_size,
+ data = nir_load_ubo(b, intrin->num_components, intrin->def.bit_size,
index, offset,
- .align_mul = intrin->dest.ssa.bit_size / 8,
+ .align_mul = intrin->def.bit_size / 8,
.align_offset = 0,
.range_base = nir_intrinsic_base(intrin),
.range = nir_intrinsic_range(intrin));
}
- nir_def_rewrite_uses(&intrin->dest.ssa, data);
+ nir_def_rewrite_uses(&intrin->def, data);
return true;
}
nir_load_push_constant(b, 3, 32, nir_imm_int(b, 0),
.base = offsetof(struct anv_push_constants, cs.base_work_group_id),
.range = 3 * sizeof(uint32_t));
- nir_def_rewrite_uses(&intrin->dest.ssa, base_workgroup_id);
+ nir_def_rewrite_uses(&intrin->def, base_workgroup_id);
return true;
}
b->cursor = nir_after_instr(&tex->instr);
- assert(tex->dest.ssa.bit_size == 32);
- assert(tex->dest.ssa.num_components == 4);
+ assert(tex->def.bit_size == 32);
+ assert(tex->def.num_components == 4);
/* Initializing to undef is ok; nir_opt_undef will clean it up. */
nir_def *undef = nir_undef(b, 1, 32);
comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_float(b, 1);
else
comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_int(b, 1);
- comps[ISL_CHANNEL_SELECT_RED] = nir_channel(b, &tex->dest.ssa, 0);
- comps[ISL_CHANNEL_SELECT_GREEN] = nir_channel(b, &tex->dest.ssa, 1);
- comps[ISL_CHANNEL_SELECT_BLUE] = nir_channel(b, &tex->dest.ssa, 2);
- comps[ISL_CHANNEL_SELECT_ALPHA] = nir_channel(b, &tex->dest.ssa, 3);
+ comps[ISL_CHANNEL_SELECT_RED] = nir_channel(b, &tex->def, 0);
+ comps[ISL_CHANNEL_SELECT_GREEN] = nir_channel(b, &tex->def, 1);
+ comps[ISL_CHANNEL_SELECT_BLUE] = nir_channel(b, &tex->def, 2);
+ comps[ISL_CHANNEL_SELECT_ALPHA] = nir_channel(b, &tex->def, 3);
nir_def *swiz_comps[4];
for (unsigned i = 0; i < 4; i++) {
nir_def *swiz_tex_res = nir_vec(b, swiz_comps, 4);
/* Rewrite uses before we insert so we don't rewrite this use */
- nir_def_rewrite_uses_after(&tex->dest.ssa,
+ nir_def_rewrite_uses_after(&tex->def,
swiz_tex_res,
swiz_tex_res->parent_instr);
}
value = build_view_index(&state);
}
- nir_def_rewrite_uses(&load->dest.ssa, value);
+ nir_def_rewrite_uses(&load->def, value);
nir_instr_remove(&load->instr);
}
if (load->intrinsic == nir_intrinsic_load_global_constant_bounded)
bound = load->src[2].ssa;
- unsigned bit_size = load->dest.ssa.bit_size;
+ unsigned bit_size = load->def.bit_size;
assert(bit_size >= 8 && bit_size % 8 == 0);
unsigned byte_size = bit_size / 8;
nir_push_if(b, in_bounds);
nir_def *load_val =
- nir_build_load_global_constant(b, load->dest.ssa.num_components,
- load->dest.ssa.bit_size, addr,
+ nir_build_load_global_constant(b, load->def.num_components,
+ load->def.bit_size, addr,
.access = nir_intrinsic_access(load),
.align_mul = nir_intrinsic_align_mul(load),
.align_offset = nir_intrinsic_align_offset(load));
val = nir_if_phi(b, load_val, zero);
} else {
- val = nir_build_load_global_constant(b, load->dest.ssa.num_components,
- load->dest.ssa.bit_size, addr,
+ val = nir_build_load_global_constant(b, load->def.num_components,
+ load->def.bit_size, addr,
.access = nir_intrinsic_access(load),
.align_mul = nir_intrinsic_align_mul(load),
.align_offset = nir_intrinsic_align_offset(load));
}
}
- nir_def_rewrite_uses(&load->dest.ssa, val);
+ nir_def_rewrite_uses(&load->def, val);
nir_instr_remove(&load->instr);
return true;
tex->dest_type = nir_type_int32;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &texture->dest.ssa);
+ &texture->def);
- nir_def_init(&tex->instr, &tex->dest.ssa, nir_tex_instr_dest_size(tex), 32);
+ nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex), 32);
nir_builder_instr_insert(b, &tex->instr);
- state->image_size = nir_i2f32(b, &tex->dest.ssa);
+ state->image_size = nir_i2f32(b, &tex->def);
return state->image_size;
}
tex->sampler_index = old_tex->sampler_index;
tex->is_array = old_tex->is_array;
- nir_def_init(&tex->instr, &tex->dest.ssa, old_tex->dest.ssa.num_components,
- old_tex->dest.ssa.bit_size);
+ nir_def_init(&tex->instr, &tex->def, old_tex->def.num_components,
+ old_tex->def.bit_size);
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
static unsigned
swizzled_bpcs);
}
- nir_def_rewrite_uses(&tex->dest.ssa, result);
+ nir_def_rewrite_uses(&tex->def, result);
nir_instr_remove(&tex->instr);
return true;
nir_deref_instr *deref = nir_build_deref_var(p->b, var);
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &deref->dest.ssa);
+ &deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &deref->dest.ssa);
+ &deref->def);
nir_def *src2 =
nir_channels(p->b, texcoord,
tex->src[4] = nir_tex_src_for_ssa(nir_tex_src_comparator, src4);
}
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
- p->src_texture[unit] = &tex->dest.ssa;
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
+ p->src_texture[unit] = &tex->def;
nir_builder_instr_insert(p->b, &tex->instr);
BITSET_SET(p->b->shader->info.textures_used, unit);
unsigned src_number = 0;
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &deref->dest.ssa);
+ &deref->def);
src_number++;
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &deref->dest.ssa);
+ &deref->def);
src_number++;
instr->src[src_number] = nir_tex_src_for_ssa(nir_tex_src_coord,
assert(src_number == num_srcs);
- nir_def_init(&instr->instr, &instr->dest.ssa, 4, 32);
+ nir_def_init(&instr->instr, &instr->def, 4, 32);
nir_builder_instr_insert(b, &instr->instr);
- return &instr->dest.ssa;
+ return &instr->def;
}
static const nir_op op_trans[MAX_OPCODE] = {
glsl_get_sampler_dim_coordinate_components(tex->sampler_dim);
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &tex_deref->dest.ssa);
+ &tex_deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &tex_deref->dest.ssa);
+ &tex_deref->def);
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord,
nir_trim_vector(t->b, coord, tex->coord_components));
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(t->b, &tex->instr);
- t->temps[r] = &tex->dest.ssa;
+ t->temps[r] = &tex->def;
} else if (texinst->Opcode == ATI_FRAGMENT_SHADER_PASS_OP) {
t->temps[r] = coord;
}
tex->coord_components = 2;
tex->dest_type = alu_type;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &deref->dest.ssa);
+ &deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &deref->dest.ssa);
+ &deref->def);
tex->src[2] =
nir_tex_src_for_ssa(nir_tex_src_coord,
nir_trim_vector(b, nir_load_var(b, texcoord),
tex->coord_components));
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
- return nir_channel(b, &tex->dest.ssa, 0);
+ return nir_channel(b, &tex->def, 0);
}
static void *
def = nir_swizzle(b, def, swiz, intrin->num_components);
/* and rewrite uses of the original instruction: */
- nir_def_rewrite_uses(&intrin->dest.ssa, def);
+ nir_def_rewrite_uses(&intrin->def, def);
/* at this point intrin should be unused. We need to remove it
* (rather than waiting for DCE pass) to avoid dangling reference
assert(samp);
nir_deref_instr *tex_deref_instr = nir_build_deref_var(b, samp);
- nir_def *tex_deref = &tex_deref_instr->dest.ssa;
+ nir_def *tex_deref = &tex_deref_instr->def;
nir_instr_rewrite_src(&tex->instr,
&tex->src[tex_index].src,
tex->dest_type = nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(tex_var->type));
tex->src[0].src_type = nir_tex_src_texture_deref;
- tex->src[0].src = nir_src_for_ssa(&tex_deref->dest.ssa);
+ tex->src[0].src = nir_src_for_ssa(&tex_deref->def);
tex->src[1].src_type = nir_tex_src_sampler_deref;
- tex->src[1].src = nir_src_for_ssa(&tex_deref->dest.ssa);
+ tex->src[1].src = nir_src_for_ssa(&tex_deref->def);
tex->src[2].src_type = nir_tex_src_coord;
tex->src[2].src = nir_src_for_ssa(texcoord);
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
- nir_def *result = &tex->dest.ssa;
+ nir_def *result = &tex->def;
if (conversion == ST_PBO_CONVERT_SINT_TO_UINT)
result = nir_imax(&b, result, zero);
img_var->data.image.format = format;
nir_deref_instr *img_deref = nir_build_deref_var(&b, img_var);
- nir_image_deref_store(&b, &img_deref->dest.ssa,
+ nir_image_deref_store(&b, &img_deref->def,
nir_vec4(&b, pbo_addr, zero, zero, zero),
zero,
result,
txf->src[1] = nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(&b, 0));
txf->src[2].src_type = nir_tex_src_texture_deref;
nir_deref_instr *sampler_deref = nir_build_deref_var(&b, sampler);
- txf->src[2].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
+ txf->src[2].src = nir_src_for_ssa(&sampler_deref->def);
- nir_def_init(&txf->instr, &txf->dest.ssa, 4, 32);
+ nir_def_init(&txf->instr, &txf->def, 4, 32);
nir_builder_instr_insert(&b, &txf->instr);
/* pass the grid offset as the coord to get the zero-indexed buffer offset */
- do_shader_conversion(&b, &txf->dest.ssa, num_components, global_id, &sd);
+ do_shader_conversion(&b, &txf->def, num_components, global_id, &sd);
nir_pop_if(&b, NULL);
* arbitrary type for it.
*/
for (int pass = 0; pass < 2; ++pass) {
- nir_foreach_use_safe(src, &context->deref->dest.ssa) {
+ nir_foreach_use_safe(src, &context->deref->def) {
enum image_type type;
if (src->parent_instr->type == nir_instr_type_intrinsic) {
}
/* No actual intrinsic needed here, just reference the loaded variable */
- nir_def_rewrite_uses(&intrinsic->dest.ssa, *cached_deref);
+ nir_def_rewrite_uses(&intrinsic->def, *cached_deref);
nir_instr_remove(&intrinsic->instr);
break;
}
if (nir_instr_ssa_def(instr)->bit_size != 64)
continue;
- intrinsic->dest.ssa.bit_size = 32;
+ intrinsic->def.bit_size = 32;
b.cursor = nir_after_instr(instr);
- nir_def *i64 = nir_u2u64(&b, &intrinsic->dest.ssa);
+ nir_def *i64 = nir_u2u64(&b, &intrinsic->def);
nir_def_rewrite_uses_after(
- &intrinsic->dest.ssa,
+ &intrinsic->def,
i64,
i64->parent_instr);
}
load_ubo(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var, unsigned offset)
{
return nir_load_ubo(b,
- intr->dest.ssa.num_components,
- intr->dest.ssa.bit_size,
+ intr->def.num_components,
+ intr->def.bit_size,
nir_imm_int(b, var->data.binding),
nir_imm_int(b, offset),
.align_mul = 256,
.align_offset = offset,
.range_base = offset,
- .range = intr->dest.ssa.bit_size * intr->dest.ssa.num_components / 8);
+ .range = intr->def.bit_size * intr->def.num_components / 8);
}
static bool
nir_def *offset = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
global_offset_x));
- nir_def_rewrite_uses(&intr->dest.ssa, offset);
+ nir_def_rewrite_uses(&intr->def, offset);
nir_instr_remove(&intr->instr);
return true;
}
nir_def *dim = load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
work_dim));
- nir_def_rewrite_uses(&intr->dest.ssa, dim);
+ nir_def_rewrite_uses(&intr->def, dim);
nir_instr_remove(&intr->instr);
return true;
}
nir_def *count =
load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
group_count_total_x));
- nir_def_rewrite_uses(&intr->dest.ssa, count);
+ nir_def_rewrite_uses(&intr->def, count);
nir_instr_remove(&intr->instr);
return true;
}
nir_def *offset =
load_ubo(b, intr, var, offsetof(struct clc_work_properties_data,
group_id_offset_x));
- nir_def_rewrite_uses(&intr->dest.ssa, offset);
+ nir_def_rewrite_uses(&intr->def, offset);
nir_instr_remove(&intr->instr);
return true;
}
{
b->cursor = nir_before_instr(&intr->instr);
- unsigned bit_size = intr->dest.ssa.bit_size;
+ unsigned bit_size = intr->def.bit_size;
enum glsl_base_type base_type;
switch (bit_size) {
}
const struct glsl_type *type =
- glsl_vector_type(base_type, intr->dest.ssa.num_components);
+ glsl_vector_type(base_type, intr->def.num_components);
nir_def *ptr = nir_vec2(b, nir_imm_int(b, var->data.binding),
nir_u2uN(b, intr->src[0].ssa, 32));
nir_deref_instr *deref = nir_build_deref_cast(b, ptr, nir_var_mem_ubo, type,
nir_def *result =
nir_load_deref(b, deref);
- nir_def_rewrite_uses(&intr->dest.ssa, result);
+ nir_def_rewrite_uses(&intr->def, result);
nir_instr_remove(&intr->instr);
return true;
}
if (!printf_var) {
printf_var = add_printf_var(nir, uav_id);
nir_deref_instr *deref = nir_build_deref_var(&b, printf_var);
- printf_deref = &deref->dest.ssa;
+ printf_deref = &deref->def;
}
- nir_def_rewrite_uses(&intrin->dest.ssa, printf_deref);
+ nir_def_rewrite_uses(&intrin->def, printf_deref);
progress = true;
}
}
static bool
lower_32b_offset_load(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var)
{
- unsigned bit_size = intr->dest.ssa.bit_size;
- unsigned num_components = intr->dest.ssa.num_components;
+ unsigned bit_size = intr->def.bit_size;
+ unsigned num_components = intr->def.num_components;
unsigned num_bits = num_components * bit_size;
b->cursor = nir_before_instr(&intr->instr);
}
nir_def *result = nir_vec(b, comps, num_components);
- nir_def_rewrite_uses(&intr->dest.ssa, result);
+ nir_def_rewrite_uses(&intr->def, result);
nir_instr_remove(&intr->instr);
return true;
if (var->data.mode == nir_var_mem_shared) {
/* Use the dedicated masked intrinsic */
nir_deref_instr *deref = nir_build_deref_array(b, nir_build_deref_var(b, var), index);
- nir_deref_atomic(b, 32, &deref->dest.ssa, nir_inot(b, mask), .atomic_op = nir_atomic_op_iand);
- nir_deref_atomic(b, 32, &deref->dest.ssa, vec32, .atomic_op = nir_atomic_op_ior);
+ nir_deref_atomic(b, 32, &deref->def, nir_inot(b, mask), .atomic_op = nir_atomic_op_iand);
+ nir_deref_atomic(b, 32, &deref->def, vec32, .atomic_op = nir_atomic_op_ior);
} else {
/* For scratch, since we don't need atomics, just generate the read-modify-write in NIR */
nir_def *load = nir_load_array_var(b, var, index);
if (parent && parent->var->data.mode != nir_var_mem_constant) {
deref->modes = parent->var->data.mode;
/* Also change "pointer" size to 32-bit since this is now a logical pointer */
- deref->dest.ssa.bit_size = 32;
+ deref->def.bit_size = 32;
if (deref->deref_type == nir_deref_type_array) {
b.cursor = nir_before_instr(instr);
nir_src_rewrite(&deref->arr.index, nir_u2u32(&b, deref->arr.index.ssa));
nir_deref_instr *comp_deref = nir_build_deref_array(b, new_var_deref, final_index);
components[i] = nir_load_deref(b, comp_deref);
}
- nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, components, vector_comps));
+ nir_def_rewrite_uses(&intr->def, nir_vec(b, components, vector_comps));
} else if (intr->intrinsic == nir_intrinsic_store_deref) {
for (unsigned i = 0; i < vector_comps; ++i) {
if (((1 << i) & nir_intrinsic_write_mask(intr)) == 0)
}
nir_instr_remove(instr);
} else {
- nir_src_rewrite(&intr->src[0], &nir_build_deref_array(b, new_var_deref, index)->dest.ssa);
+ nir_src_rewrite(&intr->src[0], &nir_build_deref_array(b, new_var_deref, index)->def);
}
nir_deref_path_finish(&path);
if (glsl_get_bit_size(old_glsl_type) < glsl_get_bit_size(var_scalar_type)) {
deref->type = var_scalar_type;
if (intr->intrinsic == nir_intrinsic_load_deref) {
- intr->dest.ssa.bit_size = glsl_get_bit_size(var_scalar_type);
+ intr->def.bit_size = glsl_get_bit_size(var_scalar_type);
b->cursor = nir_after_instr(instr);
- nir_def *downcast = nir_type_convert(b, &intr->dest.ssa, new_type, old_type, nir_rounding_mode_undef);
- nir_def_rewrite_uses_after(&intr->dest.ssa, downcast, downcast->parent_instr);
+ nir_def *downcast = nir_type_convert(b, &intr->def, new_type, old_type, nir_rounding_mode_undef);
+ nir_def_rewrite_uses_after(&intr->def, downcast, downcast->parent_instr);
}
else {
b->cursor = nir_before_instr(instr);
if (intr->intrinsic == nir_intrinsic_load_deref) {
nir_def *src1 = nir_load_deref(b, deref);
nir_def *src2 = nir_load_deref(b, deref2);
- nir_def_rewrite_uses(&intr->dest.ssa, nir_pack_64_2x32_split(b, src1, src2));
+ nir_def_rewrite_uses(&intr->def, nir_pack_64_2x32_split(b, src1, src2));
} else {
nir_def *src1 = nir_unpack_64_2x32_split_x(b, intr->src[1].ssa);
nir_def *src2 = nir_unpack_64_2x32_split_y(b, intr->src[1].ssa);
nir_deref_instr *deref = nir_build_deref_array(b, nir_build_deref_var(b, var), index);
nir_def *result;
if (intr->intrinsic == nir_intrinsic_shared_atomic_swap)
- result = nir_deref_atomic_swap(b, 32, &deref->dest.ssa, intr->src[1].ssa, intr->src[2].ssa,
+ result = nir_deref_atomic_swap(b, 32, &deref->def, intr->src[1].ssa, intr->src[2].ssa,
.atomic_op = nir_intrinsic_atomic_op(intr));
else
- result = nir_deref_atomic(b, 32, &deref->dest.ssa, intr->src[1].ssa,
+ result = nir_deref_atomic(b, 32, &deref->def, intr->src[1].ssa,
.atomic_op = nir_intrinsic_atomic_op(intr));
- nir_def_rewrite_uses(&intr->dest.ssa, result);
+ nir_def_rewrite_uses(&intr->def, result);
nir_instr_remove(&intr->instr);
return true;
}
nir_deref_instr *deref_cast =
nir_build_deref_cast(b, ptr, nir_var_mem_ssbo, deref->type,
glsl_get_explicit_stride(var->type));
- nir_def_rewrite_uses(&deref->dest.ssa,
- &deref_cast->dest.ssa);
+ nir_def_rewrite_uses(&deref->def,
+ &deref_cast->def);
nir_instr_remove(&deref->instr);
deref = deref_cast;
{
nir_phi_instr *lowered = nir_phi_instr_create(b->shader);
int num_components = 0;
- int old_bit_size = phi->dest.ssa.bit_size;
+ int old_bit_size = phi->def.bit_size;
nir_foreach_phi_src(src, phi) {
assert(num_components == 0 || num_components == src->src.ssa->num_components);
nir_phi_instr_add_src(lowered, src->pred, nir_src_for_ssa(cast));
}
- nir_def_init(&lowered->instr, &lowered->dest.ssa, num_components,
+ nir_def_init(&lowered->instr, &lowered->def, num_components,
new_bit_size);
b->cursor = nir_before_instr(&phi->instr);
nir_builder_instr_insert(b, &lowered->instr);
b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
- nir_def *result = nir_u2uN(b, &lowered->dest.ssa, old_bit_size);
+ nir_def *result = nir_u2uN(b, &lowered->def, old_bit_size);
- nir_def_rewrite_uses(&phi->dest.ssa, result);
+ nir_def_rewrite_uses(&phi->def, result);
nir_instr_remove(&phi->instr);
}
nir_foreach_block_reverse(block, impl) {
nir_foreach_phi_safe(phi, block) {
- if (phi->dest.ssa.bit_size == 1 ||
- phi->dest.ssa.bit_size >= min_bit_size)
+ if (phi->def.bit_size == 1 ||
+ phi->def.bit_size >= min_bit_size)
continue;
cast_phi(&b, phi, min_bit_size);
new_intermediate_deref = nir_build_deref_array(b, new_intermediate_deref, parent->arr.index.ssa);
}
nir_deref_instr *new_array_deref = nir_build_deref_array(b, new_intermediate_deref, nir_imm_int(b, total_index % 4));
- nir_def_rewrite_uses(&deref->dest.ssa, &new_array_deref->dest.ssa);
+ nir_def_rewrite_uses(&deref->def, &new_array_deref->def);
return true;
}
nir_const_value_for_int(b->shader->info.workgroup_size[2], 32)
};
nir_def *size = nir_build_imm(b, 3, 32, v);
- nir_def_rewrite_uses(&intr->dest.ssa, size);
+ nir_def_rewrite_uses(&intr->def, size);
nir_instr_remove(&intr->instr);
}
}
nir_deref_path_finish(&path);
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[sampler_idx].src, &new_tail->dest.ssa);
+ nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[sampler_idx].src, &new_tail->def);
return true;
}
}
nir_deref_path_finish(&path);
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[texture_idx].src, &new_tail->dest.ssa);
+ nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[texture_idx].src, &new_tail->def);
return true;
}
const nir_alu_type dest_type = (sysval == SYSTEM_VALUE_FRONT_FACE)
? nir_type_uint32 : nir_get_nir_type_for_glsl_type(var->type);
const unsigned bit_size = (sysval == SYSTEM_VALUE_FRONT_FACE)
- ? 32 : intr->dest.ssa.bit_size;
+ ? 32 : intr->def.bit_size;
b->cursor = nir_before_instr(instr);
- nir_def *result = nir_load_input(b, intr->dest.ssa.num_components, bit_size, nir_imm_int(b, 0),
+ nir_def *result = nir_load_input(b, intr->def.num_components, bit_size, nir_imm_int(b, 0),
.base = var->data.driver_location, .dest_type = dest_type);
/* The nir_type_uint32 is really a nir_type_bool32, but that type is very
if (sysval == SYSTEM_VALUE_FRONT_FACE)
result = nir_ine_imm(b, result, 0);
- nir_def_rewrite_uses(&intr->dest.ssa, result);
+ nir_def_rewrite_uses(&intr->def, result);
return true;
}
// Indexing out of bounds on an array of UBOs is considered undefined
// behavior. Therefore, we just hardcode the index to 0.
- uint8_t bit_size = index->dest.ssa.bit_size;
+ uint8_t bit_size = index->def.bit_size;
nir_def *zero = nir_imm_intN_t(b, 0, bit_size);
nir_def *dest =
nir_vulkan_resource_index(b, index->num_components, bit_size, zero,
.binding = nir_intrinsic_binding(index),
.desc_type = nir_intrinsic_desc_type(index));
- nir_def_rewrite_uses(&index->dest.ssa, dest);
+ nir_def_rewrite_uses(&index->def, dest);
return true;
}
/* When using Nx1x1 groups, use a simple stable algorithm
* which is almost guaranteed to be correct. */
nir_def *subgroup_id = nir_udiv(b, nir_load_local_invocation_index(b), nir_load_subgroup_size(b));
- nir_def_rewrite_uses(&intr->dest.ssa, subgroup_id);
+ nir_def_rewrite_uses(&intr->def, subgroup_id);
return true;
}
.memory_modes = nir_var_mem_shared);
nif = nir_push_if(b, nir_elect(b, 1));
- nir_def *subgroup_id_first_thread = nir_deref_atomic(b, 32, &counter_deref->dest.ssa, nir_imm_int(b, 1),
+ nir_def *subgroup_id_first_thread = nir_deref_atomic(b, 32, &counter_deref->def, nir_imm_int(b, 1),
.atomic_op = nir_atomic_op_iadd);
nir_store_var(b, subgroup_id_local, subgroup_id_first_thread, 1);
nir_pop_if(b, nif);
nir_def *subgroup_id_loaded = nir_load_var(b, subgroup_id_local);
*subgroup_id = nir_read_first_invocation(b, subgroup_id_loaded);
}
- nir_def_rewrite_uses(&intr->dest.ssa, *subgroup_id);
+ nir_def_rewrite_uses(&intr->def, *subgroup_id);
return true;
}
nir_imul(b, nir_channel(b, workgroup_size_vec, 1),
nir_channel(b, workgroup_size_vec, 2)));
nir_def *ret = nir_idiv(b, nir_iadd(b, workgroup_size, size_minus_one), subgroup_size);
- nir_def_rewrite_uses(&intr->dest.ssa, ret);
+ nir_def_rewrite_uses(&intr->def, ret);
return true;
}
{
enum gl_access_qualifier access = nir_intrinsic_access(intrin);
nir_def *srcs[NIR_MAX_VEC_COMPONENTS * NIR_MAX_VEC_COMPONENTS * sizeof(int64_t) / 8];
- unsigned comp_size = intrin->dest.ssa.bit_size / 8;
- unsigned num_comps = intrin->dest.ssa.num_components;
+ unsigned comp_size = intrin->def.bit_size / 8;
+ unsigned num_comps = intrin->def.num_components;
b->cursor = nir_before_instr(&intrin->instr);
nir_deref_instr *ptr = nir_src_as_deref(intrin->src[0]);
const struct glsl_type *cast_type = get_cast_type(alignment * 8);
- nir_deref_instr *cast = nir_build_deref_cast(b, &ptr->dest.ssa, ptr->modes, cast_type, alignment);
+ nir_deref_instr *cast = nir_build_deref_cast(b, &ptr->def, ptr->modes, cast_type, alignment);
unsigned num_loads = DIV_ROUND_UP(comp_size * num_comps, alignment);
for (unsigned i = 0; i < num_loads; ++i) {
- nir_deref_instr *elem = nir_build_deref_ptr_as_array(b, cast, nir_imm_intN_t(b, i, cast->dest.ssa.bit_size));
+ nir_deref_instr *elem = nir_build_deref_ptr_as_array(b, cast, nir_imm_intN_t(b, i, cast->def.bit_size));
srcs[i] = nir_load_deref_with_access(b, elem, access);
}
- nir_def *new_dest = nir_extract_bits(b, srcs, num_loads, 0, num_comps, intrin->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intrin->dest.ssa, new_dest);
+ nir_def *new_dest = nir_extract_bits(b, srcs, num_loads, 0, num_comps, intrin->def.bit_size);
+ nir_def_rewrite_uses(&intrin->def, new_dest);
nir_instr_remove(&intrin->instr);
}
nir_deref_instr *ptr = nir_src_as_deref(intrin->src[0]);
const struct glsl_type *cast_type = get_cast_type(alignment * 8);
- nir_deref_instr *cast = nir_build_deref_cast(b, &ptr->dest.ssa, ptr->modes, cast_type, alignment);
+ nir_deref_instr *cast = nir_build_deref_cast(b, &ptr->def, ptr->modes, cast_type, alignment);
unsigned num_stores = DIV_ROUND_UP(comp_size * num_comps, alignment);
for (unsigned i = 0; i < num_stores; ++i) {
nir_def *substore_val = nir_extract_bits(b, &value, 1, i * alignment * 8, 1, alignment * 8);
- nir_deref_instr *elem = nir_build_deref_ptr_as_array(b, cast, nir_imm_intN_t(b, i, cast->dest.ssa.bit_size));
+ nir_deref_instr *elem = nir_build_deref_ptr_as_array(b, cast, nir_imm_intN_t(b, i, cast->def.bit_size));
nir_store_deref_with_access(b, elem, substore_val, ~0, access);
}
nir_def *val;
if (intrin->intrinsic == nir_intrinsic_load_deref) {
- val = &intrin->dest.ssa;
+ val = &intrin->def;
} else {
val = intrin->src[1].ssa;
}
nir_intrinsic_set_reduction_op(intr, op);
nir_def *final_val = nir_build_alu2(b, nir_intrinsic_reduction_op(intr),
- &intr->dest.ssa, intr->src[0].ssa);
- nir_def_rewrite_uses_after(&intr->dest.ssa, final_val, final_val->parent_instr);
+ &intr->def, intr->src[0].ssa);
+ nir_def_rewrite_uses_after(&intr->def, final_val, final_val->parent_instr);
}
static bool
nir_def *subgroup_id = nir_load_subgroup_invocation(b);
nir_def *active_threads = nir_ballot(b, 4, 32, nir_imm_true(b));
nir_def *base_value;
- uint32_t bit_size = intr->dest.ssa.bit_size;
+ uint32_t bit_size = intr->def.bit_size;
if (op == nir_op_iand || op == nir_op_umin)
base_value = nir_imm_intN_t(b, ~0ull, bit_size);
else if (op == nir_op_imin)
nir_pop_loop(b, loop);
result = nir_load_var(b, result_var);
- nir_def_rewrite_uses(&intr->dest.ssa, result);
+ nir_def_rewrite_uses(&intr->def, result);
return true;
}
nir_variable *var = data;
nir_def *load = nir_ine_imm(b, nir_load_var(b, var), 0);
- nir_def_rewrite_uses(&intr->dest.ssa, load);
+ nir_def_rewrite_uses(&intr->def, load);
nir_instr_remove(instr);
return true;
}
array_tex->src[i].src_type = tex->src[i].src_type;
}
- nir_def_init(&array_tex->instr, &array_tex->dest.ssa,
+ nir_def_init(&array_tex->instr, &array_tex->def,
nir_tex_instr_dest_size(array_tex), 32);
nir_builder_instr_insert(b, &array_tex->instr);
- return &array_tex->dest.ssa;
+ return &array_tex->def;
}
static nir_def *
{
b->cursor = nir_after_instr(&tex->instr);
if (!tex->is_array)
- return nir_trim_vector(b, &tex->dest.ssa, 2);
+ return nir_trim_vector(b, &tex->def, 2);
- nir_def *array_dim = nir_channel(b, &tex->dest.ssa, 2);
+ nir_def *array_dim = nir_channel(b, &tex->def, 2);
nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
- return nir_vec3(b, nir_channel(b, &tex->dest.ssa, 0),
- nir_channel(b, &tex->dest.ssa, 1),
+ return nir_vec3(b, nir_channel(b, &tex->def, 0),
+ nir_channel(b, &tex->def, 1),
cube_array_dim);
}
{
b->cursor = nir_after_instr(&intr->instr);
if (!nir_intrinsic_image_array(intr))
- return nir_trim_vector(b, &intr->dest.ssa, 2);
+ return nir_trim_vector(b, &intr->def, 2);
- nir_def *array_dim = nir_channel(b, &intr->dest.ssa, 2);
+ nir_def *array_dim = nir_channel(b, &intr->def, 2);
nir_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6));
- return nir_vec3(b, nir_channel(b, &intr->dest.ssa, 0),
- nir_channel(b, &intr->dest.ssa, 1),
+ return nir_vec3(b, nir_channel(b, &intr->def, 0),
+ nir_channel(b, &intr->def, 1),
cube_array_dim);
}
}
}
- nir_def_init(&tql->instr, &tql->dest.ssa, 2, 32);
+ nir_def_init(&tql->instr, &tql->def, 2, 32);
nir_builder_instr_insert(b, &tql->instr);
/* DirectX LOD only has a value in the x channel */
- return nir_channel(b, &tql->dest.ssa, 0);
+ return nir_channel(b, &tql->def, 0);
}
typedef struct {
load_bordercolor(nir_builder *b, nir_tex_instr *tex, const dxil_wrap_sampler_state *active_state,
const dxil_texture_swizzle_state *tex_swizzle)
{
- int ndest_comp = tex->dest.ssa.num_components;
+ int ndest_comp = tex->def.num_components;
unsigned swizzle[4] = {
tex_swizzle->swizzle_r,
}
}
- nir_def_init(&txf->instr, &txf->dest.ssa, nir_tex_instr_dest_size(txf), 32);
+ nir_def_init(&txf->instr, &txf->def, nir_tex_instr_dest_size(txf), 32);
nir_builder_instr_insert(b, &txf->instr);
return txf;
nir_tex_instr_add_src(load, nir_tex_src_lod, nir_src_for_ssa(params->lod));
nir_tex_instr_add_src(load, nir_tex_src_coord, nir_src_for_ssa(texcoord));
b->cursor = nir_after_instr(&load->instr);
- return &load->dest.ssa;
+ return &load->def;
}
typedef struct {
fmt == PIPE_FORMAT_R8G8B8_UINT ||
fmt == PIPE_FORMAT_R16G16B16_SINT ||
fmt == PIPE_FORMAT_R16G16B16_UINT);
- if (intr->dest.ssa.num_components == 3)
+ if (intr->def.num_components == 3)
return NULL;
- return nir_vector_insert_imm(b, &intr->dest.ssa, nir_imm_int(b, 1), 3);
+ return nir_vector_insert_imm(b, &intr->def, nir_imm_int(b, 1), 3);
} else {
- nir_def *src = nir_channel(b, &intr->dest.ssa, 0);
+ nir_def *src = nir_channel(b, &intr->def, 0);
switch (fmt) {
case PIPE_FORMAT_R10G10B10A2_SNORM:
return from_10_10_10_2_scaled(b, src, lshift_bgra(b), nir_ushr);
case PIPE_FORMAT_R8G8B8A8_USCALED:
case PIPE_FORMAT_R16G16B16A16_USCALED:
- return nir_u2f32(b, &intr->dest.ssa);
+ return nir_u2f32(b, &intr->def);
case PIPE_FORMAT_R8G8B8A8_SSCALED:
case PIPE_FORMAT_R16G16B16A16_SSCALED:
- return nir_i2f32(b, &intr->dest.ssa);
+ return nir_i2f32(b, &intr->def);
default:
unreachable("Unsupported emulated vertex format");
continue;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_invocation_id ||
- list_is_empty(&intr->dest.ssa.uses) ||
- list_is_singular(&intr->dest.ssa.uses))
+ list_is_empty(&intr->def.uses) ||
+ list_is_singular(&intr->def.uses))
continue;
- nir_foreach_use_including_if_safe(src, &intr->dest.ssa) {
+ nir_foreach_use_including_if_safe(src, &intr->def) {
b.cursor = nir_before_src(src);
nir_src_rewrite(src, nir_load_invocation_id(&b));
}
b.cursor = state.begin_cursor = get_cursor_for_instr_without_cf(instr);
start_tcs_loop(&b, &state, loop_var_deref);
}
- nir_def_rewrite_uses(&intr->dest.ssa, state.count);
+ nir_def_rewrite_uses(&intr->def, state.count);
break;
}
case nir_intrinsic_barrier:
nir_instr_remove(instr);
} else {
b->cursor = nir_after_instr(instr);
- assert(intr->dest.ssa.num_components == 1);
- nir_def_rewrite_uses(&intr->dest.ssa, nir_undef(b, 1, intr->dest.ssa.bit_size));
+ assert(intr->def.num_components == 1);
+ nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, intr->def.bit_size));
}
return true;
}
get_ambiguous_overload(struct ntd_context *ctx, nir_intrinsic_instr *intr,
enum overload_type default_type)
{
- if (BITSET_TEST(ctx->int_types, intr->dest.ssa.index))
- return get_overload(nir_type_int, intr->dest.ssa.bit_size);
- if (BITSET_TEST(ctx->float_types, intr->dest.ssa.index))
- return get_overload(nir_type_float, intr->dest.ssa.bit_size);
+ if (BITSET_TEST(ctx->int_types, intr->def.index))
+ return get_overload(nir_type_int, intr->def.bit_size);
+ if (BITSET_TEST(ctx->float_types, intr->def.index))
+ return get_overload(nir_type_float, intr->def.bit_size);
return default_type;
}
get_ambiguous_overload_alu_type(struct ntd_context *ctx, nir_intrinsic_instr *intr,
nir_alu_type alu_type)
{
- return get_ambiguous_overload(ctx, intr, get_overload(alu_type, intr->dest.ssa.bit_size));
+ return get_ambiguous_overload(ctx, intr, get_overload(alu_type, intr->def.bit_size));
}
static bool
emit_load_global_invocation_id(struct ntd_context *ctx,
nir_intrinsic_instr *intr)
{
- nir_component_mask_t comps = nir_def_components_read(&intr->dest.ssa);
+ nir_component_mask_t comps = nir_def_components_read(&intr->def);
for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
if (comps & (1 << i)) {
if (!globalid)
return false;
- store_def(ctx, &intr->dest.ssa, i, globalid);
+ store_def(ctx, &intr->def, i, globalid);
}
}
return true;
emit_load_local_invocation_id(struct ntd_context *ctx,
nir_intrinsic_instr *intr)
{
- nir_component_mask_t comps = nir_def_components_read(&intr->dest.ssa);
+ nir_component_mask_t comps = nir_def_components_read(&intr->def);
for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
if (comps & (1 << i)) {
*threadidingroup = emit_threadidingroup_call(ctx, idx);
if (!threadidingroup)
return false;
- store_def(ctx, &intr->dest.ssa, i, threadidingroup);
+ store_def(ctx, &intr->def, i, threadidingroup);
}
}
return true;
*flattenedthreadidingroup = emit_flattenedthreadidingroup_call(ctx);
if (!flattenedthreadidingroup)
return false;
- store_def(ctx, &intr->dest.ssa, 0, flattenedthreadidingroup);
+ store_def(ctx, &intr->def, 0, flattenedthreadidingroup);
return true;
}
emit_load_local_workgroup_id(struct ntd_context *ctx,
nir_intrinsic_instr *intr)
{
- nir_component_mask_t comps = nir_def_components_read(&intr->dest.ssa);
+ nir_component_mask_t comps = nir_def_components_read(&intr->def);
for (int i = 0; i < nir_intrinsic_dest_components(intr); i++) {
if (comps & (1 << i)) {
const struct dxil_value *groupid = emit_groupid_call(ctx, idx);
if (!groupid)
return false;
- store_def(ctx, &intr->dest.ssa, i, groupid);
+ store_def(ctx, &intr->def, i, groupid);
}
}
return true;
nir_alu_type type)
{
const struct dxil_value *value = call_unary_external_function(ctx, name, dxil_intr,
- get_overload(type, intr->dest.ssa.bit_size));
- store_def(ctx, &intr->dest.ssa, 0, value);
+ get_overload(type, intr->def.bit_size));
+ store_def(ctx, &intr->def, 0, value);
return true;
}
call_unary_external_function(ctx, "dx.op.sampleIndex", DXIL_INTR_SAMPLE_INDEX, DXIL_I32), 0), 0);
}
- store_def(ctx, &intr->dest.ssa, 0, value);
+ store_def(ctx, &intr->def, 0, value);
return true;
}
const struct dxil_value *value =
dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
- store_def(ctx, &intr->dest.ssa, i, value);
+ store_def(ctx, &intr->def, i, value);
}
- for (unsigned i = num_coords; i < intr->dest.ssa.num_components; ++i) {
+ for (unsigned i = num_coords; i < intr->def.num_components; ++i) {
const struct dxil_value *value = dxil_module_get_float_const(&ctx->mod, 0.0f);
- store_def(ctx, &intr->dest.ssa, i, value);
+ store_def(ctx, &intr->def, i, value);
}
return true;
emit_raw_bufferload_call(ctx, handle, coord,
overload,
nir_intrinsic_dest_components(intr),
- intr->dest.ssa.bit_size / 8) :
+ intr->def.bit_size / 8) :
emit_bufferload_call(ctx, handle, coord, overload);
if (!load)
return false;
dxil_emit_extractval(&ctx->mod, load, i);
if (!val)
return false;
- store_def(ctx, &intr->dest.ssa, i, val);
+ store_def(ctx, &intr->def, i, val);
}
- if (intr->dest.ssa.bit_size == 16)
+ if (intr->def.bit_size == 16)
ctx->mod.feats.native_low_precision = true;
return true;
}
unsigned first_component = nir_intrinsic_has_component(intr) ?
nir_intrinsic_component(intr) : 0;
- for (unsigned i = 0; i < intr->dest.ssa.num_components; i++)
- store_def(ctx, &intr->dest.ssa, i,
+ for (unsigned i = 0; i < intr->def.num_components; i++)
+ store_def(ctx, &intr->def, i,
dxil_emit_extractval(&ctx->mod, agg, i + first_component));
- if (intr->dest.ssa.bit_size == 16)
+ if (intr->def.bit_size == 16)
ctx->mod.feats.native_low_precision = true;
return true;
}
row = get_src(ctx, &intr->src[row_index], 0, nir_type_int);
nir_alu_type out_type = nir_intrinsic_dest_type(intr);
- enum overload_type overload = get_overload(out_type, intr->dest.ssa.bit_size);
+ enum overload_type overload = get_overload(out_type, intr->def.bit_size);
const struct dxil_func *func = dxil_get_function(&ctx->mod, func_name, overload);
struct dxil_signature_record *sig_rec = is_patch_constant ?
&ctx->mod.patch_consts[nir_intrinsic_base(intr)] :
&ctx->mod.inputs[ctx->mod.input_mappings[nir_intrinsic_base(intr)]];
- unsigned comp_size = intr->dest.ssa.bit_size == 64 ? 2 : 1;
+ unsigned comp_size = intr->def.bit_size == 64 ? 2 : 1;
unsigned comp_mask = (1 << (intr->num_components * comp_size)) - 1;
comp_mask <<= (var_base_component * comp_size);
if (is_tess_level)
const struct dxil_value *retval = dxil_emit_call(&ctx->mod, func, args, num_args);
if (!retval)
return false;
- store_def(ctx, &intr->dest.ssa, i, retval);
+ store_def(ctx, &intr->def, i, retval);
}
return true;
}
if (ctx->mod.minor_validator >= 5) {
struct dxil_signature_record *sig_rec =
&ctx->mod.inputs[ctx->mod.input_mappings[nir_intrinsic_base(intr)]];
- unsigned comp_size = intr->dest.ssa.bit_size == 64 ? 2 : 1;
+ unsigned comp_size = intr->def.bit_size == 64 ? 2 : 1;
unsigned comp_mask = (1 << (intr->num_components * comp_size)) - 1;
comp_mask <<= (var_base_component * comp_size);
for (unsigned r = 0; r < sig_rec->num_elements; ++r)
const struct dxil_value *retval = dxil_emit_call(&ctx->mod, func, args, num_args);
if (!retval)
return false;
- store_def(ctx, &intr->dest.ssa, i, retval);
+ store_def(ctx, &intr->def, i, retval);
}
return true;
}
gep_indices[0] = var_array[var->data.driver_location];
for (uint32_t i = 0; i < count; ++i)
- gep_indices[i + 1] = get_src_ssa(ctx, &path.path[i]->dest.ssa, 0);
+ gep_indices[i + 1] = get_src_ssa(ctx, &path.path[i]->def, 0);
return dxil_emit_gep_inbounds(&ctx->mod, gep_indices, count + 1);
}
return false;
const struct dxil_value *retval =
- dxil_emit_load(&ctx->mod, ptr, intr->dest.ssa.bit_size / 8, false);
+ dxil_emit_load(&ctx->mod, ptr, intr->def.bit_size / 8, false);
if (!retval)
return false;
- store_def(ctx, &intr->dest.ssa, 0, retval);
+ store_def(ctx, &intr->def, 0, retval);
return true;
}
if (!retval)
return false;
- store_def(ctx, &intr->dest.ssa, 0, retval);
+ store_def(ctx, &intr->def, 0, retval);
return true;
}
if (!retval)
return false;
- store_def(ctx, &intr->dest.ssa, 0, retval);
+ store_def(ctx, &intr->def, 0, retval);
return true;
}
if (!load_result)
return false;
- assert(intr->dest.ssa.bit_size == 32);
- unsigned num_components = intr->dest.ssa.num_components;
+ assert(intr->def.bit_size == 32);
+ unsigned num_components = intr->def.num_components;
assert(num_components <= 4);
for (unsigned i = 0; i < num_components; ++i) {
const struct dxil_value *component = dxil_emit_extractval(&ctx->mod, load_result, i);
if (!component)
return false;
- store_def(ctx, &intr->dest.ssa, i, component);
+ store_def(ctx, &intr->def, i, component);
}
if (util_format_get_nr_components(nir_intrinsic_format(intr)) > 1)
if (!retval)
return false;
- store_def(ctx, &intr->dest.ssa, 0, retval);
+ store_def(ctx, &intr->def, 0, retval);
return true;
}
if (!retval)
return false;
- store_def(ctx, &intr->dest.ssa, 0, retval);
+ store_def(ctx, &intr->def, 0, retval);
return true;
}
if (!dimensions)
return false;
- for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < intr->def.num_components; ++i) {
const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, dimensions, i);
- store_def(ctx, &intr->dest.ssa, i, retval);
+ store_def(ctx, &intr->def, i, retval);
}
return true;
return false;
const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, dimensions, 0);
- store_def(ctx, &intr->dest.ssa, 0, retval);
+ store_def(ctx, &intr->def, 0, retval);
return true;
}
if (!retval)
return false;
- store_def(ctx, &intr->dest.ssa, 0, retval);
+ store_def(ctx, &intr->def, 0, retval);
return true;
}
if (!retval)
return false;
- store_def(ctx, &intr->dest.ssa, 0, retval);
+ store_def(ctx, &intr->def, 0, retval);
return true;
}
return false;
}
- store_def(ctx, &intr->dest.ssa, 0, index_value);
- store_def(ctx, &intr->dest.ssa, 1, dxil_module_get_int32_const(&ctx->mod, 0));
+ store_def(ctx, &intr->def, 0, index_value);
+ store_def(ctx, &intr->def, 1, dxil_module_get_int32_const(&ctx->mod, 0));
return true;
}
handle = emit_annotate_handle(ctx, unannotated_handle, res_props);
}
- store_ssa_def(ctx, &intr->dest.ssa, 0, handle);
- store_def(ctx, &intr->dest.ssa, 1, get_src(ctx, &intr->src[0], 1, nir_type_uint32));
+ store_ssa_def(ctx, &intr->def, 0, handle);
+ store_def(ctx, &intr->def, 1, get_src(ctx, &intr->src[0], 1, nir_type_uint32));
return true;
}
const struct dxil_value *coord = dxil_emit_binop(&ctx->mod, DXIL_BINOP_ADD,
dxil_emit_extractval(&ctx->mod, v, i),
dxil_module_get_float_const(&ctx->mod, 0.5f), 0);
- store_def(ctx, &intr->dest.ssa, i, coord);
+ store_def(ctx, &intr->def, i, coord);
}
return true;
}
return emit_load_unary_external_function(ctx, intr, "dx.op.sampleIndex",
DXIL_INTR_SAMPLE_INDEX, nir_type_int);
- store_def(ctx, &intr->dest.ssa, 0, dxil_module_get_int32_const(&ctx->mod, 0));
+ store_def(ctx, &intr->def, 0, dxil_module_get_int32_const(&ctx->mod, 0));
return true;
}
{
ctx->mod.feats.wave_ops = 1;
const struct dxil_func *func = dxil_get_function(&ctx->mod, "dx.op.waveReadLaneFirst",
- get_overload(nir_type_uint, intr->dest.ssa.bit_size));
+ get_overload(nir_type_uint, intr->def.bit_size));
const struct dxil_value *args[] = {
dxil_module_get_int32_const(&ctx->mod, DXIL_INTR_WAVE_READ_LANE_FIRST),
get_src(ctx, intr->src, 0, nir_type_uint),
const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
if (!ret)
return false;
- store_def(ctx, &intr->dest.ssa, 0, ret);
+ store_def(ctx, &intr->def, 0, ret);
return true;
}
ctx->mod.feats.wave_ops = 1;
bool quad = intr->intrinsic == nir_intrinsic_quad_broadcast;
const struct dxil_func *func = dxil_get_function(&ctx->mod, quad ? "dx.op.quadReadLaneAt" : "dx.op.waveReadLaneAt",
- get_overload(nir_type_uint, intr->dest.ssa.bit_size));
+ get_overload(nir_type_uint, intr->def.bit_size));
const struct dxil_value *args[] = {
dxil_module_get_int32_const(&ctx->mod, quad ? DXIL_INTR_QUAD_READ_LANE_AT : DXIL_INTR_WAVE_READ_LANE_AT),
get_src(ctx, &intr->src[0], 0, nir_type_uint),
const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
if (!ret)
return false;
- store_def(ctx, &intr->dest.ssa, 0, ret);
+ store_def(ctx, &intr->def, 0, ret);
return true;
}
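The wave and quad hunks all follow one call shape: look up the dx.op function at the overload matching the def's bit size, pass the opcode constant plus the NIR sources, and store the scalar return. Reassembling the fragments above into a single function gives roughly this (the function name and exact helper signatures are assumptions):

static bool
emit_read_first_invocation(struct ntd_context *ctx, nir_intrinsic_instr *intr)
{
   ctx->mod.feats.wave_ops = 1;

   /* The overload is keyed on the destination bit size, e.g. i32 vs i64. */
   const struct dxil_func *func =
      dxil_get_function(&ctx->mod, "dx.op.waveReadLaneFirst",
                        get_overload(nir_type_uint, intr->def.bit_size));

   const struct dxil_value *args[] = {
      dxil_module_get_int32_const(&ctx->mod, DXIL_INTR_WAVE_READ_LANE_FIRST),
      get_src(ctx, intr->src, 0, nir_type_uint),
   };

   const struct dxil_value *ret =
      dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
   if (!ret)
      return false;

   store_def(ctx, &intr->def, 0, ret);
   return true;
}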
const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
if (!ret)
return false;
- store_def(ctx, &intr->dest.ssa, 0, ret);
+ store_def(ctx, &intr->def, 0, ret);
return true;
}
const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
if (!ret)
return false;
- store_def(ctx, &intr->dest.ssa, 0, ret);
+ store_def(ctx, &intr->def, 0, ret);
return true;
}
if (!ret)
return false;
for (uint32_t i = 0; i < 4; ++i)
- store_def(ctx, &intr->dest.ssa, i, dxil_emit_extractval(&ctx->mod, ret, i));
+ store_def(ctx, &intr->def, i, dxil_emit_extractval(&ctx->mod, ret, i));
return true;
}
{
ctx->mod.feats.wave_ops = 1;
const struct dxil_func *func = dxil_get_function(&ctx->mod, "dx.op.quadOp",
- get_overload(nir_type_uint, intr->dest.ssa.bit_size));
+ get_overload(nir_type_uint, intr->def.bit_size));
const struct dxil_value *args[] = {
dxil_module_get_int32_const(&ctx->mod, DXIL_INTR_QUAD_OP),
get_src(ctx, intr->src, 0, nir_type_uint),
const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
if (!ret)
return false;
- store_def(ctx, &intr->dest.ssa, 0, ret);
+ store_def(ctx, &intr->def, 0, ret);
return true;
}
{
enum dxil_wave_bit_op_kind wave_bit_op = get_reduce_bit_op(nir_intrinsic_reduction_op(intr));
const struct dxil_func *func = dxil_get_function(&ctx->mod, "dx.op.waveActiveBit",
- get_overload(nir_type_uint, intr->dest.ssa.bit_size));
+ get_overload(nir_type_uint, intr->def.bit_size));
const struct dxil_value *args[] = {
dxil_module_get_int32_const(&ctx->mod, DXIL_INTR_WAVE_ACTIVE_BIT),
get_src(ctx, intr->src, 0, nir_type_uint),
const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
if (!ret)
return false;
- store_def(ctx, &intr->dest.ssa, 0, ret);
+ store_def(ctx, &intr->def, 0, ret);
return true;
}
nir_alu_type alu_type = nir_op_infos[reduction_op].input_types[0];
enum dxil_wave_op_kind wave_op = get_reduce_op(reduction_op);
const struct dxil_func *func = dxil_get_function(&ctx->mod, is_prefix ? "dx.op.wavePrefixOp" : "dx.op.waveActiveOp",
- get_overload(alu_type, intr->dest.ssa.bit_size));
+ get_overload(alu_type, intr->def.bit_size));
bool is_unsigned = alu_type == nir_type_uint;
const struct dxil_value *args[] = {
dxil_module_get_int32_const(&ctx->mod, is_prefix ? DXIL_INTR_WAVE_PREFIX_OP : DXIL_INTR_WAVE_ACTIVE_OP),
const struct dxil_value *ret = dxil_emit_call(&ctx->mod, func, args, ARRAY_SIZE(args));
if (!ret)
return false;
- store_def(ctx, &intr->dest.ssa, 0, ret);
+ store_def(ctx, &intr->def, 0, ret);
return true;
}
/* Just store the values, we'll use these to build a GEP in the load or store */
switch (instr->deref_type) {
case nir_deref_type_var:
- store_def(ctx, &instr->dest.ssa, 0, dxil_module_get_int_const(&ctx->mod, 0, instr->dest.ssa.bit_size));
+ store_def(ctx, &instr->def, 0, dxil_module_get_int_const(&ctx->mod, 0, instr->def.bit_size));
return true;
case nir_deref_type_array:
- store_def(ctx, &instr->dest.ssa, 0, get_src(ctx, &instr->arr.index, 0, nir_type_int));
+ store_def(ctx, &instr->def, 0, get_src(ctx, &instr->arr.index, 0, nir_type_int));
return true;
case nir_deref_type_struct:
- store_def(ctx, &instr->dest.ssa, 0, dxil_module_get_int_const(&ctx->mod, instr->strct.index, 32));
+ store_def(ctx, &instr->def, 0, dxil_module_get_int_const(&ctx->mod, instr->strct.index, 32));
return true;
default:
unreachable("Other deref types not supported");
/* Haven't finished chasing the deref chain yet, just store the value */
if (glsl_type_is_array(type)) {
- store_def(ctx, &instr->dest.ssa, 0, binding);
+ store_def(ctx, &instr->def, 0, binding);
return true;
}
if (!handle)
return false;
- store_ssa_def(ctx, &instr->dest.ssa, 0, handle);
+ store_ssa_def(ctx, &instr->def, 0, handle);
return true;
}
}
struct phi_block *vphi = ralloc(ctx->phis, struct phi_block);
- vphi->num_components = instr->dest.ssa.num_components;
+ vphi->num_components = instr->def.num_components;
for (unsigned i = 0; i < vphi->num_components; ++i) {
struct dxil_instr *phi = vphi->comp[i] = dxil_emit_phi(&ctx->mod, type);
if (!phi)
return false;
- store_ssa_def(ctx, &instr->dest.ssa, i, dxil_instr_get_return_value(phi));
+ store_ssa_def(ctx, &instr->def, i, dxil_instr_get_return_value(phi));
}
_mesa_hash_table_insert(ctx->phis, instr, vphi);
return true;
case nir_texop_lod:
sample = emit_texture_lod(ctx, &params, true);
- store_def(ctx, &instr->dest.ssa, 0, sample);
+ store_def(ctx, &instr->def, 0, sample);
sample = emit_texture_lod(ctx, &params, false);
- store_def(ctx, &instr->dest.ssa, 1, sample);
+ store_def(ctx, &instr->def, 1, sample);
return true;
case nir_texop_query_levels: {
params.lod_or_sample = dxil_module_get_int_const(&ctx->mod, 0, 32);
sample = emit_texture_size(ctx, &params);
const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, sample, 3);
- store_def(ctx, &instr->dest.ssa, 0, retval);
+ store_def(ctx, &instr->def, 0, retval);
return true;
}
params.lod_or_sample = int_undef;
sample = emit_texture_size(ctx, &params);
const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, sample, 3);
- store_def(ctx, &instr->dest.ssa, 0, retval);
+ store_def(ctx, &instr->def, 0, retval);
return true;
}
if (!sample)
return false;
- for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
+ for (unsigned i = 0; i < instr->def.num_components; ++i) {
const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, sample, i);
- store_def(ctx, &instr->dest.ssa, i, retval);
+ store_def(ctx, &instr->def, i, retval);
}
return true;
nir_def *load_data = nir_load_ubo(
builder,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size,
+ intrin->def.num_components,
+ intrin->def.bit_size,
nir_channel(builder, load_desc, 0),
nir_imm_int(builder, offset),
.align_mul = 256,
.align_offset = offset,
.range_base = offset,
- .range = intrin->dest.ssa.bit_size * intrin->dest.ssa.num_components / 8);
+ .range = intrin->def.bit_size * intrin->def.num_components / 8);
- nir_def_rewrite_uses(&intrin->dest.ssa, load_data);
+ nir_def_rewrite_uses(&intrin->def, load_data);
nir_instr_remove(instr);
return true;
}
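Both UBO hunks use the standard nir_builder replacement idiom: build the new load at the cursor, point every user of the old def at it, then delete the old instruction. Condensed, with a placeholder block index and offset:

b->cursor = nir_before_instr(&intrin->instr);

nir_def *load_data =
   nir_load_ubo(b, intrin->def.num_components, intrin->def.bit_size,
                nir_imm_int(b, 0) /* block index, placeholder */,
                nir_imm_int(b, offset) /* byte offset, placeholder */,
                .align_mul = intrin->def.bit_size / 8,
                .align_offset = 0,
                .range_base = offset,
                .range = intrin->def.bit_size *
                         intrin->def.num_components / 8);

nir_def_rewrite_uses(&intrin->def, load_data);
nir_instr_remove(&intrin->instr);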
nir_def *offset = nir_ssa_for_src(builder, intrin->src[0], 1);
nir_def *load_data = nir_load_ubo(
builder,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size,
+ intrin->def.num_components,
+ intrin->def.bit_size,
nir_channel(builder, load_desc, 0),
nir_iadd_imm(builder, offset, base),
.align_mul = nir_intrinsic_align_mul(intrin),
.range_base = base,
.range = range);
- nir_def_rewrite_uses(&intrin->dest.ssa, load_data);
+ nir_def_rewrite_uses(&intrin->def, load_data);
nir_instr_remove(instr);
return true;
}
builder->cursor = nir_before_instr(instr);
if (intrin->intrinsic == nir_intrinsic_load_deref)
- nir_def_rewrite_uses(&intrin->dest.ssa, nir_imm_float(builder, 1.0));
+ nir_def_rewrite_uses(&intrin->def, nir_imm_float(builder, 1.0));
nir_instr_remove(instr);
return true;
* since that would remove the store instruction, and would make it tricky to satisfy
* the DXIL requirements of writing all position components.
*/
- nir_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components,
- intr->dest.ssa.bit_size);
- nir_def_rewrite_uses(&intr->dest.ssa, zero);
+ nir_def *zero = nir_imm_zero(b, intr->def.num_components,
+ intr->def.bit_size);
+ nir_def_rewrite_uses(&intr->def, zero);
nir_instr_remove(instr);
return true;
}
if (!var || var->data.location != VARYING_SLOT_PNTC)
return false;
- nir_def *point_center = &intr->dest.ssa;
+ nir_def *point_center = &intr->def;
nir_variable *pos_var = (nir_variable *)data;
b->cursor = nir_after_instr(instr);
pos = nir_load_var(b, pos_var);
else if (var->data.sample)
pos = nir_interp_deref_at_sample(b, 4, 32,
- &nir_build_deref_var(b, pos_var)->dest.ssa,
+ &nir_build_deref_var(b, pos_var)->def,
nir_load_sample_id(b));
else
pos = nir_interp_deref_at_offset(b, 4, 32,
- &nir_build_deref_var(b, pos_var)->dest.ssa,
+ &nir_build_deref_var(b, pos_var)->def,
nir_imm_zero(b, 2, 32));
nir_def *pntc = nir_fadd_imm(b,
case nir_intrinsic_reduce:
case nir_intrinsic_inclusive_scan:
case nir_intrinsic_exclusive_scan:
- return intr->dest.ssa.bit_size == 1 ? 32 : 0;
+ return intr->def.bit_size == 1 ? 32 : 0;
default:
return 0;
}
nir_def *res_idx =
load_vulkan_ssbo(b, remap.descriptor_set, nir_imul_imm(b, index_in_ubo, descriptor_size), 2);
- nir_def_rewrite_uses(&intr->dest.ssa, res_idx);
+ nir_def_rewrite_uses(&intr->def, res_idx);
return true;
}
nir_imm_int(&b, 0));
tex->src[3] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &tex_deref->dest.ssa);
+ &tex_deref->def);
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
- res = res ? nir_build_alu2(&b, resolve_op, res, &tex->dest.ssa) : &tex->dest.ssa;
+ res = res ? nir_build_alu2(&b, resolve_op, res, &tex->def) : &tex->def;
}
if (resolve_mode == dzn_blit_resolve_average)
nir_imm_int(&b, 0));
tex->src[3] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &tex_deref->dest.ssa);
+ &tex_deref->def);
} else {
nir_variable *sampler_var =
nir_variable_create(b.shader, nir_var_uniform, glsl_bare_sampler_type(), "sampler");
tex->coord_components = coord_comps;
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &tex_deref->dest.ssa);
+ &tex_deref->def);
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
- &sampler_deref->dest.ssa);
+ &sampler_deref->def);
}
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
- res = &tex->dest.ssa;
+ res = &tex->def;
}
nir_store_var(&b, out, nir_trim_vector(&b, res, out_comps), 0xf);
break;
}
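The dzn blit and resolve hunks construct nir_tex_instr objects by hand rather than through a builder helper, so the rename shows up twice: in nir_def_init and in the &tex->def result reference. A hedged sketch of that construction (coord, sample_id, and tex_deref are assumed to exist in the caller):

nir_tex_instr *tex = nir_tex_instr_create(b.shader, 3);
tex->op = nir_texop_txf_ms;
tex->dest_type = nir_type_float32;
tex->sampler_dim = GLSL_SAMPLER_DIM_MS;
tex->coord_components = 2;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_ms_index, sample_id);
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
                                  &tex_deref->def);

/* The def lives on the instruction itself now: initialize it, insert the
 * instruction, and read the result straight from &tex->def. */
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
nir_def *res = &tex->def;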
- return typeOfSize(insn->dest.ssa.bit_size / 8, isFloat, isSigned);
+ return typeOfSize(insn->def.bit_size / 8, isFloat, isSigned);
}
DataType
switch (op) {
case nir_intrinsic_decl_reg: {
- const unsigned reg_index = insn->dest.ssa.index;
+ const unsigned reg_index = insn->def.index;
const unsigned bit_size = nir_intrinsic_bit_size(insn);
const unsigned num_components = nir_intrinsic_num_components(insn);
assert(nir_intrinsic_num_array_elems(insn) == 0);
LValues &src = it->second;
DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
for (uint8_t c = 0; c < insn->num_components; c++)
mkMov(newDefs[c], src[c], dType);
break;
case nir_intrinsic_load_input:
case nir_intrinsic_load_interpolated_input:
case nir_intrinsic_load_output: {
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
// FBFetch
if (prog->getType() == Program::TYPE_FRAGMENT &&
case nir_intrinsic_load_barycentric_centroid:
case nir_intrinsic_load_barycentric_pixel:
case nir_intrinsic_load_barycentric_sample: {
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
uint32_t mode;
if (op == nir_intrinsic_load_barycentric_centroid ||
case nir_intrinsic_load_work_dim: {
const DataType dType = getDType(insn);
SVSemantic sv = convert(op);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
Value *def;
}
// constants
case nir_intrinsic_load_subgroup_size: {
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
loadImm(newDefs[0], 32u);
break;
}
case nir_intrinsic_vote_all:
case nir_intrinsic_vote_any:
case nir_intrinsic_vote_ieq: {
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *pred = getScratch(1, FILE_PREDICATE);
mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
break;
}
case nir_intrinsic_ballot: {
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *pred = getSSA(1, FILE_PREDICATE);
mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
}
case nir_intrinsic_read_first_invocation:
case nir_intrinsic_read_invocation: {
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
const DataType dType = getDType(insn);
Value *tmp = getScratch();
}
case nir_intrinsic_load_per_vertex_input: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *indirectVertex;
Value *indirectOffset;
uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
}
case nir_intrinsic_load_per_vertex_output: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *indirectVertex;
Value *indirectOffset;
uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
}
case nir_intrinsic_load_ubo: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *indirectIndex;
Value *indirectOffset;
uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex);
break;
}
case nir_intrinsic_get_ssbo_size: {
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
const DataType dType = getDType(insn);
Value *indirectBuffer;
uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
}
case nir_intrinsic_load_ssbo: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *indirectBuffer;
Value *indirectOffset;
uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
case nir_intrinsic_shared_atomic:
case nir_intrinsic_shared_atomic_swap: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *indirectOffset;
uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
case nir_intrinsic_ssbo_atomic:
case nir_intrinsic_ssbo_atomic_swap: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *indirectBuffer;
Value *indirectOffset;
uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
case nir_intrinsic_global_atomic:
case nir_intrinsic_global_atomic_swap: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *address;
uint32_t offset = getIndirect(&insn->src[0], 0, address);
uint16_t location = 0;
if (opInfo.has_dest) {
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
for (uint8_t i = 0u; i < newDefs.size(); ++i) {
defs.push_back(newDefs[i]);
mask |= 1 << i;
case nir_intrinsic_load_scratch:
case nir_intrinsic_load_shared: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *indirectOffset;
uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
if (indirectOffset)
}
case nir_intrinsic_shader_clock: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
loadImm(newDefs[0], 0u);
mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
case nir_intrinsic_load_global:
case nir_intrinsic_load_global_constant: {
const DataType dType = getDType(insn);
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
Value *indirectOffset;
uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
case nir_texop_txf_ms:
case nir_texop_txl:
case nir_texop_txs: {
- LValues &newDefs = convert(&insn->dest.ssa);
+ LValues &newDefs = convert(&insn->def);
std::vector<Value*> srcs;
std::vector<Value*> defs;
std::vector<nir_src*> offsets;
nir_def *desc = load_descriptor_for_idx_intrin(b, idx_intrin, ctx);
- nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+ nir_def_rewrite_uses(&intrin->def, desc);
return true;
}
.align_offset = 0,
.range = root_table_offset + 3 * 4);
- nir_def_rewrite_uses(&load->dest.ssa, val);
+ nir_def_rewrite_uses(&load->def, val);
return true;
}
.align_offset = 0,
.range = root_table_offset + 3 * 4);
- nir_def_rewrite_uses(&load->dest.ssa, val);
+ nir_def_rewrite_uses(&load->def, val);
return true;
}
push_region_offset + base);
nir_def *val =
- nir_load_ubo(b, load->dest.ssa.num_components, load->dest.ssa.bit_size,
+ nir_load_ubo(b, load->def.num_components, load->def.bit_size,
nir_imm_int(b, 0), offset,
- .align_mul = load->dest.ssa.bit_size / 8,
+ .align_mul = load->def.bit_size / 8,
.align_offset = 0,
.range = push_region_offset + base +
nir_intrinsic_range(load));
- nir_def_rewrite_uses(&load->dest.ssa, val);
+ nir_def_rewrite_uses(&load->def, val);
return true;
}
.align_offset = 0,
.range = root_table_offset + 4);
- nir_def_rewrite_uses(&load->dest.ssa, val);
+ nir_def_rewrite_uses(&load->def, val);
return true;
}
unreachable("Unknown address mode");
}
- nir_def_rewrite_uses(&intrin->dest.ssa, addr);
+ nir_def_rewrite_uses(&intrin->def, addr);
return true;
}
addr = nir_build_addr_iadd(b, addr, ctx->ssbo_addr_format,
nir_var_mem_ssbo, offset);
- nir_def_rewrite_uses(&intrin->dest.ssa, addr);
+ nir_def_rewrite_uses(&intrin->def, addr);
return true;
}
unreachable("Unknown address mode");
}
- nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+ nir_def_rewrite_uses(&intrin->def, desc);
return true;
}
}
}
- nir_def_rewrite_uses(&intrin->dest.ssa, size);
+ nir_def_rewrite_uses(&intrin->def, size);
return true;
}
if (intrin->intrinsic == nir_intrinsic_load_global_constant_bounded) {
nir_def *bound = intrin->src[2].ssa;
- unsigned bit_size = intrin->dest.ssa.bit_size;
+ unsigned bit_size = intrin->def.bit_size;
assert(bit_size >= 8 && bit_size % 8 == 0);
unsigned byte_size = bit_size / 8;
}
nir_def *val =
- nir_build_load_global(b, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size,
+ nir_build_load_global(b, intrin->def.num_components,
+ intrin->def.bit_size,
nir_iadd(b, base_addr, nir_u2u64(b, offset)),
.access = nir_intrinsic_access(intrin),
.align_mul = nir_intrinsic_align_mul(intrin),
val = nir_if_phi(b, val, zero);
}
- nir_def_rewrite_uses(&intrin->dest.ssa, val);
+ nir_def_rewrite_uses(&intrin->def, val);
return true;
}
return false;
}
- nir_def_rewrite_uses(&intrin->dest.ssa, val);
+ nir_def_rewrite_uses(&intrin->def, val);
return true;
}
b->cursor = nir_before_instr(&intrin->instr);
nir_def *base_vertex = nir_load_base_vertex(b);
- nir_def_rewrite_uses(&intrin->dest.ssa, base_vertex);
+ nir_def_rewrite_uses(&intrin->def, base_vertex);
return true;
}
/* Write zero in a funny way to bypass lower_load_const_to_scalar */
bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
- unsigned size = has_dest ? intr->dest.ssa.bit_size : 32;
+ unsigned size = has_dest ? intr->def.bit_size : 32;
nir_def *zero = has_dest ? nir_imm_zero(b, 1, size) : NULL;
nir_def *zeroes[4] = {zero, zero, zero, zero};
nir_def *res =
- has_dest ? nir_vec(b, zeroes, intr->dest.ssa.num_components) : NULL;
+ has_dest ? nir_vec(b, zeroes, intr->def.num_components) : NULL;
for (unsigned i = 0; i < (*lanes); ++i) {
nir_push_if(b, nir_ieq_imm(b, lane, i));
nir_pop_if(b, NULL);
if (has_dest) {
- nir_def *c_ssa = &c_intr->dest.ssa;
+ nir_def *c_ssa = &c_intr->def;
res = nir_if_phi(b, c_ssa, res);
}
}
if (has_dest)
- nir_def_rewrite_uses(&intr->dest.ssa, res);
+ nir_def_rewrite_uses(&intr->def, res);
nir_instr_remove(instr);
return true;
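This pass serializes a cross-lane operation by re-running it once per lane under a lane-index check and folding the per-lane defs together with if-phis, seeded by the zero vector built in the funny way the comment mentions. Roughly (the clone step is an assumption; only the control flow and phi plumbing are visible above):

unsigned size = has_dest ? intr->def.bit_size : 32;
nir_def *zero = has_dest ? nir_imm_zero(b, 1, size) : NULL;
nir_def *zeroes[4] = {zero, zero, zero, zero};
nir_def *res =
   has_dest ? nir_vec(b, zeroes, intr->def.num_components) : NULL;

for (unsigned i = 0; i < (*lanes); ++i) {
   nir_push_if(b, nir_ieq_imm(b, lane, i));

   /* One lane at a time executes a copy of the original intrinsic. */
   nir_intrinsic_instr *c_intr =
      nir_instr_as_intrinsic(nir_instr_clone(b->shader, instr));
   nir_builder_instr_insert(b, &c_intr->instr);

   nir_pop_if(b, NULL);
   if (has_dest)
      res = nir_if_phi(b, &c_intr->def, res);
}

if (has_dest)
   nir_def_rewrite_uses(&intr->def, res);
nir_instr_remove(instr);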
unsigned component = nir_intrinsic_component(instr);
unsigned nr = instr->num_components;
unsigned total = nr + component;
- unsigned bitsize = instr->dest.ssa.bit_size;
+ unsigned bitsize = instr->def.bit_size;
assert(total <= 4 && "should be vec4");
bi_emit_cached_split(b, tmp, total * bitsize);
bi_index srcs[] = {tmp, tmp, tmp};
unsigned channels[] = {component, component + 1, component + 2};
- bi_make_vec_to(b, bi_def_index(&instr->dest.ssa), srcs, channels, nr,
- instr->dest.ssa.bit_size);
+ bi_make_vec_to(b, bi_def_index(&instr->def), srcs, channels, nr,
+ instr->def.bit_size);
}
static void
bool constant = nir_src_is_const(*offset);
bool immediate = bi_is_intr_immediate(instr, &imm_index, 16);
bi_index dest =
- (component == 0) ? bi_def_index(&instr->dest.ssa) : bi_temp(b->shader);
+ (component == 0) ? bi_def_index(&instr->def) : bi_temp(b->shader);
bi_instr *I;
if (immediate) {
unsigned component = nir_intrinsic_component(instr);
enum bi_vecsize vecsize = (instr->num_components + component - 1);
bi_index dest =
- (component == 0) ? bi_def_index(&instr->dest.ssa) : bi_temp(b->shader);
+ (component == 0) ? bi_def_index(&instr->def) : bi_temp(b->shader);
- unsigned sz = instr->dest.ssa.bit_size;
+ unsigned sz = instr->def.bit_size;
if (smooth) {
nir_intrinsic_instr *parent = nir_src_as_intrinsic(instr->src[0]);
bi_index srcs[] = {bi_preload(b, base + 0), bi_preload(b, base + 1),
bi_preload(b, base + 2), bi_preload(b, base + 3)};
- bi_emit_collect_to(b, bi_def_index(&instr->dest.ssa), srcs,
- size == 32 ? 4 : 2);
+ bi_emit_collect_to(b, bi_def_index(&instr->def), srcs, size == 32 ? 4 : 2);
}
static void
bi_index dyn_offset = bi_src_index(offset);
uint32_t const_offset = offset_is_const ? nir_src_as_uint(*offset) : 0;
- bi_load_ubo_to(b, instr->num_components * instr->dest.ssa.bit_size,
- bi_def_index(&instr->dest.ssa),
+ bi_load_ubo_to(b, instr->num_components * instr->def.bit_size,
+ bi_def_index(&instr->def),
offset_is_const ? bi_imm_u32(const_offset) : dyn_offset,
bi_src_index(&instr->src[0]));
}
uint32_t base = nir_intrinsic_base(instr) + nir_src_as_uint(*offset);
assert((base & 3) == 0 && "unaligned push constants");
- unsigned bits = instr->dest.ssa.bit_size * instr->dest.ssa.num_components;
+ unsigned bits = instr->def.bit_size * instr->def.num_components;
unsigned n = DIV_ROUND_UP(bits, 32);
assert(n <= 4);
channels[i] = bi_fau(BIR_FAU_UNIFORM | (word >> 1), word & 1);
}
- bi_emit_collect_to(b, bi_def_index(&instr->dest.ssa), channels, n);
+ bi_emit_collect_to(b, bi_def_index(&instr->def), channels, n);
}
static bi_index
bi_emit_load(bi_builder *b, nir_intrinsic_instr *instr, enum bi_seg seg)
{
int16_t offset = 0;
- unsigned bits = instr->num_components * instr->dest.ssa.bit_size;
- bi_index dest = bi_def_index(&instr->dest.ssa);
+ unsigned bits = instr->num_components * instr->def.bit_size;
+ bi_index dest = bi_def_index(&instr->def);
bi_index addr_lo = bi_extract(b, bi_src_index(&instr->src[0]), 0);
bi_index addr_hi = bi_addr_high(b, &instr->src[0]);
bi_index coords = bi_src_index(&instr->src[1]);
bi_index xy = bi_emit_image_coord(b, coords, 0, coord_comps, array);
bi_index zw = bi_emit_image_coord(b, coords, 1, coord_comps, array);
- bi_index dest = bi_def_index(&instr->dest.ssa);
+ bi_index dest = bi_def_index(&instr->def);
enum bi_register_format regfmt =
bi_reg_fmt_for_nir(nir_intrinsic_dest_type(instr));
enum bi_vecsize vecsize = instr->num_components - 1;
vecsize);
}
- bi_split_def(b, &instr->dest.ssa);
+ bi_split_def(b, &instr->def);
}
static void
static void
bi_emit_ld_tile(bi_builder *b, nir_intrinsic_instr *instr)
{
- bi_index dest = bi_def_index(&instr->dest.ssa);
+ bi_index dest = bi_def_index(&instr->def);
nir_alu_type T = nir_intrinsic_dest_type(instr);
enum bi_register_format regfmt = bi_reg_fmt_for_nir(T);
- unsigned size = instr->dest.ssa.bit_size;
+ unsigned size = instr->def.bit_size;
unsigned nr = instr->num_components;
/* Get the render target */
bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
{
bi_index dst = nir_intrinsic_infos[instr->intrinsic].has_dest
- ? bi_def_index(&instr->dest.ssa)
+ ? bi_def_index(&instr->def)
: bi_null();
gl_shader_stage stage = b->shader->stage;
bi_emit_atomic_i32_to(b, dst, addr, bi_src_index(&instr->src[1]), op);
}
- bi_split_def(b, &instr->dest.ssa);
+ bi_split_def(b, &instr->def);
break;
}
bi_src_index(&instr->src[1]), op);
}
- bi_split_def(b, &instr->dest.ssa);
+ bi_split_def(b, &instr->def);
break;
}
case nir_intrinsic_global_atomic_swap:
bi_emit_acmpxchg_to(b, dst, bi_src_index(&instr->src[0]), &instr->src[1],
&instr->src[2], BI_SEG_NONE);
- bi_split_def(b, &instr->dest.ssa);
+ bi_split_def(b, &instr->def);
break;
case nir_intrinsic_shared_atomic_swap:
bi_emit_acmpxchg_to(b, dst, bi_src_index(&instr->src[0]), &instr->src[1],
&instr->src[2], BI_SEG_WLS);
- bi_split_def(b, &instr->dest.ssa);
+ bi_split_def(b, &instr->def);
break;
case nir_intrinsic_load_pixel_coord:
case nir_intrinsic_shader_clock:
bi_ld_gclk_u64_to(b, dst, BI_SOURCE_CYCLE_COUNTER);
- bi_split_def(b, &instr->dest.ssa);
+ bi_split_def(b, &instr->def);
break;
default:
.shadow_or_clamp_disable = instr->is_shadow,
.array = instr->is_array,
.dimension = bifrost_tex_format(instr->sampler_dim),
- .format = bi_texture_format(instr->dest_type | instr->dest.ssa.bit_size,
+ .format = bi_texture_format(instr->dest_type | instr->def.bit_size,
BI_CLAMP_NONE), /* TODO */
.mask = 0xF,
};
dregs[sr_count++] = dregs[i];
}
- unsigned res_size = instr->dest.ssa.bit_size == 16 ? 2 : 4;
+ unsigned res_size = instr->def.bit_size == 16 ? 2 : 4;
bi_index sr = sr_count ? bi_temp(b->shader) : bi_null();
bi_index dst = bi_temp(b->shader);
bi_index w[4] = {bi_null(), bi_null(), bi_null(), bi_null()};
bi_emit_split_i32(b, w, dst, res_size);
- bi_emit_collect_to(
- b, bi_def_index(&instr->dest.ssa), w,
- DIV_ROUND_UP(instr->dest.ssa.num_components * res_size, 4));
+ bi_emit_collect_to(b, bi_def_index(&instr->def), w,
+ DIV_ROUND_UP(instr->def.num_components * res_size, 4));
}
/* Staging registers required by texturing in the order they appear (Valhall) */
image_src = bi_lshift_or_i32(b, texture, image_src, bi_imm_u8(16));
/* Only write the components that we actually read */
- unsigned mask = nir_def_components_read(&instr->dest.ssa);
- unsigned comps_per_reg = instr->dest.ssa.bit_size == 16 ? 2 : 1;
+ unsigned mask = nir_def_components_read(&instr->def);
+ unsigned comps_per_reg = instr->def.bit_size == 16 ? 2 : 1;
unsigned res_size = DIV_ROUND_UP(util_bitcount(mask), comps_per_reg);
enum bi_register_format regfmt = bi_reg_fmt_for_nir(instr->dest_type);
/* Index into the packed component array */
unsigned j = 0;
unsigned comps[4] = {0};
- unsigned nr_components = instr->dest.ssa.num_components;
+ unsigned nr_components = instr->def.num_components;
for (unsigned i = 0; i < nr_components; ++i) {
if (mask & BITFIELD_BIT(i)) {
}
}
- bi_make_vec_to(b, bi_def_index(&instr->dest.ssa), unpacked, comps,
- instr->dest.ssa.num_components, instr->dest.ssa.bit_size);
+ bi_make_vec_to(b, bi_def_index(&instr->def), unpacked, comps,
+ instr->def.num_components, instr->def.bit_size);
}
/* Simple textures ops correspond to NIR tex or txl with LOD = 0 on 2D/cube
bi_index face, s, t;
bi_emit_cube_coord(b, coords, &face, &s, &t);
- bi_texs_cube_to(b, instr->dest.ssa.bit_size,
- bi_def_index(&instr->dest.ssa), s, t, face,
- instr->sampler_index, instr->texture_index);
+ bi_texs_cube_to(b, instr->def.bit_size, bi_def_index(&instr->def), s, t,
+ face, instr->sampler_index, instr->texture_index);
} else {
- bi_texs_2d_to(b, instr->dest.ssa.bit_size, bi_def_index(&instr->dest.ssa),
+ bi_texs_2d_to(b, instr->def.bit_size, bi_def_index(&instr->def),
bi_extract(b, coords, 0), bi_extract(b, coords, 1),
instr->op != nir_texop_tex, /* zero LOD */
instr->sampler_index, instr->texture_index);
}
- bi_split_def(b, &instr->dest.ssa);
+ bi_split_def(b, &instr->def);
}
static bool
bi_emit_phi(bi_builder *b, nir_phi_instr *instr)
{
unsigned nr_srcs = exec_list_length(&instr->srcs);
- bi_instr *I = bi_phi_to(b, bi_def_index(&instr->dest.ssa), nr_srcs);
+ bi_instr *I = bi_phi_to(b, bi_def_index(&instr->def), nr_srcs);
/* Deferred */
I->phi = instr;
nir_phi_instr *phi = I->phi;
/* Guaranteed by lower_phis_to_scalar */
- assert(phi->dest.ssa.num_components == 1);
+ assert(phi->def.num_components == 1);
nir_foreach_phi_src(src, phi) {
bi_block *pred = bi_from_nir_block(ctx, src->pred);
b, .base = rt, .src_type = nir_intrinsic_dest_type(intr));
nir_def *lowered = nir_load_converted_output_pan(
- b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size, conversion,
+ b, intr->def.num_components, intr->def.bit_size, conversion,
.dest_type = nir_intrinsic_dest_type(intr),
.io_semantics = nir_intrinsic_io_semantics(intr));
- nir_def_rewrite_uses(&intr->dest.ssa, lowered);
+ nir_def_rewrite_uses(&intr->def, lowered);
return true;
}
b->cursor = nir_after_instr(instr);
nir_def *constant = nir_build_imm(b, 4, 32, constants);
- nir_def_rewrite_uses(&intr->dest.ssa, constant);
+ nir_def_rewrite_uses(&intr->def, constant);
nir_instr_remove(instr);
return true;
}
inputs->dev, inputs->formats[rt], rt, size, false);
b->cursor = nir_after_instr(instr);
- nir_def_rewrite_uses(&intr->dest.ssa, nir_imm_int(b, conversion >> 32));
+ nir_def_rewrite_uses(&intr->def, nir_imm_int(b, conversion >> 32));
return true;
}
};
b->cursor = nir_after_instr(instr);
- nir_def_rewrite_uses(&intr->dest.ssa, nir_build_imm(b, 3, 32, constants));
+ nir_def_rewrite_uses(&intr->def, nir_build_imm(b, 3, 32, constants));
return true;
}
tex->src[2] =
nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(&b, 0));
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
- res = res ? nir_fadd(&b, res, &tex->dest.ssa) : &tex->dest.ssa;
+ res = res ? nir_fadd(&b, res, &tex->def) : &tex->def;
}
if (base_type == nir_type_float)
tex->coord_components = coord_comps;
}
- nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
+ nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
- res = &tex->dest.ssa;
+ res = &tex->def;
}
assert(res);
intr->intrinsic != nir_intrinsic_load_shared)
return false;
- unsigned compsz = intr->dest.ssa.bit_size;
- unsigned totalsz = compsz * intr->dest.ssa.num_components;
+ unsigned compsz = intr->def.bit_size;
+ unsigned totalsz = compsz * intr->def.num_components;
/* 8, 16, 32, 64 and 128 bit loads don't need to be lowered */
if (util_bitcount(totalsz) < 2 && totalsz <= 128)
return false;
shared_load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(shared_load, compsz / 8, 0);
nir_intrinsic_set_base(shared_load, nir_intrinsic_base(intr));
- nir_def_init(&shared_load->instr, &shared_load->dest.ssa,
+ nir_def_init(&shared_load->instr, &shared_load->def,
shared_load->num_components, compsz);
nir_builder_instr_insert(b, &shared_load->instr);
- load = &shared_load->dest.ssa;
+ load = &shared_load->def;
}
for (unsigned i = 0; i < loadncomps; i++)
addr = nir_iadd_imm(b, addr, loadsz / 8);
}
- assert(ncomps == intr->dest.ssa.num_components);
- nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, comps, ncomps));
+ assert(ncomps == intr->def.num_components);
+ nir_def_rewrite_uses(&intr->def, nir_vec(b, comps, ncomps));
return true;
}
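The lowering above splits an oddly sized load_shared into power-of-two chunks and then reassembles the original vector; the assert checks that the reassembled component count matches the def. A hedged reconstruction of the loop around the visible chunk-load body:

nir_def *comps[NIR_MAX_VEC_COMPONENTS];
unsigned ncomps = 0;
nir_def *addr = intr->src[0].ssa;

while (totalsz) {
   /* Largest power-of-two chunk that still fits, capped at 128 bits. */
   unsigned loadsz = MIN2(1 << (util_last_bit(totalsz) - 1), 128);
   unsigned loadncomps = loadsz / compsz;

   nir_intrinsic_instr *shared_load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_shared);
   shared_load->num_components = loadncomps;
   shared_load->src[0] = nir_src_for_ssa(addr);
   nir_intrinsic_set_align(shared_load, compsz / 8, 0);
   nir_intrinsic_set_base(shared_load, nir_intrinsic_base(intr));
   nir_def_init(&shared_load->instr, &shared_load->def,
                shared_load->num_components, compsz);
   nir_builder_instr_insert(b, &shared_load->instr);

   for (unsigned i = 0; i < loadncomps; i++)
      comps[ncomps++] = nir_channel(b, &shared_load->def, i);

   totalsz -= loadsz;
   addr = nir_iadd_imm(b, addr, loadsz / 8);
}

assert(ncomps == intr->def.num_components);
nir_def_rewrite_uses(&intr->def, nir_vec(b, comps, ncomps));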
nir_mask = mask_of(nir_intrinsic_dest_components(intr));
/* Extension is mandatory for 8/16-bit loads */
- dsize = intr->dest.ssa.bit_size == 64 ? 64 : 32;
+ dsize = intr->def.bit_size == 64 ? 64 : 32;
} else {
nir_mask = nir_intrinsic_write_mask(intr);
dsize = OP_IS_COMMON_STORE(ins->op) ? nir_src_bit_size(intr->src[0]) : 32;
midgard_instruction ins;
unsigned dest_size = (instr->type == nir_instr_type_intrinsic)
- ? nir_instr_as_intrinsic(instr)->dest.ssa.bit_size
+ ? nir_instr_as_intrinsic(instr)->def.bit_size
: 32;
unsigned bitsize = dest_size * nr_comps;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (is_read) {
- unsigned bitsize =
- intr->dest.ssa.bit_size * intr->dest.ssa.num_components;
+ unsigned bitsize = intr->def.bit_size * intr->def.num_components;
switch (bitsize) {
case 8:
/* For anything not aligned on 32bit, make sure we write full
* 32 bits registers. */
if (bitsize & 31) {
- unsigned comps_per_32b = 32 / intr->dest.ssa.bit_size;
+ unsigned comps_per_32b = 32 / intr->def.bit_size;
for (unsigned c = 0; c < 4 * comps_per_32b; c += comps_per_32b) {
if (!(ins.mask & BITFIELD_RANGE(c, comps_per_32b)))
bool is_shared = (instr->intrinsic == nir_intrinsic_shared_atomic) ||
(instr->intrinsic == nir_intrinsic_shared_atomic_swap);
- unsigned dest = nir_def_index(&instr->dest.ssa);
+ unsigned dest = nir_def_index(&instr->def);
unsigned val = nir_src_index(ctx, &instr->src[1]);
unsigned bitsize = nir_src_bit_size(instr->src[1]);
emit_explicit_constant(ctx, val);
nir_alu_type base_type = nir_alu_type_get_base_type(type);
ins.src_types[0] = base_type | nir_src_bit_size(instr->src[3]);
} else if (instr->intrinsic == nir_intrinsic_image_texel_address) {
- ins = m_lea_image(nir_def_index(&instr->dest.ssa),
- PACK_LDST_ATTRIB_OFS(address));
+ ins =
+ m_lea_image(nir_def_index(&instr->def), PACK_LDST_ATTRIB_OFS(address));
ins.mask = mask_of(2); /* 64-bit memory address */
} else { /* emit ld_image_* */
nir_alu_type type = nir_intrinsic_dest_type(instr);
- ins = ld_image(type, nir_def_index(&instr->dest.ssa),
+ ins = ld_image(type, nir_def_index(&instr->def),
PACK_LDST_ATTRIB_OFS(address));
ins.mask = mask_of(nir_intrinsic_dest_components(instr));
ins.dest_type = type;
static void
emit_compute_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
{
- unsigned reg = nir_def_index(&instr->dest.ssa);
+ unsigned reg = nir_def_index(&instr->def);
midgard_instruction ins = m_ldst_mov(reg, 0);
ins.mask = mask_of(3);
ins.swizzle[0][3] = COMPONENT_X; /* xyzx */
static void
emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
{
- unsigned reg = nir_def_index(&instr->dest.ssa);
+ unsigned reg = nir_def_index(&instr->def);
emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1,
nir_type_int);
}
static void
emit_special(compiler_context *ctx, nir_intrinsic_instr *instr, unsigned idx)
{
- unsigned reg = nir_def_index(&instr->dest.ssa);
+ unsigned reg = nir_def_index(&instr->def);
midgard_instruction ld = m_ld_tilebuffer_raw(reg, 0);
ld.op = midgard_op_ld_special_32u;
nir_def *handle = instr->src[0].ssa;
midgard_instruction ins =
- v_mov(nir_reg_index(handle), nir_def_index(&instr->dest.ssa));
+ v_mov(nir_reg_index(handle), nir_def_index(&instr->def));
- ins.dest_type = ins.src_types[1] =
- nir_type_uint | instr->dest.ssa.bit_size;
+ ins.dest_type = ins.src_types[1] = nir_type_uint | instr->def.bit_size;
- ins.mask = BITFIELD_MASK(instr->dest.ssa.num_components);
+ ins.mask = BITFIELD_MASK(instr->def.num_components);
emit_mir_instruction(ctx, ins);
break;
}
/* We may need to apply a fractional offset */
int component =
(is_flat || is_interp) ? nir_intrinsic_component(instr) : 0;
- reg = nir_def_index(&instr->dest.ssa);
+ reg = nir_def_index(&instr->def);
if (is_ubo) {
nir_src index = instr->src[0];
emit_global(ctx, &instr->instr, true, reg, src_offset, seg);
} else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->inputs->is_blend) {
emit_varying_read(ctx, reg, offset, nr_comp, component,
- indirect_offset, t | instr->dest.ssa.bit_size,
- is_flat);
+ indirect_offset, t | instr->def.bit_size, is_flat);
} else if (ctx->inputs->is_blend) {
/* ctx->blend_input will be precoloured to r0/r2, where
* the input is preloaded */
/* Reads 128-bit value raw off the tilebuffer during blending, tasty */
case nir_intrinsic_load_raw_output_pan: {
- reg = nir_def_index(&instr->dest.ssa);
+ reg = nir_def_index(&instr->def);
/* T720 and below use different blend opcodes with slightly
* different semantics than T760 and up */
}
case nir_intrinsic_load_output: {
- reg = nir_def_index(&instr->dest.ssa);
+ reg = nir_def_index(&instr->def);
- unsigned bits = instr->dest.ssa.bit_size;
+ unsigned bits = instr->def.bit_size;
midgard_instruction ld;
if (bits == 16)
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
.mask = 0xF,
- .dest = nir_def_index(&instr->dest.ssa),
+ .dest = nir_def_index(&instr->def),
.src = {~0, ~0, ~0, ~0},
.dest_type = instr->dest_type,
.swizzle = SWIZZLE_IDENTITY_4,
nir_intrinsic_instr *l = nir_intrinsic_instr_create(
b->shader, nir_intrinsic_load_sampler_lod_parameters_pan);
l->num_components = 3;
- nir_def_init(&l->instr, &l->dest.ssa, 3, 32);
+ nir_def_init(&l->instr, &l->def, 3, 32);
/* TODO: Indirect samplers, separate sampler objects XXX */
nir_src idx = nir_src_for_ssa(nir_imm_int(b, tex->texture_index));
nir_src_copy(&l->src[0], &idx, &l->instr);
nir_builder_instr_insert(b, &l->instr);
- nir_def *params = &l->dest.ssa;
+ nir_def *params = &l->def;
/* Extract the individual components */
nir_def *min_lod = nir_channel(b, params, 0);
if (intr->intrinsic != nir_intrinsic_load_interpolated_input)
continue;
- if (intr->dest.ssa.bit_size != 32)
+ if (intr->def.bit_size != 32)
continue;
/* We swizzle at a 32-bit level so need a multiple of 2. We could
bool valid = true;
- nir_foreach_use_including_if(src, &intr->dest.ssa)
+ nir_foreach_use_including_if(src, &intr->def)
valid &= !src->is_if && nir_src_is_f2fmp(src);
if (!valid)
continue;
- intr->dest.ssa.bit_size = 16;
+ intr->def.bit_size = 16;
nir_builder b = nir_builder_at(nir_after_instr(instr));
/* The f2f32(f2fmp(x)) will cancel by opt_algebraic */
- nir_def *conv = nir_f2f32(&b, &intr->dest.ssa);
- nir_def_rewrite_uses_after(&intr->dest.ssa, conv,
- conv->parent_instr);
+ nir_def *conv = nir_f2f32(&b, &intr->def);
+ nir_def_rewrite_uses_after(&intr->def, conv, conv->parent_instr);
progress |= true;
}
if (b->shader->info.stage != MESA_SHADER_FRAGMENT)
return false;
- count = intr->dest.ssa.num_components;
+ count = intr->def.num_components;
break;
default:
return false;
}
- if (intr->dest.ssa.bit_size != 64)
+ if (intr->def.bit_size != 64)
return false;
b->cursor = nir_after_instr(instr);
- intr->dest.ssa.bit_size = 32;
+ intr->def.bit_size = 32;
- nir_def *conv = nir_u2u64(b, &intr->dest.ssa);
+ nir_def *conv = nir_u2u64(b, &intr->def);
- nir_def_rewrite_uses_after(&intr->dest.ssa, conv, conv->parent_instr);
+ nir_def_rewrite_uses_after(&intr->def, conv, conv->parent_instr);
return true;
}
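This pass and the 16-bit demotion a few hunks up lean on the same in-place idiom: shrink the intrinsic's own def, then immediately convert the value back to the width consumers were written against, rewriting only the uses that come after the conversion. In isolation:

b->cursor = nir_after_instr(instr);

/* The hardware value is 32-bit, so narrow the def itself... */
intr->def.bit_size = 32;

/* ...then widen it back for consumers. Rewriting only the uses *after*
 * the conversion keeps the conversion's own source pointing at the
 * narrowed def instead of at itself. */
nir_def *conv = nir_u2u64(b, &intr->def);
nir_def_rewrite_uses_after(&intr->def, conv, conv->parent_instr);
return true;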
* the result is undefined.
*/
- unsigned bits = intr->dest.ssa.bit_size;
+ unsigned bits = intr->def.bit_size;
nir_alu_type src_type =
nir_alu_type_get_base_type(pan_unpacked_type_for_format(desc));
unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits);
- unpacked = nir_resize_vector(b, unpacked, intr->dest.ssa.num_components);
+ unpacked = nir_resize_vector(b, unpacked, intr->def.num_components);
/* Reorder the components */
if (reorder_comps)
unpacked = pan_unpack_reorder(b, desc, unpacked);
- nir_def_rewrite_uses_after(&intr->dest.ssa, unpacked, &intr->instr);
+ nir_def_rewrite_uses_after(&intr->def, unpacked, &intr->instr);
}
struct inputs {
nir_def *mask = nir_load_sample_mask_in(b);
nir_def *eq = nir_ieq_imm(b, mask, 0);
- nir_def_rewrite_uses(&intr->dest.ssa, eq);
+ nir_def_rewrite_uses(&intr->def, eq);
return true;
}
nir_def *decoded = nir_fmul_imm(b, nir_i2f16(b, raw), 1.0 / 256.0);
/* Make NIR validator happy */
- if (decoded->bit_size != intr->dest.ssa.bit_size)
- decoded = nir_f2fN(b, decoded, intr->dest.ssa.bit_size);
+ if (decoded->bit_size != intr->def.bit_size)
+ decoded = nir_f2fN(b, decoded, intr->def.bit_size);
- nir_def_rewrite_uses(&intr->dest.ssa, decoded);
+ nir_def_rewrite_uses(&intr->def, decoded);
return true;
}
nir_def *repl =
nir_iadd(b, nir_load_vertex_id_zero_base(b), nir_load_first_vertex(b));
- nir_def_rewrite_uses(&intr->dest.ssa, repl);
+ nir_def_rewrite_uses(&intr->def, repl);
return true;
}
nir_tex_src_for_ssa(nir_tex_src_ms_index, nir_load_sample_id(&b));
}
- nir_def_init(&tex->instr, &tex->dest.ssa, 4,
+ nir_def_init(&tex->instr, &tex->def, 4,
nir_alu_type_get_type_size(tex->dest_type));
nir_builder_instr_insert(&b, &tex->instr);
- nir_def *texel = &tex->dest.ssa;
+ nir_def *texel = &tex->def;
unsigned dstcompsz =
util_format_get_component_bits(dstfmt, UTIL_FORMAT_COLORSPACE_RGB, 0);
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, imgcoords);
tex->coord_components = texdim + texisarray;
- nir_def_init(&tex->instr, &tex->dest.ssa, 4,
+ nir_def_init(&tex->instr, &tex->def, 4,
nir_alu_type_get_type_size(tex->dest_type));
nir_builder_instr_insert(&b, &tex->instr);
- nir_def *texel = &tex->dest.ssa;
+ nir_def *texel = &tex->def;
unsigned fullmask = (1 << util_format_get_nr_components(key.imgfmt)) - 1;
unsigned nbufcomps = util_bitcount(fullmask);
unreachable("Unhandled resource intrinsic");
}
- assert(intrin->dest.ssa.bit_size == res->bit_size);
- assert(intrin->dest.ssa.num_components == res->num_components);
- nir_def_rewrite_uses(&intrin->dest.ssa, res);
+ assert(intrin->def.bit_size == res->bit_size);
+ assert(intrin->def.num_components == res->num_components);
+ nir_def_rewrite_uses(&intrin->def, res);
nir_instr_remove(&intrin->instr);
return true;
switch (tex->op) {
case nir_texop_txs:
res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx),
- nir_component_mask(tex->dest.ssa.num_components));
+ nir_component_mask(tex->def.num_components));
break;
case nir_texop_query_levels:
- assert(tex->dest.ssa.num_components == 1);
+ assert(tex->def.num_components == 1);
res = load_tex_img_levels(b, deref, dim, ctx);
break;
case nir_texop_texture_samples:
- assert(tex->dest.ssa.num_components == 1);
+ assert(tex->def.num_components == 1);
res = load_tex_img_samples(b, deref, dim, ctx);
break;
default:
unreachable("Unsupported texture query op");
}
- nir_def_rewrite_uses(&tex->dest.ssa, res);
+ nir_def_rewrite_uses(&tex->def, res);
nir_instr_remove(&tex->instr);
return true;
}
switch (intr->intrinsic) {
case nir_intrinsic_image_deref_size:
res = nir_channels(b, load_tex_img_size(b, deref, dim, ctx),
- nir_component_mask(intr->dest.ssa.num_components));
+ nir_component_mask(intr->def.num_components));
break;
case nir_intrinsic_image_deref_samples:
res = load_tex_img_samples(b, deref, dim, ctx);
unreachable("Unsupported image query op");
}
- nir_def_rewrite_uses(&intr->dest.ssa, res);
+ nir_def_rewrite_uses(&intr->def, res);
nir_instr_remove(&intr->instr);
} else {
nir_rewrite_image_intrinsic(intr, get_img_index(b, deref, ctx), false);
static nir_def *
load_sysval_from_ubo(nir_builder *b, nir_intrinsic_instr *intr, unsigned offset)
{
- return nir_load_ubo(
- b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size,
- nir_imm_int(b, PANVK_SYSVAL_UBO_INDEX), nir_imm_int(b, offset),
- .align_mul = intr->dest.ssa.bit_size / 8, .align_offset = 0,
- .range_base = offset, .range = intr->dest.ssa.bit_size / 8);
+ return nir_load_ubo(b, intr->def.num_components, intr->def.bit_size,
+ nir_imm_int(b, PANVK_SYSVAL_UBO_INDEX),
+ nir_imm_int(b, offset),
+ .align_mul = intr->def.bit_size / 8, .align_offset = 0,
+ .range_base = offset, .range = intr->def.bit_size / 8);
}
struct sysval_options {
#undef SYSVAL
b->cursor = nir_after_instr(instr);
- nir_def_rewrite_uses(&intr->dest.ssa, val);
+ nir_def_rewrite_uses(&intr->def, val);
return true;
}
b->cursor = nir_before_instr(instr);
nir_def *ubo_load =
- nir_load_ubo(b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size,
+ nir_load_ubo(b, intr->def.num_components, intr->def.bit_size,
nir_imm_int(b, PANVK_PUSH_CONST_UBO_INDEX), intr->src[0].ssa,
- .align_mul = intr->dest.ssa.bit_size / 8, .align_offset = 0,
+ .align_mul = intr->def.bit_size / 8, .align_offset = 0,
.range_base = nir_intrinsic_base(intr),
.range = nir_intrinsic_range(intr));
- nir_def_rewrite_uses(&intr->dest.ssa, ubo_load);
+ nir_def_rewrite_uses(&intr->def, ubo_load);
nir_instr_remove(instr);
return true;
}
tex->dest_type = nir_type_int32;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
- &texture->dest.ssa);
+ &texture->def);
- nir_def_init(&tex->instr, &tex->dest.ssa, nir_tex_instr_dest_size(tex), 32);
+ nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex), 32);
nir_builder_instr_insert(b, &tex->instr);
- state->image_size = nir_i2f32(b, &tex->dest.ssa);
+ state->image_size = nir_i2f32(b, &tex->def);
return state->image_size;
}
tex->sampler_index = old_tex->sampler_index;
tex->is_array = old_tex->is_array;
- nir_def_init(&tex->instr, &tex->dest.ssa, old_tex->dest.ssa.num_components,
- old_tex->dest.ssa.bit_size);
+ nir_def_init(&tex->instr, &tex->def, old_tex->def.num_components,
+ old_tex->def.bit_size);
nir_builder_instr_insert(b, &tex->instr);
- return &tex->dest.ssa;
+ return &tex->def;
}
static unsigned
swizzled_bpcs);
}
- nir_def_rewrite_uses(&tex->dest.ssa, result);
+ nir_def_rewrite_uses(&tex->def, result);
nir_instr_remove(&tex->instr);
return true;