new_intrin->num_components = intrin->num_components;
if (op != nir_intrinsic_store_global_amd)
- nir_ssa_dest_init(&new_intrin->instr, &new_intrin->dest, intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&new_intrin->instr, &new_intrin->dest,
+ intrin->dest.ssa.num_components,
+ intrin->dest.ssa.bit_size);
unsigned num_src = nir_intrinsic_infos[intrin->intrinsic].num_srcs;
for (unsigned i = 0; i < num_src; i++)
nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
new_tex->src[0].src_type = tex->src[i].src_type;
nir_ssa_dest_init(&new_tex->instr, &new_tex->dest,
- nir_tex_instr_dest_size(new_tex), 32, NULL);
+ nir_tex_instr_dest_size(new_tex), 32);
nir_builder_instr_insert(b, &new_tex->instr);
desc = &new_tex->dest.ssa;
break;
nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
new_tex->src[0].src_type = tex->src[i].src_type;
nir_ssa_dest_init(&new_tex->instr, &new_tex->dest,
- nir_tex_instr_dest_size(new_tex), 32, NULL);
+ nir_tex_instr_dest_size(new_tex), 32);
nir_builder_instr_insert(b, &new_tex->instr);
sampler_desc = &new_tex->dest.ssa;
break;
nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
new_tex->src[0].src_type = tex->src[i].src_type;
nir_ssa_dest_init(&new_tex->instr, &new_tex->dest,
- nir_tex_instr_dest_size(new_tex), 32, NULL);
+ nir_tex_instr_dest_size(new_tex), 32);
nir_builder_instr_insert(b, &new_tex->instr);
desc = &new_tex->dest.ssa;
break;
nir_intrinsic_instr *clone_intr = nir_instr_as_intrinsic(clone);
/* Shrink the load to count contiguous components */
- nir_ssa_dest_init(clone, &clone_intr->dest, count, bit_size, NULL);
+ nir_ssa_dest_init(clone, &clone_intr->dest, count, bit_size);
nir_ssa_def *clone_vec = &clone_intr->dest.ssa;
clone_intr->num_components = count;
query->op = nir_texop_lod_bias_agx;
query->dest_type = nir_type_float16;
- nir_ssa_dest_init(instr, &query->dest, 1, 16, NULL);
+ nir_ssa_dest_init(instr, &query->dest, 1, 16);
return &query->dest.ssa;
}
tex->coord_components = 2;
tex->texture_index = rt;
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
return nir_trim_vector(b, &tex->dest.ssa, nr);
}
}
- nir_ssa_dest_init(&new_intr->instr, &new_intr->dest,
- 1, bit_size, NULL);
+ nir_ssa_dest_init(&new_intr->instr, &new_intr->dest, 1,
+ bit_size);
dest_components[component] = &new_intr->dest.ssa;
nir_builder_instr_insert(b, &new_intr->instr);
nir_intrinsic_instr *size =
nir_intrinsic_instr_create(b->shader, buffer_size_op);
size->src[0] = nir_src_for_ssa(nir_imm_int(b, buffer_idx));
- nir_ssa_dest_init(&size->instr, &size->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&size->instr, &size->dest, 1, 32);
nir_builder_instr_insert(b, &size->instr);
/* Compute the maximum offset being accessed and if it is
size_inst->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
nir_intrinsic_set_image_array(size_inst, is_array);
size_inst->num_components = num_coords;
- nir_ssa_dest_init(&size_inst->instr, &size_inst->dest,
- num_coords, 32, NULL);
+ nir_ssa_dest_init(&size_inst->instr, &size_inst->dest, num_coords, 32);
nir_ssa_def *size = &size_inst->dest.ssa;
nir_builder_instr_insert(b, &size_inst->instr);
nir_intrinsic_instr_create(b->shader, instr->intrinsic);
chan_instr->num_components = 1;
nir_ssa_dest_init(&chan_instr->instr, &chan_instr->dest, 1,
- instr->dest.ssa.bit_size, NULL);
+ instr->dest.ssa.bit_size);
chan_instr->src[0] = nir_src_for_ssa(chan_offset);
tex->dest_type = nir_type_uint32;
tex->is_array = false;
tex->coord_components = 1;
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "texel buffer result");
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
uint32_t swiz[4];
tex->is_array = glsl_sampler_type_is_array(sampler_type);
tex->coord_components = tex_pos->num_components;
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
return &tex->dest.ssa;
}
tex->is_array = false;
tex->coord_components = tex_pos->num_components;
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
return &tex->dest.ssa;
}
assert(ir->return_deref);
if (ir->return_deref->type->is_integer_64()) {
nir_ssa_dest_init(&instr->instr, &instr->dest,
- ir->return_deref->type->vector_elements, 64, NULL);
+ ir->return_deref->type->vector_elements, 64);
} else {
nir_ssa_dest_init(&instr->instr, &instr->dest,
- ir->return_deref->type->vector_elements, 32, NULL);
+ ir->return_deref->type->vector_elements, 32);
}
nir_builder_instr_insert(&b, &instr->instr);
break;
/* Set the intrinsic destination. */
if (ir->return_deref) {
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32);
}
/* Set the intrinsic parameters. */
} else
num_components = ir->return_deref->type->vector_elements;
- nir_ssa_dest_init(&instr->instr, &instr->dest,
- num_components, 32, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, num_components, 32);
}
if (op == nir_intrinsic_image_deref_size) {
break;
}
case nir_intrinsic_shader_clock:
- nir_ssa_dest_init(&instr->instr, &instr->dest, 2, 32, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 2, 32);
nir_intrinsic_set_memory_scope(instr, NIR_SCOPE_SUBGROUP);
nir_builder_instr_insert(&b, &instr->instr);
break;
/* Setup destination register */
unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
- nir_ssa_dest_init(&instr->instr, &instr->dest,
- type->vector_elements, bit_size, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, type->vector_elements,
+ bit_size);
nir_builder_instr_insert(&b, &instr->instr);
assert(ir->return_deref);
unsigned bit_size = glsl_get_bit_size(ir->return_deref->type);
nir_ssa_dest_init(&instr->instr, &instr->dest,
- ir->return_deref->type->vector_elements,
- bit_size, NULL);
+ ir->return_deref->type->vector_elements, bit_size);
nir_intrinsic_set_atomic_op(instr, atomic_op);
nir_builder_instr_insert(&b, &instr->instr);
break;
FALLTHROUGH;
case nir_intrinsic_vote_any:
case nir_intrinsic_vote_all: {
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1);
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
case nir_intrinsic_ballot: {
nir_ssa_dest_init(&instr->instr, &instr->dest,
- ir->return_deref->type->vector_elements, 64, NULL);
+ ir->return_deref->type->vector_elements, 64);
instr->num_components = ir->return_deref->type->vector_elements;
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
}
case nir_intrinsic_read_invocation: {
nir_ssa_dest_init(&instr->instr, &instr->dest,
- ir->return_deref->type->vector_elements, 32, NULL);
+ ir->return_deref->type->vector_elements, 32);
instr->num_components = ir->return_deref->type->vector_elements;
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
}
case nir_intrinsic_read_first_invocation: {
nir_ssa_dest_init(&instr->instr, &instr->dest,
- ir->return_deref->type->vector_elements, 32, NULL);
+ ir->return_deref->type->vector_elements, 32);
instr->num_components = ir->return_deref->type->vector_elements;
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
break;
}
case nir_intrinsic_is_helper_invocation: {
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1);
nir_builder_instr_insert(&b, &instr->instr);
break;
}
case nir_intrinsic_is_sparse_texels_resident: {
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1);
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
nir_dest *dest = get_instr_dest(instr);
if (dest)
- nir_ssa_dest_init(instr, dest, num_components, bit_size, NULL);
+ nir_ssa_dest_init(instr, dest, num_components, bit_size);
nir_builder_instr_insert(&b, instr);
-/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
- unsigned num_components, unsigned bit_size,
- const char *name)
+ unsigned num_components, unsigned bit_size)
{
dest->is_ssa = true;
nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size);
nir_dest new_dest);
void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
- unsigned num_components, unsigned bit_size,
- const char *name);
+ unsigned num_components, unsigned bit_size);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
unsigned num_components, unsigned bit_size);
static inline void
{
assert(glsl_type_is_vector_or_scalar(type));
nir_ssa_dest_init(instr, dest, glsl_get_components(type),
- glsl_get_bit_size(type), name);
+ glsl_get_bit_size(type));
}
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_ssa_def *new_ssa);
void nir_ssa_def_rewrite_uses_src(nir_ssa_def *def, nir_src new_src);
}
nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
- bit_size, NULL);
+ bit_size);
instr->dest.write_mask = nir_component_mask(num_components);
nir_builder_instr_insert(build, &instr->instr);
}
assert(src_idx == num_srcs);
- nir_ssa_dest_init(&tex->instr, &tex->dest,
- nir_tex_instr_dest_size(tex),
- nir_alu_type_get_type_size(tex->dest_type),
- NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex),
+ nir_alu_type_get_type_size(tex->dest_type));
nir_builder_instr_insert(build, &tex->instr);
return &tex->dest.ssa;
* can't re-guess the num_components when num_components == 1 (nir_op_mov).
*/
nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
- comp[0].def->bit_size, NULL);
+ comp[0].def->bit_size);
instr->dest.write_mask = nir_component_mask(num_components);
nir_builder_instr_insert(build, &instr->instr);
load->num_components = num_components;
load->const_index[0] = index;
- nir_ssa_dest_init(&load->instr, &load->dest,
- num_components, bit_size, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
}
assert(then_def->num_components == else_def->num_components);
assert(then_def->bit_size == else_def->bit_size);
- nir_ssa_dest_init(&phi->instr, &phi->dest,
- then_def->num_components, then_def->bit_size, NULL);
+ nir_ssa_dest_init(&phi->instr, &phi->dest, then_def->num_components,
+ then_def->bit_size);
nir_builder_instr_insert(build, &phi->instr);
nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
- nir_src_bit_size(src.src), NULL);
+ nir_src_bit_size(src.src));
mov->exact = build->exact;
mov->dest.write_mask = (1 << num_components) - 1;
mov->src[0] = src;
deref->var = var;
nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
- nir_get_ptr_bitsize(build->shader), NULL);
+ nir_get_ptr_bitsize(build->shader));
nir_builder_instr_insert(build, &deref->instr);
nir_ssa_dest_init(&deref->instr, &deref->dest,
parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ parent->dest.ssa.bit_size);
nir_builder_instr_insert(build, &deref->instr);
nir_ssa_dest_init(&deref->instr, &deref->dest,
parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ parent->dest.ssa.bit_size);
nir_builder_instr_insert(build, &deref->instr);
nir_ssa_dest_init(&deref->instr, &deref->dest,
parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ parent->dest.ssa.bit_size);
nir_builder_instr_insert(build, &deref->instr);
nir_ssa_dest_init(&deref->instr, &deref->dest,
parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ parent->dest.ssa.bit_size);
nir_builder_instr_insert(build, &deref->instr);
deref->parent = nir_src_for_ssa(parent);
deref->cast.ptr_stride = ptr_stride;
- nir_ssa_dest_init(&deref->instr, &deref->dest,
- parent->num_components, parent->bit_size, NULL);
+ nir_ssa_dest_init(&deref->instr, &deref->dest, parent->num_components,
+ parent->bit_size);
nir_builder_instr_insert(build, &deref->instr);
nir_ssa_dest_init(&deref->instr, &deref->dest,
parent->dest.ssa.num_components,
- parent->dest.ssa.bit_size, NULL);
+ parent->dest.ssa.bit_size);
nir_builder_instr_insert(build, &deref->instr);
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
- nir_ssa_dest_init(&load->instr, &load->dest,
- num_components, bit_size, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
}
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
- nir_ssa_dest_init(&load->instr, &load->dest,
- num_components, bit_size, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
}
{
unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
- nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32, NULL);
+ nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32);
nir_intrinsic_set_interp_mode(bary, interp_mode);
nir_builder_instr_insert(build, &bary->instr);
return &bary->dest.ssa;
% endif
% if opcode.has_dest:
% if opcode.dest_components == 0:
- nir_ssa_dest_init(&intrin->instr, &intrin->dest, intrin->num_components, ${get_intrinsic_bitsize(opcode)}, NULL);
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest, intrin->num_components, ${get_intrinsic_bitsize(opcode)});
% else:
- nir_ssa_dest_init(&intrin->instr, &intrin->dest, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)}, NULL);
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)});
% endif
% endif
% for i in range(opcode.num_srcs):
txs->src[idx].src = nir_src_for_ssa(nir_imm_int(b, 0));
txs->src[idx].src_type = nir_tex_src_lod;
- nir_ssa_dest_init(&txs->instr, &txs->dest,
- nir_tex_instr_dest_size(txs), 32, NULL);
+ nir_ssa_dest_init(&txs->instr, &txs->dest, nir_tex_instr_dest_size(txs),
+ 32);
nir_builder_instr_insert(b, &txs->instr);
return &txs->dest.ssa;
}
}
- nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32, NULL);
+ nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32);
nir_builder_instr_insert(b, &tql->instr);
/* The LOD is the y component of the result */
ndst->is_ssa = dst->is_ssa;
if (dst->is_ssa) {
nir_ssa_dest_init(ninstr, ndst, dst->ssa.num_components,
- dst->ssa.bit_size, NULL);
+ dst->ssa.bit_size);
if (likely(state->remap_table))
add_remap(state, &ndst->ssa, &dst->ssa);
} else {
tex->src[0].src_type = nir_tex_src_texture_deref;
tex->src[0].src = nir_src_for_ssa(&texture->dest.ssa);
- nir_ssa_dest_init(&tex->instr, &tex->dest,
- nir_tex_instr_dest_size(tex), 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex),
+ 32);
nir_builder_instr_insert(b, &tex->instr);
state->image_size = nir_i2f32(b, &tex->dest.ssa);
nir_ssa_dest_init(&tex->instr, &tex->dest,
old_tex->dest.ssa.num_components,
- nir_dest_bit_size(old_tex->dest), NULL);
+ nir_dest_bit_size(old_tex->dest));
nir_builder_instr_insert(b, &tex->instr);
return &tex->dest.ssa;
}
nir_ssa_dest_init(&new_deref->instr, &new_deref->dest,
- deref->dest.ssa.num_components,
- deref->dest.ssa.bit_size,
- NULL);
+ deref->dest.ssa.num_components, deref->dest.ssa.bit_size);
nir_builder_instr_insert(b, &new_deref->instr);
return new_deref;
nir_parallel_copy_entry);
nir_ssa_dest_init(&pcopy->instr, &entry->dest,
phi->dest.ssa.num_components,
- phi->dest.ssa.bit_size, NULL);
+ phi->dest.ssa.bit_size);
entry->dest.ssa.divergent = nir_src_is_divergent(src->src);
exec_list_push_tail(&pcopy->entries, &entry->node);
nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
nir_parallel_copy_entry);
nir_ssa_dest_init(&block_pcopy->instr, &entry->dest,
- phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
- NULL);
+ phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
entry->dest.ssa.divergent = phi->dest.ssa.divergent;
exec_list_push_tail(&block_pcopy->entries, &entry->node);
nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
unsigned bit_size)
{
- nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
- bit_size, NULL);
+ nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components, bit_size);
alu->dest.write_mask = (1 << num_components) - 1;
}
}
nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
- instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size, NULL);
+ instr->dest.ssa.num_components, instr->dest.ssa.bit_size);
nir_instr_insert_before(&instr->instr, &new_instr->instr);
nir_instr_remove(&instr->instr);
nir_phi_instr_add_src(lowered[1], src->pred, nir_src_for_ssa(y));
}
- nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest,
- num_components, 32, NULL);
- nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest,
- num_components, 32, NULL);
+ nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest, num_components,
+ 32);
+ nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest, num_components,
+ 32);
b->cursor = nir_before_instr(&phi->instr);
nir_builder_instr_insert(b, &lowered[0]->instr);
nir_src_for_ssa(nir_channels(b, texcoord,
(1 << tex->coord_components) - 1));
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
/* kill if tex != 0.0.. take .x or .w channel according to format: */
nir_src_for_ssa(nir_channels(b, texcoord,
(1 << tex->coord_components) - 1));
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
def = &tex->dest.ssa;
tex->src[2].src_type = nir_tex_src_coord;
tex->src[2].src = nir_src_for_ssa(nir_channels(b, def, 0x3));
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
def_xy = &tex->dest.ssa;
tex->src[0].src_type = nir_tex_src_coord;
tex->src[0].src = nir_src_for_ssa(nir_channels(b, def, 0xc));
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
def_zw = &tex->dest.ssa;
tex->src[2].src = nir_src_for_ssa(nir_imm_intN_t(b, io.location - FRAG_RESULT_DATA0, 32));
tex->src[2].src_type = nir_tex_src_texture_handle;
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, &tex->dest.ssa);
break;
}
- nir_ssa_dest_init(&fmask_load->instr, &fmask_load->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&fmask_load->instr, &fmask_load->dest, 1, 32);
nir_builder_instr_insert(b, &fmask_load->instr);
nir_ssa_def *samples_identical = nir_ieq_imm(b, &fmask_load->dest.ssa, 0);
nir_ssa_dest_init(&load->instr, &load->dest,
orig_instr->dest.ssa.num_components,
- orig_instr->dest.ssa.bit_size, NULL);
+ orig_instr->dest.ssa.bit_size);
nir_builder_instr_insert(b, &load->instr);
*dest = &load->dest.ssa;
} else {
tex->texture_non_uniform = nir_intrinsic_access(load) & ACCESS_NON_UNIFORM;
- nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex), 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex),
+ 32);
nir_builder_instr_insert(b, &tex->instr);
if (tex->is_sparse) {
sizeof(intrin->const_index));
nir_ssa_dest_init(&split->instr, &split->dest,
- intrin->dest.ssa.num_components, 32, NULL);
+ intrin->dest.ssa.num_components, 32);
nir_builder_instr_insert(b, &split->instr);
res[i] = &split->dest.ssa;
nir_intrinsic_instr_create(b->shader, nir_intrinsic_vote_ieq);
vote->src[0] = nir_src_for_ssa(x);
vote->num_components = x->num_components;
- nir_ssa_dest_init(&vote->instr, &vote->dest, 1, 1, NULL);
+ nir_ssa_dest_init(&vote->instr, &vote->dest, 1, 1);
nir_builder_instr_insert(b, &vote->instr);
return &vote->dest.ssa;
}
nir_intrinsic_set_reduction_op(scan, reduction_op);
if (scan_op == nir_intrinsic_reduce)
nir_intrinsic_set_cluster_size(scan, cluster_size);
- nir_ssa_dest_init(&scan->instr, &scan->dest,
- val->num_components, val->bit_size, NULL);
+ nir_ssa_dest_init(&scan->instr, &scan->dest, val->num_components,
+ val->bit_size);
nir_builder_instr_insert(b, &scan->instr);
return &scan->dest.ssa;
}
load->src[0] = nir_src_for_ssa(offset);
}
- nir_ssa_dest_init(&load->instr, &load->dest,
- num_components, bit_size, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
nir_builder_instr_insert(b, &load->instr);
return &load->dest.ssa;
nir_intrinsic_instr *bary_setup =
nir_intrinsic_instr_create(state->builder.shader, bary_op);
- nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
+ nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32);
nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
assert(intrin->dest.is_ssa);
load->num_components = num_components;
- nir_ssa_dest_init(&load->instr, &load->dest, num_components,
- bit_size, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, num_components, bit_size);
assert(bit_size % 8 == 0);
nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
assert(intrin->dest.ssa.num_components == 1);
- nir_ssa_dest_init(&atomic->instr, &atomic->dest,
- 1, intrin->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1,
+ intrin->dest.ssa.bit_size);
assert(atomic->dest.ssa.bit_size % 8 == 0);
if (intr->intrinsic != nir_intrinsic_store_deref) {
nir_ssa_dest_init(&element_intr->instr, &element_intr->dest,
- intr->num_components, intr->dest.ssa.bit_size, NULL);
+ intr->num_components, intr->dest.ssa.bit_size);
if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
for (unsigned i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest,
- 1, intr->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+ intr->dest.ssa.bit_size);
chan_intr->num_components = 1;
nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
for (unsigned i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest,
- 1, intr->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+ intr->dest.ssa.bit_size);
chan_intr->num_components = 1;
nir_intrinsic_set_align_offset(chan_intr,
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest,
- 1, intr->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+ intr->dest.ssa.bit_size);
chan_intr->num_components = 1;
nir_deref_instr *deref = nir_build_deref_var(b, chan_var);
new_interp->num_components = interp->num_components;
nir_ssa_dest_init(&new_interp->instr, &new_interp->dest,
interp->dest.ssa.num_components,
- interp->dest.ssa.bit_size, NULL);
+ interp->dest.ssa.bit_size);
nir_builder_instr_insert(b, &new_interp->instr);
nir_store_deref(b, temp_deref, &new_interp->dest.ssa,
if (intrin->dest.is_ssa) {
nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
intrin->num_components,
- intrin->dest.ssa.bit_size, NULL);
+ intrin->dest.ssa.bit_size);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
&mov->dest.dest.ssa);
} else {
if (info->has_dest) {
assert(intrin->dest.is_ssa);
- nir_ssa_dest_init(&dup->instr, &dup->dest,
- num_components, bit_size, NULL);
+ nir_ssa_dest_init(&dup->instr, &dup->dest, num_components, bit_size);
} else {
nir_intrinsic_set_write_mask(dup, (1 << num_components) - 1);
}
nir_alu_instr *vec = nir_alu_instr_create(state->shader, vec_op);
nir_ssa_dest_init(&vec->instr, &vec->dest.dest,
- phi->dest.ssa.num_components,
- bit_size, NULL);
+ phi->dest.ssa.num_components, bit_size);
vec->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;
for (unsigned i = 0; i < phi->dest.ssa.num_components; i++) {
nir_phi_instr *new_phi = nir_phi_instr_create(state->shader);
nir_ssa_dest_init(&new_phi->instr, &new_phi->dest, 1,
- phi->dest.ssa.bit_size, NULL);
+ phi->dest.ssa.bit_size);
vec->src[i].src = nir_src_for_ssa(&new_phi->dest.ssa);
/* We need to insert a mov to grab the i'th component of src */
nir_alu_instr *mov = nir_alu_instr_create(state->shader,
nir_op_mov);
- nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, bit_size, NULL);
+ nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, bit_size);
mov->dest.write_mask = 1;
nir_src_copy(&mov->src[0].src, &src->src, &mov->instr);
mov->src[0].swizzle[0] = i;
assert(num_srcs == 3);
tex->dest_type = nir_intrinsic_dest_type(intrin);
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
break;
}
assert(num_srcs == 2);
tex->dest_type = nir_type_uint32;
- nir_ssa_dest_init(&tex->instr, &tex->dest,
- coord_components, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, coord_components, 32);
break;
}
return true;
list_del(&dest->reg.def_link);
- nir_ssa_dest_init(instr, dest, reg->num_components,
- reg->bit_size, NULL);
+ nir_ssa_dest_init(instr, dest, reg->num_components, reg->bit_size);
nir_phi_builder_value_set_block_def(value, instr->block, &dest->ssa);
alu->dest.write_mask = (1 << num_components) - 1;
list_del(&alu->dest.dest.reg.def_link);
nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
- reg->bit_size, NULL);
+ reg->bit_size);
nir_op vecN_op = nir_op_vec(reg->num_components);
}
nir_ssa_dest_init(&vec->instr, &vec->dest.dest, reg->num_components,
- reg->bit_size, NULL);
+ reg->bit_size);
nir_instr_insert(nir_after_instr(&alu->instr), &vec->instr);
nir_phi_builder_value_set_block_def(value, alu->instr.block,
nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
load->num_components = 1;
nir_src_copy(&load->src[0], idx, &load->instr);
- nir_ssa_dest_init(&load->instr, &load->dest, 1, bitsize, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, 1, bitsize);
nir_builder_instr_insert(b, &load->instr);
return &load->dest.ssa;
}
} else {
nir_ssa_dest_init(&global->instr, &global->dest,
intr->dest.ssa.num_components,
- intr->dest.ssa.bit_size, NULL);
+ intr->dest.ssa.bit_size);
if (is_atomic) {
nir_src_copy(&global->src[1], &intr->src[2], &global->instr);
comp = nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa);
nir_intrinsic_instr *intr = nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
- nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32);
intr->const_index[0] = intrin->const_index[0];
intr->const_index[1] = intrin->const_index[1];
intr->src[0] = nir_src_for_ssa(comp);
for (unsigned i = 0; i < intrin->num_components; i++) {
nir_intrinsic_instr *chan_intrin =
nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
- nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest,
- 1, intrin->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest, 1,
+ intrin->dest.ssa.bit_size);
chan_intrin->num_components = 1;
/* value */
for (unsigned i = 0; i < intrin->num_components; i++) {
nir_intrinsic_instr *chan_intrin =
nir_intrinsic_instr_create(b->shader, intrin->intrinsic);
- nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest,
- 1, intrin->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&chan_intrin->instr, &chan_intrin->dest, 1,
+ intrin->dest.ssa.bit_size);
chan_intrin->num_components = 1;
chan_intrin->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
nir_builder_instr_insert(b, &chan_intrin->instr);
nir_intrinsic_set_swizzle_mask(swizzle, (mask << 10) | 0x1f);
nir_ssa_dest_init(&swizzle->instr, &swizzle->dest,
intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size, NULL);
+ intrin->dest.ssa.bit_size);
if (options->lower_to_scalar && swizzle->num_components > 1) {
return lower_subgroup_op_to_scalar(b, swizzle, options->lower_shuffle_to_32bit);
shuffle->src[1] = nir_src_for_ssa(index);
nir_ssa_dest_init(&shuffle->instr, &shuffle->dest,
intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size, NULL);
+ intrin->dest.ssa.bit_size);
bool lower_to_32bit = options->lower_shuffle_to_32bit && is_shuffle;
if (options->lower_to_scalar && shuffle->num_components > 1) {
nir_src_copy(&qbcst->src[0], &intrin->src[0], &qbcst->instr);
nir_ssa_dest_init(&qbcst->instr, &qbcst->dest,
intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size, NULL);
+ intrin->dest.ssa.bit_size);
nir_ssa_def *qbcst_dst = NULL;
plane_tex->sampler_index = tex->sampler_index;
nir_ssa_dest_init(&plane_tex->instr, &plane_tex->dest, 4,
- nir_dest_bit_size(tex->dest), NULL);
+ nir_dest_bit_size(tex->dest));
nir_builder_instr_insert(b, &plane_tex->instr);
txd->src[tex->num_srcs + 1].src = nir_src_for_ssa(dfdy);
txd->src[tex->num_srcs + 1].src_type = nir_tex_src_ddy;
- nir_ssa_dest_init(&txd->instr, &txd->dest, nir_dest_num_components(tex->dest),
- nir_dest_bit_size(tex->dest), NULL);
+ nir_ssa_dest_init(&txd->instr, &txd->dest,
+ nir_dest_num_components(tex->dest),
+ nir_dest_bit_size(tex->dest));
nir_builder_instr_insert(b, &txd->instr);
nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txd->dest.ssa);
nir_instr_remove(&tex->instr);
txl->src[tex->num_srcs - 1].src = nir_src_for_ssa(lod);
txl->src[tex->num_srcs - 1].src_type = nir_tex_src_lod;
- nir_ssa_dest_init(&txl->instr, &txl->dest, nir_dest_num_components(tex->dest),
- nir_dest_bit_size(tex->dest), NULL);
+ nir_ssa_dest_init(&txl->instr, &txl->dest,
+ nir_dest_num_components(tex->dest),
+ nir_dest_bit_size(tex->dest));
nir_builder_instr_insert(b, &txl->instr);
nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
nir_instr_remove(&tex->instr);
tex_copy->src[tex_copy->num_srcs - 1] = src;
nir_ssa_dest_init(&tex_copy->instr, &tex_copy->dest,
- nir_tex_instr_dest_size(tex), 32, NULL);
+ nir_tex_instr_dest_size(tex), 32);
nir_builder_instr_insert(b, &tex_copy->instr);
fmask_fetch->is_array = tex->is_array;
fmask_fetch->texture_non_uniform = tex->texture_non_uniform;
fmask_fetch->dest_type = nir_type_uint32;
- nir_ssa_dest_init(&fmask_fetch->instr, &fmask_fetch->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&fmask_fetch->instr, &fmask_fetch->dest, 1, 32);
fmask_fetch->num_srcs = 0;
for (unsigned i = 0; i < tex->num_srcs; i++) {
nir_tex_instr *fmask_fetch = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
fmask_fetch->op = nir_texop_fragment_mask_fetch_amd;
fmask_fetch->dest_type = nir_type_uint32;
- nir_ssa_dest_init(&fmask_fetch->instr, &fmask_fetch->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&fmask_fetch->instr, &fmask_fetch->dest, 1, 32);
nir_builder_instr_insert(b, &fmask_fetch->instr);
nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_ieq_imm(b, &fmask_fetch->dest.ssa, 0));
}
/* NIR expects a vec4 result from the above texture instructions */
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_ssa_def *tex_r = nir_channel(b, &tex->dest.ssa, 0);
nir_ssa_def *cmp = tex->src[comp_index].src.ssa;
mov->dest.write_mask = (1 << intrin->num_components) - 1;
nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
intrin->num_components,
- intrin->dest.ssa.bit_size, NULL);
+ intrin->dest.ssa.bit_size);
nir_instr_insert_before(&intrin->instr, &mov->instr);
nir_instr_remove(&intrin->instr);
mov_add->dest.write_mask = orig_add->dest.write_mask;
nir_ssa_dest_init(&mov_add->instr, &mov_add->dest.dest,
orig_add->dest.dest.ssa.num_components,
- orig_add->dest.dest.ssa.bit_size, NULL);
+ orig_add->dest.dest.ssa.bit_size);
mov_add->src[0].src = nir_src_for_ssa(fadd);
nir_builder_instr_insert(bld, &mov_add->instr);
mov_cmp->dest.write_mask = orig_cmp->dest.write_mask;
nir_ssa_dest_init(&mov_cmp->instr, &mov_cmp->dest.dest,
orig_cmp->dest.dest.ssa.num_components,
- orig_cmp->dest.dest.ssa.bit_size, NULL);
+ orig_cmp->dest.dest.ssa.bit_size);
mov_cmp->src[0].src = nir_src_for_ssa(cmp);
nir_builder_instr_insert(bld, &mov_cmp->instr);
nir_phi_instr_add_src(phi, prev_block, nir_src_for_ssa(prev_value));
nir_phi_instr_add_src(phi, continue_block, nir_src_for_ssa(alu_copy));
- nir_ssa_dest_init(&phi->instr, &phi->dest,
- alu_copy->num_components, alu_copy->bit_size, NULL);
+ nir_ssa_dest_init(&phi->instr, &phi->dest, alu_copy->num_components,
+ alu_copy->bit_size);
b->cursor = nir_after_phis(header_block);
nir_builder_instr_insert(b, &phi->instr);
nir_phi_get_src_from_block(nir_instr_as_phi(bcsel->src[continue_src].src.ssa->parent_instr),
continue_block)->src);
- nir_ssa_dest_init(&phi->instr,
- &phi->dest,
+ nir_ssa_dest_init(&phi->instr, &phi->dest,
nir_dest_num_components(bcsel->dest.dest),
- nir_dest_bit_size(bcsel->dest.dest),
- NULL);
+ nir_dest_bit_size(bcsel->dest.dest));
b->cursor = nir_after_phis(header_block);
nir_builder_instr_insert(b, &phi->instr);
nir_ssa_dest_init(&nalu->instr, &nalu->dest.dest,
alu->dest.dest.ssa.num_components,
- alu->dest.dest.ssa.bit_size, NULL);
+ alu->dest.dest.ssa.bit_size);
nalu->dest.saturate = alu->dest.saturate;
nalu->dest.write_mask = alu->dest.write_mask;
}
nir_ssa_dest_init(&sel->instr, &sel->dest.dest,
- phi->dest.ssa.num_components,
- phi->dest.ssa.bit_size, NULL);
+ phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
sel->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;
nir_ssa_def_rewrite_uses(&phi->dest.ssa,
nir_phi_instr *new_phi = nir_phi_instr_create(b->shader);
nir_ssa_dest_init(&new_phi->instr, &new_phi->dest,
phi->dest.ssa.num_components,
- nir_alu_type_get_type_size(nir_op_infos[op].output_type),
- NULL);
+ nir_alu_type_get_type_size(nir_op_infos[op].output_type));
/* Push the conversion into the new phi sources: */
nir_foreach_phi_src (src, phi) {
/* construct replacement phi instruction: */
nir_phi_instr *new_phi = nir_phi_instr_create(b->shader);
nir_ssa_dest_init(&new_phi->instr, &new_phi->dest,
- phi->dest.ssa.num_components,
- bit_size, NULL);
+ phi->dest.ssa.num_components, bit_size);
/* Remove the widening conversions from the phi sources: */
nir_foreach_phi_src (src, phi) {
nir_ssa_def old_result = intrin->dest.ssa;
list_replace(&intrin->dest.ssa.uses, &old_result.uses);
- nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, intrin->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1,
+ intrin->dest.ssa.bit_size);
nir_ssa_def *result = optimize_atomic(b, intrin, return_prev);
b.cursor = nir_after_instr(instr1);
nir_alu_instr *new_alu = nir_alu_instr_create(b.shader, alu1->op);
- nir_ssa_dest_init(&new_alu->instr, &new_alu->dest.dest,
- total_components, alu1->dest.dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&new_alu->instr, &new_alu->dest.dest, total_components,
+ alu1->dest.dest.ssa.bit_size);
new_alu->dest.write_mask = (1 << total_components) - 1;
new_alu->instr.pass_flags = alu1->instr.pass_flags;
*/
nir_phi_instr *phi = nir_phi_instr_create(val->builder->shader);
nir_ssa_dest_init(&phi->instr, &phi->dest, val->num_components,
- val->bit_size, NULL);
+ val->bit_size);
phi->instr.block = dom;
exec_list_push_tail(&val->phis, &phi->instr.node);
def = &phi->dest.ssa;
cast->parent = nir_src_for_ssa(block_def);
cast->cast.ptr_stride = nir_deref_instr_array_stride(deref);
- nir_ssa_dest_init(&cast->instr, &cast->dest,
- def->num_components, def->bit_size, NULL);
+ nir_ssa_dest_init(&cast->instr, &cast->dest, def->num_components,
+ def->bit_size);
nir_instr_insert(nir_before_instr(src->parent_instr),
&cast->instr);
block_def = &cast->dest.ssa;
nir_alu_instr *alu = nir_alu_instr_create(build->shader, op);
nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
- dst_bit_size, NULL);
+ dst_bit_size);
alu->dest.write_mask = (1 << num_components) - 1;
alu->dest.saturate = false;
num_components = blob_read_uint32(ctx->blob);
else
num_components = decode_num_components_in_3bits(dest.ssa.num_components);
- nir_ssa_dest_init(instr, dst, num_components, bit_size, NULL);
+ nir_ssa_dest_init(instr, dst, num_components, bit_size);
dst->ssa.divergent = dest.ssa.divergent;
read_add_object(ctx, &dst->ssa);
} else {
nir_alu_instr *vec = nir_alu_instr_create(b->shader, vec_op);
nir_ssa_dest_init(&vec->instr, &vec->dest.dest,
- phi->dest.ssa.num_components,
- 64, NULL);
+ phi->dest.ssa.num_components, 64);
vec->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;
int num_comp[2] = {2, phi->dest.ssa.num_components - 2};
for (unsigned i = 0; i < 2; i++) {
new_phi[i] = nir_phi_instr_create(b->shader);
nir_ssa_dest_init(&new_phi[i]->instr, &new_phi[i]->dest, num_comp[i],
- phi->dest.ssa.bit_size, NULL);
+ phi->dest.ssa.bit_size);
nir_foreach_phi_src(src, phi) {
/* Insert at the end of the predecessor but before the jump
/* Initialize a phi-instruction */
nir_phi_instr *phi = nir_phi_instr_create(state->shader);
- nir_ssa_dest_init(&phi->instr, &phi->dest,
- def->num_components, def->bit_size, "LCSSA-phi");
+ nir_ssa_dest_init(&phi->instr, &phi->dest, def->num_components,
+ def->bit_size);
/* Create a phi node with as many sources pointing to the same ssa_def as
* the block has predecessors.
cast->cast.ptr_stride = nir_deref_instr_array_stride(instr);
nir_ssa_dest_init(&cast->instr, &cast->dest,
- phi->dest.ssa.num_components,
- phi->dest.ssa.bit_size, NULL);
+ phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
nir_instr_insert(nir_after_phis(state->block_after_loop), &cast->instr);
dest = &cast->dest.ssa;
}
{
nir_phi_instr *phi = nir_phi_instr_create(shader);
nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(def));
- nir_ssa_dest_init(&phi->instr, &phi->dest,
- def->num_components, def->bit_size, NULL);
+ nir_ssa_dest_init(&phi->instr, &phi->dest, def->num_components,
+ def->bit_size);
return phi;
}
nir_intrinsic_instr *res = nir_intrinsic_instr_create(
b->shader, nir_intrinsic_vulkan_resource_index);
- nir_ssa_dest_init(&res->instr, &res->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&res->instr, &res->dest, 1, 32);
res->num_components = 1;
res->src[0] = nir_src_for_ssa(nir_imm_zero(b, 1, 32));
nir_intrinsic_set_desc_type(
return NULL;
}
nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, intrinsic);
- nir_ssa_dest_init(&load->instr, &load->dest, components, bit_size, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, components, bit_size);
load->num_components = components;
if (res) {
load->src[0] = nir_src_for_ssa(res);
return;
}
nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, intrinsic);
- nir_ssa_dest_init(&store->instr, &store->dest, components, bit_size, NULL);
+ nir_ssa_dest_init(&store->instr, &store->dest, components, bit_size);
store->num_components = components;
if (res) {
store->src[0] = nir_src_for_ssa(value);
{
nir_variable *var = nir_variable_create(b->shader, nir_var_mem_shared, glsl_array_type(glsl_uint_type(), 4, 0), "var");
nir_deref_instr *deref = nir_build_deref_var(b, var);
- nir_ssa_dest_init(&deref->instr, &deref->dest, 1, 64, NULL);
+ nir_ssa_dest_init(&deref->instr, &deref->dest, 1, 64);
create_shared_load(nir_build_deref_array_imm(b, deref, 0x100000000), 0x1);
create_shared_load(nir_build_deref_array_imm(b, deref, 0x200000001), 0x2);
nir_loop *loop = nir_push_loop(b);
{
- nir_ssa_dest_init(&phi->instr, &phi->dest,
- ssa_0->num_components, ssa_0->bit_size,
- NULL);
+ nir_ssa_dest_init(&phi->instr, &phi->dest, ssa_0->num_components,
+ ssa_0->bit_size);
nir_phi_instr_add_src(phi, ssa_0->parent_instr->block,
nir_src_for_ssa(ssa_0));
nir_loop *loop = nir_push_loop(b);
{
- nir_ssa_dest_init(&phi->instr, &phi->dest,
- ssa_0->num_components, ssa_0->bit_size,
- NULL);
+ nir_ssa_dest_init(&phi->instr, &phi->dest, ssa_0->num_components,
+ ssa_0->bit_size);
nir_phi_instr_add_src(phi, ssa_0->parent_instr->block,
nir_src_for_ssa(ssa_0));
nir_block *head_block = nir_loop_first_block(loop);
nir_phi_instr *phi = nir_phi_instr_create(bld->shader);
- nir_ssa_dest_init(&phi->instr, &phi->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&phi->instr, &phi->dest, 1, 32);
nir_phi_instr_add_src(phi, top_block, nir_src_for_ssa(init));
nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(def));
- nir_ssa_dest_init(&phi->instr, &phi->dest,
- def->num_components, def->bit_size, NULL);
+ nir_ssa_dest_init(&phi->instr, &phi->dest, def->num_components,
+ def->bit_size);
return phi;
}
instr->src[1].src = nir_src_for_ssa(src1);
instr->src[1].swizzle[0] = 1;
- nir_ssa_dest_init(&instr->instr, &instr->dest.dest, 1, 32, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest.dest, 1, 32);
instr->dest.write_mask = 1;
nir_builder_instr_insert(b, &instr->instr);
nir_phi_instr_add_src(phi, then_block, nir_src_for_ssa(one));
nir_ssa_dest_init(&phi->instr, &phi->dest,
- one->num_components, one->bit_size, NULL);
+ one->num_components, one->bit_size);
nir_builder_instr_insert(&bld, &phi->instr);
nir_loop *loop = nir_push_loop(&bld);
{
nir_ssa_dest_init(&phi->instr, &phi->dest,
- x->num_components, x->bit_size, NULL);
+ x->num_components, x->bit_size);
nir_phi_instr_add_src(phi, x->parent_instr->block, nir_src_for_ssa(x));
nir_loop *loop = nir_push_loop(&bld);
nir_ssa_dest_init(&phi->instr, &phi->dest,
- v->num_components, v->bit_size,
- NULL);
+ v->num_components, v->bit_size);
nir_phi_instr_add_src(phi, v->parent_instr->block,
nir_src_for_ssa(v));
nir_loop *loop = nir_push_loop(&bld);
nir_ssa_dest_init(&phi->instr, &phi->dest,
- v->num_components, v->bit_size,
- NULL);
+ v->num_components, v->bit_size);
nir_phi_instr_add_src(phi, v->parent_instr->block,
nir_src_for_ssa(v));
nir_loop *loop = nir_push_loop(&bld);
nir_ssa_dest_init(&phi->instr, &phi->dest,
- v->num_components, v->bit_size,
- NULL);
+ v->num_components, v->bit_size);
nir_phi_instr_add_src(phi, v->parent_instr->block,
nir_src_for_ssa(v));
instr->dest_type = dest_type;
nir_ssa_dest_init(&instr->instr, &instr->dest,
- nir_tex_instr_dest_size(instr), 32, NULL);
+ nir_tex_instr_dest_size(instr), 32);
vtn_assert(glsl_get_vector_elements(ret_type->type) ==
nir_tex_instr_result_size(instr));
bit_size = MIN2(bit_size, 32);
nir_ssa_dest_init(&intrin->instr, &intrin->dest,
- nir_intrinsic_dest_components(intrin),
- bit_size, NULL);
+ nir_intrinsic_dest_components(intrin), bit_size);
nir_builder_instr_insert(&b->nb, &intrin->instr);
if (opcode == SpvOpAtomicFlagTestAndSet) {
/* map atomic flag to a 32-bit atomic integer. */
- nir_ssa_dest_init(&atomic->instr, &atomic->dest,
- 1, 32, NULL);
+ nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, 32);
} else {
nir_ssa_dest_init(&atomic->instr, &atomic->dest,
glsl_get_vector_elements(type->type),
- glsl_get_bit_size(type->type), NULL);
+ glsl_get_bit_size(type->type));
vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa);
}
{
nir_op op = nir_op_vec(num_components);
nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
- nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
- bit_size, NULL);
+ nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, bit_size);
vec->dest.write_mask = (1 << num_components) - 1;
return vec;
nir_intrinsic_report_ray_intersection);
intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
intrin->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
- nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1);
nir_builder_instr_insert(&b->nb, &intrin->instr);
vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
break;
intrin->num_components = glsl_get_vector_elements(deref->type);
nir_ssa_dest_init(&intrin->instr, &intrin->dest,
glsl_get_vector_elements(deref->type),
- glsl_get_bit_size(deref->type), NULL);
+ glsl_get_bit_size(deref->type));
nir_builder_instr_insert(&b->nb, &intrin->instr);
intrin->num_components = glsl_get_vector_elements(deref->type);
nir_ssa_dest_init(&intrin->instr, &intrin->dest,
glsl_get_vector_elements(deref->type),
- glsl_get_bit_size(deref->type), NULL);
+ glsl_get_bit_size(deref->type));
nir_builder_instr_insert(&b->nb, &intrin->instr);
nir_intrinsic_instr *ballot =
nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_ballot);
ballot->src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[3 + has_scope]));
- nir_ssa_dest_init(&ballot->instr, &ballot->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&ballot->instr, &ballot->dest, 4, 32);
ballot->num_components = 4;
nir_builder_instr_insert(&b->nb, &ballot->instr);
vtn_push_nir_ssa(b, w[2], &ballot->dest.ssa);
nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
nir_ssa_dest_init(&instr->instr, &instr->dest,
nir_address_format_num_components(addr_format),
- nir_address_format_bit_size(addr_format), NULL);
+ nir_address_format_bit_size(addr_format));
instr->num_components = instr->dest.ssa.num_components;
nir_builder_instr_insert(&b->nb, &instr->instr);
nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
nir_ssa_dest_init(&instr->instr, &instr->dest,
nir_address_format_num_components(addr_format),
- nir_address_format_bit_size(addr_format), NULL);
+ nir_address_format_bit_size(addr_format));
instr->num_components = instr->dest.ssa.num_components;
nir_builder_instr_insert(&b->nb, &instr->instr);
nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
nir_address_format_num_components(addr_format),
- nir_address_format_bit_size(addr_format), NULL);
+ nir_address_format_bit_size(addr_format));
desc_load->num_components = desc_load->dest.ssa.num_components;
nir_builder_instr_insert(&b->nb, &desc_load->instr);
load->num_components = 2;
load->src[offset_src_idx] = nir_src_for_ssa(off);
- nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, 2, 32);
nir_builder_instr_insert(b, &load->instr);
components[i] = nir_pack_64_2x32(b, &load->dest.ssa);
assert(intrinsic->dest.is_ssa);
nir_ssa_def *dest = &intrinsic->dest.ssa;
nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
- dest->num_components, dest->bit_size, NULL);
+ dest->num_components, dest->bit_size);
new_dest = &new_intrinsic->dest.ssa;
}
.num_slots = 1,
};
nir_intrinsic_set_io_semantics(load_input, semantics);
- nir_ssa_dest_init(&load_input->instr, &load_input->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&load_input->instr, &load_input->dest, 1, 32);
nir_builder_instr_insert(b, &load_input->instr);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, &load_input->dest.ssa);
return true;
new_intr->num_components = intr->num_components;
if (nir_intrinsic_infos[op].has_dest)
- nir_ssa_dest_init(&new_intr->instr, &new_intr->dest, intr->num_components,
- intr->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&new_intr->instr, &new_intr->dest,
+ intr->num_components, intr->dest.ssa.bit_size);
nir_builder_instr_insert(b, &new_intr->instr);
load->num_components = c;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, nir_intrinsic_align(intr), 0);
- nir_ssa_dest_init(&load->instr, &load->dest, c, bit_size, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, c, bit_size);
nir_builder_instr_insert(b, &load->instr);
addr = nir_iadd(b,
tex->src[0].src = nir_src_for_ssa(nir_load_var(b, in_coords));
tex->coord_components = coord_components;
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
nir_store_var(b, out_color, &tex->dest.ssa, 0xf);
tex->src[1].src_type = nir_tex_src_ms_index;
tex->src[1].src = nir_src_for_ssa(nir_load_sample_id(b));
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
nir_store_var(b, out_color, &tex->dest.ssa, 0xf);
if (info->has_dest) {
nir_ssa_dest_init(©->instr, ©->dest,
intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size,
- NULL);
+ intrin->dest.ssa.bit_size);
results[i] = ©->dest.ssa;
}
tex->sampler_index = state->stip_tex->data.binding;
tex->src[0].src_type = nir_tex_src_coord;
tex->src[0].src = nir_src_for_ssa(texcoord);
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
}
load->src[srcn++] = nir_src_for_ssa(offset);
- nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, 4, 32);
nir_builder_instr_insert(b, &load->instr);
src = nir_src_for_ssa(&load->dest.ssa);
assert(src_number == instr->num_srcs);
nir_ssa_dest_init(&instr->instr, &instr->dest,
- nir_tex_instr_dest_size(instr),
- 32, NULL);
+ nir_tex_instr_dest_size(instr), 32);
nir_builder_instr_insert(b, &instr->instr);
/* Resolve the writemask on the texture op. */
txs->src[1].src = nir_src_for_ssa(ttn_channel(b, src[0], X));
txs->src[1].src_type = nir_tex_src_lod;
- nir_ssa_dest_init(&txs->instr, &txs->dest,
- nir_tex_instr_dest_size(txs), 32, NULL);
+ nir_ssa_dest_init(&txs->instr, &txs->dest, nir_tex_instr_dest_size(txs),
+ 32);
nir_builder_instr_insert(b, &txs->instr);
- nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, 32);
nir_builder_instr_insert(b, &qlv->instr);
ttn_move_dest_masked(b, dest, &txs->dest.ssa, TGSI_WRITEMASK_XYZ);
if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_LOAD) {
nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
- 32, NULL);
+ 32);
nir_builder_instr_insert(b, &instr->instr);
ttn_move_dest(b, dest, &instr->dest.ssa);
} else {
nir_intrinsic_set_range(load_ubo, ~0);
nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size,
- NULL);
+ intrin->dest.ssa.bit_size);
nir_builder_instr_insert(&b, &load_ubo->instr);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
nir_intrinsic_set_align(load, 4, 0);
nir_intrinsic_set_range_base(load, 0);
nir_intrinsic_set_range(load, ~0);
- nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, comps, 32);
nir_builder_instr_insert(&b, &load->instr);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
&load->dest.ssa);
txs->is_array = false;
txs->dest_type = nir_type_int;
- nir_ssa_dest_init(&txs->instr, &txs->dest, 2, 32, "tex");
+ nir_ssa_dest_init(&txs->instr, &txs->dest, 2, 32);
nir_builder_instr_insert(&b, &txs->instr);
pos_src = nir_vec4(&b,
tex->is_array = false;
tex->coord_components = 2;
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
nir_store_var(&b, stencil_out, nir_channel(&b, &tex->dest.ssa, 1), 0x1);
}
mov->dest.write_mask = (1 << num_components) - 1;
- nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components, 32, NULL);
+ nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components, 32);
/* replace vec srcs with inserted mov */
for (unsigned i = 0, j = 0; i < 4; i++) {
vec->src[i].src = nir_src_for_ssa(src1->ssa);
vec->dest.write_mask = 0xf;
- nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32, NULL);
+ nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32);
nir_tex_instr_remove_src(tex, src1_idx);
nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(&vec->dest.dest.ssa));
mul->src[1].swizzle[0] = 1;
mul->dest.write_mask = 1;
- nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);
+ nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32);
ssa->num_components = 2;
else
dupl->src[0].reg = itr->src[0].reg;
- nir_ssa_dest_init(&dupl->instr, &dupl->dest,
- dupl->num_components, itr->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&dupl->instr, &dupl->dest, dupl->num_components,
+ itr->dest.ssa.bit_size);
dupl->instr.pass_flags = 1;
nir_builder_instr_insert(b, &dupl->instr);
else
dupl->src[0].reg = itr->src[0].reg;
- nir_ssa_dest_init(&dupl->instr, &dupl->dest,
- dupl->num_components, itr->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&dupl->instr, &dupl->dest, dupl->num_components,
+ itr->dest.ssa.bit_size);
dupl->instr.pass_flags = 1;
nir_builder_instr_insert(b, &dupl->instr);
for (unsigned i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest,
- 1, intr->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+ intr->dest.ssa.bit_size);
chan_intr->num_components = 1;
nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr) * 4 + i);
b->shader,
intrin->intrinsic);
nir_ssa_dest_init(&new_intrin->instr, &new_intrin->dest,
- nir_dest_num_components(alu->dest.dest),
- ssa->bit_size,
- NULL);
+ nir_dest_num_components(alu->dest.dest), ssa->bit_size);
new_intrin->num_components = nir_dest_num_components(alu->dest.dest);
nir_intrinsic_set_base(new_intrin, nir_intrinsic_base(intrin));
nir_intrinsic_set_component(new_intrin, nir_intrinsic_component(intrin) + swizzle);
}
auto fetch_sample = nir_instr_as_tex(nir_instr_clone(b->shader, &tex->instr));
- nir_ssa_dest_init(&fetch_sample->instr, &fetch_sample->dest, 4, 32, "sample_index");
+ nir_ssa_dest_init(&fetch_sample->instr, &fetch_sample->dest, 4, 32);
int used_coord_mask = 0;
nir_ssa_def *backend1 = prep_src(new_coord, used_coord_mask);
nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_local_shared_r600);
load->num_components = nir_dest_num_components(op->dest);
load->src[0] = nir_src_for_ssa(addr);
- nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, 32, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
+ 32);
nir_ssa_def_rewrite_uses(&op->dest.ssa, &load->dest.ssa);
nir_builder_instr_insert(&b, &load->instr);
} else {
(void)_options;
auto old_ir = nir_instr_as_intrinsic(instr);
auto load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_input);
- nir_ssa_dest_init(&load->instr,
- &load->dest,
+ nir_ssa_dest_init(&load->instr, &load->dest,
old_ir->dest.ssa.num_components,
- old_ir->dest.ssa.bit_size,
- NULL);
+ old_ir->dest.ssa.bit_size);
nir_intrinsic_set_io_semantics(load, nir_intrinsic_io_semantics(old_ir));
nir_intrinsic_set_base(load, nir_intrinsic_base(old_ir));
auto phi_lo = nir_phi_instr_create(b->shader);
auto phi_hi = nir_phi_instr_create(b->shader);
nir_ssa_dest_init(
- &phi_lo->instr, &phi_lo->dest, phi->dest.ssa.num_components * 2, 32, "");
+ &phi_lo->instr, &phi_lo->dest, phi->dest.ssa.num_components * 2, 32);
nir_ssa_dest_init(
- &phi_hi->instr, &phi_hi->dest, phi->dest.ssa.num_components * 2, 32, "");
+ &phi_hi->instr, &phi_hi->dest, phi->dest.ssa.num_components * 2, 32);
nir_foreach_phi_src(s, phi)
{
auto lo = nir_unpack_32_2x16_split_x(b, nir_ssa_for_src(b, s->src, 1));
nir_intrinsic_set_range(load2, nir_intrinsic_range(intr));
load2->num_components = second_components;
- nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64, nullptr);
+ nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64);
nir_builder_instr_insert(b, &load2->instr);
intr->dest.ssa.num_components = intr->num_components = 2;
auto new_src0 = nir_src_for_ssa(nir_iadd_imm(b, intr->src[0].ssa, 1));
nir_instr_rewrite_src(&load2->instr, &load2->src[0], new_src0);
load2->num_components = second_components;
- nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64, nullptr);
+ nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64);
nir_intrinsic_set_dest_type(load2, nir_intrinsic_dest_type(intr));
nir_builder_instr_insert(b, &load2->instr);
load2->num_components = second_components;
- nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64, nullptr);
+ nir_ssa_dest_init(&load2->instr, &load2->dest, second_components, 64);
nir_builder_instr_insert(b, &load2->instr);
intr->dest.ssa.num_components = intr->num_components = 2;
k += s->num_components;
}
- nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_comp, 32, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_comp, 32);
instr->dest.write_mask = (1 << num_comp) - 1;
nir_builder_instr_insert(b, &instr->instr);
return &instr->dest.dest.ssa;
emit_load_param_base(nir_builder *b, nir_intrinsic_op op)
{
nir_intrinsic_instr *result = nir_intrinsic_instr_create(b->shader, op);
- nir_ssa_dest_init(&result->instr, &result->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&result->instr, &result->dest, 4, 32);
nir_builder_instr_insert(b, &result->instr);
return &result->dest.ssa;
}
{
auto patch_id =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_tcs_rel_patch_id_r600);
- nir_ssa_dest_init(&patch_id->instr, &patch_id->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&patch_id->instr, &patch_id->dest, 1, 32);
nir_builder_instr_insert(b, &patch_id->instr);
return &patch_id->dest.ssa;
}
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_local_shared_r600);
tf->num_components = ncomps;
tf->src[0] = nir_src_for_ssa(addr_outer);
- nir_ssa_dest_init(&tf->instr, &tf->dest, tf->num_components, 32, NULL);
+ nir_ssa_dest_init(&tf->instr, &tf->dest, tf->num_components, 32);
nir_builder_instr_insert(b, &tf->instr);
if (ncomps < 4) {
auto undef = nir_ssa_undef(b, 1, 32);
auto invocation_id =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_invocation_id);
- nir_ssa_dest_init(&invocation_id->instr, &invocation_id->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&invocation_id->instr, &invocation_id->dest, 1, 32);
nir_builder_instr_insert(b, &invocation_id->instr);
nir_push_if(b, nir_ieq_imm(b, &invocation_id->dest.ssa, 0));
tf_outer->num_components = outer_comps;
tf_outer->src[0] = nir_src_for_ssa(addr_outer);
nir_ssa_dest_init(
- &tf_outer->instr, &tf_outer->dest, tf_outer->num_components, 32, NULL);
+ &tf_outer->instr, &tf_outer->dest, tf_outer->num_components, 32);
nir_builder_instr_insert(b, &tf_outer->instr);
std::vector<nir_ssa_def *> tf_out;
auto tf_out_base =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_tcs_tess_factor_base_r600);
- nir_ssa_dest_init(&tf_out_base->instr, &tf_out_base->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&tf_out_base->instr, &tf_out_base->dest, 1, 32);
nir_builder_instr_insert(b, &tf_out_base->instr);
auto out_addr0 = nir_build_alu(b,
tf_inner->num_components = inner_comps;
tf_inner->src[0] = nir_src_for_ssa(addr1);
nir_ssa_dest_init(
- &tf_inner->instr, &tf_inner->dest, tf_inner->num_components, 32, NULL);
+ &tf_inner->instr, &tf_inner->dest, tf_inner->num_components, 32);
nir_builder_instr_insert(b, &tf_inner->instr);
tf_out.push_back(nir_vec2(b,
assert(intr->dest.is_ssa);
nir_intrinsic_instr *new_intr = nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_ssa_dest_init(
- &new_intr->instr, &new_intr->dest, num_comps, intr->dest.ssa.bit_size, NULL);
+ nir_ssa_dest_init(&new_intr->instr, &new_intr->dest, num_comps,
+ intr->dest.ssa.bit_size);
new_intr->num_components = num_comps;
nir_deref_instr *deref = nir_build_deref_var(b, var);
nir_intrinsic_instr_create(c->s, intr->intrinsic);
intr_comp->num_components = 1;
nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1,
- intr->dest.ssa.bit_size, NULL);
+ intr->dest.ssa.bit_size);
/* Convert the uniform offset to bytes. If it happens
* to be a constant, constant-folding will clean up
txf->src[0].src_type = nir_tex_src_coord;
txf->src[0].src = nir_src_for_ssa(nir_vec2(b, addr, nir_imm_int(b, 0)));
- nir_ssa_dest_init(&txf->instr, &txf->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&txf->instr, &txf->dest, 4, 32);
nir_builder_instr_insert(b, &txf->instr);
return &txf->dest.ssa;
load->src[0] = nir_src_for_ssa(nir_imm_int(b, ZINK_GFX_PUSHCONST_DRAW_MODE_IS_INDEXED));
nir_intrinsic_set_range(load, 4);
load->num_components = 1;
- nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, "draw_mode_is_indexed");
+ nir_ssa_dest_init(&load->instr, &load->dest, 1, 32);
nir_builder_instr_insert(b, &load->instr);
nir_ssa_def *composite = nir_build_alu(b, nir_op_bcsel,
load->src[0] = nir_src_for_ssa(nir_imm_int(b, ZINK_GFX_PUSHCONST_DRAW_ID));
nir_intrinsic_set_range(load, 4);
load->num_components = 1;
- nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, "draw_id");
+ nir_ssa_dest_init(&load->instr, &load->dest, 1, 32);
nir_builder_instr_insert(b, &load->instr);
nir_ssa_def_rewrite_uses(&instr->dest.ssa, &load->dest.ssa);
nir_src_copy(&levels->src[!!(offset_idx >= 0)].src, &txf->src[handle_idx].src, &levels->instr);
}
nir_ssa_dest_init(&levels->instr, &levels->dest,
- nir_tex_instr_dest_size(levels), 32, NULL);
+ nir_tex_instr_dest_size(levels), 32);
nir_builder_instr_insert(b, &levels->instr);
nir_if *lod_oob_if = nir_push_if(b, nir_ilt(b, lod, &levels->dest.ssa));
for (unsigned i = 0; i < num_components; i++) {
nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, offset);
nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(b->shader, op);
- nir_ssa_dest_init(&new_instr->instr, &new_instr->dest, 1, nir_dest_bit_size(intr->dest), "");
+ nir_ssa_dest_init(&new_instr->instr, &new_instr->dest, 1,
+ nir_dest_bit_size(intr->dest));
nir_intrinsic_set_atomic_op(new_instr, nir_intrinsic_atomic_op(intr));
new_instr->src[0] = nir_src_for_ssa(&deref_arr->dest.ssa);
/* deref ops have no offset src, so copy the srcs after it */
}
nir_ssa_dest_init(&array_tex->instr, &array_tex->dest,
- nir_tex_instr_dest_size(array_tex), nir_dest_bit_size(tex->dest), NULL);
+ nir_tex_instr_dest_size(array_tex),
+ nir_dest_bit_size(tex->dest));
nir_builder_instr_insert(b, &array_tex->instr);
return &array_tex->dest.ssa;
}
txl->src[s].src_type = nir_tex_src_lod;
b->cursor = nir_before_instr(&tex->instr);
- nir_ssa_dest_init(&txl->instr, &txl->dest, nir_dest_num_components(tex->dest),
- nir_dest_bit_size(tex->dest), NULL);
+ nir_ssa_dest_init(&txl->instr, &txl->dest,
+ nir_dest_num_components(tex->dest),
+ nir_dest_bit_size(tex->dest));
nir_builder_instr_insert(b, &txl->instr);
nir_ssa_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
return txl;
for (uint8_t i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr *chan_intr =
nir_intrinsic_instr_create(b->shader, intr->intrinsic);
- nir_ssa_dest_init(&chan_intr->instr,
- &chan_intr->dest,
- 1,
- intr->dest.ssa.bit_size,
- NULL);
+ nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest, 1,
+ intr->dest.ssa.bit_size);
chan_intr->num_components = 1;
nir_intrinsic_set_access(chan_intr, nir_intrinsic_access(intr));
tex->src[0].src = nir_src_for_ssa(pos);
tex->coord_components = 3;
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
return tex;
}
tex->texture_index = 0;
tex->sampler_index = 0;
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
return &tex->dest.ssa;
nir_intrinsic_instr_create(b.shader, nir_intrinsic_ballot);
ballot->src[0] = nir_src_for_ssa(cond);
ballot->num_components = 1;
- nir_ssa_dest_init(&ballot->instr, &ballot->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&ballot->instr, &ballot->dest, 1, 32);
nir_builder_instr_insert(&b, &ballot->instr);
nir_store_deref(&b, ret, &ballot->dest.ssa, ~0);
nir_intrinsic_set_range(load, nir->num_uniforms);
nir_ssa_dest_init(&load->instr, &load->dest,
intrin->dest.ssa.num_components,
- intrin->dest.ssa.bit_size, NULL);
+ intrin->dest.ssa.bit_size);
nir_builder_instr_insert(&b, &load->instr);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, &load->dest.ssa);
nir_intrinsic_set_base(load, kernel_sysvals_start +
offsetof(struct brw_kernel_sysvals, num_work_groups));
nir_intrinsic_set_range(load, 3 * 4);
- nir_ssa_dest_init(&load->instr, &load->dest, 3, 32, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, 3, 32);
nir_builder_instr_insert(&b, &load->instr);
/* We may need to do a bit-size cast here */
}
load->num_components = 1;
- nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, 1, 32);
nir_builder_instr_insert(&b, &load->instr);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
default:
unreachable("Invalid param offset");
}
- nir_ssa_dest_init(&load->instr, &load->dest,
- load->num_components, 32, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, 32);
nir_builder_instr_insert(b, &load->instr);
return &load->dest.ssa;
assert(add->dest.dest.is_ssa);
nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
- add->dest.dest.ssa.num_components,
- bit_size, NULL);
+ add->dest.dest.ssa.num_components, bit_size);
nir_ssa_def_rewrite_uses(&add->dest.dest.ssa, &ffma->dest.dest.ssa);
nir_builder_instr_insert(b, &ffma->instr);
nir_alu_src_copy(&imul_32x16->src[1], &imul->src[small_val], imul_32x16);
nir_ssa_dest_init(&imul_32x16->instr, &imul_32x16->dest.dest,
- imul->dest.dest.ssa.num_components,
- 32, NULL);
+ imul->dest.dest.ssa.num_components, 32);
nir_ssa_def_rewrite_uses(&imul->dest.dest.ssa,
&imul_32x16->dest.dest.ssa);
tex->src[0].src_type = nir_tex_src_texture_deref;
tex->src[0].src = nir_src_for_ssa(&texture->dest.ssa);
- nir_ssa_dest_init(&tex->instr, &tex->dest,
- nir_tex_instr_dest_size(tex), 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex),
+ 32);
nir_builder_instr_insert(b, &tex->instr);
state->image_size = nir_i2f32(b, &tex->dest.ssa);
nir_ssa_dest_init(&tex->instr, &tex->dest,
old_tex->dest.ssa.num_components,
- nir_dest_bit_size(old_tex->dest), NULL);
+ nir_dest_bit_size(old_tex->dest));
nir_builder_instr_insert(b, &tex->instr);
return &tex->dest.ssa;
} else {
assert(swizzle != SWIZZLE_NIL);
nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_mov);
- nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, 32, NULL);
+ nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, 32);
mov->dest.write_mask = 0x1;
mov->src[0] = src;
mov->src[0].swizzle[0] = swizzle;
assert(src_number == num_srcs);
- nir_ssa_dest_init(&instr->instr, &instr->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 4, 32);
nir_builder_instr_insert(b, &instr->instr);
/* Resolve the writemask on the texture op. */
nir_src_for_ssa(nir_channels(t->b, coord,
(1 << tex->coord_components) - 1));
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(t->b, &tex->instr);
t->temps[r] = &tex->dest.ssa;
nir_src_for_ssa(nir_channels(b, nir_load_var(b, texcoord),
(1 << tex->coord_components) - 1));
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
return nir_channel(b, &tex->dest.ssa, 0);
}
tex->src[1].src = nir_src_for_ssa(&tex_deref->dest.ssa);
tex->src[2].src_type = nir_tex_src_coord;
tex->src[2].src = nir_src_for_ssa(texcoord);
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
nir_ssa_def *result = &tex->dest.ssa;
nir_deref_instr *sampler_deref = nir_build_deref_var(&b, sampler);
txf->src[2].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
- nir_ssa_dest_init(&txf->instr, &txf->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&txf->instr, &txf->dest, 4, 32);
nir_builder_instr_insert(&b, &txf->instr);
/* pass the grid offset as the coord to get the zero-indexed buffer offset */
load->num_components = 1;
load->src[0] = nir_src_for_ssa(nir_iadd(b, index, nir_imm_int(b, i)));
- nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&load->instr, &load->dest, 1, 32);
nir_builder_instr_insert(b, &load->instr);
comps[i] = &load->dest.ssa;
}
atomic->src[2] = nir_src_for_ssa(intr->src[2].ssa);
}
atomic->num_components = 0;
- nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, 32, NULL);
+ nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, 32);
nir_intrinsic_set_atomic_op(atomic, nir_intrinsic_atomic_op(intr));
nir_builder_instr_insert(b, &atomic->instr);
nir_phi_instr_add_src(lowered, src->pred, nir_src_for_ssa(cast));
}
- nir_ssa_dest_init(&lowered->instr, &lowered->dest,
- num_components, new_bit_size, NULL);
+ nir_ssa_dest_init(&lowered->instr, &lowered->dest, num_components,
+ new_bit_size);
b->cursor = nir_before_instr(&phi->instr);
nir_builder_instr_insert(b, &lowered->instr);
}
nir_ssa_dest_init(&array_tex->instr, &array_tex->dest,
- nir_tex_instr_dest_size(array_tex), 32, NULL);
+ nir_tex_instr_dest_size(array_tex), 32);
nir_builder_instr_insert(b, &array_tex->instr);
return &array_tex->dest.ssa;
}
}
}
- nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32, NULL);
+ nir_ssa_dest_init(&tql->instr, &tql->dest, 2, 32);
nir_builder_instr_insert(b, &tql->instr);
/* DirectX LOD only has a value in x channel */
}
}
- nir_ssa_dest_init(&txf->instr, &txf->dest,
- nir_tex_instr_dest_size(txf), 32, NULL);
+ nir_ssa_dest_init(&txf->instr, &txf->dest, nir_tex_instr_dest_size(txf),
+ 32);
nir_builder_instr_insert(b, &txf->instr);
return txf;
tex->src[3].src_type = nir_tex_src_texture_deref;
tex->src[3].src = nir_src_for_ssa(&tex_deref->dest.ssa);
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
res = res ? nir_build_alu2(&b, resolve_op, res, &tex->dest.ssa) : &tex->dest.ssa;
tex->src[2].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
}
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
res = &tex->dest.ssa;
}
tex->src[2].src_type = nir_tex_src_lod;
tex->src[2].src = nir_src_for_ssa(nir_imm_int(&b, 0));
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
res = res ? nir_fadd(&b, res, &tex->dest.ssa) : &tex->dest.ssa;
tex->coord_components = coord_comps;
}
- nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
res = &tex->dest.ssa;
}
nir_intrinsic_set_align(shared_load, compsz / 8, 0);
nir_intrinsic_set_base(shared_load, nir_intrinsic_base(intr));
nir_ssa_dest_init(&shared_load->instr, &shared_load->dest,
- shared_load->num_components, compsz, NULL);
+ shared_load->num_components, compsz);
nir_builder_instr_insert(b, &shared_load->instr);
load = &shared_load->dest.ssa;
}
nir_intrinsic_instr *l = nir_intrinsic_instr_create(
b->shader, nir_intrinsic_load_sampler_lod_parameters_pan);
l->num_components = 3;
- nir_ssa_dest_init(&l->instr, &l->dest, 3, 32, NULL);
+ nir_ssa_dest_init(&l->instr, &l->dest, 3, 32);
/* TODO: Indirect samplers, separate sampler objects XXX */
nir_src idx = nir_src_for_ssa(nir_imm_int(b, tex->texture_index));
}
nir_ssa_dest_init(&tex->instr, &tex->dest, 4,
- nir_alu_type_get_type_size(tex->dest_type), NULL);
+ nir_alu_type_get_type_size(tex->dest_type));
nir_builder_instr_insert(&b, &tex->instr);
nir_ssa_def *texel = &tex->dest.ssa;
tex->src[0].src = nir_src_for_ssa(imgcoords);
tex->coord_components = texdim + texisarray;
nir_ssa_dest_init(&tex->instr, &tex->dest, 4,
- nir_alu_type_get_type_size(tex->dest_type), NULL);
+ nir_alu_type_get_type_size(tex->dest_type));
nir_builder_instr_insert(&b, &tex->instr);
nir_ssa_def *texel = &tex->dest.ssa;