* align_offset. Subtracting align_offset should eliminate it.
*/
b->cursor = nir_before_instr(instr);
- nir_instr_rewrite_src_ssa(instr, src_offset,
- nir_iadd_imm(b, offset, -align_offset));
+ nir_src_rewrite(src_offset, nir_iadd_imm(b, offset, -align_offset));
b->cursor = nir_after_instr(instr);
result = nir_extract_bits(b, &result, 1, comp_offset * bit_size,
/* Round down by masking out the bits. */
b->cursor = nir_before_instr(instr);
- nir_instr_rewrite_src_ssa(instr, src_offset,
- nir_iand_imm(b, offset, ~0x3));
+ nir_src_rewrite(src_offset, nir_iand_imm(b, offset, ~0x3));
/* We need to shift bits in the loaded vector by this number. */
b->cursor = nir_after_instr(instr);
nir_def *x = nir_fsub(b, nir_fmul(b, deriv_sc, invma), nir_fmul(b, deriv_ma, sc));
nir_def *y = nir_fsub(b, nir_fmul(b, deriv_tc, invma), nir_fmul(b, deriv_ma, tc));
- nir_instr_rewrite_src_ssa(&tex->instr, i ? ddy : ddx, nir_vec2(b, x, y));
+ nir_src_rewrite(i ? ddy : ddx, nir_vec2(b, x, y));
}
sc = nir_fadd_imm(b, sc, 1.5);
if (offset_src >= 0) {
nir_src *offset = &tex->src[offset_src].src;
nir_def *zero = nir_imm_intN_t(b, 0, offset->ssa->bit_size);
- nir_instr_rewrite_src_ssa(&tex->instr, offset, nir_vec2(b, offset->ssa, zero));
+ nir_src_rewrite(offset, nir_vec2(b, offset->ssa, zero));
}
if (ddx || ddy) {
nir_def *def = nir_vec2(b, ddx->ssa, nir_imm_floatN_t(b, 0.0, ddx->ssa->bit_size));
- nir_instr_rewrite_src_ssa(&tex->instr, ddx, def);
+ nir_src_rewrite(ddx, def);
def = nir_vec2(b, ddy->ssa, nir_imm_floatN_t(b, 0.0, ddy->ssa->bit_size));
- nir_instr_rewrite_src_ssa(&tex->instr, ddy, def);
+ nir_src_rewrite(ddy, def);
}
} else if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
prepare_cube_coords(b, tex, coords, ddx, ddy, options);
nir_def *coords = tex->src[coord_idx].src.ssa;
if (lower_tex_coords(b, tex, &coords, options)) {
tex->coord_components = coords->num_components;
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[coord_idx].src, coords);
+ nir_src_rewrite(&tex->src[coord_idx].src, coords);
return true;
}
case nir_intrinsic_ssbo_atomic:
case nir_intrinsic_ssbo_atomic_swap:
rsrc = load_buffer_descriptor(b, state, intrin->src[0].ssa, nir_intrinsic_access(intrin));
- nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[0], rsrc);
+ nir_src_rewrite(&intrin->src[0], rsrc);
break;
case nir_intrinsic_store_ssbo:
rsrc = load_buffer_descriptor(b, state, intrin->src[1].ssa, nir_intrinsic_access(intrin));
- nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[1], rsrc);
+ nir_src_rewrite(&intrin->src[1], rsrc);
break;
case nir_intrinsic_get_ssbo_size:
visit_get_ssbo_size(b, state, intrin);
switch (tex->src[i].src_type) {
case nir_tex_src_texture_deref:
tex->src[i].src_type = nir_tex_src_texture_handle;
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[i].src, image);
+ nir_src_rewrite(&tex->src[i].src, image);
break;
case nir_tex_src_sampler_deref:
tex->src[i].src_type = nir_tex_src_sampler_handle;
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[i].src, sampler);
+ nir_src_rewrite(&tex->src[i].src, sampler);
break;
default:
break;
}
case nir_intrinsic_load_scratch: {
- nir_instr_rewrite_src_ssa(
- instr, &intr->src[0],
- nir_iadd_nuw(&b_shader, nir_load_var(&b_shader, vars->stack_ptr), intr->src[0].ssa));
+ nir_src_rewrite(&intr->src[0],
+ nir_iadd_nuw(&b_shader, nir_load_var(&b_shader, vars->stack_ptr), intr->src[0].ssa));
continue;
}
case nir_intrinsic_store_scratch: {
- nir_instr_rewrite_src_ssa(
- instr, &intr->src[1],
- nir_iadd_nuw(&b_shader, nir_load_var(&b_shader, vars->stack_ptr), intr->src[1].ssa));
+ nir_src_rewrite(&intr->src[1],
+ nir_iadd_nuw(&b_shader, nir_load_var(&b_shader, vars->stack_ptr), intr->src[1].ssa));
continue;
}
case nir_intrinsic_load_rt_arg_scratch_offset_amd: {
*/
case nir_intrinsic_load_scratch:
b->cursor = nir_before_instr(instr);
- nir_instr_rewrite_src_ssa(instr, &intrin->src[0], nir_iadd_nuw(b, scratch_offset, intrin->src[0].ssa));
+ nir_src_rewrite(&intrin->src[0], nir_iadd_nuw(b, scratch_offset, intrin->src[0].ssa));
break;
case nir_intrinsic_store_scratch:
b->cursor = nir_before_instr(instr);
- nir_instr_rewrite_src_ssa(instr, &intrin->src[1], nir_iadd_nuw(b, scratch_offset, intrin->src[1].ssa));
+ nir_src_rewrite(&intrin->src[1], nir_iadd_nuw(b, scratch_offset, intrin->src[1].ssa));
break;
case nir_intrinsic_load_rt_arg_scratch_offset_amd:
b->cursor = nir_after_instr(instr);
assert((nir_intrinsic_base(zs_emit) & base) == 0 &&
"each of depth/stencil may only be written once");
- nir_instr_rewrite_src_ssa(&zs_emit->instr, &zs_emit->src[src_idx], value);
+ nir_src_rewrite(&zs_emit->src[src_idx], value);
nir_intrinsic_set_base(zs_emit, nir_intrinsic_base(zs_emit) | base);
nir_instr_remove(instr);
/* Write it out from this store_zs */
nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) | BASE_Z);
- nir_instr_rewrite_src_ssa(instr, &intr->src[1], z);
+ nir_src_rewrite(&intr->src[1], z);
/* We'll set outputs_written after the pass in case there are multiple
* store_zs_agx instructions needing fixup.
return false;
b->cursor = nir_before_instr(instr);
- nir_instr_rewrite_src_ssa(instr, offset,
- nir_u2u16(b, nir_ssa_for_src(b, *offset, 1)));
+ nir_src_rewrite(offset, nir_u2u16(b, nir_ssa_for_src(b, *offset, 1)));
return true;
}
nir_def_rewrite_uses(&tex->def, phi);
nir_phi_instr *phi_instr = nir_instr_as_phi(phi->parent_instr);
nir_phi_src *else_src = nir_phi_get_src_from_block(phi_instr, else_block);
- nir_instr_rewrite_src_ssa(phi->parent_instr, &else_src->src, &tex->def);
+ nir_src_rewrite(&else_src->src, &tex->def);
return true;
}
nir_def *rgb1 = nir_vector_insert_imm(
&b, rgba, nir_imm_floatN_t(&b, 1.0, rgba->bit_size), 3);
- nir_instr_rewrite_src_ssa(instr, &intr->src[0], rgb1);
+ nir_src_rewrite(&intr->src[0], rgb1);
}
}
if (src_deref->deref_type != nir_deref_type_cast)
continue;
- nir_instr_rewrite_src_ssa(&alu->instr, &alu->src[i].src,
- src_deref->parent.ssa);
+ nir_src_rewrite(&alu->src[i].src, src_deref->parent.ssa);
progress = true;
}
nir_component_mask(num_components));
/* Write out the final color instead of the input */
- nir_instr_rewrite_src_ssa(instr, &store->src[0], blended);
+ nir_src_rewrite(&store->src[0], blended);
/* Sink to bottom */
nir_instr_remove(instr);
nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
nir_phi_src *phi_src = nir_phi_get_src_from_block(phi_as_phi,
instr->block);
- nir_instr_rewrite_src_ssa(phi->parent_instr, &phi_src->src,
- &intr->def);
+ nir_src_rewrite(&phi_src->src, &intr->def);
}
return true;
nir_def *sample_index_new = nir_ubfe(b, fmask, fmask_offset, fmask_width);
/* fix color buffer load */
- nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[2], sample_index_new);
+ nir_src_rewrite(&intrin->src[2], sample_index_new);
/* Mark it as using fmask to prevent lowering this intrinsic again. */
enum gl_access_qualifier access = nir_intrinsic_access(intrin);
/* Convert the 32-bit store into a 16-bit store. */
b.cursor = nir_before_instr(&intr->instr);
- nir_instr_rewrite_src_ssa(&intr->instr, &intr->src[0],
- convert(&b, intr->src[0].ssa));
+ nir_src_rewrite(&intr->src[0], convert(&b, intr->src[0].ssa));
nir_intrinsic_set_src_type(intr, (type & ~32) | 16);
} else {
if (!sem.medium_precision)
nir_def *conv =
convert(&b, nir_ssa_for_src(&b, tex->src[i].src,
tex->src[i].src.ssa->num_components));
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[i].src, conv);
+ nir_src_rewrite(&tex->src[i].src, conv);
changed = true;
}
}
nir_def *new_vec = nir_vec_scalars(b, new_comps, src->ssa->num_components);
- nir_instr_rewrite_src_ssa(instr, src, new_vec);
+ nir_src_rewrite(src, new_vec);
}
static bool
instr->src[offset_src].ssa);
/* Rewrite offset */
- nir_instr_rewrite_src_ssa(&instr->instr, &instr->src[offset_src], offset);
+ nir_src_rewrite(&instr->src[offset_src], offset);
}
/*
nir_builder b = nir_builder_at(nir_before_instr(instr));
nir_def *value = nir_channels(&b, intrin->src[0].ssa, read_mask);
- nir_instr_rewrite_src_ssa(instr, &intrin->src[0], value);
+ nir_src_rewrite(&intrin->src[0], value);
intrin->num_components = util_bitcount(read_mask);
nir_intrinsic_set_write_mask(intrin, (1u << intrin->num_components) - 1);
/* Update instruction. */
tex->op = nir_texop_fragment_fetch_amd;
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[ms_index].src, new_sample);
+ nir_src_rewrite(&tex->src[ms_index].src, new_sample);
}
static void
channels[1] = nir_channel(b, pntc, 1);
} else {
sem.location = VARYING_SLOT_PNTC;
- nir_instr_rewrite_src_ssa(instr, offset, nir_imm_int(b, 0));
+ nir_src_rewrite(offset, nir_imm_int(b, 0));
nir_intrinsic_set_io_semantics(intr, sem);
nir_def *raw = &intr->def;
nir_channel(b, orig_coord, 3), 3);
}
- nir_instr_rewrite_src_ssa(instr, &tex->src[idx].src, normalized);
+ nir_src_rewrite(&tex->src[idx].src, normalized);
return true;
}
src->swizzle[i] = copy->src[src->swizzle[i]].swizzle[0];
}
- nir_instr_rewrite_src_ssa(src->src.parent_instr, &src->src, def);
+ nir_src_rewrite(&src->src, def);
return true;
}
}
}
- nir_instr_rewrite_src_ssa(use->parent_instr, use, new_ssa);
+ nir_src_rewrite(use, new_ssa);
progress = true;
}
alu_src.swizzle[i] = src_reswizzle[i];
nir_def *mov = nir_mov_alu(b, alu_src, num_components);
- nir_instr_rewrite_src_ssa(&instr->instr, &phi_src->src, mov);
+ nir_src_rewrite(&phi_src->src, mov);
}
b->cursor = nir_before_instr(&instr->instr);
nir_builder b = nir_builder_at(nir_before_instr(&store->instr));
nir_def *copy = nir_mov(&b, store->src[0].ssa);
copy->divergent = store->src[0].ssa->divergent;
- nir_instr_rewrite_src_ssa(&store->instr, &store->src[0], copy);
+ nir_src_rewrite(&store->src[0], copy);
}
static void
if (nir_scalar_is_const(scalar_idx)) {
nir_def *bindless =
nir_bindless_resource_ir3(b, 32, descriptor_idx, .desc_set = nir_scalar_as_uint(scalar_idx));
- nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[buffer_src], bindless);
+ nir_src_rewrite(&intrin->src[buffer_src], bindless);
return;
}
nir_def_rewrite_uses_after(value, new_value, new_value->parent_instr);
nir_intrinsic_set_dest_type(intr, alu_type);
} else {
- nir_instr_rewrite_src_ssa(instr, &intr->src[3], new_value);
+ nir_src_rewrite(&intr->src[3], new_value);
nir_intrinsic_set_src_type(intr, alu_type);
}
nir_intrinsic_set_format(intr, emulation_format);
*/
src = nir_umod_imm(b, src, IR3_BINDLESS_DESC_COUNT);
nir_def *bindless = nir_bindless_resource_ir3(b, 32, src, set);
- nir_instr_rewrite_src_ssa(&intr->instr, &intr->src[buffer_src], bindless);
+ nir_src_rewrite(&intr->src[buffer_src], bindless);
return true;
}
nir_umin(b,
ir->src[0].ssa,
nir_imm_int(b, b->shader->info.num_images - 1));
- nir_instr_rewrite_src_ssa(instr, &ir->src[0], new_index);
+ nir_src_rewrite(&ir->src[0], new_index);
enum glsl_sampler_dim dim = nir_intrinsic_image_dim(ir);
nir_def *clamped = nir_fsat(b, compare);
compare = nir_bcsel(b, upgraded, clamped, compare);
- nir_instr_rewrite_src_ssa(instr, &tex->src[comp_index].src, compare);
+ nir_src_rewrite(&tex->src[comp_index].src, compare);
return true;
}
assert(!(nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM));
nir_def *desc = load_ubo_desc(b, intrin->src[0].ssa, s);
- nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[0], desc);
+ nir_src_rewrite(&intrin->src[0], desc);
break;
}
case nir_intrinsic_load_ssbo:
assert(!(nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM));
nir_def *desc = load_ssbo_desc(b, &intrin->src[0], s);
- nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[0], desc);
+ nir_src_rewrite(&intrin->src[0], desc);
break;
}
case nir_intrinsic_store_ssbo: {
assert(!(nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM));
nir_def *desc = load_ssbo_desc(b, &intrin->src[1], s);
- nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[1], desc);
+ nir_src_rewrite(&intrin->src[1], desc);
break;
}
case nir_intrinsic_get_ssbo_size: {
tex->src[i].src_type = nir_tex_src_texture_handle;
FALLTHROUGH;
case nir_tex_src_texture_handle:
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[i].src, image);
+ nir_src_rewrite(&tex->src[i].src, image);
break;
case nir_tex_src_sampler_deref:
tex->src[i].src_type = nir_tex_src_sampler_handle;
FALLTHROUGH;
case nir_tex_src_sampler_handle:
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[i].src, sampler);
+ nir_src_rewrite(&tex->src[i].src, sampler);
break;
default:
break;
nir_def *color = intrin->src[0].ssa;
nir_def *clamp = nir_load_clamp_vertex_color_amd(b);
nir_def *new_color = nir_bcsel(b, clamp, nir_fsat(b, color), color);
- nir_instr_rewrite_src_ssa(instr, &intrin->src[0], new_color);
+ nir_src_rewrite(&intrin->src[0], new_color);
return true;
}
nir_deref_instr *deref = nir_build_deref_var(b, var);
if (glsl_type_is_array(var->type))
deref = nir_build_deref_array(b, deref, nir_u2uN(b, tex->src[idx].src.ssa, 32));
- nir_instr_rewrite_src_ssa(in, &tex->src[idx].src, &deref->def);
+ nir_src_rewrite(&tex->src[idx].src, &deref->def);
/* bindless sampling uses the variable type directly, which means the tex instr has to exactly
* match up with it in contrast to normal sampler ops where things are a bit more flexible;
unsigned coord_components = nir_src_num_components(tex->src[c].src);
if (coord_components < needed_components) {
nir_def *def = nir_pad_vector(b, tex->src[c].src.ssa, needed_components);
- nir_instr_rewrite_src_ssa(in, &tex->src[c].src, def);
+ nir_src_rewrite(&tex->src[c].src, def);
tex->coord_components = needed_components;
}
return true;
nir_deref_instr *deref = nir_build_deref_var(b, var);
if (glsl_type_is_array(var->type))
deref = nir_build_deref_array(b, deref, nir_u2uN(b, instr->src[0].ssa, 32));
- nir_instr_rewrite_src_ssa(in, &instr->src[0], &deref->def);
+ nir_src_rewrite(&instr->src[0], &deref->def);
return true;
}
def = nir_vec2(b, tex->src[c].src.ssa, zero);
else
def = nir_vec3(b, nir_channel(b, tex->src[c].src.ssa, 0), zero, nir_channel(b, tex->src[c].src.ssa, 1));
- nir_instr_rewrite_src_ssa(instr, &tex->src[c].src, def);
+ nir_src_rewrite(&tex->src[c].src, def);
}
b->cursor = nir_after_instr(instr);
unsigned needed_components = nir_tex_instr_dest_size(tex);
}
nir_def *resource = vulkan_resource_from_deref(b, deref, layout);
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[i].src, resource);
+ nir_src_rewrite(&tex->src[i].src, resource);
}
}
b->cursor = nir_before_instr(&path.path[i]->instr);
- nir_instr_rewrite_src_ssa(&path.path[i]->instr,
- &path.path[i]->arr.index,
- nir_umin(b,
- path.path[i]->arr.index.ssa,
- nir_iadd_imm(b, nir_load_patch_vertices_in(b), -1)));
+ nir_src_rewrite(&path.path[i]->arr.index,
+ nir_umin(b, path.path[i]->arr.index.ssa, nir_iadd_imm(b, nir_load_patch_vertices_in(b), -1)));
progress = true;
break;
binding_offset = nir_ishl_imm(b, binding_offset, 6);
}
- nir_instr_rewrite_src_ssa(instr, &intrin->src[1],
- nir_iadd(b, set_offset, binding_offset));
+ nir_src_rewrite(&intrin->src[1],
+ nir_iadd(b, set_offset, binding_offset));
}
/* Now unused values: set offset, array index */
- nir_instr_rewrite_src_ssa(instr, &intrin->src[0], nir_imm_int(b, 0xdeaddeed));
- nir_instr_rewrite_src_ssa(instr, &intrin->src[2], nir_imm_int(b, 0xdeaddeed));
+ nir_src_rewrite(&intrin->src[0], nir_imm_int(b, 0xdeaddeed));
+ nir_src_rewrite(&intrin->src[2], nir_imm_int(b, 0xdeaddeed));
return true;
}
*/
if (coord_components != tex->coord_components) {
nir_def *coords = nir_ssa_for_src(b, tex->src[coords_idx].src, tex->coord_components);
- nir_instr_rewrite_src_ssa(instr, &tex->src[coords_idx].src,
- nir_resize_vector(b, coords, coord_components));
+ nir_src_rewrite(&tex->src[coords_idx].src,
+ nir_resize_vector(b, coords, coord_components));
tex->coord_components = coord_components;
}
/* retain the non-fog-blended alpha value for color */
color = nir_vector_insert_imm(b, fog, nir_channel(b, color, 3), 3);
- nir_instr_rewrite_src_ssa(instr, &intr->src[0], nir_resize_vector(b, color, intr->num_components));
+ nir_src_rewrite(&intr->src[0],
+ nir_resize_vector(b, color, intr->num_components));
return true;
}
components[c] = nir_pack_double_2x32_dxil(b, unpacked_double);
alu->src[i].swizzle[c] = c;
}
- nir_instr_rewrite_src_ssa(instr, &alu->src[i].src, nir_vec(b, components, num_components));
+ nir_src_rewrite(&alu->src[i].src,
+ nir_vec(b, components, num_components));
progress = true;
}
}
}
nir_deref_path_finish(&path);
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[sampler_idx].src, &new_tail->def);
+ nir_src_rewrite(&tex->src[sampler_idx].src, &new_tail->def);
return true;
}
}
nir_deref_path_finish(&path);
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[texture_idx].src, &new_tail->def);
+ nir_src_rewrite(&tex->src[texture_idx].src, &new_tail->def);
return true;
}
channels[i] = nir_imm_intN_t(b, 0, src->bit_size);
intr->num_components = 4;
- nir_instr_rewrite_src_ssa(instr, &intr->src[0], nir_vec(b, channels, 4));
+ nir_src_rewrite(&intr->src[0], nir_vec(b, channels, 4));
nir_intrinsic_set_component(intr, 0);
nir_intrinsic_set_write_mask(intr, 0xf);
return true;
nir_def *layer = intr->src[1].ssa;
nir_def *new_layer = nir_iadd(b, layer,
nir_load_view_index(b));
- nir_instr_rewrite_src_ssa(instr, &intr->src[1], new_layer);
+ nir_src_rewrite(&intr->src[1], new_layer);
return true;
}
if (!handle)
return false;
- nir_instr_rewrite_src_ssa(&tex->instr, &tex->src[index].src, handle);
+ nir_src_rewrite(&tex->src[index].src, handle);
tex->src[index].src_type = new;
return true;
}
/* TODO: The nv50 back-end assumes it gets handles both places, even for
* texelFetch.
*/
- nir_instr_rewrite_src_ssa(&tex->instr,
- &tex->src[texture_src_idx].src,
- combined_handle);
+ nir_src_rewrite(&tex->src[texture_src_idx].src, combined_handle);
tex->src[texture_src_idx].src_type = nir_tex_src_texture_handle;
if (sampler_src_idx < 0) {
nir_tex_instr_add_src(tex, nir_tex_src_sampler_handle, combined_handle);
} else {
- nir_instr_rewrite_src_ssa(&tex->instr,
- &tex->src[sampler_src_idx].src,
- combined_handle);
+ nir_src_rewrite(&tex->src[sampler_src_idx].src, combined_handle);
tex->src[sampler_src_idx].src_type = nir_tex_src_sampler_handle;
}
bifrost_nir_valid_channel(b, in, 3, first, mask));
/* Rewrite to use our replicated version */
- nir_instr_rewrite_src_ssa(instr, &intr->src[0], replicated);
+ nir_src_rewrite(&intr->src[0], replicated);
nir_intrinsic_set_component(intr, 0);
nir_intrinsic_set_write_mask(intr, 0xF);
intr->num_components = 4;
nir_def *orig = nir_load_sample_mask(b);
- nir_instr_rewrite_src_ssa(
- instr, &intr->src[0],
+ nir_src_rewrite(
+ &intr->src[0],
nir_b32csel(b, nir_load_multisampled_pan(b),
nir_iand(b, orig, nir_ssa_for_src(b, intr->src[0], 1)),
orig));
}
intr->num_components = util_last_bit(mask);
- nir_instr_rewrite_src_ssa(instr, &intr->src[0],
- nir_vec(b, channels, intr->num_components));
+ nir_src_rewrite(&intr->src[0], nir_vec(b, channels, intr->num_components));
nir_intrinsic_set_component(intr, 0);
nir_intrinsic_set_write_mask(intr, mask);
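
The change above is mechanical throughout: every nir_instr_rewrite_src_ssa(instr, src, def) becomes nir_src_rewrite(src, def). The explicit instruction argument was redundant because a nir_src already records its parent, so the helper can maintain the def's use list on its own; that is also why phi sources (the nir_phi_get_src_from_block() hunks above) need no special casing. A minimal sketch of a caller using the new helper — the pass, bias_ssbo_offset, and the constant 16 are made up for illustration, not taken from this diff:

#include "nir.h"
#include "nir_builder.h"

/* Illustrative lowering: add a constant bias to the offset (src[2]) of
 * every store_ssbo, then point the src at the new value. */
static bool
bias_ssbo_offset(nir_builder *b, nir_intrinsic_instr *intrin, void *data)
{
   if (intrin->intrinsic != nir_intrinsic_store_ssbo)
      return false;

   b->cursor = nir_before_instr(&intrin->instr);
   nir_def *biased = nir_iadd_imm(b, intrin->src[2].ssa, 16);

   /* Before this series this would have been
    * nir_instr_rewrite_src_ssa(&intrin->instr, &intrin->src[2], biased); */
   nir_src_rewrite(&intrin->src[2], biased);
   return true;
}

Such a callback would be run with, e.g., nir_shader_intrinsics_pass(shader, bias_ssbo_offset, nir_metadata_block_index | nir_metadata_dominance, NULL).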