From ab0d8789326177ae38c8c6e75f5051a7da1117d5 Mon Sep 17 00:00:00 2001
From: Alyssa Rosenzweig
Date: Tue, 1 Aug 2023 11:36:45 -0400
Subject: [PATCH] treewide: Remove more is_ssa asserts

Stuff Coccinelle missed.

   sed -i -e '/assert(.*\.is_ssa)/d' $(git grep -l is_ssa)
   sed -i -e '/ASSERT.*\.is_ssa)/d' $(git grep -l is_ssa)

+ a manual fixup to restore the assert for parallel copy lowering.

Signed-off-by: Alyssa Rosenzweig
Reviewed-by: Faith Ekstrand
Part-of:
---
 src/amd/llvm/ac_nir_to_llvm.c                      |  5 ----
 src/asahi/compiler/agx_compile.c                   |  1 -
 src/broadcom/compiler/nir_to_vir.c                 |  3 ---
 src/broadcom/compiler/v3d40_tex.c                  |  1 -
 src/broadcom/vulkan/v3dv_pipeline.c                |  2 --
 src/compiler/nir/nir.c                             |  6 -----
 src/compiler/nir/nir.h                             |  2 --
 src/compiler/nir/nir_builder.h                     |  4 ----
 src/compiler/nir/nir_deref.c                       | 11 ---------
 src/compiler/nir/nir_from_ssa.c                    | 14 ++---------
 src/compiler/nir/nir_inline_functions.c            |  1 -
 src/compiler/nir/nir_legacy.c                      |  1 -
 src/compiler/nir/nir_lower_bit_size.c              |  1 -
 src/compiler/nir/nir_lower_convert_alu_types.c     |  1 -
 src/compiler/nir/nir_lower_locals_to_regs.c        |  2 --
 src/compiler/nir/nir_lower_mem_access_bit_sizes.c  |  1 -
 src/compiler/nir/nir_lower_scratch.c               |  1 -
 src/compiler/nir/nir_lower_system_values.c         |  4 ----
 src/compiler/nir/nir_lower_var_copies.c            |  1 -
 src/compiler/nir/nir_opt_find_array_copies.c       |  1 -
 src/compiler/nir/nir_opt_idiv_const.c              |  1 -
 src/compiler/nir/nir_schedule.c                    |  2 --
 src/compiler/nir/nir_validate.c                    |  8 -------
 src/compiler/nir/tests/vars_tests.cpp              | 28 ----------------------
 src/freedreno/ir3/ir3_compiler_nir.c               |  1 -
 src/gallium/drivers/d3d12/d3d12_nir_passes.c       |  1 -
 src/gallium/drivers/r600/sfn/sfn_nir.cpp           |  2 --
 .../r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp    |  1 -
 src/intel/compiler/brw_fs.cpp                      |  1 -
 src/intel/compiler/brw_fs_nir.cpp                  |  1 -
 .../compiler/brw_nir_lower_alpha_to_coverage.c     |  2 --
 src/intel/compiler/brw_nir_opt_peephole_ffma.c     |  1 -
 src/intel/vulkan/anv_nir_apply_pipeline_layout.c   |  1 -
 .../vulkan_hasvk/anv_nir_apply_pipeline_layout.c   |  1 -
 src/microsoft/spirv_to_dxil/dxil_spirv_nir.c       |  1 -
 .../vulkan/panvk_vX_nir_lower_descriptors.c        |  1 -
 36 files changed, 2 insertions(+), 114 deletions(-)

diff --git a/src/amd/llvm/ac_nir_to_llvm.c b/src/amd/llvm/ac_nir_to_llvm.c
index 46cfbfe..a54be16 100644
--- a/src/amd/llvm/ac_nir_to_llvm.c
+++ b/src/amd/llvm/ac_nir_to_llvm.c
@@ -1281,7 +1281,6 @@ static bool visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
    }
 
    if (result) {
-      assert(instr->dest.dest.is_ssa);
       result = ac_to_integer_or_pointer(&ctx->ac, result);
       ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
    }
@@ -2338,7 +2337,6 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
       vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                        ctx->ac.i32_0, "");
 
-      assert(instr->dest.is_ssa);
       bool can_speculate = access & ACCESS_CAN_REORDER;
       res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0,
                                         num_channels, args.access, can_speculate,
@@ -2372,7 +2370,6 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
       args.dmask = 15;
       args.attributes = access & ACCESS_CAN_REORDER ? AC_ATTR_INVARIANT_LOAD : 0;
 
-      assert(instr->dest.is_ssa);
       args.d16 = instr->dest.ssa.bit_size == 16;
 
       res = ac_build_image_opcode(&ctx->ac, &args);
@@ -4072,7 +4069,6 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
       args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler,
                                             dword0, ctx->ac.i32_0, "");
    }
 
-   assert(instr->dest.is_ssa);
    args.d16 = instr->dest.ssa.bit_size == 16;
    args.tfe = instr->is_sparse;
@@ -4102,7 +4098,6 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
       result = ac_build_concat(&ctx->ac, result, code);
 
    if (result) {
-      assert(instr->dest.is_ssa);
       result = ac_to_integer(&ctx->ac, result);
 
       for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
diff --git a/src/asahi/compiler/agx_compile.c b/src/asahi/compiler/agx_compile.c
index 469e997..f04f705 100644
--- a/src/asahi/compiler/agx_compile.c
+++ b/src/asahi/compiler/agx_compile.c
@@ -2088,7 +2088,6 @@ agx_lower_front_face(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
    if (intr->intrinsic != nir_intrinsic_load_front_face)
       return false;
 
-   assert(intr->dest.is_ssa);
    nir_ssa_def *def = &intr->dest.ssa;
    assert(def->bit_size == 1);
 
diff --git a/src/broadcom/compiler/nir_to_vir.c b/src/broadcom/compiler/nir_to_vir.c
index 9c5ad2a..b50bbab 100644
--- a/src/broadcom/compiler/nir_to_vir.c
+++ b/src/broadcom/compiler/nir_to_vir.c
@@ -850,7 +850,6 @@ ntq_get_src(struct v3d_compile *c, nir_src src, int i)
 {
         struct hash_entry *entry;
 
-        assert(src.is_ssa);
         nir_intrinsic_instr *load = nir_load_reg_for_def(src.ssa);
         if (load == NULL) {
                 assert(i < src.ssa->num_components);
@@ -1228,7 +1227,6 @@ ntq_emit_comparison(struct v3d_compile *c,
 static struct nir_alu_instr *
 ntq_get_alu_parent(nir_src src)
 {
-        assert(src.is_ssa);
         if (src.ssa->parent_instr->type != nir_instr_type_alu)
                 return NULL;
         nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
@@ -1240,7 +1238,6 @@ ntq_get_alu_parent(nir_src src)
          * src.
          */
         for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
-                assert(instr->src[i].src.is_ssa);
                 if (nir_load_reg_for_def(instr->src[i].src.ssa))
                         return NULL;
         }
diff --git a/src/broadcom/compiler/v3d40_tex.c b/src/broadcom/compiler/v3d40_tex.c
index 8f8983b..874ba47 100644
--- a/src/broadcom/compiler/v3d40_tex.c
+++ b/src/broadcom/compiler/v3d40_tex.c
@@ -244,7 +244,6 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
         /* Limit the number of channels returned to both how many the NIR
          * instruction writes and how many the instruction could produce.
          */
-        assert(instr->dest.is_ssa);
         nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->dest.ssa);
         if (store == NULL) {
                 p0_unpacked.return_words_of_texture_data =
diff --git a/src/broadcom/vulkan/v3dv_pipeline.c b/src/broadcom/vulkan/v3dv_pipeline.c
index 71fcc3c..04eb7e0 100644
--- a/src/broadcom/vulkan/v3dv_pipeline.c
+++ b/src/broadcom/vulkan/v3dv_pipeline.c
@@ -605,7 +605,6 @@ lower_tex_src(nir_builder *b,
    /* We compute first the offsets */
    nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
    while (deref->deref_type != nir_deref_type_var) {
-      assert(deref->parent.is_ssa);
       nir_deref_instr *parent =
          nir_instr_as_deref(deref->parent.ssa->parent_instr);
 
@@ -743,7 +742,6 @@ lower_image_deref(nir_builder *b,
    unsigned base_index = 0;
 
    while (deref->deref_type != nir_deref_type_var) {
-      assert(deref->parent.is_ssa);
       nir_deref_instr *parent =
         nir_instr_as_deref(deref->parent.ssa->parent_instr);
 
diff --git a/src/compiler/nir/nir.c b/src/compiler/nir/nir.c
index 7166b30..d820151 100644
--- a/src/compiler/nir/nir.c
+++ b/src/compiler/nir/nir.c
@@ -1400,21 +1400,17 @@ nir_instr_ssa_def(nir_instr *instr)
 {
    switch (instr->type) {
    case nir_instr_type_alu:
-      assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
       return &nir_instr_as_alu(instr)->dest.dest.ssa;
 
    case nir_instr_type_deref:
-      assert(nir_instr_as_deref(instr)->dest.is_ssa);
       return &nir_instr_as_deref(instr)->dest.ssa;
 
    case nir_instr_type_tex:
-      assert(nir_instr_as_tex(instr)->dest.is_ssa);
       return &nir_instr_as_tex(instr)->dest.ssa;
 
    case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
-         assert(intrin->dest.is_ssa);
         return &intrin->dest.ssa;
      } else {
         return NULL;
@@ -1422,7 +1418,6 @@ nir_instr_ssa_def(nir_instr *instr)
    }
 
    case nir_instr_type_phi:
-      assert(nir_instr_as_phi(instr)->dest.is_ssa);
       return &nir_instr_as_phi(instr)->dest.ssa;
 
    case nir_instr_type_parallel_copy:
@@ -2914,7 +2909,6 @@ nir_variable *nir_get_binding_variable(nir_shader *shader, nir_binding binding)
 bool
 nir_alu_instr_is_copy(nir_alu_instr *instr)
 {
-   assert(instr->src[0].src.is_ssa);
 
    if (instr->op == nir_op_mov) {
       return !instr->src[0].abs &&
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index c6a15e0..c93a1f1 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -2729,7 +2729,6 @@ nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
       assert(s.comp < s.def->num_components);
       assert(alu->dest.write_mask & (1u << s.comp));
 
-   assert(alu->src[alu_src_idx].src.is_ssa);
    out.def = alu->src[alu_src_idx].src.ssa;
 
    if (nir_op_infos[alu->op].input_sizes[alu_src_idx] == 0) {
@@ -2769,7 +2768,6 @@ nir_ssa_scalar_resolved(nir_ssa_def *def, unsigned channel)
 static inline uint64_t
 nir_alu_src_as_uint(nir_alu_src src)
 {
-   assert(src.src.is_ssa && "precondition");
    nir_ssa_scalar scalar = nir_get_ssa_scalar(src.src.ssa, src.swizzle[0]);
    return nir_ssa_scalar_as_uint(scalar);
 }
diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h
index f4cb332..ffbc474 100644
--- a/src/compiler/nir/nir_builder.h
+++ b/src/compiler/nir/nir_builder.h
@@ -1313,7 +1313,6 @@ static inline nir_deref_instr *
 nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
                           int64_t index)
 {
-   assert(parent->dest.is_ssa);
    nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
                                          parent->dest.ssa.bit_size);
 
@@ -1447,7 +1446,6 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
                          nir_deref_instr *leader)
 {
    /* If the derefs would have the same parent, don't make a new one */
-   assert(leader->parent.is_ssa);
    if (leader->parent.ssa == &parent->dest.ssa)
       return leader;
 
@@ -1468,7 +1466,6 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
              glsl_get_length(leader_parent->type));
 
       if (leader->deref_type == nir_deref_type_array) {
-         assert(leader->arr.index.is_ssa);
          nir_ssa_def *index = nir_i2iN(b, leader->arr.index.ssa,
                                        parent->dest.ssa.bit_size);
          return nir_build_deref_array(b, parent, index);
@@ -1736,7 +1733,6 @@ nir_steal_tex_src(nir_tex_instr *tex, nir_tex_src_type type_)
    if (idx < 0)
       return NULL;
 
-   assert(tex->src[idx].src.is_ssa);
    nir_ssa_def *ssa = tex->src[idx].src.ssa;
    nir_tex_instr_remove_src(tex, idx);
    return ssa;
diff --git a/src/compiler/nir/nir_deref.c b/src/compiler/nir/nir_deref.c
index 63f7c36..216c4f3 100644
--- a/src/compiler/nir/nir_deref.c
+++ b/src/compiler/nir/nir_deref.c
@@ -109,7 +109,6 @@ nir_deref_instr_remove_if_unused(nir_deref_instr *instr)
 
    for (nir_deref_instr *d = instr; d; d = nir_deref_instr_parent(d)) {
       /* If anyone is using this deref, leave it alone */
-      assert(d->dest.is_ssa);
       if (!nir_ssa_def_is_unused(&d->dest.ssa))
          break;
 
@@ -539,7 +538,6 @@ compare_deref_paths(nir_deref_path *a_path, nir_deref_path *b_path,
       } else {
          assert(a[*i]->deref_type == nir_deref_type_array &&
                 b[*i]->deref_type == nir_deref_type_array);
-         assert(a[*i]->arr.index.is_ssa && b[*i]->arr.index.is_ssa);
 
          if (nir_src_is_const(a[*i]->arr.index) &&
             nir_src_is_const(b[*i]->arr.index)) {
@@ -923,7 +921,6 @@ opt_alu_of_cast(nir_alu_instr *alu)
    bool progress = false;
 
    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
-      assert(alu->src[i].src.is_ssa);
       nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;
       if (src_instr->type != nir_instr_type_deref)
          continue;
@@ -932,7 +929,6 @@ opt_alu_of_cast(nir_alu_instr *alu)
       if (src_deref->deref_type != nir_deref_type_cast)
          continue;
 
-      assert(src_deref->parent.is_ssa);
       nir_instr_rewrite_src_ssa(&alu->instr, &alu->src[i].src,
                                 src_deref->parent.ssa);
       progress = true;
@@ -1200,8 +1196,6 @@ opt_deref_cast(nir_builder *b, nir_deref_instr *cast)
 
    bool trivial_array_cast = is_trivial_array_deref_cast(cast);
 
-   assert(cast->dest.is_ssa);
-   assert(cast->parent.is_ssa);
 
    nir_foreach_use_including_if_safe(use_src, &cast->dest.ssa) {
       assert(!use_src->is_if && "there cannot be if-uses");
@@ -1256,9 +1250,6 @@ opt_deref_ptr_as_array(nir_builder *b, nir_deref_instr *deref)
        parent->deref_type != nir_deref_type_ptr_as_array)
       return false;
 
-   assert(parent->parent.is_ssa);
-   assert(parent->arr.index.is_ssa);
-   assert(deref->arr.index.is_ssa);
 
    deref->arr.in_bounds &= parent->arr.in_bounds;
 
@@ -1354,7 +1345,6 @@ opt_load_vec_deref(nir_builder *b, nir_intrinsic_instr *load)
       /* Stomp it to reference the parent */
       nir_instr_rewrite_src(&load->instr, &load->src[0],
                             nir_src_for_ssa(&parent->dest.ssa));
-      assert(load->dest.is_ssa);
       load->dest.ssa.bit_size = new_bit_size;
       load->dest.ssa.num_components = new_num_comps;
       load->num_components = new_num_comps;
@@ -1384,7 +1374,6 @@ opt_store_vec_deref(nir_builder *b, nir_intrinsic_instr *store)
     * results in a LOT of vec4->vec3 casts on loads and stores.
     */
    if (is_vector_bitcast_deref(deref, write_mask, true)) {
-      assert(store->src[1].is_ssa);
       nir_ssa_def *data = store->src[1].ssa;
 
       const unsigned old_bit_size = data->bit_size;
diff --git a/src/compiler/nir/nir_from_ssa.c b/src/compiler/nir/nir_from_ssa.c
index ee8c171..dc1e0fe 100644
--- a/src/compiler/nir/nir_from_ssa.c
+++ b/src/compiler/nir/nir_from_ssa.c
@@ -392,7 +392,6 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
    nir_instr_insert_after(&last_phi->instr, &block_pcopy->instr);
 
    nir_foreach_phi(phi, block) {
-      assert(phi->dest.is_ssa);
       nir_foreach_phi_src(src, phi) {
          if (nir_src_is_undef(src->src))
             continue;
@@ -411,7 +410,6 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
          entry->dest.dest.ssa.divergent = nir_src_is_divergent(src->src);
          exec_list_push_tail(&pcopy->entries, &entry->node);
 
-         assert(src->src.is_ssa);
          nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);
 
          nir_instr_rewrite_src(&phi->instr, &src->src,
@@ -630,7 +628,6 @@ remove_no_op_phi(nir_instr *instr, struct from_ssa_state *state)
       if (nir_src_is_undef(src->src))
          continue;
 
-      assert(src->src.is_ssa);
       entry = _mesa_hash_table_search(state->merge_node_table, src->src.ssa);
       assert(entry != NULL);
       merge_node *src_node = (merge_node *)entry->data;
@@ -719,7 +716,6 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
 
          nir_foreach_parallel_copy_entry(entry, pcopy) {
             assert(!entry->dest_is_reg);
-            assert(entry->dest.dest.is_ssa);
             assert(nir_ssa_def_is_unused(&entry->dest.dest.ssa));
 
             /* Parallel copy destinations will always be registers */
@@ -734,7 +730,6 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
 
          nir_foreach_parallel_copy_entry(entry, pcopy) {
             assert(!entry->src_is_reg);
-            assert(entry->src.is_ssa);
             nir_ssa_def *reg = reg_for_ssa_def(entry->src.ssa, state);
             if (reg == NULL)
                continue;
@@ -817,8 +812,7 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
    unsigned num_copies = 0;
    nir_foreach_parallel_copy_entry(entry, pcopy) {
       /* Sources may be SSA but destinations are always registers */
-      assert(entry->src.is_ssa);
-      assert(entry->dest_is_reg && entry->dest.dest.is_ssa);
+      assert(entry->dest_is_reg);
       if (entry->src_is_reg && entry->src.ssa == entry->dest.reg.ssa)
          continue;
 
@@ -858,7 +852,6 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
       if (entry->src_is_reg && entry->src.ssa == entry->dest.reg.ssa)
          continue;
 
-      assert(entry->src.is_ssa);
       struct copy_value src_value = {
          .is_reg = entry->src_is_reg,
          .ssa = entry->src.ssa,
@@ -874,7 +867,7 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
          values[src_idx] = src_value;
       }
 
-      assert(entry->dest_is_reg && entry->dest.dest.is_ssa);
+      assert(entry->dest_is_reg);
       struct copy_value dest_value = {
          .is_reg = true,
         .ssa = entry->dest.reg.ssa,
@@ -1166,14 +1159,12 @@ nir_lower_phis_to_regs_block(nir_block *block)
 
    bool progress = false;
    nir_foreach_phi_safe(phi, block) {
-      assert(phi->dest.is_ssa);
       nir_ssa_def *reg = decl_reg_for_ssa_def(&b, &phi->dest.ssa);
 
       b.cursor = nir_after_instr(&phi->instr);
       nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_load_reg(&b, reg));
 
       nir_foreach_phi_src(src, phi) {
-         assert(src->src.is_ssa);
          _mesa_set_add(visited_blocks, src->src.ssa->parent_instr->block);
 
          place_phi_read(&b, reg, src->src.ssa, src->pred, visited_blocks);
@@ -1228,7 +1219,6 @@ instr_is_load_new_reg(nir_instr *instr, unsigned old_num_ssa)
    if (load->intrinsic != nir_intrinsic_load_reg)
       return false;
 
-   assert(load->src[0].is_ssa);
    nir_ssa_def *reg = load->src[0].ssa;
    return reg->index >= old_num_ssa;
 
diff --git a/src/compiler/nir/nir_inline_functions.c b/src/compiler/nir/nir_inline_functions.c
index e18ce25..0fe2263 100644
--- a/src/compiler/nir/nir_inline_functions.c
+++ b/src/compiler/nir/nir_inline_functions.c
@@ -82,7 +82,6 @@ void nir_inline_function_impl(struct nir_builder *b,
             unsigned param_idx = nir_intrinsic_param_idx(load);
             assert(param_idx < impl->function->num_params);
-            assert(load->dest.is_ssa);
             nir_ssa_def_rewrite_uses(&load->dest.ssa,
                                      params[param_idx]);
 
diff --git a/src/compiler/nir/nir_legacy.c b/src/compiler/nir/nir_legacy.c
index 593d7fd..639fa42 100644
--- a/src/compiler/nir/nir_legacy.c
+++ b/src/compiler/nir/nir_legacy.c
@@ -278,7 +278,6 @@ fuse_mods_with_registers(nir_builder *b, nir_instr *instr, void *fuse_fabs_)
        * this for loads in the same block as the use because uses of loads
        * which cross block boundaries aren't trivial anyway.
        */
-      assert(alu->src[0].src.is_ssa);
       nir_intrinsic_instr *load = nir_load_reg_for_def(alu->src[0].src.ssa);
       if (load != NULL) {
          /* Duplicate the load before changing it in case there are other
diff --git a/src/compiler/nir/nir_lower_bit_size.c b/src/compiler/nir/nir_lower_bit_size.c
index 45ba463..57c447e 100644
--- a/src/compiler/nir/nir_lower_bit_size.c
+++ b/src/compiler/nir/nir_lower_bit_size.c
@@ -146,7 +146,6 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
    case nir_intrinsic_reduce:
    case nir_intrinsic_inclusive_scan:
    case nir_intrinsic_exclusive_scan: {
-      assert(intrin->src[0].is_ssa && intrin->dest.is_ssa);
       const unsigned old_bit_size = intrin->dest.ssa.bit_size;
       assert(old_bit_size < bit_size);
 
diff --git a/src/compiler/nir/nir_lower_convert_alu_types.c b/src/compiler/nir/nir_lower_convert_alu_types.c
index e3d2777..8fa51bc 100644
--- a/src/compiler/nir/nir_lower_convert_alu_types.c
+++ b/src/compiler/nir/nir_lower_convert_alu_types.c
@@ -54,7 +54,6 @@ static void
 lower_convert_alu_types_instr(nir_builder *b, nir_intrinsic_instr *conv)
 {
    assert(conv->intrinsic == nir_intrinsic_convert_alu_types);
-   assert(conv->src[0].is_ssa && conv->dest.is_ssa);
 
    b->cursor = nir_instr_remove(&conv->instr);
    nir_ssa_def *val =
diff --git a/src/compiler/nir/nir_lower_locals_to_regs.c b/src/compiler/nir/nir_lower_locals_to_regs.c
index 3648d8b..ab9b47d 100644
--- a/src/compiler/nir/nir_lower_locals_to_regs.c
+++ b/src/compiler/nir/nir_lower_locals_to_regs.c
@@ -234,7 +234,6 @@ lower_locals_to_regs_block(nir_block *block,
                                     loc.reg, .base = loc.base_offset);
          }
 
-         assert(intrin->dest.is_ssa);
          nir_ssa_def_rewrite_uses(&intrin->dest.ssa, value);
          nir_instr_remove(&intrin->instr);
          state->progress = true;
@@ -251,7 +250,6 @@ lower_locals_to_regs_block(nir_block *block,
          struct reg_location loc = get_deref_reg_location(deref, state);
          nir_intrinsic_instr *decl = nir_reg_get_decl(loc.reg);
 
-         assert(intrin->src[1].is_ssa);
          nir_ssa_def *val = intrin->src[1].ssa;
          unsigned num_array_elems = nir_intrinsic_num_array_elems(decl);
          unsigned write_mask = nir_intrinsic_write_mask(intrin);
diff --git a/src/compiler/nir/nir_lower_mem_access_bit_sizes.c b/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
index 562e598..5f3bcaa 100644
--- a/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
+++ b/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
@@ -229,7 +229,6 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
                 nir_lower_mem_access_bit_sizes_cb mem_access_size_align_cb,
                 const void *cb_data, bool allow_unaligned_stores_as_atomics)
 {
-   assert(intrin->src[0].is_ssa);
    nir_ssa_def *value = intrin->src[0].ssa;
 
    assert(intrin->num_components == value->num_components);
diff --git a/src/compiler/nir/nir_lower_scratch.c b/src/compiler/nir/nir_lower_scratch.c
index a38bc35..6bdcfc2 100644
--- a/src/compiler/nir/nir_lower_scratch.c
+++ b/src/compiler/nir/nir_lower_scratch.c
@@ -58,7 +58,6 @@ lower_load_store(nir_builder *b,
    } else {
       assert(intrin->intrinsic == nir_intrinsic_store_deref);
 
-      assert(intrin->src[1].is_ssa);
       nir_ssa_def *value = intrin->src[1].ssa;
       if (value->bit_size == 1)
          value = nir_b2b32(b, value);
diff --git a/src/compiler/nir/nir_lower_system_values.c b/src/compiler/nir/nir_lower_system_values.c
index 0f21d2f..fd05625 100644
--- a/src/compiler/nir/nir_lower_system_values.c
+++ b/src/compiler/nir/nir_lower_system_values.c
@@ -74,7 +74,6 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
    if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
       return NULL;
 
-   assert(intrin->dest.is_ssa);
    const unsigned bit_size = intrin->dest.ssa.bit_size;
 
    switch (intrin->intrinsic) {
@@ -135,11 +134,9 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
       case nir_intrinsic_interp_deref_at_centroid:
          return nir_load_barycentric_coord_centroid(b, 32, .interp_mode = interp_mode);
       case nir_intrinsic_interp_deref_at_sample:
-         assert(intrin->src[1].is_ssa);
          return nir_load_barycentric_coord_at_sample(b, 32, intrin->src[1].ssa,
                                                      .interp_mode = interp_mode);
       case nir_intrinsic_interp_deref_at_offset:
-         assert(intrin->src[1].is_ssa);
          return nir_load_barycentric_coord_at_offset(b, 32, intrin->src[1].ssa,
                                                      .interp_mode = interp_mode);
       default:
@@ -159,7 +156,6 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
           * couple of ray-tracing intrinsics which are matrices.
           */
          assert(deref->deref_type == nir_deref_type_array);
-         assert(deref->arr.index.is_ssa);
          column = deref->arr.index.ssa;
          nir_deref_instr *arr_deref = deref;
          deref = nir_deref_instr_parent(deref);
diff --git a/src/compiler/nir/nir_lower_var_copies.c b/src/compiler/nir/nir_lower_var_copies.c
index d8681c5..8ca4fe2 100644
--- a/src/compiler/nir/nir_lower_var_copies.c
+++ b/src/compiler/nir/nir_lower_var_copies.c
@@ -96,7 +96,6 @@ nir_lower_deref_copy_instr(nir_builder *b, nir_intrinsic_instr *copy)
    /* Unfortunately, there's just no good way to handle wildcards except to
     * flip the chain around and walk the list from variable to final pointer.
     */
-   assert(copy->src[0].is_ssa && copy->src[1].is_ssa);
    nir_deref_instr *dst = nir_instr_as_deref(copy->src[0].ssa->parent_instr);
    nir_deref_instr *src = nir_instr_as_deref(copy->src[1].ssa->parent_instr);
 
diff --git a/src/compiler/nir/nir_opt_find_array_copies.c b/src/compiler/nir/nir_opt_find_array_copies.c
index 5f3f621..7a1723f 100644
--- a/src/compiler/nir/nir_opt_find_array_copies.c
+++ b/src/compiler/nir/nir_opt_find_array_copies.c
@@ -337,7 +337,6 @@ try_match_deref(nir_deref_path *base_path, int *path_array_idx,
          continue;
 
      case nir_deref_type_array: {
-         assert(b->arr.index.is_ssa && d->arr.index.is_ssa);
          const bool const_b_idx = nir_src_is_const(b->arr.index);
          const bool const_d_idx = nir_src_is_const(d->arr.index);
 
         const unsigned b_idx = const_b_idx ? nir_src_as_uint(b->arr.index) : 0;
diff --git a/src/compiler/nir/nir_opt_idiv_const.c b/src/compiler/nir/nir_opt_idiv_const.c
index d44783b..394bf7a 100644
--- a/src/compiler/nir/nir_opt_idiv_const.c
+++ b/src/compiler/nir/nir_opt_idiv_const.c
@@ -161,7 +161,6 @@ nir_opt_idiv_const_instr(nir_builder *b, nir_instr *instr, void *user_data)
        alu->op != nir_op_irem)
       return false;
 
-   assert(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa);
    if (alu->dest.dest.ssa.bit_size < *min_bit_size)
       return false;
 
diff --git a/src/compiler/nir/nir_schedule.c b/src/compiler/nir/nir_schedule.c
index 4cc9307..40d7584 100644
--- a/src/compiler/nir/nir_schedule.c
+++ b/src/compiler/nir/nir_schedule.c
@@ -573,7 +573,6 @@ nir_schedule_regs_freed_store_reg(nir_intrinsic_instr *store,
                                   nir_schedule_regs_freed_state *state)
 {
    assert(nir_is_store_reg(store));
-   assert(store->src[0].is_ssa && store->src[1].is_ssa);
 
    nir_schedule_regs_freed_src_cb(&store->src[0], state);
    if (store->intrinsic == nir_intrinsic_store_reg_indirect)
@@ -962,7 +961,6 @@ nir_schedule_mark_store_reg_scheduled(nir_intrinsic_instr *store,
                                       nir_schedule_scoreboard *scoreboard)
 {
    assert(nir_is_store_reg(store));
-   assert(store->src[0].is_ssa && store->src[1].is_ssa);
    nir_ssa_def *reg = store->src[1].ssa;
 
    nir_schedule_mark_src_scheduled(&store->src[0], scoreboard);
diff --git a/src/compiler/nir/nir_validate.c b/src/compiler/nir/nir_validate.c
index 374349a..6a5e9ca 100644
--- a/src/compiler/nir/nir_validate.c
+++ b/src/compiler/nir/nir_validate.c
@@ -339,9 +339,6 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state)
             validate_assert(state, instr->cast.align_offset == 0);
       }
    } else {
-      /* We require the parent to be SSA.  This may be lifted in the future */
-      validate_assert(state, instr->parent.is_ssa);
-
       /* The parent pointer value must have the same number of components
        * as the destination.
        */
@@ -477,9 +474,6 @@ validate_register_handle(nir_src handle_src,
                          unsigned bit_size,
                          validate_state *state)
 {
-   if (!validate_assert(state, handle_src.is_ssa))
-      return;
-
    nir_ssa_def *handle = handle_src.ssa;
    nir_instr *parent = handle->parent_instr;
 
@@ -1081,12 +1075,10 @@ validate_phi_src(nir_phi_instr *instr, nir_block *pred,
                  validate_state *state)
 {
    state->instr = &instr->instr;
-   validate_assert(state, instr->dest.is_ssa);
 
    exec_list_validate(&instr->srcs);
    nir_foreach_phi_src(src, instr) {
       if (src->pred == pred) {
-         validate_assert(state, src->src.is_ssa);
          validate_src(&src->src, state, instr->dest.ssa.bit_size,
                       instr->dest.ssa.num_components);
          state->instr = NULL;
diff --git a/src/compiler/nir/tests/vars_tests.cpp b/src/compiler/nir/tests/vars_tests.cpp
index a79f970..3dcd5d7 100644
--- a/src/compiler/nir/tests/vars_tests.cpp
+++ b/src/compiler/nir/tests/vars_tests.cpp
@@ -251,10 +251,8 @@ TEST_F(nir_redundant_load_vars_test, duplicated_load_volatile)
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
 
    nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(first_store->src[1].is_ssa);
 
    nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(third_store->src[1].is_ssa);
 
    EXPECT_EQ(first_store->src[1].ssa, third_store->src[1].ssa);
 }
@@ -374,10 +372,8 @@ TEST_F(nir_copy_prop_vars_test, simple_copies)
    ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);
 
    nir_intrinsic_instr *first_copy = get_intrinsic(nir_intrinsic_copy_deref, 0);
-   ASSERT_TRUE(first_copy->src[1].is_ssa);
 
    nir_intrinsic_instr *second_copy = get_intrinsic(nir_intrinsic_copy_deref, 1);
-   ASSERT_TRUE(second_copy->src[1].is_ssa);
 
    EXPECT_EQ(first_copy->src[1].ssa, second_copy->src[1].ssa);
 }
@@ -420,7 +416,6 @@ TEST_F(nir_copy_prop_vars_test, simple_store_load)
 
    for (int i = 0; i < 2; i++) {
       nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
-      ASSERT_TRUE(store->src[1].is_ssa);
       EXPECT_EQ(store->src[1].ssa, stored_value);
    }
 }
@@ -451,7 +446,6 @@ TEST_F(nir_copy_prop_vars_test, store_store_load)
    /* Store to v[1] should use second_value directly. */
    nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
    ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
-   ASSERT_TRUE(store_to_v1->src[1].is_ssa);
    EXPECT_EQ(store_to_v1->src[1].ssa, second_value);
 }
 
@@ -563,7 +557,6 @@ TEST_F(nir_copy_prop_vars_test, store_volatile)
     */
    nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 3);
    ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
-   ASSERT_TRUE(store_to_v1->src[1].is_ssa);
    EXPECT_EQ(store_to_v1->src[1].ssa, third_value);
 }
 
@@ -1056,7 +1049,6 @@ TEST_F(nir_copy_prop_vars_test, simple_store_load_in_two_blocks)
 
    for (int i = 0; i < 2; i++) {
       nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
-      ASSERT_TRUE(store->src[1].is_ssa);
       EXPECT_EQ(store->src[1].ssa, stored_value);
    }
 }
@@ -1091,7 +1083,6 @@ TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previou
    ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(store->src[1].is_ssa);
 
    /* NOTE: The ALU instruction is how we get the vec.y. */
    ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
@@ -1151,7 +1142,6 @@ TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_gets_reused)
    ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 1);
-   ASSERT_TRUE(store->src[1].is_ssa);
    ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
 }
 
@@ -1191,12 +1181,10 @@ TEST_F(nir_copy_prop_vars_test, store_load_direct_array_deref_on_vector)
    /* Third store will just use the value from first store. */
    nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(third_store->src[1].is_ssa);
    EXPECT_EQ(third_store->src[1].ssa, first_store->src[1].ssa);
 
    /* Fourth store will compose first and second store values. */
    nir_intrinsic_instr *fourth_store = get_intrinsic(nir_intrinsic_store_deref, 3);
-   ASSERT_TRUE(fourth_store->src[1].is_ssa);
    EXPECT_TRUE(nir_src_as_alu_instr(fourth_store->src[1]));
 }
 
@@ -1232,8 +1220,6 @@ TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref_on_vector)
    /* Store to vec[idx] propagated to out. */
    nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
-   ASSERT_TRUE(first->src[1].is_ssa);
-   ASSERT_TRUE(second->src[1].is_ssa);
    EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
 }
 
@@ -1270,8 +1256,6 @@ TEST_F(nir_copy_prop_vars_test, store_load_direct_and_indirect_array_deref_on_ve
    /* Store to vec[idx] propagated to out. */
    nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
    nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(second->src[1].is_ssa);
-   ASSERT_TRUE(third->src[1].is_ssa);
    EXPECT_EQ(second->src[1].ssa, third->src[1].ssa);
 }
 
@@ -1309,8 +1293,6 @@ TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref)
    /* Store to arr[idx] propagated to out. */
    nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
-   ASSERT_TRUE(first->src[1].is_ssa);
-   ASSERT_TRUE(second->src[1].is_ssa);
    EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
 }
 
@@ -1356,8 +1338,6 @@ TEST_F(nir_copy_prop_vars_test, restrict_ssbo_bindings)
    /* Store to b0.x propagated to out. */
    nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(first->src[1].is_ssa);
-   ASSERT_TRUE(third->src[1].is_ssa);
    EXPECT_EQ(first->src[1].ssa, third->src[1].ssa);
 }
 
@@ -1489,8 +1469,6 @@ TEST_F(nir_copy_prop_vars_test, restrict_ssbo_array_binding)
    /* Store to b0.x propagated to out. */
    nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(first->src[1].is_ssa);
-   ASSERT_TRUE(third->src[1].is_ssa);
    EXPECT_EQ(first->src[1].ssa, third->src[1].ssa);
 }
 
@@ -1655,7 +1633,6 @@ TEST_F(nir_dead_write_vars_test, dead_write_in_block)
    EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(store->src[1].is_ssa);
    EXPECT_EQ(store->src[1].ssa, load_v2);
 }
 
@@ -1673,7 +1650,6 @@ TEST_F(nir_dead_write_vars_test, dead_write_components_in_block)
    EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(store->src[1].is_ssa);
    EXPECT_EQ(store->src[1].ssa, load_v2);
 }
 
@@ -1701,7 +1677,6 @@ TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_in_two_blocks)
    EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(store->src[1].is_ssa);
    EXPECT_EQ(store->src[1].ssa, load_v2);
 }
 
@@ -1723,7 +1698,6 @@ TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_components_in_two_blocks)
    EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(store->src[1].is_ssa);
    EXPECT_EQ(store->src[1].ssa, load_v2);
 }
 
@@ -1749,11 +1723,9 @@ TEST_F(nir_dead_write_vars_test, DISABLED_dead_writes_in_if_statement)
    EXPECT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(first_store->src[1].is_ssa);
    EXPECT_EQ(first_store->src[1].ssa, load_v2);
 
    nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
-   ASSERT_TRUE(second_store->src[1].is_ssa);
    EXPECT_EQ(second_store->src[1].ssa, load_v3);
 }
 
diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c
index 5e067db..044d9f7 100644
--- a/src/freedreno/ir3/ir3_compiler_nir.c
+++ b/src/freedreno/ir3/ir3_compiler_nir.c
@@ -3213,7 +3213,6 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
    if (opc == OPC_META_TEX_PREFETCH) {
       int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 
-      compile_assert(ctx, tex->src[idx].src.is_ssa);
 
       sam = ir3_SAM(ctx->in_block, opc, type, MASK(ncomp), 0, NULL,
                     get_barycentric(ctx, IJ_PERSP_PIXEL), 0);
diff --git a/src/gallium/drivers/d3d12/d3d12_nir_passes.c b/src/gallium/drivers/d3d12/d3d12_nir_passes.c
index 2901b3b..d7f6406 100644
--- a/src/gallium/drivers/d3d12/d3d12_nir_passes.c
+++ b/src/gallium/drivers/d3d12/d3d12_nir_passes.c
@@ -496,7 +496,6 @@ lower_instr(nir_intrinsic_instr *instr, nir_builder *b,
    nir_instr_remove(&instr->instr);
    for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
       /* If anyone is using this deref, leave it alone */
-      assert(d->dest.is_ssa);
       if (!list_is_empty(&d->dest.ssa.uses))
          break;
 
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir.cpp b/src/gallium/drivers/r600/sfn/sfn_nir.cpp
index fab8a7d..56d1829 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir.cpp
@@ -243,7 +243,6 @@ private:
 
       auto buf_id = nir_imm_int(b, R600_BUFFER_INFO_CONST_BUFFER);
 
-      assert(intr->src[0].is_ssa);
       auto clip_vtx = intr->src[0].ssa;
 
       for (int i = 0; i < 8; ++i) {
@@ -312,7 +311,6 @@ private:
    {
       auto intr = nir_instr_as_intrinsic(instr);
       assert(intr->intrinsic == nir_intrinsic_load_ubo_vec4);
-      assert(intr->src[0].is_ssa);
 
      auto parent = intr->src[0].ssa->parent_instr;
 
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
index b3fbc7d..5315707 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_lower_fs_out_to_vector.cpp
@@ -394,7 +394,6 @@ NirLowerIOToVector::vec_instr_stack_pop(nir_builder *b,
       assert(glsl_get_vector_elements(glsl_without_array(var2->type)) < 4);
 
       if (srcs[var2->data.location_frac] == &instr_undef->def) {
-         assert(intr2->src[1].is_ssa);
          assert(intr2->src[1].ssa);
          srcs[var2->data.location_frac] = intr2->src[1].ssa;
       }
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index 04aa8d9..27ab4aa 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -7388,7 +7388,6 @@ brw_compute_barycentric_interp_modes(const struct intel_device_info *devinfo,
          }
 
          /* Ignore WPOS; it doesn't require interpolation. */
-         assert(intrin->dest.is_ssa);
          if (!is_used_in_not_interp_frag_coord(&intrin->dest.ssa))
             continue;
 
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index 5e141dc..68f7359e 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -4368,7 +4368,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       nir_ssa_bind_infos[instr->dest.ssa.index].binding =
          nir_intrinsic_binding(instr);
 
-      assert(instr->src[1].is_ssa);
       if (nir_intrinsic_resource_access_intel(instr) &
           nir_resource_intel_non_uniform) {
          nir_resource_values[instr->dest.ssa.index] = fs_reg();
diff --git a/src/intel/compiler/brw_nir_lower_alpha_to_coverage.c b/src/intel/compiler/brw_nir_lower_alpha_to_coverage.c
index 8618270..41ef112 100644
--- a/src/intel/compiler/brw_nir_lower_alpha_to_coverage.c
+++ b/src/intel/compiler/brw_nir_lower_alpha_to_coverage.c
@@ -149,12 +149,10 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader,
     * assuming an alpha of 1.0 and letting the sample mask pass through
     * unaltered seems like the kindest thing to do to apps.
     */
-   assert(color0_write->src[0].is_ssa);
    nir_ssa_def *color0 = color0_write->src[0].ssa;
    if (color0->num_components < 4)
       goto skip;
 
-   assert(sample_mask_write->src[0].is_ssa);
    nir_ssa_def *sample_mask = sample_mask_write->src[0].ssa;
 
    if (sample_mask_write_first) {
diff --git a/src/intel/compiler/brw_nir_opt_peephole_ffma.c b/src/intel/compiler/brw_nir_opt_peephole_ffma.c
index 0b66689..8dc59b8 100644
--- a/src/intel/compiler/brw_nir_opt_peephole_ffma.c
+++ b/src/intel/compiler/brw_nir_opt_peephole_ffma.c
@@ -169,7 +169,6 @@ brw_nir_opt_peephole_ffma_instr(nir_builder *b,
    if (add->exact)
       return false;
 
-   assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);
 
    /* This, is the case a + a.  We would rather handle this with an
     * algebraic reduction than fuse it.  Also, we want to only fuse
diff --git a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
index e65ca3f..29a2c1a 100644
--- a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
+++ b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c
@@ -1356,7 +1356,6 @@ lower_res_reindex_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 {
    b->cursor = nir_before_instr(&intrin->instr);
 
-   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
    nir_ssa_def *index =
       build_res_reindex(b, intrin->src[0].ssa,
                         intrin->src[1].ssa);
diff --git a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c
index 2c8e63c..5de085c 100644
--- a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c
+++ b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c
@@ -779,7 +779,6 @@ lower_res_reindex_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_address_format addr_format =
       addr_format_for_desc_type(nir_intrinsic_desc_type(intrin), state);
 
-   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
    nir_ssa_def *index =
       build_res_reindex(b, intrin->src[0].ssa,
                         intrin->src[1].ssa,
diff --git a/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c b/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
index 111ba19..f6e4137 100644
--- a/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
+++ b/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c
@@ -195,7 +195,6 @@ lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
    if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
       return false;
 
-   assert(intrin->dest.is_ssa);
    const struct dxil_spirv_runtime_conf *conf =
       (const struct dxil_spirv_runtime_conf *)cb_data;
 
diff --git a/src/panfrost/vulkan/panvk_vX_nir_lower_descriptors.c b/src/panfrost/vulkan/panvk_vX_nir_lower_descriptors.c
index 034d861..6d9c68b 100644
--- a/src/panfrost/vulkan/panvk_vX_nir_lower_descriptors.c
+++ b/src/panfrost/vulkan/panvk_vX_nir_lower_descriptors.c
@@ -262,7 +262,6 @@ lower_res_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
       break;
 
    case nir_intrinsic_vulkan_resource_reindex:
-      assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
       res = build_res_reindex(b, intrin->src[0].ssa, intrin->src[1].ssa,
                               addr_format);
       break;
-- 
2.7.4