case nir_intrinsic_atomic_counter_read:
case nir_intrinsic_atomic_counter_read_deref:
case nir_intrinsic_quad_swizzle_amd:
- case nir_intrinsic_masked_swizzle_amd: {
+ case nir_intrinsic_masked_swizzle_amd:
+ case nir_intrinsic_is_sparse_texels_resident: {
unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
for (unsigned i = 0; i < num_srcs; i++) {
if (instr->src[i].ssa->divergent) {
intrinsic("addr_mode_is", src_comp=[-1], dest_comp=1,
indices=[MEMORY_MODES], flags=[CAN_ELIMINATE, CAN_REORDER])
+intrinsic("is_sparse_texels_resident", dest_comp=1, src_comp=[1], bit_sizes=[1],
+ flags=[CAN_ELIMINATE, CAN_REORDER])
+
# A barrier is an intrinsic with no inputs or outputs, but which cannot be
# moved around or optimized in general.
def barrier(name):
case nir_intrinsic_load_subgroup_invocation:
case nir_intrinsic_load_num_subgroups:
case nir_intrinsic_load_frag_shading_rate:
+ case nir_intrinsic_is_sparse_texels_resident:
if (!alu_ok)
return false;
break;
vtn_push_image(b, w[2], si.image, access & ACCESS_NON_UNIFORM);
return;
+ } else if (opcode == SpvOpImageSparseTexelsResident) {
+ nir_ssa_def *code = vtn_get_nir_ssa(b, w[3]);
+ vtn_push_nir_ssa(b, w[2], nir_is_sparse_texels_resident(&b->nb, code));
+ return;
}
nir_deref_instr *image = NULL, *sampler = NULL;
case SpvOpSampledImage:
case SpvOpImage:
+ case SpvOpImageSparseTexelsResident:
case SpvOpImageSampleImplicitLod:
case SpvOpImageSparseSampleImplicitLod:
case SpvOpImageSampleExplicitLod: