.shaderInputAttachmentArrayDynamicIndexing = false,
.shaderUniformTexelBufferArrayDynamicIndexing = true,
.shaderStorageTexelBufferArrayDynamicIndexing = true,
- .shaderUniformBufferArrayNonUniformIndexing = false,
+ .shaderUniformBufferArrayNonUniformIndexing = true,
.shaderSampledImageArrayNonUniformIndexing = true,
.shaderStorageBufferArrayNonUniformIndexing = true,
.shaderStorageImageArrayNonUniformIndexing = true,
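For context, a minimal sketch (not part of the patch) of what the flipped feature bit means for applications: they opt in through the core Vulkan 1.2 feature struct at device creation. Assumes a Vulkan 1.2 instance and that queue family 0 exists; the function name is made up for illustration.

#include <vulkan/vulkan.h>

/* Sketch: create a device with non-uniform UBO indexing enabled. */
VkDevice
create_device_with_nonuniform_ubo_indexing(VkPhysicalDevice phys_dev)
{
   /* Query what the implementation supports. */
   VkPhysicalDeviceVulkan12Features supported12 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
   };
   VkPhysicalDeviceFeatures2 supported2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
      .pNext = &supported12,
   };
   vkGetPhysicalDeviceFeatures2(phys_dev, &supported2);

   if (!supported12.shaderUniformBufferArrayNonUniformIndexing)
      return VK_NULL_HANDLE; /* feature not exposed by this driver */

   /* Enable only the one feature this sketch needs. */
   VkPhysicalDeviceVulkan12Features enabled12 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
      .shaderUniformBufferArrayNonUniformIndexing = VK_TRUE,
   };

   const float priority = 1.0f;
   const VkDeviceQueueCreateInfo queue_info = {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
      .queueFamilyIndex = 0,
      .queueCount = 1,
      .pQueuePriorities = &priority,
   };
   const VkDeviceCreateInfo device_info = {
      .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
      .pNext = &enabled12,
      .queueCreateInfoCount = 1,
      .pQueueCreateInfos = &queue_info,
   };

   VkDevice device = VK_NULL_HANDLE;
   if (vkCreateDevice(phys_dev, &device_info, NULL, &device) != VK_SUCCESS)
      return VK_NULL_HANDLE;
   return device;
}

Shaders must still decorate such an access NonUniform (nonuniformEXT in GLSL) so the compiler sees ACCESS_NON_UNIFORM, which is exactly the flag the hunk below keys on.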
nir_address_format addr_format = descriptor_address_format(desc, state);
+ /* Although we could lower non-uniform binding-table accesses with
+ * nir_lower_non_uniform_access, we might as well use an A64 message and
+ * avoid the loops inserted by that lowering pass.
+ */
+ if (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM)
+ return false;
+
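The loops referenced in the comment above are waterfall loops: each iteration broadcasts the index of the first still-active lane, and every lane holding that same index performs a now-uniform access. Below is a rough scalar C model of the idea; SIMD_WIDTH, load_uniform, and the function itself are illustrative and not the pass's real NIR output.

#include <stdbool.h>
#include <stdint.h>

#define SIMD_WIDTH 8 /* illustrative subgroup size */

static void
waterfall_load(const uint32_t index[SIMD_WIDTH],
               uint32_t result[SIMD_WIDTH],
               uint32_t (*load_uniform)(uint32_t idx, unsigned lane))
{
   bool done[SIMD_WIDTH] = { false };

   for (;;) {
      /* Models readFirstInvocation(): pick the first still-active lane. */
      int first = -1;
      for (unsigned i = 0; i < SIMD_WIDTH; i++) {
         if (!done[i]) {
            first = (int)i;
            break;
         }
      }
      if (first < 0)
         break; /* every lane has performed its access */

      const uint32_t uniform_idx = index[first];

      /* All lanes holding the same index do a now-uniform access. */
      for (unsigned i = 0; i < SIMD_WIDTH; i++) {
         if (!done[i] && index[i] == uniform_idx) {
            result[i] = load_uniform(uniform_idx, i);
            done[i] = true;
         }
      }
   }
}

The loop body runs once per distinct index in the subgroup, whereas an A64 message needs a single send because each lane already carries a full 64-bit address.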
if (nir_deref_mode_is(deref, nir_var_mem_ssbo)) {
/* 64-bit atomics only support A64 messages so we can't lower them to
* the index+offset model.
*/
if (is_atomic && nir_dest_bit_size(intrin->dest) == 64 &&
!state->pdevice->info.has_lsc)
return false;
- /* Normal binding table-based messages can't handle non-uniform access
- * so we have to fall back to A64.
- */
- if (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM)
- return false;
-
if (!descriptor_has_bti(desc, state))
return false;