.KHR_sampler_mirror_clamp_to_edge = true,
.KHR_sampler_ycbcr_conversion = true,
.KHR_separate_depth_stencil_layouts = true,
- .KHR_shader_atomic_int64 = device->info.ver >= 9,
.KHR_shader_clock = true,
.KHR_shader_draw_parameters = true,
.KHR_shader_float16_int8 = device->info.ver >= 8,
.EXT_scalar_block_layout = true,
.EXT_separate_stencil_usage = true,
.EXT_shader_atomic_float = true,
- .EXT_shader_atomic_float2 = device->info.ver >= 9,
.EXT_shader_demote_to_helper_invocation = true,
.EXT_shader_module_identifier = true,
.EXT_shader_stencil_export = device->info.ver >= 9,
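The table entries above control which extension names the driver advertises for a given hardware generation; an application detects them by scanning vkEnumerateDeviceExtensionProperties. A minimal sketch of that check (the `has_device_extension` helper and the `pdev` handle are illustrative, not part of this patch):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>
    #include <vulkan/vulkan.h>

    /* Returns true if the physical device advertises the given extension,
     * e.g. VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME. */
    static bool has_device_extension(VkPhysicalDevice pdev, const char *name)
    {
       uint32_t count = 0;
       vkEnumerateDeviceExtensionProperties(pdev, NULL, &count, NULL);

       VkExtensionProperties *props = calloc(count, sizeof(*props));
       vkEnumerateDeviceExtensionProperties(pdev, NULL, &count, props);

       bool found = false;
       for (uint32_t i = 0; i < count; i++) {
          if (strcmp(props[i].extensionName, name) == 0) {
             found = true;
             break;
          }
       }
       free(props);
       return found;
    }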
f->storageBuffer8BitAccess = pdevice->info.ver >= 8;
f->uniformAndStorageBuffer8BitAccess = pdevice->info.ver >= 8;
f->storagePushConstant8 = pdevice->info.ver >= 8;
- f->shaderBufferInt64Atomics = pdevice->info.ver >= 9;
+ f->shaderBufferInt64Atomics = false;
f->shaderSharedInt64Atomics = false;
f->shaderFloat16 = pdevice->info.ver >= 8;
f->shaderInt8 = pdevice->info.ver >= 8;
break;
}
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT: {
- VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT *features = (void *)ext;
- features->shaderBufferFloat16Atomics = false;
- features->shaderBufferFloat16AtomicAdd = false;
- features->shaderBufferFloat16AtomicMinMax = false;
- features->shaderBufferFloat32AtomicMinMax = pdevice->info.ver >= 9;
- features->shaderBufferFloat64AtomicMinMax =
- pdevice->info.has_64bit_float && pdevice->info.has_lsc;
- features->shaderSharedFloat16Atomics = false;
- features->shaderSharedFloat16AtomicAdd = false;
- features->shaderSharedFloat16AtomicMinMax = false;
- features->shaderSharedFloat32AtomicMinMax = pdevice->info.ver >= 9;
- features->shaderSharedFloat64AtomicMinMax = false;
- features->shaderImageFloat32AtomicMinMax = false;
- features->sparseImageFloat32AtomicMinMax = false;
- break;
- }
-
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR: {
VkPhysicalDeviceShaderClockFeaturesKHR *features =
(VkPhysicalDeviceShaderClockFeaturesKHR *)ext;
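The sType cases in this switch fill the feature structs that an application chains into VkPhysicalDeviceFeatures2::pNext, so the assignments above are what the app observes when it queries the atomic feature bits touched by this change. A minimal app-side query sketch, assuming `pdev` is a valid VkPhysicalDevice obtained from vkEnumeratePhysicalDevices:

    #include <stdio.h>
    #include <vulkan/vulkan.h>

    /* Query the int64 and float atomic feature bits by chaining the
     * per-feature structs into VkPhysicalDeviceFeatures2. */
    static void query_atomic_features(VkPhysicalDevice pdev)
    {
       VkPhysicalDeviceShaderAtomicFloatFeaturesEXT atomic_float = {
          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT,
       };
       VkPhysicalDeviceShaderAtomicInt64Features atomic_int64 = {
          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES,
          .pNext = &atomic_float,
       };
       VkPhysicalDeviceFeatures2 features2 = {
          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
          .pNext = &atomic_int64,
       };

       vkGetPhysicalDeviceFeatures2(pdev, &features2);

       printf("shaderBufferInt64Atomics: %u\n",
              atomic_int64.shaderBufferInt64Atomics);
       printf("shaderBufferFloat32AtomicAdd: %u\n",
              atomic_float.shaderBufferFloat32AtomicAdd);
    }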
static bool
try_lower_direct_buffer_intrinsic(nir_builder *b,
- nir_intrinsic_instr *intrin, bool is_atomic,
+ nir_intrinsic_instr *intrin,
struct apply_pipeline_layout_state *state)
{
nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
nir_address_format addr_format = descriptor_address_format(desc, state);
if (nir_deref_mode_is(deref, nir_var_mem_ssbo)) {
- /* 64-bit atomics only support A64 messages so we can't lower them to
- * the index+offset model.
- */
- if (is_atomic && nir_dest_bit_size(intrin->dest) == 64 &&
- !state->pdevice->info.has_lsc)
- return false;
-
/* Normal binding table-based messages can't handle non-uniform access
* so we have to fall back to A64.
*/
switch (intrin->intrinsic) {
case nir_intrinsic_load_deref:
case nir_intrinsic_store_deref:
- return try_lower_direct_buffer_intrinsic(b, intrin, false, state);
-
case nir_intrinsic_deref_atomic_add:
case nir_intrinsic_deref_atomic_imin:
case nir_intrinsic_deref_atomic_umin:
case nir_intrinsic_deref_atomic_fmin:
case nir_intrinsic_deref_atomic_fmax:
case nir_intrinsic_deref_atomic_fcomp_swap:
- return try_lower_direct_buffer_intrinsic(b, intrin, true, state);
+ return try_lower_direct_buffer_intrinsic(b, intrin, state);
case nir_intrinsic_get_ssbo_size: {
/* The get_ssbo_size intrinsic always just takes a