Add tests for VK_EXT_shader_image_atomic_int64
author Arkadiusz Sarwa <arkadiusz.sarwa@amd.com>
Fri, 29 May 2020 14:16:58 +0000 (16:16 +0200)
committer Alexander Galazin <alexander.galazin@arm.com>
Fri, 20 Nov 2020 15:50:37 +0000 (16:50 +0100)
Components: Vulkan, Framework

VK-GL-CTS issue: 2392

New Tests:
dEQP-VK.image.atomic_operations.*r64*
dEQP-VK.image.atomic_operations.*buffer*
Affects:
dEQP-VK.image.atomic_operations.*
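
The new r64i/r64ui cases depend on the shaderImageInt64Atomics feature of
VK_EXT_shader_image_atomic_int64. As a minimal sketch (the helper name is
hypothetical, not taken from the CTS sources), the feature query performed
before running such cases looks roughly like this:

    #include <vulkan/vulkan.h>

    // Hypothetical helper: returns true when 64-bit image atomics may be used.
    // Assumes VK_EXT_shader_image_atomic_int64 is advertised by the device.
    bool supportsImageInt64Atomics (VkPhysicalDevice physicalDevice)
    {
        VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT int64Features = {};
        int64Features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT;

        VkPhysicalDeviceFeatures2 features2 = {};
        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        features2.pNext = &int64Features;

        // Core Vulkan 1.1 entry point; fills in the chained feature struct.
        vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);

        return int64Features.shaderImageInt64Atomics == VK_TRUE;
    }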

Change-Id: Ia5d8742a758f92577e0931e22f21d26ca20115b4
(cherry picked from commit e9cf49b80bff652c42a9608f1f2efcf007e29999)
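
For orientation, the cases exercise 64-bit image atomics in shaders. The
actual sources (see vktImageAtomicSpirvShaders.cpp) build SPIR-V variants;
an equivalent GLSL compute shader for an r64ui "add" case, written here as
a hypothetical C++ string constant for illustration only, would be roughly:

    // Hypothetical GLSL counterpart of an r64ui atomic-add case; the CTS
    // itself assembles SPIR-V shaders, so this is a sketch, not CTS source.
    static const char* const kAtomicAddR64uiComp = R"(
        #version 450
        #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
        #extension GL_EXT_shader_image_int64 : require

        layout(local_size_x = 8, local_size_y = 8) in;
        layout(binding = 0, r64ui) uniform u64image2D u_resultImage;

        void main (void)
        {
            ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
            imageAtomicAdd(u_resultImage, pos, uint64_t(gl_LocalInvocationIndex));
        }
    )";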

android/cts/master/vk-master-2020-03-01.txt
android/cts/master/vk-master.txt
external/vulkancts/framework/vulkan/vkImageUtil.cpp
external/vulkancts/modules/vulkan/image/vktImageAtomicOperationTests.cpp
external/vulkancts/modules/vulkan/image/vktImageAtomicSpirvShaders.cpp
external/vulkancts/modules/vulkan/image/vktImageTestsUtil.cpp
external/vulkancts/mustpass/master/vk-default.txt
framework/common/tcuTexture.cpp

index d550c86..bdeeabd 100644
@@ -186124,120 +186124,428 @@ dEQP-VK.image.format_reinterpret.buffer.r8_snorm_r8_sint
 dEQP-VK.image.format_reinterpret.buffer.r8_snorm_r8_unorm
 dEQP-VK.image.atomic_operations.add.1d.r32f_end_result
 dEQP-VK.image.atomic_operations.add.1d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.add.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.1d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.add.1d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.add.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d.r32f_end_result
 dEQP-VK.image.atomic_operations.add.2d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.add.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.add.2d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.add.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.3d.r32f_end_result
 dEQP-VK.image.atomic_operations.add.3d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.add.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube.r32f_end_result
 dEQP-VK.image.atomic_operations.add.cube.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.add.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube_array.r32f_end_result
 dEQP-VK.image.atomic_operations.add.cube_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.add.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.buffer.r32f_end_result
 dEQP-VK.image.atomic_operations.add.buffer.r32f_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.cube_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.min.1d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.min.1d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.min.2d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.min.2d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.min.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.min.3d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.min.cube.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.min.cube_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.max.1d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.max.1d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.max.2d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.max.2d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.max.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.max.3d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.max.cube.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.max.cube_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.and.1d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.and.1d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.and.2d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.and.2d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.and.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.and.3d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.and.cube.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.and.cube_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.or.1d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.or.1d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.or.2d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.or.2d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.or.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.or.3d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.or.cube.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.or.cube_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.1d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.1d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.2d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.2d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.3d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.cube.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.1d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.2d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.3d.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.3d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.cube.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.buffer.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.buffer.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64i_intermediate_values
 dEQP-VK.image.extend_operands_spirv1p4.r32g32b32a32_uint_matching_extend
 dEQP-VK.image.extend_operands_spirv1p4.r16g16b16a16_uint_matching_extend
 dEQP-VK.image.extend_operands_spirv1p4.r16g16b16a16_uint_relaxed_matching_extend
index 5dfd16a..1674646 100644
@@ -515040,340 +515040,648 @@ dEQP-VK.image.atomic_operations.add.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.add.1d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.1d.r32f_end_result
 dEQP-VK.image.atomic_operations.add.1d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.add.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.add.1d_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.1d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.add.1d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.add.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.add.2d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d.r32f_end_result
 dEQP-VK.image.atomic_operations.add.2d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.add.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.add.2d_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.add.2d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.add.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.add.3d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.3d.r32f_end_result
 dEQP-VK.image.atomic_operations.add.3d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.add.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.add.cube.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube.r32f_end_result
 dEQP-VK.image.atomic_operations.add.cube.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.add.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.add.cube_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube_array.r32f_end_result
 dEQP-VK.image.atomic_operations.add.cube_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.add.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.buffer.r32f_end_result
 dEQP-VK.image.atomic_operations.add.buffer.r32f_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.min.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.min.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.min.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.min.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.min.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.min.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.min.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.min.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.min.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.min.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.min.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.min.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.min.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.min.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.max.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.max.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.max.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.max.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.max.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.max.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.max.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.max.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.max.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.max.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.max.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.max.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.max.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.max.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.and.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.and.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.and.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.and.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.and.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.and.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.and.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.and.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.and.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.and.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.and.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.and.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.and.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.and.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.or.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.or.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.or.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.or.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.or.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.or.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.or.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.or.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.or.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.or.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.or.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.or.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.or.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.or.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.1d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.1d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.2d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.2d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.3d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.3d.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.3d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.cube.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.cube.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.buffer.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.buffer.r32f_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64i_intermediate_values
 dEQP-VK.image.texel_view_compatible.compute.basic.1d_image.image_load.bc1_rgb_unorm_block.r16g16b16a16_unorm
 dEQP-VK.image.texel_view_compatible.compute.basic.1d_image.image_load.bc1_rgb_unorm_block.r16g16b16a16_snorm
 dEQP-VK.image.texel_view_compatible.compute.basic.1d_image.image_load.bc1_rgb_unorm_block.r16g16b16a16_uscaled
index e1f5658..2290477 100644 (file)
@@ -2526,6 +2526,8 @@ VkFormat mapTextureFormat (const tcu::TextureFormat& format)
 
                case FMT_CASE(R, UNSIGNED_INT32):                                       return VK_FORMAT_R32_UINT;
                case FMT_CASE(R, SIGNED_INT32):                                         return VK_FORMAT_R32_SINT;
+               case FMT_CASE(R, UNSIGNED_INT64):                                       return VK_FORMAT_R64_UINT;
+               case FMT_CASE(R, SIGNED_INT64):                                         return VK_FORMAT_R64_SINT;
                case FMT_CASE(R, FLOAT):                                                        return VK_FORMAT_R32_SFLOAT;
 
                case FMT_CASE(RG, UNSIGNED_INT32):                                      return VK_FORMAT_R32G32_UINT;
@@ -3314,12 +3316,14 @@ deUint32 getFormatComponentWidth (const VkFormat format, const deUint32 componen
                                return 32;
 
                        case tcu::TextureFormat::FLOAT64:
-                       return 64;
+                       case tcu::TextureFormat::UNSIGNED_INT64:
+                       case tcu::TextureFormat::SIGNED_INT64:
+                               return 64;
 
                        // Packed formats
                        case tcu::TextureFormat::UNORM_SHORT_4444:
                        case tcu::TextureFormat::UNSIGNED_SHORT_4444:
-                       return 4;
+                               return 4;
 
                        case tcu::TextureFormat::UNORM_SHORT_565:
                        case tcu::TextureFormat::UNSIGNED_SHORT_565:
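
Note: with the two hunks above, the new 64-bit channel types round-trip through both helpers. A minimal sketch (assuming vkImageUtil.hpp is in scope; the assert is purely illustrative):

    // Map a 64-bit tcu format to Vulkan and check the reported component width.
    const tcu::TextureFormat fmt   (tcu::TextureFormat::R, tcu::TextureFormat::UNSIGNED_INT64);
    const vk::VkFormat       vkFmt = vk::mapTextureFormat(fmt);      // VK_FORMAT_R64_UINT per the first hunk
    DE_ASSERT(vk::getFormatComponentWidth(vkFmt, 0u) == 64u);        // 64 per the second hunk
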
index 5a21588..9776289 100644 (file)
@@ -209,6 +209,36 @@ T getOperationInitialValue (const AtomicOperation op)
        }
 }
 
+template <>
+deInt64 getOperationInitialValue<deInt64>(const AtomicOperation op)
+{
+       switch (op)
+       {
+               // \note 0x000000BEFFFFFF18 is just an arbitrary nonzero value.
+               case ATOMIC_OPERATION_ADD:                              return 0x000000BEFFFFFF18;
+               case ATOMIC_OPERATION_INC:                              return 0x000000BEFFFFFF18;
+               case ATOMIC_OPERATION_SUB:                              return (1ull << 56) - 1;
+               case ATOMIC_OPERATION_DEC:                              return (1ull << 56) - 1;
+               case ATOMIC_OPERATION_MIN:                              return (1ull << 47) - 1;
+               case ATOMIC_OPERATION_MAX:                              return 0x000000BEFFFFFF18;
+               case ATOMIC_OPERATION_AND:                              return (1ull << 47) - 1;
+               case ATOMIC_OPERATION_OR:                               return 0x000000BEFFFFFF18;
+               case ATOMIC_OPERATION_XOR:                              return 0x000000BEFFFFFF18;
+               case ATOMIC_OPERATION_EXCHANGE:                 return 0x000000BEFFFFFF18;
+               case ATOMIC_OPERATION_COMPARE_EXCHANGE: return 0x000000BEFFFFFF18;
+               default:
+                       DE_ASSERT(false);
+                       return 0xFFFFFFFFFFFFFFFF;
+       }
+}
+
+template <>
+deUint64 getOperationInitialValue<deUint64>(const AtomicOperation op)
+{
+       return (deUint64)getOperationInitialValue<deInt64>(op);
+}
+
+
 template <typename T>
 static T getAtomicFuncArgument (const AtomicOperation  op,
                                                                const IVec3&                    invocationID,
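
The 64-bit seeds mirror the 32-bit specializations: AND and MIN start from (1ull << 47) - 1 so that every bit an invocation clears (or every smaller argument) stays observable, SUB and DEC start from (1ull << 56) - 1 so repeated decrements cannot underflow, and the remaining operations use an arbitrary nonzero pattern. A one-line sanity check of the AND seed (illustrative argument value):

    // Sketch: the AND seed keeps all low bits set, so it acts as an identity
    // over any bits an invocation argument can clear.
    const deInt64 seed = (deInt64(1) << 47) - 1;    // bits 0..46 set
    const deInt64 arg  = 0x0000BEEFCAFEll;          // hypothetical invocation argument
    DE_ASSERT((seed & arg) == arg);
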
@@ -318,6 +348,100 @@ static T computeBinaryAtomicOperationResult (const AtomicOperation op, const T a
        }
 }
 
+void AddFillReadShader (SourceCollections&                     sourceCollections,
+                                               const ImageType&                        imageType,
+                                               const tcu::TextureFormat&       format,
+                                               const string&                           type)
+{
+       const string    imageInCoord                    = getCoordStr(imageType, "gx", "gy", "gz");
+       const string    shaderImageFormatStr    = getShaderImageFormatQualifier(format);
+       const string    shaderImageTypeStr              = getShaderImageType(format, imageType);
+
+       const string fillShader =       "#version 450\n"
+                                                               "#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
+                                                               "#extension GL_EXT_shader_image_int64 : require\n"
+                                                               "precision highp " + shaderImageTypeStr + ";\n"
+                                                               "\n"
+                                                               "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+                                                               "layout (" + shaderImageFormatStr + ", binding=0) coherent uniform " + shaderImageTypeStr + " u_resultImage;\n"
+                                                               "\n"
+                                                               "layout(std430, binding = 1) buffer inputBuffer\n"
+                                                               "{\n"
+								"	" + type + " data[];\n"
+                                                               "} inBuffer;\n"
+                                                               "\n"
+                                                               "void main(void)\n"
+                                                               "{\n"
+                                                               "       int gx = int(gl_GlobalInvocationID.x);\n"
+                                                               "       int gy = int(gl_GlobalInvocationID.y);\n"
+                                                               "       int gz = int(gl_GlobalInvocationID.z);\n"
+								"	uint index = gx + (gy * gl_NumWorkGroups.x) + (gz * gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n"
+                                                               "       imageStore(u_resultImage, " + imageInCoord + ", i64vec4(inBuffer.data[index]));\n"
+                                                               "}\n";
+
+       const string readShader =       "#version 450\n"
+                                                               "#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
+                                                               "#extension GL_EXT_shader_image_int64 : require\n"
+                                                               "precision highp " + shaderImageTypeStr + ";\n"
+                                                               "\n"
+                                                               "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+                                                               "layout (" + shaderImageFormatStr + ", binding=0) coherent uniform " + shaderImageTypeStr + " u_resultImage;\n"
+                                                               "\n"
+                                                               "layout(std430, binding = 1) buffer outputBuffer\n"
+                                                               "{\n"
+                                                               "       " + type + " data[];\n"
+                                                               "} outBuffer;\n"
+                                                               "\n"
+                                                               "void main(void)\n"
+                                                               "{\n"
+                                                               "       int gx = int(gl_GlobalInvocationID.x);\n"
+                                                               "       int gy = int(gl_GlobalInvocationID.y);\n"
+                                                               "       int gz = int(gl_GlobalInvocationID.z);\n"
+								"	uint index = gx + (gy * gl_NumWorkGroups.x) + (gz * gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n"
+                                                               "       outBuffer.data[index] = imageLoad(u_resultImage, " + imageInCoord + ").x;\n"
+                                                               "}\n";
+
+
+       if ((imageType != IMAGE_TYPE_1D) &&
+               (imageType != IMAGE_TYPE_1D_ARRAY) &&
+               (imageType != IMAGE_TYPE_BUFFER))
+       {
+               const string gvec4 = isUintFormat(mapTextureFormat(format)) ? "u64vec4" : "i64vec4";
+
+               const string readShaderResidency  = "#version 450\n"
+                                                                                       "#extension GL_ARB_sparse_texture2 : require\n"
+                                                                                       "#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
+                                                                                       "#extension GL_EXT_shader_image_int64 : require\n"
+                                                                                       "precision highp " + shaderImageTypeStr + ";\n"
+                                                                                       "\n"
+                                                                                       "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+                                                                                       "layout (" + shaderImageFormatStr + ", binding=0) coherent uniform " + shaderImageTypeStr + " u_resultImage;\n"
+                                                                                       "\n"
+                                                                                       "layout(std430, binding = 1) buffer outputBuffer\n"
+                                                                                       "{\n"
+                                                                                       "       " + type + " data[];\n"
+                                                                                       "} outBuffer;\n"
+                                                                                       "\n"
+                                                                                       "void main(void)\n"
+                                                                                       "{\n"
+                                                                                       "       int gx = int(gl_GlobalInvocationID.x);\n"
+                                                                                       "       int gy = int(gl_GlobalInvocationID.y);\n"
+                                                                                       "       int gz = int(gl_GlobalInvocationID.z);\n"
+																	"	uint index = gx + (gy * gl_NumWorkGroups.x) + (gz * gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n"
+                                                                                       "       outBuffer.data[index] = imageLoad(u_resultImage, " + imageInCoord + ").x;\n"
+                                                                                       "       " + gvec4 + " sparseValue;\n"
+                                                                                       "       sparseImageLoadARB(u_resultImage, " + imageInCoord + ", sparseValue);\n"
+                                                                                       "       if (outBuffer.data[index] != sparseValue.x)\n"
+                                                                                       "               outBuffer.data[index] = " + gvec4 + "(1234).x;\n"
+                                                                                       "}\n";
+
+               sourceCollections.glslSources.add("readShaderResidency") << glu::ComputeSource(readShaderResidency.c_str()) << vk::ShaderBuildOptions(sourceCollections.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+       }
+
+       sourceCollections.glslSources.add("fillShader") << glu::ComputeSource(fillShader.c_str()) << vk::ShaderBuildOptions(sourceCollections.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+       sourceCollections.glslSources.add("readShader") << glu::ComputeSource(readShader.c_str()) << vk::ShaderBuildOptions(sourceCollections.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+}
+
 static bool isSupportedFeatureTransfer (const Context& context, const VkFormat& format)
 {
        const VkFormatProperties        formatProperties        = getPhysicalDeviceFormatProperties(context.getInstanceInterface(),
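
The fill and read shaders above address their storage buffer with a flat index derived from gl_NumWorkGroups. A host-side mirror of that mapping, useful when preparing or checking the buffer contents, could look like this (a sketch; gridSize stands for the dispatch dimensions, matching gl_NumWorkGroups in the shaders):

    // Host-side counterpart of the shaders' "index" computation (sketch).
    inline deUint32 flatIndex (const tcu::UVec3& gridSize, deUint32 x, deUint32 y, deUint32 z)
    {
        return x + y * gridSize.x() + z * gridSize.x() * gridSize.y();
    }
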
@@ -336,15 +460,30 @@ static void initDataForImage (const VkDevice                      device,
                                                          Buffer&                                       buffer)
 {
        Allocation&                             bufferAllocation        = buffer.getAllocation();
+       const VkFormat                  imageFormat                     = mapTextureFormat(format);
        tcu::PixelBufferAccess  pixelBuffer                     (format, gridSize.x(), gridSize.y(), gridSize.z(), bufferAllocation.getHostPtr());
 
-       const tcu::IVec4 initialValue(getOperationInitialValue<deInt32>(operation));
+       if (imageFormat == VK_FORMAT_R64_UINT || imageFormat == VK_FORMAT_R64_SINT)
+       {
+               const deInt64 initialValue(getOperationInitialValue<deInt64>(operation));
 
-       for (deUint32 z = 0; z < gridSize.z(); z++)
-       for (deUint32 y = 0; y < gridSize.y(); y++)
-       for (deUint32 x = 0; x < gridSize.x(); x++)
+               for (deUint32 z = 0; z < gridSize.z(); z++)
+               for (deUint32 y = 0; y < gridSize.y(); y++)
+               for (deUint32 x = 0; x < gridSize.x(); x++)
+               {
+                       *((deInt64*)pixelBuffer.getPixelPtr(x, y, z)) = initialValue;
+               }
+       }
+       else
        {
-               pixelBuffer.setPixel(initialValue, x, y, z);
+               const tcu::IVec4 initialValue(getOperationInitialValue<deInt32>(operation));
+
+               for (deUint32 z = 0; z < gridSize.z(); z++)
+               for (deUint32 y = 0; y < gridSize.y(); y++)
+               for (deUint32 x = 0; x < gridSize.x(); x++)
+               {
+                       pixelBuffer.setPixel(initialValue, x, y, z);
+               }
        }
 
        flushAlloc(deviceInterface, device, bufferAllocation);
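
initDataForImage takes the raw-pointer path for the R64 formats because tcu::PixelBufferAccess::setPixel() works on vectors of 32-bit components, which would silently drop the upper half of a 64-bit seed. An illustration of the loss the branch avoids:

    // Sketch: a 32-bit round trip destroys the SUB/DEC seed written above.
    const deInt64 seed = (deInt64(1) << 56) - 1;
    DE_ASSERT(deInt64(deInt32(seed)) != seed);   // bits 32..55 would be lost
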
@@ -423,6 +562,22 @@ void BinaryAtomicEndResultCase::checkSupport (Context& context) const
                if ((formatProperties.optimalTilingFeatures & requiredFormatSupport) != requiredFormatSupport)
                        TCU_FAIL("VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT and VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT must be supported");
        }
+
+       if (format == VK_FORMAT_R64_UINT || format == VK_FORMAT_R64_SINT)
+       {
+               const VkFormatFeatureFlags      requisiteSupportR64 = VkFormatFeatureFlags(VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT |
+                                                                                                                                                               VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT);
+
+               context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");
+
+               if (!context.getShaderImageAtomicInt64FeaturesEXT().shaderImageInt64Atomics)
+               {
+                       TCU_THROW(NotSupportedError, "shaderImageInt64Atomics is not supported");
+               }
+
+               if ((formatProperties.optimalTilingFeatures & requisiteSupportR64) != requisiteSupportR64)
+                       TCU_FAIL("VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT and VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT must be supported");
+       }
 }
 
 void BinaryAtomicEndResultCase::initPrograms (SourceCollections& sourceCollections) const
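
For reference, the feature bits consulted via context.getShaderImageAtomicInt64FeaturesEXT() map to the following query in a plain Vulkan application (a sketch; physicalDevice is assumed to be a valid handle on an implementation advertising VK_EXT_shader_image_atomic_int64):

    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT int64Features = {};
    int64Features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT;

    VkPhysicalDeviceFeatures2 features2 = {};
    features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features2.pNext = &int64Features;

    vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
    // int64Features.shaderImageInt64Atomics  -> r64i/r64ui atomics on storage images
    // int64Features.sparseImageInt64Atomics  -> the same on sparse images (residency path below)
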
@@ -435,6 +590,11 @@ void BinaryAtomicEndResultCase::initPrograms (SourceCollections& sourceCollectio
                                                                          (uintFormat ? "uint64_t" : intFormat ? "int64_t" : "double") :
                                                                          (uintFormat ? "uint" : intFormat ? "int" : "float"));
 
+       if (imageFormat == VK_FORMAT_R64_UINT || imageFormat == VK_FORMAT_R64_SINT)
+       {
+               AddFillReadShader(sourceCollections, m_imageType, m_format, type);
+       }
+
        if (isSpirvAtomicOperation(m_operation))
        {
                const CaseVariant                                       caseVariant{m_imageType, m_format.order, m_format.type, CaseVariant::CHECK_TYPE_END_RESULTS};
@@ -450,6 +610,7 @@ void BinaryAtomicEndResultCase::initPrograms (SourceCollections& sourceCollectio
        else
        {
                const string    versionDecl                             = glu::getGLSLVersionDeclaration(m_glslVersion);
+
                const UVec3             gridSize                                = getShaderGridSize(m_imageType, m_imageSize);
                const string    atomicCoord                             = getCoordStr(m_imageType, "gx % " + toString(gridSize.x()), "gy", "gz");
 
@@ -465,20 +626,26 @@ void BinaryAtomicEndResultCase::initPrograms (SourceCollections& sourceCollectio
                const string    shaderImageTypeStr              = getShaderImageType(m_format, m_imageType);
                const string    extensions                              = "#extension GL_EXT_shader_atomic_float : enable\n#extension GL_KHR_memory_scope_semantics : enable  ";
 
-               string source = versionDecl + "\n" + extensions + "\n"
-                                               "precision highp " + shaderImageTypeStr + ";\n";
+               string source = versionDecl + "\n" + extensions + "\n";
 
-               source +=               "\n"
-                                               "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
-                                               "layout (" + shaderImageFormatStr + ", binding=0) coherent uniform " + shaderImageTypeStr + " u_resultImage;\n"
-                                               "\n"
-                                               "void main (void)\n"
-                                               "{\n"
-                                               "       int gx = int(gl_GlobalInvocationID.x);\n"
-                                               "       int gy = int(gl_GlobalInvocationID.y);\n"
-                                               "       int gz = int(gl_GlobalInvocationID.z);\n"
-                                               "       " + atomicInvocation + ";\n"
-                                               "}\n";
+               if (64 == componentWidth)
+               {
+                       source +=       "#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
+                                               "#extension GL_EXT_shader_image_int64 : require\n";
+               }
+
+               source +=       "precision highp " + shaderImageTypeStr + ";\n"
+                                       "\n"
+                                       "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+                                       "layout (" + shaderImageFormatStr + ", binding=0) coherent uniform " + shaderImageTypeStr + " u_resultImage;\n"
+                                       "\n"
+                                       "void main (void)\n"
+                                       "{\n"
+                                       "       int gx = int(gl_GlobalInvocationID.x);\n"
+                                       "       int gy = int(gl_GlobalInvocationID.y);\n"
+                                       "       int gz = int(gl_GlobalInvocationID.z);\n"
+                                       "       " + atomicInvocation + ";\n"
+                                       "}\n";
 
                sourceCollections.glslSources.add(m_name) << glu::ComputeSource(source.c_str());
        }
@@ -556,6 +723,22 @@ void BinaryAtomicIntermValuesCase::checkSupport (Context& context) const
                if ((formatProperties.optimalTilingFeatures & requiredFormatSupport) != requiredFormatSupport)
                        TCU_FAIL("VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT and VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT must be supported");
        }
+
+       if (format == VK_FORMAT_R64_UINT || format == VK_FORMAT_R64_SINT)
+       {
+               const VkFormatFeatureFlags      requisiteSupportR64 = VkFormatFeatureFlags(VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT |
+                                                                                                                                                               VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT);
+
+               context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");
+
+               if (!context.getShaderImageAtomicInt64FeaturesEXT().shaderImageInt64Atomics)
+               {
+                       TCU_THROW(NotSupportedError, "shaderImageInt64Atomics is not supported");
+               }
+
+               if ((formatProperties.optimalTilingFeatures & requisiteSupportR64) != requisiteSupportR64)
+                       TCU_FAIL("VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT and VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT must be supported");
+       }
 }
 
 void BinaryAtomicIntermValuesCase::initPrograms (SourceCollections& sourceCollections) const
@@ -568,6 +751,11 @@ void BinaryAtomicIntermValuesCase::initPrograms (SourceCollections& sourceCollec
                                                                          (uintFormat ? "uint64_t" : intFormat ? "int64_t" : "double") :
                                                                          (uintFormat ? "uint" : intFormat ? "int" : "float"));
 
+       if (imageFormat == VK_FORMAT_R64_UINT || imageFormat == VK_FORMAT_R64_SINT)
+       {
+               AddFillReadShader(sourceCollections, m_imageType, m_format, type);
+       }
+
        if (isSpirvAtomicOperation(m_operation))
        {
                const CaseVariant                                       caseVariant{m_imageType, m_format.order, m_format.type, CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS};
@@ -601,20 +789,26 @@ void BinaryAtomicIntermValuesCase::initPrograms (SourceCollections& sourceCollec
                const string    extensions                              = "#extension GL_EXT_shader_atomic_float : enable\n#extension GL_KHR_memory_scope_semantics : enable  ";
 
                string source = versionDecl + "\n" + extensions + "\n"
-                                               "precision highp " + shaderImageTypeStr + ";\n"
                                                "\n";
 
-               source +=       "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
-                                       "layout (" + shaderImageFormatStr + ", binding=0) coherent uniform " + shaderImageTypeStr + " u_resultImage;\n"
-                                       "layout (" + shaderImageFormatStr + ", binding=1) writeonly uniform " + shaderImageTypeStr + " u_intermValuesImage;\n"
-                                       "\n"
-                                       "void main (void)\n"
-                                       "{\n"
-                                       "       int gx = int(gl_GlobalInvocationID.x);\n"
-                                       "       int gy = int(gl_GlobalInvocationID.y);\n"
-                                       "       int gz = int(gl_GlobalInvocationID.z);\n"
-                                       "       imageStore(u_intermValuesImage, " + invocationCoord + ", " + colorVecTypeName + "(" + atomicInvocation + "));\n"
-                                       "}\n";
+               if (64 == componentWidth)
+               {
+                       source +=       "#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
+                                               "#extension GL_EXT_shader_image_int64 : require\n";
+               }
+
+		source +=	"precision highp " + shaderImageTypeStr + ";\n"
+					"layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+					"layout (" + shaderImageFormatStr + ", binding=0) coherent uniform " + shaderImageTypeStr + " u_resultImage;\n"
+					"layout (" + shaderImageFormatStr + ", binding=1) writeonly uniform " + shaderImageTypeStr + " u_intermValuesImage;\n"
+					"\n"
+					"void main (void)\n"
+					"{\n"
+					"	int gx = int(gl_GlobalInvocationID.x);\n"
+					"	int gy = int(gl_GlobalInvocationID.y);\n"
+					"	int gz = int(gl_GlobalInvocationID.z);\n"
+					"	imageStore(u_intermValuesImage, " + invocationCoord + ", " + colorVecTypeName + "(" + atomicInvocation + "));\n"
+					"}\n";
 
                sourceCollections.glslSources.add(m_name) << glu::ComputeSource(source.c_str());
        }
@@ -708,7 +902,9 @@ tcu::TestStatus     BinaryAtomicInstanceBase::iterate (void)
        const VkDeviceSize              imageSizeInBytes        = tcu::getPixelSize(m_format) * getNumPixels(m_imageType, m_imageSize);
        const VkDeviceSize              outBuffSizeInBytes      = getOutputBufferSize();
        const VkFormat                  imageFormat                     = mapTextureFormat(m_format);
-       const bool                              isSupportedTransfer     = isSupportedFeatureTransfer(m_context, imageFormat);
+       const bool                              isSupportedTransfer     = (imageFormat != VK_FORMAT_R64_SINT) &&
+                                                                                                 (imageFormat != VK_FORMAT_R64_UINT) &&
+                                                                                                 isSupportedFeatureTransfer(m_context, imageFormat);
        const bool                              isTexelBuffer           = (m_imageType == IMAGE_TYPE_BUFFER);
 
        if (!isTexelBuffer)
@@ -725,7 +921,7 @@ tcu::TestStatus     BinaryAtomicInstanceBase::iterate (void)
                                                                                                        makeBufferCreateInfo(imageSizeInBytes,
                                                                                                                                                 VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
                                                                                                                                                 VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
-                                                                                                                                                (isTexelBuffer ? VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT : VkBufferUsageFlagBits(0u))),
+                                                                                                                                                (isTexelBuffer ? VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT : static_cast<VkBufferUsageFlagBits>(0u))),
                                                                                                        MemoryRequirement::HostVisible));
 
        // Fill in buffer with initial data used for image.
@@ -738,7 +934,7 @@ tcu::TestStatus     BinaryAtomicInstanceBase::iterate (void)
                                                                                                        makeBufferCreateInfo(outBuffSizeInBytes,
                                                                                                                                                 VK_BUFFER_USAGE_TRANSFER_DST_BIT |
                                                                                                                                                 VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
-                                                                                                                                                (isTexelBuffer ? VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT : VkBufferUsageFlagBits(0u))),
+                                                                                                                                                (isTexelBuffer ? VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT : static_cast<VkBufferUsageFlagBits>(0u))),
                                                                                                        MemoryRequirement::HostVisible));
 
        if (!isTexelBuffer)
@@ -786,7 +982,20 @@ tcu::TestStatus    BinaryAtomicInstanceBase::iterate (void)
                pipelineLayoutFillImage = makePipelineLayout(deviceInterface, device, *m_descriptorSetLayoutNoTransfer);
                pipelineFillImage               = makeComputePipeline(deviceInterface, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
 
-               shaderModuleReadImage   = createShaderModule(deviceInterface, device, m_context.getBinaryCollection().get("readShader"), 0);
+               const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
+
+               if ((deviceFeatures.shaderResourceResidency == VK_TRUE) &&
+                       m_context.getShaderImageAtomicInt64FeaturesEXT().sparseImageInt64Atomics &&
+                       (m_imageType != IMAGE_TYPE_1D) &&
+                       (m_imageType != IMAGE_TYPE_1D_ARRAY) &&
+                       (m_imageType != IMAGE_TYPE_BUFFER))
+               {
+                       shaderModuleReadImage = createShaderModule(deviceInterface, device, m_context.getBinaryCollection().get("readShaderResidency"), 0);
+               }
+               else
+               {
+                       shaderModuleReadImage = createShaderModule(deviceInterface, device, m_context.getBinaryCollection().get("readShader"), 0);
+               }
                pipelineLayoutReadImage = makePipelineLayout(deviceInterface, device, *m_descriptorSetLayoutNoTransfer);
                pipelineReadImage               = makeComputePipeline(deviceInterface, device, *pipelineLayoutFillImage, *shaderModuleReadImage);
        }
@@ -856,7 +1065,7 @@ tcu::TestStatus    BinaryAtomicInstanceBase::iterate (void)
 
        invalidateAlloc(deviceInterface, device, outputBufferAllocation);
 
-       if (verifyResult(outputBufferAllocation, false))
+       if (verifyResult(outputBufferAllocation, (imageFormat == VK_FORMAT_R64_UINT || imageFormat == VK_FORMAT_R64_SINT)))
                return tcu::TestStatus::pass("Comparison succeeded");
        else
                return tcu::TestStatus::fail("Comparison failed");
@@ -1613,6 +1822,8 @@ tcu::TestCaseGroup* createImageAtomicOperationTests (tcu::TestContext& testCtx)
                tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::UNSIGNED_INT32),
                tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT32),
                tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::FLOAT),
+               tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::UNSIGNED_INT64),
+               tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT64)
        };
 
        for (deUint32 operationI = 0; operationI < ATOMIC_OPERATION_LAST; operationI++)
index 59da84e..892bfa4 100644 (file)
@@ -2757,6 +2757,3747 @@ OpReturn
 OpFunctionEnd
 )";
 
+const std::string kShader_image_buffer_r32ui_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp uimageBuffer;
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r32ui, binding=0) coherent uniform uimageBuffer u_resultImage;
+;
+;void main (void)
+;{
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, gx % 64, uint(gx*gx + gy*gy + gz*gz));
+;}
+;
+; SPIR-V
+; Version: 1.0
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 50
+; Schema: 0
+OpCapability Shader
+OpCapability ImageBuffer
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %30 DescriptorSet 0
+OpDecorate %30 Binding 0
+OpDecorate %30 Coherent
+OpDecorate %49 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeImage %9 Buffer 0 0 0 2 R32ui
+%29 = OpTypePointer UniformConstant %28
+%30 = OpVariable %29 UniformConstant
+%32 = OpConstant %6 64
+%46 = OpTypePointer Image %9
+%49 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%31 = OpLoad %6 %8
+%33 = OpSMod %6 %31 %32
+%34 = OpLoad %6 %8
+%35 = OpLoad %6 %8
+%36 = OpIMul %6 %34 %35
+%37 = OpLoad %6 %18
+%38 = OpLoad %6 %18
+%39 = OpIMul %6 %37 %38
+%40 = OpIAdd %6 %36 %39
+%41 = OpLoad %6 %23
+%42 = OpLoad %6 %23
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%45 = OpBitcast %9 %44
+%47 = OpImageTexelPointer %46 %30 %33 %13
+%48 = ${OPNAME} %9 %47 %19 %13 ${LASTARG:default=%45}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_image_buffer_r32ui_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp uimageBuffer;
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r32ui, binding=0) coherent uniform uimageBuffer u_resultImage;
+;layout (r32ui, binding=1) writeonly uniform uimageBuffer u_intermValuesImage;
+;
+;void main (void)
+;{
+;   int gx = int(gl_GlobalInvocationID.x);
+;   int gy = int(gl_GlobalInvocationID.y);
+;   int gz = int(gl_GlobalInvocationID.z);
+;   imageStore(u_intermValuesImage, gx, uvec4(imageAtomicAdd(u_resultImage, gx % 64, uint(gx*gx + gy*gy + gz*gz))));
+;}
+;
+; SPIR-V
+; Version: 1.0
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 55
+; Schema: 0
+OpCapability Shader
+OpCapability ImageBuffer
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %30 DescriptorSet 0
+OpDecorate %30 Binding 1
+OpDecorate %30 NonReadable
+OpDecorate %33 DescriptorSet 0
+OpDecorate %33 Binding 0
+OpDecorate %33 Coherent
+OpDecorate %54 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeImage %9 Buffer 0 0 0 2 R32ui
+%29 = OpTypePointer UniformConstant %28
+%30 = OpVariable %29 UniformConstant
+%33 = OpVariable %29 UniformConstant
+%35 = OpConstant %6 64
+%49 = OpTypePointer Image %9
+%52 = OpTypeVector %9 4
+%54 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%31 = OpLoad %28 %30
+%32 = OpLoad %6 %8
+%34 = OpLoad %6 %8
+%36 = OpSMod %6 %34 %35
+%37 = OpLoad %6 %8
+%38 = OpLoad %6 %8
+%39 = OpIMul %6 %37 %38
+%40 = OpLoad %6 %18
+%41 = OpLoad %6 %18
+%42 = OpIMul %6 %40 %41
+%43 = OpIAdd %6 %39 %42
+%44 = OpLoad %6 %23
+%45 = OpLoad %6 %23
+%46 = OpIMul %6 %44 %45
+%47 = OpIAdd %6 %43 %46
+%48 = OpBitcast %9 %47
+%50 = OpImageTexelPointer %49 %33 %36 %13
+%51 = ${OPNAME} %9 %50 %19 %13 ${LASTARG:default=%48}
+%53 = OpCompositeConstruct %52 %51 %51 %51 %51
+OpImageWrite %31 %32 %53
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_image_buffer_r32i_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp iimageBuffer;
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r32i, binding=0) coherent uniform iimageBuffer u_resultImage;
+;
+;void main (void)
+;{
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, gx % 64, int(gx*gx + gy*gy + gz*gz));
+;}
+;
+; SPIR-V
+; Version: 1.0
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 49
+; Schema: 0
+OpCapability Shader
+OpCapability ImageBuffer
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %30 DescriptorSet 0
+OpDecorate %30 Binding 0
+OpDecorate %30 Coherent
+OpDecorate %48 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeImage %6 Buffer 0 0 0 2 R32i
+%29 = OpTypePointer UniformConstant %28
+%30 = OpVariable %29 UniformConstant
+%32 = OpConstant %6 64
+%45 = OpTypePointer Image %6
+%48 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%31 = OpLoad %6 %8
+%33 = OpSMod %6 %31 %32
+%34 = OpLoad %6 %8
+%35 = OpLoad %6 %8
+%36 = OpIMul %6 %34 %35
+%37 = OpLoad %6 %18
+%38 = OpLoad %6 %18
+%39 = OpIMul %6 %37 %38
+%40 = OpIAdd %6 %36 %39
+%41 = OpLoad %6 %23
+%42 = OpLoad %6 %23
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%46 = OpImageTexelPointer %45 %30 %33 %13
+%47 = ${OPNAME} %6 %46 %19 %13 ${LASTARG:default=%44}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_image_buffer_r32i_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp iimageBuffer;
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r32i, binding=0) coherent uniform iimageBuffer u_resultImage;
+;layout (r32i, binding=1) writeonly uniform iimageBuffer u_intermValuesImage;
+;
+;void main (void)
+;{
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, gx, ivec4(imageAtomicAdd(u_resultImage, gx % 64, int(gx*gx + gy*gy + gz*gz))));
+;}
+;
+; SPIR-V
+; Version: 1.0
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 54
+; Schema: 0
+OpCapability Shader
+OpCapability ImageBuffer
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %30 DescriptorSet 0
+OpDecorate %30 Binding 1
+OpDecorate %30 NonReadable
+OpDecorate %33 DescriptorSet 0
+OpDecorate %33 Binding 0
+OpDecorate %33 Coherent
+OpDecorate %53 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeImage %6 Buffer 0 0 0 2 R32i
+%29 = OpTypePointer UniformConstant %28
+%30 = OpVariable %29 UniformConstant
+%33 = OpVariable %29 UniformConstant
+%35 = OpConstant %6 64
+%48 = OpTypePointer Image %6
+%51 = OpTypeVector %6 4
+%53 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%31 = OpLoad %28 %30
+%32 = OpLoad %6 %8
+%34 = OpLoad %6 %8
+%36 = OpSMod %6 %34 %35
+%37 = OpLoad %6 %8
+%38 = OpLoad %6 %8
+%39 = OpIMul %6 %37 %38
+%40 = OpLoad %6 %18
+%41 = OpLoad %6 %18
+%42 = OpIMul %6 %40 %41
+%43 = OpIAdd %6 %39 %42
+%44 = OpLoad %6 %23
+%45 = OpLoad %6 %23
+%46 = OpIMul %6 %44 %45
+%47 = OpIAdd %6 %43 %46
+%49 = OpImageTexelPointer %48 %33 %36 %13
+%50 = ${OPNAME} %6 %49 %19 %13 ${LASTARG:default=%47}
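+; The atomic's return value is replicated into all four components and written
+; to the NonReadable intermediate-values image at this invocation's own index.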
+%52 = OpCompositeConstruct %51 %50 %50 %50 %50
+OpImageWrite %31 %32 %52
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_1d_r64ui_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimage1D;
+;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout(r64ui, binding = 0) coherent uniform uimage1D u_resultImage;
+; void main(void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, gx % 64, uint(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 53
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Image1D
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %52 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 1D 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%46 = OpTypeInt 64 1
+%49 = OpTypePointer Image %28
+%52 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %8
+%36 = OpLoad %6 %8
+%37 = OpIMul %6 %35 %36
+%38 = OpLoad %6 %18
+%39 = OpLoad %6 %18
+%40 = OpIMul %6 %38 %39
+%41 = OpIAdd %6 %37 %40
+%42 = OpLoad %6 %23
+%43 = OpLoad %6 %23
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
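+; The 32-bit sum is sign-extended to int64 with OpSConvert and then bitcast
+; to uint64 so that the atomic operand matches the R64ui texel type.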
+%47 = OpSConvert %46 %45
+%48 = OpBitcast %28 %47
+%50 = OpImageTexelPointer %49 %31 %34 %13
+%51 = ${OPNAME} %28 %50 %19 %13 ${LASTARG:default=%48}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_1d_r64ui_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimage1D;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout(r64ui, binding = 0) coherent uniform uimage1D u_resultImage;
+; layout(r64ui, binding = 1) writeonly uniform uimage1D u_intermValuesImage;
+;
+; void main(void)
+; {
+;   int gx = int(gl_GlobalInvocationID.x);
+;   int gy = int(gl_GlobalInvocationID.y);
+;   int gz = int(gl_GlobalInvocationID.z);
+;   imageStore(u_intermValuesImage, gx, u64vec4(imageAtomicAdd(u_resultImage, gx % 64, uint(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 58
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Image1D
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %34 DescriptorSet 0
+OpDecorate %34 Binding 0
+OpDecorate %34 Coherent
+OpDecorate %57 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 1D 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%34 = OpVariable %30 UniformConstant
+%36 = OpConstant %6 64
+%49 = OpTypeInt 64 1
+%52 = OpTypePointer Image %28
+%55 = OpTypeVector %28 4
+%57 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%35 = OpLoad %6 %8
+%37 = OpSMod %6 %35 %36
+%38 = OpLoad %6 %8
+%39 = OpLoad %6 %8
+%40 = OpIMul %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpLoad %6 %18
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%45 = OpLoad %6 %23
+%46 = OpLoad %6 %23
+%47 = OpIMul %6 %45 %46
+%48 = OpIAdd %6 %44 %47
+%50 = OpSConvert %49 %48
+%51 = OpBitcast %28 %50
+%53 = OpImageTexelPointer %52 %34 %37 %13
+%54 = ${OPNAME} %28 %53 %19 %13 ${LASTARG:default=%51}
+%56 = OpCompositeConstruct %55 %54 %54 %54 %54
+OpImageWrite %32 %33 %56
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_1d_r64i_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp iimage1D;
+;#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+;#extension GL_EXT_shader_image_int64 : require
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r64i, binding=0) coherent uniform iimage1D u_resultImage;
+;
+;void main (void)
+;{
+;   int gx = int(gl_GlobalInvocationID.x);
+;   int gy = int(gl_GlobalInvocationID.y);
+;   int gz = int(gl_GlobalInvocationID.z);
+;   imageAtomicAdd(u_resultImage, gx % 64, int(gx*gx + gy*gy + gz*gz));
+;}
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 51
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Image1D
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %50 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 1D 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%47 = OpTypePointer Image %28
+%50 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %8
+%36 = OpLoad %6 %8
+%37 = OpIMul %6 %35 %36
+%38 = OpLoad %6 %18
+%39 = OpLoad %6 %18
+%40 = OpIMul %6 %38 %39
+%41 = OpIAdd %6 %37 %40
+%42 = OpLoad %6 %23
+%43 = OpLoad %6 %23
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
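+; For the signed R64i format a single OpSConvert is enough; no bitcast is
+; needed because the texel type is already a signed 64-bit integer.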
+%46 = OpSConvert %28 %45
+%48 = OpImageTexelPointer %47 %31 %34 %13
+%49 = ${OPNAME} %28 %48 %19 %13 ${LASTARG:default=%46}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_1d_r64i_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp iimage1D;
+;#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+;#extension GL_EXT_shader_image_int64 : require
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r64i, binding=0) coherent uniform iimage1D u_resultImage;
+;layout (r64i, binding=1) writeonly uniform iimage1D u_intermValuesImage;
+;
+;void main (void)
+;{
+;   int gx = int(gl_GlobalInvocationID.x);
+;   int gy = int(gl_GlobalInvocationID.y);
+;   int gz = int(gl_GlobalInvocationID.z);
+;   imageStore(u_intermValuesImage, gx, i64vec4(imageAtomicAdd(u_resultImage, gx % 64, int(gx*gx + gy*gy + gz*gz))));
+;}
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 56
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Image1D
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %34 DescriptorSet 0
+OpDecorate %34 Binding 0
+OpDecorate %34 Coherent
+OpDecorate %55 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 1D 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%34 = OpVariable %30 UniformConstant
+%36 = OpConstant %6 64
+%50 = OpTypePointer Image %28
+%53 = OpTypeVector %28 4
+%55 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%35 = OpLoad %6 %8
+%37 = OpSMod %6 %35 %36
+%38 = OpLoad %6 %8
+%39 = OpLoad %6 %8
+%40 = OpIMul %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpLoad %6 %18
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%45 = OpLoad %6 %23
+%46 = OpLoad %6 %23
+%47 = OpIMul %6 %45 %46
+%48 = OpIAdd %6 %44 %47
+%49 = OpSConvert %28 %48
+%51 = OpImageTexelPointer %50 %34 %37 %13
+%52 = ${OPNAME} %28 %51 %19 %13 ${LASTARG:default=%49}
+%54 = OpCompositeConstruct %53 %52 %52 %52 %52
+OpImageWrite %32 %33 %54
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_1d_array_r64ui_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp uimage1DArray;
+;#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+;#extension GL_EXT_shader_image_int64 : require
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r64ui, binding=0) coherent uniform uimage1DArray u_resultImage;
+;
+;void main (void)
+;{
+;   int gx = int(gl_GlobalInvocationID.x);
+;   int gy = int(gl_GlobalInvocationID.y);
+;   int gz = int(gl_GlobalInvocationID.z);
+;   imageAtomicAdd(u_resultImage, ivec2(gx % 64,gy), uint(gx*gx + gy*gy + gz*gz));
+;}
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 56
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Image1D
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %55 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 1D 0 1 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%36 = OpTypeVector %6 2
+%49 = OpTypeInt 64 1
+%52 = OpTypePointer Image %28
+%55 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
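+; For an arrayed 1D image the coordinate is an ivec2: the wrapped x plus the
+; layer index taken from gy.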
+%37 = OpCompositeConstruct %36 %34 %35
+%38 = OpLoad %6 %8
+%39 = OpLoad %6 %8
+%40 = OpIMul %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpLoad %6 %18
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%45 = OpLoad %6 %23
+%46 = OpLoad %6 %23
+%47 = OpIMul %6 %45 %46
+%48 = OpIAdd %6 %44 %47
+%50 = OpSConvert %49 %48
+%51 = OpBitcast %28 %50
+%53 = OpImageTexelPointer %52 %31 %37 %13
+%54 = ${OPNAME} %28 %53 %19 %13 ${LASTARG:default=%51}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_1d_array_r64ui_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp uimage1DArray;
+;#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+;#extension GL_EXT_shader_image_int64 : require
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r64ui, binding=0) coherent uniform uimage1DArray u_resultImage;
+;layout (r64ui, binding=1) writeonly uniform uimage1DArray u_intermValuesImage;
+;
+;void main (void)
+;{
+;   int gx = int(gl_GlobalInvocationID.x);
+;   int gy = int(gl_GlobalInvocationID.y);
+;   int gz = int(gl_GlobalInvocationID.z);
+;   imageStore(u_intermValuesImage, ivec2(gx,gy), u64vec4(imageAtomicAdd(u_resultImage, ivec2(gx % 64,gy), uint(gx*gx + gy*gy + gz*gz))));
+;}
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 63
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Image1D
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %37 DescriptorSet 0
+OpDecorate %37 Binding 0
+OpDecorate %37 Coherent
+OpDecorate %62 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 1D 0 1 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%35 = OpTypeVector %6 2
+%37 = OpVariable %30 UniformConstant
+%39 = OpConstant %6 64
+%54 = OpTypeInt 64 1
+%57 = OpTypePointer Image %28
+%60 = OpTypeVector %28 4
+%62 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%36 = OpCompositeConstruct %35 %33 %34
+%38 = OpLoad %6 %8
+%40 = OpSMod %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpCompositeConstruct %35 %40 %41
+%43 = OpLoad %6 %8
+%44 = OpLoad %6 %8
+%45 = OpIMul %6 %43 %44
+%46 = OpLoad %6 %18
+%47 = OpLoad %6 %18
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%50 = OpLoad %6 %23
+%51 = OpLoad %6 %23
+%52 = OpIMul %6 %50 %51
+%53 = OpIAdd %6 %49 %52
+%55 = OpSConvert %54 %53
+%56 = OpBitcast %28 %55
+%58 = OpImageTexelPointer %57 %37 %42 %13
+%59 = ${OPNAME} %28 %58 %19 %13 ${LASTARG:default=%56}
+%61 = OpCompositeConstruct %60 %59 %59 %59 %59
+OpImageWrite %32 %36 %61
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_1d_array_r64i_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimage1DArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimage1DArray u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec2(gx % 64,gy), int(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 54
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Image1D
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %53 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 1D 0 1 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%36 = OpTypeVector %6 2
+%50 = OpTypePointer Image %28
+%53 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%37 = OpCompositeConstruct %36 %34 %35
+%38 = OpLoad %6 %8
+%39 = OpLoad %6 %8
+%40 = OpIMul %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpLoad %6 %18
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%45 = OpLoad %6 %23
+%46 = OpLoad %6 %23
+%47 = OpIMul %6 %45 %46
+%48 = OpIAdd %6 %44 %47
+%49 = OpSConvert %28 %48
+%51 = OpImageTexelPointer %50 %31 %37 %13
+%52 = ${OPNAME} %28 %51 %19 %13 ${LASTARG:default=%49}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_1d_array_r64i_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimage1DArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimage1DArray u_resultImage;
+; layout (r64i, binding=1) writeonly uniform iimage1DArray u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec2(gx, gy), i64vec4(imageAtomicAdd(u_resultImage, ivec2(gx % 64, gy), int(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 61
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Image1D
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %37 DescriptorSet 0
+OpDecorate %37 Binding 0
+OpDecorate %37 Coherent
+OpDecorate %60 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 1D 0 1 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%35 = OpTypeVector %6 2
+%37 = OpVariable %30 UniformConstant
+%39 = OpConstant %6 64
+%55 = OpTypePointer Image %28
+%58 = OpTypeVector %28 4
+%60 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%36 = OpCompositeConstruct %35 %33 %34
+%38 = OpLoad %6 %8
+%40 = OpSMod %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpCompositeConstruct %35 %40 %41
+%43 = OpLoad %6 %8
+%44 = OpLoad %6 %8
+%45 = OpIMul %6 %43 %44
+%46 = OpLoad %6 %18
+%47 = OpLoad %6 %18
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%50 = OpLoad %6 %23
+%51 = OpLoad %6 %23
+%52 = OpIMul %6 %50 %51
+%53 = OpIAdd %6 %49 %52
+%54 = OpSConvert %28 %53
+%56 = OpImageTexelPointer %55 %37 %42 %13
+%57 = ${OPNAME} %28 %56 %19 %13 ${LASTARG:default=%54}
+%59 = OpCompositeConstruct %58 %57 %57 %57 %57
+OpImageWrite %32 %36 %59
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_2d_r64ui_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimage2D;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimage2D u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec2(gx % 64,gy), uint(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 56
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
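+; Unlike the 1D variants, no Image1D capability is required here; Int64ImageEXT
+; together with SPV_EXT_shader_image_int64 enables the 64-bit texel type.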
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %55 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 2D 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%36 = OpTypeVector %6 2
+%49 = OpTypeInt 64 1
+%52 = OpTypePointer Image %28
+%55 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%37 = OpCompositeConstruct %36 %34 %35
+%38 = OpLoad %6 %8
+%39 = OpLoad %6 %8
+%40 = OpIMul %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpLoad %6 %18
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%45 = OpLoad %6 %23
+%46 = OpLoad %6 %23
+%47 = OpIMul %6 %45 %46
+%48 = OpIAdd %6 %44 %47
+%50 = OpSConvert %49 %48
+%51 = OpBitcast %28 %50
+%53 = OpImageTexelPointer %52 %31 %37 %13
+%54 = ${OPNAME} %28 %53 %19 %13 ${LASTARG:default=%51}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_2d_r64ui_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimage2D;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimage2D u_resultImage;
+; layout (r64ui, binding=1) writeonly uniform uimage2D u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec2(gx,gy), u64vec4(imageAtomicAdd(u_resultImage, ivec2(gx % 64,gy), uint(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 63
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %37 DescriptorSet 0
+OpDecorate %37 Binding 0
+OpDecorate %37 Coherent
+OpDecorate %62 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 2D 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%35 = OpTypeVector %6 2
+%37 = OpVariable %30 UniformConstant
+%39 = OpConstant %6 64
+%54 = OpTypeInt 64 1
+%57 = OpTypePointer Image %28
+%60 = OpTypeVector %28 4
+%62 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%36 = OpCompositeConstruct %35 %33 %34
+%38 = OpLoad %6 %8
+%40 = OpSMod %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpCompositeConstruct %35 %40 %41
+%43 = OpLoad %6 %8
+%44 = OpLoad %6 %8
+%45 = OpIMul %6 %43 %44
+%46 = OpLoad %6 %18
+%47 = OpLoad %6 %18
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%50 = OpLoad %6 %23
+%51 = OpLoad %6 %23
+%52 = OpIMul %6 %50 %51
+%53 = OpIAdd %6 %49 %52
+%55 = OpSConvert %54 %53
+%56 = OpBitcast %28 %55
+%58 = OpImageTexelPointer %57 %37 %42 %13
+%59 = ${OPNAME} %28 %58 %19 %13 ${LASTARG:default=%56}
+%61 = OpCompositeConstruct %60 %59 %59 %59 %59
+OpImageWrite %32 %36 %61
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_2d_r64i_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimage2D;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimage2D u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec2(gx % 64,gy), int(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 54
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %53 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 2D 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%36 = OpTypeVector %6 2
+%50 = OpTypePointer Image %28
+%53 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%37 = OpCompositeConstruct %36 %34 %35
+%38 = OpLoad %6 %8
+%39 = OpLoad %6 %8
+%40 = OpIMul %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpLoad %6 %18
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%45 = OpLoad %6 %23
+%46 = OpLoad %6 %23
+%47 = OpIMul %6 %45 %46
+%48 = OpIAdd %6 %44 %47
+%49 = OpSConvert %28 %48
+%51 = OpImageTexelPointer %50 %31 %37 %13
+%52 = ${OPNAME} %28 %51 %19 %13 ${LASTARG:default=%49}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_2d_r64i_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimage2D;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimage2D u_resultImage;
+; layout (r64i, binding=1) writeonly uniform iimage2D u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec2(gx, gy), i64vec4(imageAtomicAdd(u_resultImage, ivec2(gx%64, gy), int(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 61
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %37 DescriptorSet 0
+OpDecorate %37 Binding 0
+OpDecorate %37 Coherent
+OpDecorate %60 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 2D 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%35 = OpTypeVector %6 2
+%37 = OpVariable %30 UniformConstant
+%39 = OpConstant %6 64
+%55 = OpTypePointer Image %28
+%58 = OpTypeVector %28 4
+%60 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%36 = OpCompositeConstruct %35 %33 %34
+%38 = OpLoad %6 %8
+%40 = OpSMod %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpCompositeConstruct %35 %40 %41
+%43 = OpLoad %6 %8
+%44 = OpLoad %6 %8
+%45 = OpIMul %6 %43 %44
+%46 = OpLoad %6 %18
+%47 = OpLoad %6 %18
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%50 = OpLoad %6 %23
+%51 = OpLoad %6 %23
+%52 = OpIMul %6 %50 %51
+%53 = OpIAdd %6 %49 %52
+%54 = OpSConvert %28 %53
+%56 = OpImageTexelPointer %55 %37 %42 %13
+%57 = ${OPNAME} %28 %56 %19 %13 ${LASTARG:default=%54}
+%59 = OpCompositeConstruct %58 %57 %57 %57 %57
+OpImageWrite %32 %36 %59
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_2d_array_r64ui_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimage2DArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimage2DArray u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), uint(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 57
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %56 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 2D 0 1 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%37 = OpTypeVector %6 3
+%50 = OpTypeInt 64 1
+%53 = OpTypePointer Image %28
+%56 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%36 = OpLoad %6 %23
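+; Arrayed 2D images take an ivec3 coordinate whose z component selects the layer.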
+%38 = OpCompositeConstruct %37 %34 %35 %36
+%39 = OpLoad %6 %8
+%40 = OpLoad %6 %8
+%41 = OpIMul %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %18
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%46 = OpLoad %6 %23
+%47 = OpLoad %6 %23
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%51 = OpSConvert %50 %49
+%52 = OpBitcast %28 %51
+%54 = OpImageTexelPointer %53 %31 %38 %13
+%55 = ${OPNAME} %28 %54 %19 %13 ${LASTARG:default=%52}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_2d_array_r64ui_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimage2DArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimage2DArray u_resultImage;
+; layout (r64ui, binding=1) writeonly uniform uimage2DArray u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec3(gx, gy, gz), u64vec4(imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), uint(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 65
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %38 DescriptorSet 0
+OpDecorate %38 Binding 0
+OpDecorate %38 Coherent
+OpDecorate %64 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 2D 0 1 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%36 = OpTypeVector %6 3
+%38 = OpVariable %30 UniformConstant
+%40 = OpConstant %6 64
+%56 = OpTypeInt 64 1
+%59 = OpTypePointer Image %28
+%62 = OpTypeVector %28 4
+%64 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%35 = OpLoad %6 %23
+%37 = OpCompositeConstruct %36 %33 %34 %35
+%39 = OpLoad %6 %8
+%41 = OpSMod %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %23
+%44 = OpCompositeConstruct %36 %41 %42 %43
+%45 = OpLoad %6 %8
+%46 = OpLoad %6 %8
+%47 = OpIMul %6 %45 %46
+%48 = OpLoad %6 %18
+%49 = OpLoad %6 %18
+%50 = OpIMul %6 %48 %49
+%51 = OpIAdd %6 %47 %50
+%52 = OpLoad %6 %23
+%53 = OpLoad %6 %23
+%54 = OpIMul %6 %52 %53
+%55 = OpIAdd %6 %51 %54
+%57 = OpSConvert %56 %55
+%58 = OpBitcast %28 %57
+%60 = OpImageTexelPointer %59 %38 %44 %13
+%61 = ${OPNAME} %28 %60 %19 %13 ${LASTARG:default=%58}
+%63 = OpCompositeConstruct %62 %61 %61 %61 %61
+OpImageWrite %32 %37 %63
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_2d_array_r64i_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimage2DArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimage2DArray u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), int(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 55
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %54 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 2D 0 1 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%37 = OpTypeVector %6 3
+%51 = OpTypePointer Image %28
+%54 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%36 = OpLoad %6 %23
+%38 = OpCompositeConstruct %37 %34 %35 %36
+%39 = OpLoad %6 %8
+%40 = OpLoad %6 %8
+%41 = OpIMul %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %18
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%46 = OpLoad %6 %23
+%47 = OpLoad %6 %23
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%50 = OpSConvert %28 %49
+%52 = OpImageTexelPointer %51 %31 %38 %13
+%53 = ${OPNAME} %28 %52 %19 %13 ${LASTARG:default=%50}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_2d_array_r64i_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimage2DArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimage2DArray u_resultImage;
+; layout (r64i, binding=1) writeonly uniform iimage2DArray u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec3(gx, gy, gz), i64vec4(imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), int(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 63
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %38 DescriptorSet 0
+OpDecorate %38 Binding 0
+OpDecorate %38 Coherent
+OpDecorate %62 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 2D 0 1 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%36 = OpTypeVector %6 3
+%38 = OpVariable %30 UniformConstant
+%40 = OpConstant %6 64
+%57 = OpTypePointer Image %28
+%60 = OpTypeVector %28 4
+%62 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%35 = OpLoad %6 %23
+%37 = OpCompositeConstruct %36 %33 %34 %35
+%39 = OpLoad %6 %8
+%41 = OpSMod %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %23
+%44 = OpCompositeConstruct %36 %41 %42 %43
+%45 = OpLoad %6 %8
+%46 = OpLoad %6 %8
+%47 = OpIMul %6 %45 %46
+%48 = OpLoad %6 %18
+%49 = OpLoad %6 %18
+%50 = OpIMul %6 %48 %49
+%51 = OpIAdd %6 %47 %50
+%52 = OpLoad %6 %23
+%53 = OpLoad %6 %23
+%54 = OpIMul %6 %52 %53
+%55 = OpIAdd %6 %51 %54
+%56 = OpSConvert %28 %55
+%58 = OpImageTexelPointer %57 %38 %44 %13
+%59 = ${OPNAME} %28 %58 %19 %13 ${LASTARG:default=%56}
+%61 = OpCompositeConstruct %60 %59 %59 %59 %59
+OpImageWrite %32 %37 %61
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_3d_r64ui_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimage3D;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimage3D u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), uint(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 57
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %56 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 3D 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%37 = OpTypeVector %6 3
+%50 = OpTypeInt 64 1
+%53 = OpTypePointer Image %28
+%56 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%36 = OpLoad %6 %23
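+; 3D images also take an ivec3 coordinate, but here z addresses a depth slice
+; rather than an array layer.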
+%38 = OpCompositeConstruct %37 %34 %35 %36
+%39 = OpLoad %6 %8
+%40 = OpLoad %6 %8
+%41 = OpIMul %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %18
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%46 = OpLoad %6 %23
+%47 = OpLoad %6 %23
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%51 = OpSConvert %50 %49
+%52 = OpBitcast %28 %51
+%54 = OpImageTexelPointer %53 %31 %38 %13
+%55 = ${OPNAME} %28 %54 %19 %13 ${LASTARG:default=%52}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_3d_r64ui_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimage3D;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimage3D u_resultImage;
+; layout (r64ui, binding=1) writeonly uniform uimage3D u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec3(gx, gy, gz), u64vec4(imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), uint(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 65
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %38 DescriptorSet 0
+OpDecorate %38 Binding 0
+OpDecorate %38 Coherent
+OpDecorate %64 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 3D 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%36 = OpTypeVector %6 3
+%38 = OpVariable %30 UniformConstant
+%40 = OpConstant %6 64
+%56 = OpTypeInt 64 1
+%59 = OpTypePointer Image %28
+%62 = OpTypeVector %28 4
+%64 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%35 = OpLoad %6 %23
+%37 = OpCompositeConstruct %36 %33 %34 %35
+%39 = OpLoad %6 %8
+%41 = OpSMod %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %23
+%44 = OpCompositeConstruct %36 %41 %42 %43
+%45 = OpLoad %6 %8
+%46 = OpLoad %6 %8
+%47 = OpIMul %6 %45 %46
+%48 = OpLoad %6 %18
+%49 = OpLoad %6 %18
+%50 = OpIMul %6 %48 %49
+%51 = OpIAdd %6 %47 %50
+%52 = OpLoad %6 %23
+%53 = OpLoad %6 %23
+%54 = OpIMul %6 %52 %53
+%55 = OpIAdd %6 %51 %54
+%57 = OpSConvert %56 %55
+%58 = OpBitcast %28 %57
+%60 = OpImageTexelPointer %59 %38 %44 %13
+%61 = ${OPNAME} %28 %60 %19 %13 ${LASTARG:default=%58}
+%63 = OpCompositeConstruct %62 %61 %61 %61 %61
+OpImageWrite %32 %37 %63
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_3d_r64i_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimage3D;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimage3D u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), int(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 55
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %54 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 3D 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%37 = OpTypeVector %6 3
+%51 = OpTypePointer Image %28
+%54 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%36 = OpLoad %6 %23
+%38 = OpCompositeConstruct %37 %34 %35 %36
+%39 = OpLoad %6 %8
+%40 = OpLoad %6 %8
+%41 = OpIMul %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %18
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%46 = OpLoad %6 %23
+%47 = OpLoad %6 %23
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%50 = OpSConvert %28 %49
+%52 = OpImageTexelPointer %51 %31 %38 %13
+%53 = ${OPNAME} %28 %52 %19 %13 ${LASTARG:default=%50}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_3d_r64i_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimage3D;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimage3D u_resultImage;
+; layout (r64i, binding=1) writeonly uniform iimage3D u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec3(gx, gy, gz), i64vec4(imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), int(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 63
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %38 DescriptorSet 0
+OpDecorate %38 Binding 0
+OpDecorate %38 Coherent
+OpDecorate %62 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 3D 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%36 = OpTypeVector %6 3
+%38 = OpVariable %30 UniformConstant
+%40 = OpConstant %6 64
+%57 = OpTypePointer Image %28
+%60 = OpTypeVector %28 4
+%62 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%35 = OpLoad %6 %23
+%37 = OpCompositeConstruct %36 %33 %34 %35
+%39 = OpLoad %6 %8
+%41 = OpSMod %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %23
+%44 = OpCompositeConstruct %36 %41 %42 %43
+%45 = OpLoad %6 %8
+%46 = OpLoad %6 %8
+%47 = OpIMul %6 %45 %46
+%48 = OpLoad %6 %18
+%49 = OpLoad %6 %18
+%50 = OpIMul %6 %48 %49
+%51 = OpIAdd %6 %47 %50
+%52 = OpLoad %6 %23
+%53 = OpLoad %6 %23
+%54 = OpIMul %6 %52 %53
+%55 = OpIAdd %6 %51 %54
+%56 = OpSConvert %28 %55
+%58 = OpImageTexelPointer %57 %38 %44 %13
+%59 = ${OPNAME} %28 %58 %19 %13 ${LASTARG:default=%56}
+%61 = OpCompositeConstruct %60 %59 %59 %59 %59
+OpImageWrite %32 %37 %61
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_cube_r64ui_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimageCube;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimageCube u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), uint(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 57
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %56 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 Cube 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%37 = OpTypeVector %6 3
+%50 = OpTypeInt 64 1
+%53 = OpTypePointer Image %28
+%56 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%36 = OpLoad %6 %23
+%38 = OpCompositeConstruct %37 %34 %35 %36
+%39 = OpLoad %6 %8
+%40 = OpLoad %6 %8
+%41 = OpIMul %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %18
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%46 = OpLoad %6 %23
+%47 = OpLoad %6 %23
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%51 = OpSConvert %50 %49
+%52 = OpBitcast %28 %51
+%54 = OpImageTexelPointer %53 %31 %38 %13
+%55 = ${OPNAME} %28 %54 %19 %13 ${LASTARG:default=%52}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_cube_r64ui_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimageCube;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimageCube u_resultImage;
+; layout (r64ui, binding=1) writeonly uniform uimageCube u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec3(gx, gy, gz), u64vec4(imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), uint(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 65
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %38 DescriptorSet 0
+OpDecorate %38 Binding 0
+OpDecorate %38 Coherent
+OpDecorate %64 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 Cube 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%36 = OpTypeVector %6 3
+%38 = OpVariable %30 UniformConstant
+%40 = OpConstant %6 64
+%56 = OpTypeInt 64 1
+%59 = OpTypePointer Image %28
+%62 = OpTypeVector %28 4
+%64 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%35 = OpLoad %6 %23
+%37 = OpCompositeConstruct %36 %33 %34 %35
+%39 = OpLoad %6 %8
+%41 = OpSMod %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %23
+%44 = OpCompositeConstruct %36 %41 %42 %43
+%45 = OpLoad %6 %8
+%46 = OpLoad %6 %8
+%47 = OpIMul %6 %45 %46
+%48 = OpLoad %6 %18
+%49 = OpLoad %6 %18
+%50 = OpIMul %6 %48 %49
+%51 = OpIAdd %6 %47 %50
+%52 = OpLoad %6 %23
+%53 = OpLoad %6 %23
+%54 = OpIMul %6 %52 %53
+%55 = OpIAdd %6 %51 %54
+%57 = OpSConvert %56 %55
+%58 = OpBitcast %28 %57
+%60 = OpImageTexelPointer %59 %38 %44 %13
+%61 = ${OPNAME} %28 %60 %19 %13 ${LASTARG:default=%58}
+%63 = OpCompositeConstruct %62 %61 %61 %61 %61
+OpImageWrite %32 %37 %63
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_cube_r64i_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimageCube;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimageCube u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec3(gx % 64, gy, gz), int(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 55
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %54 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 Cube 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%37 = OpTypeVector %6 3
+%51 = OpTypePointer Image %28
+%54 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%36 = OpLoad %6 %23
+%38 = OpCompositeConstruct %37 %34 %35 %36
+%39 = OpLoad %6 %8
+%40 = OpLoad %6 %8
+%41 = OpIMul %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %18
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%46 = OpLoad %6 %23
+%47 = OpLoad %6 %23
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%50 = OpSConvert %28 %49
+%52 = OpImageTexelPointer %51 %31 %38 %13
+%53 = ${OPNAME} %28 %52 %19 %13 ${LASTARG:default=%50}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_cube_r64i_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimageCube;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimageCube u_resultImage;
+; layout (r64i, binding=1) writeonly uniform iimageCube u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec3(gx, gy, gz), i64vec4(imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), int(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 63
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %38 DescriptorSet 0
+OpDecorate %38 Binding 0
+OpDecorate %38 Coherent
+OpDecorate %62 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 Cube 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%36 = OpTypeVector %6 3
+%38 = OpVariable %30 UniformConstant
+%40 = OpConstant %6 64
+%57 = OpTypePointer Image %28
+%60 = OpTypeVector %28 4
+%62 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%35 = OpLoad %6 %23
+%37 = OpCompositeConstruct %36 %33 %34 %35
+%39 = OpLoad %6 %8
+%41 = OpSMod %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %23
+%44 = OpCompositeConstruct %36 %41 %42 %43
+%45 = OpLoad %6 %8
+%46 = OpLoad %6 %8
+%47 = OpIMul %6 %45 %46
+%48 = OpLoad %6 %18
+%49 = OpLoad %6 %18
+%50 = OpIMul %6 %48 %49
+%51 = OpIAdd %6 %47 %50
+%52 = OpLoad %6 %23
+%53 = OpLoad %6 %23
+%54 = OpIMul %6 %52 %53
+%55 = OpIAdd %6 %51 %54
+%56 = OpSConvert %28 %55
+%58 = OpImageTexelPointer %57 %38 %44 %13
+%59 = ${OPNAME} %28 %58 %19 %13 ${LASTARG:default=%56}
+%61 = OpCompositeConstruct %60 %59 %59 %59 %59
+OpImageWrite %32 %37 %61
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_cube_array_r64ui_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimageCubeArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimageCubeArray u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), uint(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 57
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability ImageCubeArray
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %56 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 Cube 0 1 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%37 = OpTypeVector %6 3
+%50 = OpTypeInt 64 1
+%53 = OpTypePointer Image %28
+%56 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%36 = OpLoad %6 %23
+%38 = OpCompositeConstruct %37 %34 %35 %36
+%39 = OpLoad %6 %8
+%40 = OpLoad %6 %8
+%41 = OpIMul %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %18
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%46 = OpLoad %6 %23
+%47 = OpLoad %6 %23
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%51 = OpSConvert %50 %49
+%52 = OpBitcast %28 %51
+%54 = OpImageTexelPointer %53 %31 %38 %13
+%55 = ${OPNAME} %28 %54 %19 %13 ${LASTARG:default=%52}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_cube_array_r64ui_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp uimageCubeArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64ui, binding=0) coherent uniform uimageCubeArray u_resultImage;
+; layout (r64ui, binding=1) writeonly uniform uimageCubeArray u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec3(gx, gy, gz), u64vec4(imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), uint(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 65
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability ImageCubeArray
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %38 DescriptorSet 0
+OpDecorate %38 Binding 0
+OpDecorate %38 Coherent
+OpDecorate %64 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 Cube 0 1 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%36 = OpTypeVector %6 3
+%38 = OpVariable %30 UniformConstant
+%40 = OpConstant %6 64
+%56 = OpTypeInt 64 1
+%59 = OpTypePointer Image %28
+%62 = OpTypeVector %28 4
+%64 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%35 = OpLoad %6 %23
+%37 = OpCompositeConstruct %36 %33 %34 %35
+%39 = OpLoad %6 %8
+%41 = OpSMod %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %23
+%44 = OpCompositeConstruct %36 %41 %42 %43
+%45 = OpLoad %6 %8
+%46 = OpLoad %6 %8
+%47 = OpIMul %6 %45 %46
+%48 = OpLoad %6 %18
+%49 = OpLoad %6 %18
+%50 = OpIMul %6 %48 %49
+%51 = OpIAdd %6 %47 %50
+%52 = OpLoad %6 %23
+%53 = OpLoad %6 %23
+%54 = OpIMul %6 %52 %53
+%55 = OpIAdd %6 %51 %54
+%57 = OpSConvert %56 %55
+%58 = OpBitcast %28 %57
+%60 = OpImageTexelPointer %59 %38 %44 %13
+%61 = ${OPNAME} %28 %60 %19 %13 ${LASTARG:default=%58}
+%63 = OpCompositeConstruct %62 %61 %61 %61 %61
+OpImageWrite %32 %37 %63
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_cube_array_r64i_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimageCubeArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimageCubeArray u_resultImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, ivec3(gx % 64, gy, gz), int(gx*gx + gy*gy + gz*gz));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 55
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability ImageCubeArray
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %54 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 Cube 0 1 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%37 = OpTypeVector %6 3
+%51 = OpTypePointer Image %28
+%54 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %18
+%36 = OpLoad %6 %23
+%38 = OpCompositeConstruct %37 %34 %35 %36
+%39 = OpLoad %6 %8
+%40 = OpLoad %6 %8
+%41 = OpIMul %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %18
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%46 = OpLoad %6 %23
+%47 = OpLoad %6 %23
+%48 = OpIMul %6 %46 %47
+%49 = OpIAdd %6 %45 %48
+%50 = OpSConvert %28 %49
+%52 = OpImageTexelPointer %51 %31 %38 %13
+%53 = ${OPNAME} %28 %52 %19 %13 ${LASTARG:default=%50}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_cube_array_r64i_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+; #version 440
+; precision highp iimageCubeArray;
+; #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+; #extension GL_EXT_shader_image_int64 : require
+;
+; layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+; layout (r64i, binding=0) coherent uniform iimageCubeArray u_resultImage;
+; layout (r64i, binding=1) writeonly uniform iimageCubeArray u_intermValuesImage;
+;
+; void main (void)
+; {
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, ivec3(gx, gy, gz), i64vec4(imageAtomicAdd(u_resultImage, ivec3(gx%64, gy, gz), int(gx*gx + gy*gy + gz*gz))));
+; }
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 63
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability ImageCubeArray
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %38 DescriptorSet 0
+OpDecorate %38 Binding 0
+OpDecorate %38 Coherent
+OpDecorate %62 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 Cube 0 1 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%36 = OpTypeVector %6 3
+%38 = OpVariable %30 UniformConstant
+%40 = OpConstant %6 64
+%57 = OpTypePointer Image %28
+%60 = OpTypeVector %28 4
+%62 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%34 = OpLoad %6 %18
+%35 = OpLoad %6 %23
+%37 = OpCompositeConstruct %36 %33 %34 %35
+%39 = OpLoad %6 %8
+%41 = OpSMod %6 %39 %40
+%42 = OpLoad %6 %18
+%43 = OpLoad %6 %23
+%44 = OpCompositeConstruct %36 %41 %42 %43
+%45 = OpLoad %6 %8
+%46 = OpLoad %6 %8
+%47 = OpIMul %6 %45 %46
+%48 = OpLoad %6 %18
+%49 = OpLoad %6 %18
+%50 = OpIMul %6 %48 %49
+%51 = OpIAdd %6 %47 %50
+%52 = OpLoad %6 %23
+%53 = OpLoad %6 %23
+%54 = OpIMul %6 %52 %53
+%55 = OpIAdd %6 %51 %54
+%56 = OpSConvert %28 %55
+%58 = OpImageTexelPointer %57 %38 %44 %13
+%59 = ${OPNAME} %28 %58 %19 %13 ${LASTARG:default=%56}
+%61 = OpCompositeConstruct %60 %59 %59 %59 %59
+OpImageWrite %32 %37 %61
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_image_buffer_r64ui_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp uimageBuffer;
+;#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+;#extension GL_EXT_shader_image_int64 : require
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r64ui, binding=0) coherent uniform uimageBuffer u_resultImage;
+;
+;void main (void)
+;{
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, gx % 64, uint(gx*gx + gy*gy + gz*gz));
+;}
+;
+; SPIR-V
+; Version: 1.3
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 53
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability ImageBuffer
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %52 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 Buffer 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%46 = OpTypeInt 64 1
+%49 = OpTypePointer Image %28
+%52 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %8
+%36 = OpLoad %6 %8
+%37 = OpIMul %6 %35 %36
+%38 = OpLoad %6 %18
+%39 = OpLoad %6 %18
+%40 = OpIMul %6 %38 %39
+%41 = OpIAdd %6 %37 %40
+%42 = OpLoad %6 %23
+%43 = OpLoad %6 %23
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%47 = OpSConvert %46 %45
+%48 = OpBitcast %28 %47
+%50 = OpImageTexelPointer %49 %31 %34 %13
+%51 = ${OPNAME} %28 %50 %19 %13 ${LASTARG:default=%48}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_image_buffer_r64ui_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp uimageBuffer;
+;
+;#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+;#extension GL_EXT_shader_image_int64 : require
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r64ui, binding=0) coherent uniform uimageBuffer u_resultImage;
+;layout (r64ui, binding=1) writeonly uniform uimageBuffer u_intermValuesImage;
+;
+;void main (void)
+;{
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, gx, u64vec4(imageAtomicAdd(u_resultImage, gx % 64, uint(gx*gx + gy*gy + gz*gz))));
+;}
+;
+; SPIR-V
+; Version: 1.0
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 58
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability ImageBuffer
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %34 DescriptorSet 0
+OpDecorate %34 Binding 0
+OpDecorate %34 Coherent
+OpDecorate %57 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 0
+%29 = OpTypeImage %28 Buffer 0 0 0 2 R64ui
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%34 = OpVariable %30 UniformConstant
+%36 = OpConstant %6 64
+%49 = OpTypeInt 64 1
+%52 = OpTypePointer Image %28
+%55 = OpTypeVector %28 4
+%57 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%35 = OpLoad %6 %8
+%37 = OpSMod %6 %35 %36
+%38 = OpLoad %6 %8
+%39 = OpLoad %6 %8
+%40 = OpIMul %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpLoad %6 %18
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%45 = OpLoad %6 %23
+%46 = OpLoad %6 %23
+%47 = OpIMul %6 %45 %46
+%48 = OpIAdd %6 %44 %47
+%50 = OpSConvert %49 %48
+%51 = OpBitcast %28 %50
+%53 = OpImageTexelPointer %52 %34 %37 %13
+%54 = ${OPNAME} %28 %53 %19 %13 ${LASTARG:default=%51}
+%56 = OpCompositeConstruct %55 %54 %54 %54 %54
+OpImageWrite %32 %33 %56
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_image_buffer_r64i_end_result = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp iimageBuffer;
+;#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+;#extension GL_EXT_shader_image_int64 : require
+;
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r64i, binding=0) coherent uniform iimageBuffer u_resultImage;
+;
+;void main (void)
+;{
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageAtomicAdd(u_resultImage, gx % 64, int(gx*gx + gy*gy + gz*gz));
+;}
+;
+; SPIR-V
+; Version: 1.0
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 51
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability ImageBuffer
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 0
+OpDecorate %31 Coherent
+OpDecorate %50 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 Buffer 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%33 = OpConstant %6 64
+%47 = OpTypePointer Image %28
+%50 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %6 %8
+%34 = OpSMod %6 %32 %33
+%35 = OpLoad %6 %8
+%36 = OpLoad %6 %8
+%37 = OpIMul %6 %35 %36
+%38 = OpLoad %6 %18
+%39 = OpLoad %6 %18
+%40 = OpIMul %6 %38 %39
+%41 = OpIAdd %6 %37 %40
+%42 = OpLoad %6 %23
+%43 = OpLoad %6 %23
+%44 = OpIMul %6 %42 %43
+%45 = OpIAdd %6 %41 %44
+%46 = OpSConvert %28 %45
+%48 = OpImageTexelPointer %47 %31 %34 %13
+%49 = ${OPNAME} %28 %48 %19 %13 ${LASTARG:default=%46}
+OpReturn
+OpFunctionEnd
+)";
+
+const std::string kShader_image_buffer_r64i_intermediate_values = R"(
+; The SPIR-V shader below is based on the following GLSL shader, but OpAtomicIAdd has been
+; replaced with a template parameter and the last argument for it has been made optional.
+;
+;#version 440
+;precision highp iimageBuffer;
+;
+;#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
+;#extension GL_EXT_shader_image_int64 : require
+;layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+;layout (r64i, binding=0) coherent uniform iimageBuffer u_resultImage;
+;layout (r64i, binding=1) writeonly uniform iimageBuffer u_intermValuesImage;
+;
+;void main (void)
+;{
+;    int gx = int(gl_GlobalInvocationID.x);
+;    int gy = int(gl_GlobalInvocationID.y);
+;    int gz = int(gl_GlobalInvocationID.z);
+;    imageStore(u_intermValuesImage, gx, i64vec4(imageAtomicAdd(u_resultImage, gx % 64, int(gx*gx + gy*gy + gz*gz))));
+;}
+;
+; SPIR-V
+; Version: 1.0
+; Generator: Khronos Glslang Reference Front End; 8
+; Bound: 56
+; Schema: 0
+OpCapability Shader
+OpCapability Int64
+OpCapability Int64Atomics
+OpCapability ImageBuffer
+OpCapability Int64ImageEXT
+OpExtension "SPV_EXT_shader_image_int64"
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint GLCompute %4 "main" %12
+OpExecutionMode %4 LocalSize 1 1 1
+OpDecorate %12 BuiltIn GlobalInvocationId
+OpDecorate %31 DescriptorSet 0
+OpDecorate %31 Binding 1
+OpDecorate %31 NonReadable
+OpDecorate %34 DescriptorSet 0
+OpDecorate %34 Binding 0
+OpDecorate %34 Coherent
+OpDecorate %55 BuiltIn WorkgroupSize
+%2 = OpTypeVoid
+%3 = OpTypeFunction %2
+%6 = OpTypeInt 32 1
+%7 = OpTypePointer Function %6
+%9 = OpTypeInt 32 0
+%10 = OpTypeVector %9 3
+%11 = OpTypePointer Input %10
+%12 = OpVariable %11 Input
+%13 = OpConstant %9 0
+%14 = OpTypePointer Input %9
+%19 = OpConstant %9 1
+%24 = OpConstant %9 2
+%28 = OpTypeInt 64 1
+%29 = OpTypeImage %28 Buffer 0 0 0 2 R64i
+%30 = OpTypePointer UniformConstant %29
+%31 = OpVariable %30 UniformConstant
+%34 = OpVariable %30 UniformConstant
+%36 = OpConstant %6 64
+%50 = OpTypePointer Image %28
+%53 = OpTypeVector %28 4
+%55 = OpConstantComposite %10 %19 %19 %19
+%4 = OpFunction %2 None %3
+%5 = OpLabel
+%8 = OpVariable %7 Function
+%18 = OpVariable %7 Function
+%23 = OpVariable %7 Function
+%15 = OpAccessChain %14 %12 %13
+%16 = OpLoad %9 %15
+%17 = OpBitcast %6 %16
+OpStore %8 %17
+%20 = OpAccessChain %14 %12 %19
+%21 = OpLoad %9 %20
+%22 = OpBitcast %6 %21
+OpStore %18 %22
+%25 = OpAccessChain %14 %12 %24
+%26 = OpLoad %9 %25
+%27 = OpBitcast %6 %26
+OpStore %23 %27
+%32 = OpLoad %29 %31
+%33 = OpLoad %6 %8
+%35 = OpLoad %6 %8
+%37 = OpSMod %6 %35 %36
+%38 = OpLoad %6 %8
+%39 = OpLoad %6 %8
+%40 = OpIMul %6 %38 %39
+%41 = OpLoad %6 %18
+%42 = OpLoad %6 %18
+%43 = OpIMul %6 %41 %42
+%44 = OpIAdd %6 %40 %43
+%45 = OpLoad %6 %23
+%46 = OpLoad %6 %23
+%47 = OpIMul %6 %45 %46
+%48 = OpIAdd %6 %44 %47
+%49 = OpSConvert %28 %48
+%51 = OpImageTexelPointer %50 %34 %37 %13
+%52 = ${OPNAME} %28 %51 %19 %13 ${LASTARG:default=%49}
+%54 = OpCompositeConstruct %53 %52 %52 %52 %52
+OpImageWrite %32 %33 %54
+OpReturn
+OpFunctionEnd
+)";
+
 } // anonymous namespace
 
 bool CaseVariant::operator< (const CaseVariant& other) const
@@ -2820,6 +6561,43 @@ std::string getSpirvAtomicOpShader (const CaseVariant& caseVariant)
                ValueType{CaseVariant{IMAGE_TYPE_CUBE_ARRAY,    tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT32,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_cube_array_r32ui_intermediate_values},
                ValueType{CaseVariant{IMAGE_TYPE_CUBE_ARRAY,    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT32,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_cube_array_r32i_end_result},
                ValueType{CaseVariant{IMAGE_TYPE_CUBE_ARRAY,    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT32,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_cube_array_r32i_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_BUFFER,                tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT32,     CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_image_buffer_r32ui_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_BUFFER,                tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT32,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_image_buffer_r32ui_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_BUFFER,                tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT32,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_image_buffer_r32i_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_BUFFER,                tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT32,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_image_buffer_r32i_intermediate_values},
+
+               ValueType{CaseVariant{IMAGE_TYPE_1D,                    tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_1d_r64ui_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_1D,                    tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_1d_r64ui_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_1D,                    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_1d_r64i_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_1D,                    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_1d_r64i_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_1D_ARRAY,              tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_1d_array_r64ui_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_1D_ARRAY,              tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_1d_array_r64ui_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_1D_ARRAY,              tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_1d_array_r64i_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_1D_ARRAY,              tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_1d_array_r64i_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_2D,                    tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_2d_r64ui_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_2D,                    tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_2d_r64ui_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_2D,                    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_2d_r64i_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_2D,                    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_2d_r64i_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_2D_ARRAY,              tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_2d_array_r64ui_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_2D_ARRAY,              tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_2d_array_r64ui_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_2D_ARRAY,              tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_2d_array_r64i_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_2D_ARRAY,              tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_2d_array_r64i_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_3D,                    tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_3d_r64ui_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_3D,                    tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_3d_r64ui_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_3D,                    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_3d_r64i_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_3D,                    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_3d_r64i_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_CUBE,                  tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_cube_r64ui_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_CUBE,                  tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_cube_r64ui_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_CUBE,                  tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_cube_r64i_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_CUBE,                  tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_cube_r64i_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_CUBE_ARRAY,    tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_cube_array_r64ui_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_CUBE_ARRAY,    tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_cube_array_r64ui_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_CUBE_ARRAY,    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_cube_array_r64i_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_CUBE_ARRAY,    tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_cube_array_r64i_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_BUFFER,                tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_image_buffer_r64ui_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_BUFFER,                tcu::TextureFormat::R,  tcu::TextureFormat::UNSIGNED_INT64,     CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_image_buffer_r64ui_intermediate_values},
+               ValueType{CaseVariant{IMAGE_TYPE_BUFFER,                tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_END_RESULTS},                   &kShader_image_buffer_r64i_end_result},
+               ValueType{CaseVariant{IMAGE_TYPE_BUFFER,                tcu::TextureFormat::R,  tcu::TextureFormat::SIGNED_INT64,       CaseVariant::CHECK_TYPE_INTERMEDIATE_RESULTS},  &kShader_image_buffer_r64i_intermediate_values},
        };
 
        const auto iter = kShadersMap.find(caseVariant);
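
Usage of the table reads straight off the hunk above; a sketch for one of the
new 64-bit entries (the surrounding test code is assumed):

	// Fetch the r64ui image-buffer end-result shader via its CaseVariant key.
	const CaseVariant variant {IMAGE_TYPE_BUFFER, tcu::TextureFormat::R,
	                           tcu::TextureFormat::UNSIGNED_INT64,
	                           CaseVariant::CHECK_TYPE_END_RESULTS};
	const std::string shaderText = getSpirvAtomicOpShader(variant);
	// shaderText now holds kShader_image_buffer_r64ui_end_result, still with
	// the ${OPNAME}/${LASTARG} placeholders to be specialized per operation.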
index cd195d4..62c1b2e 100644 (file)
@@ -559,8 +559,9 @@ std::string getImageTypeName (const ImageType imageType)
 
 std::string getFormatPrefix (const tcu::TextureFormat& format)
 {
-       return tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
-                  tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER   ? "i" : "";
+       const std::string image64 = ((mapTextureFormat(format) == VK_FORMAT_R64_UINT || mapTextureFormat(format) == VK_FORMAT_R64_SINT) ? "64" : "");
+       return tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" + image64 :
+                  tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER   ? "i" + image64 : "";
 }
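
The widened prefix is what selects the GL_EXT_shader_image_int64 image types
when the GLSL type name is composed. A small sketch of the expected values,
assuming the usual framework includes (the composed type names are for
illustration):

	const tcu::TextureFormat r64ui (tcu::TextureFormat::R, tcu::TextureFormat::UNSIGNED_INT64);
	const tcu::TextureFormat r64i  (tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT64);

	// mapTextureFormat() yields VK_FORMAT_R64_UINT / VK_FORMAT_R64_SINT for
	// these formats, so the "64" infix is appended to the channel-class prefix:
	getFormatPrefix(r64ui);	// "u64" -> composes types such as "u64imageBuffer"
	getFormatPrefix(r64i);	// "i64" -> composes types such as "i64imageBuffer"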
 
 std::string getShaderImageType (const tcu::TextureFormat& format, const ImageType imageType, const bool multisample)
@@ -625,12 +626,14 @@ std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
                        case tcu::TextureFormat::FLOAT:                         typePart = "32f";               break;
                        case tcu::TextureFormat::HALF_FLOAT:            typePart = "16f";               break;
 
+                       case tcu::TextureFormat::UNSIGNED_INT64:        typePart = "64ui";              break;
                        case tcu::TextureFormat::UNSIGNED_INT32:        typePart = "32ui";              break;
                        case tcu::TextureFormat::USCALED_INT16:
                        case tcu::TextureFormat::UNSIGNED_INT16:        typePart = "16ui";              break;
                        case tcu::TextureFormat::USCALED_INT8:
                        case tcu::TextureFormat::UNSIGNED_INT8:         typePart = "8ui";               break;
 
+                       case tcu::TextureFormat::SIGNED_INT64:          typePart = "64i";               break;
                        case tcu::TextureFormat::SIGNED_INT32:          typePart = "32i";               break;
                        case tcu::TextureFormat::SSCALED_INT16:
                        case tcu::TextureFormat::SIGNED_INT16:          typePart = "16i";               break;
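
These cases complete the layout-qualifier strings that the 64-bit shaders above
declare, e.g. "layout (r64ui, binding=0)". A quick sketch of the expected
results, assuming the single-channel "r" order part is produced earlier in the
function as for the existing 32-bit formats:

	const tcu::TextureFormat r64ui (tcu::TextureFormat::R, tcu::TextureFormat::UNSIGNED_INT64);
	const tcu::TextureFormat r64i  (tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT64);

	getShaderImageFormatQualifier(r64ui);	// "r64ui", matching the layouts above
	getShaderImageFormatQualifier(r64i);	// "r64i"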
index 1029c91..99afb49 100644 (file)
@@ -515076,340 +515076,648 @@ dEQP-VK.image.atomic_operations.add.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.add.1d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.1d.r32f_end_result
 dEQP-VK.image.atomic_operations.add.1d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.add.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.add.1d_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.1d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.add.1d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.add.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.add.2d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d.r32f_end_result
 dEQP-VK.image.atomic_operations.add.2d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.add.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.add.2d_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.2d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.add.2d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.add.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.add.3d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.3d.r32f_end_result
 dEQP-VK.image.atomic_operations.add.3d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.add.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.add.cube.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube.r32f_end_result
 dEQP-VK.image.atomic_operations.add.cube.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.add.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.add.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.add.cube_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.add.cube_array.r32f_end_result
 dEQP-VK.image.atomic_operations.add.cube_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.add.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.add.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.add.buffer.r32f_end_result
 dEQP-VK.image.atomic_operations.add.buffer.r32f_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.sub.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.sub.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.sub.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.sub.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.sub.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.sub.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.inc.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.inc.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.inc.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.inc.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.inc.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.inc.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.dec.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.dec.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.dec.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.dec.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.dec.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.dec.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.min.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.min.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.min.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.min.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.min.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.min.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.min.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.min.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.min.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.min.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.min.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.min.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.min.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.min.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.min.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.min.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.min.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.min.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.max.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.max.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.max.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.max.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.max.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.max.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.max.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.max.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.max.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.max.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.max.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.max.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.max.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.max.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.max.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.max.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.max.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.max.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.and.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.and.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.and.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.and.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.and.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.and.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.and.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.and.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.and.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.and.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.and.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.and.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.and.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.and.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.and.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.and.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.and.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.and.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.or.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.or.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.or.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.or.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.or.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.or.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.or.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.or.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.or.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.or.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.or.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.or.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.or.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.or.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.or.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.or.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.or.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.or.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.xor.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.xor.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.xor.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.xor.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.xor.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.xor.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.1d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.1d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.1d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.2d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.2d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.2d_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.3d.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.3d.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.3d.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.cube.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.cube.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.cube_array.r32f_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.exchange.buffer.r32f_end_result
 dEQP-VK.image.atomic_operations.exchange.buffer.r32f_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.1d.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.1d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.1d.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.1d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.1d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.2d.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.2d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.2d.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.2d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.2d_array.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.3d.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.3d.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.3d.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.3d.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.3d.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.cube.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.cube.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.cube.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.cube.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube.r64i_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r32ui_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r32ui_intermediate_values
 dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r32i_end_result
 dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r32i_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64ui_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64ui_intermediate_values
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64i_end_result
+dEQP-VK.image.atomic_operations.compare_exchange.cube_array.r64i_intermediate_values
 dEQP-VK.image.texel_view_compatible.compute.basic.1d_image.image_load.bc1_rgb_unorm_block.r16g16b16a16_unorm
 dEQP-VK.image.texel_view_compatible.compute.basic.1d_image.image_load.bc1_rgb_unorm_block.r16g16b16a16_snorm
 dEQP-VK.image.texel_view_compatible.compute.basic.1d_image.image_load.bc1_rgb_unorm_block.r16g16b16a16_uscaled
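The block added above follows the existing naming scheme exactly: operation,
image type, format, and check mode. A short sketch that regenerates the new
r64 case names (illustrative only; the actual tests are registered in
vktImageAtomicOperationTests.cpp, and this hunk adds no r64 buffer cases):

    #include <cstdio>

    int main (void)
    {
        const char* ops[]     = { "add", "sub", "inc", "dec", "min", "max",
                                  "and", "or", "xor", "exchange", "compare_exchange" };
        const char* images[]  = { "1d", "1d_array", "2d", "2d_array", "3d",
                                  "cube", "cube_array" };
        const char* formats[] = { "r64ui", "r64i" };
        const char* checks[]  = { "end_result", "intermediate_values" };

        for (const char* op : ops)
            for (const char* img : images)
                for (const char* fmt : formats)
                    for (const char* chk : checks)
                        std::printf("dEQP-VK.image.atomic_operations.%s.%s.%s_%s\n",
                                    op, img, fmt, chk);
        return 0;
    }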
index 5dc7aa3..825ad49 100644 (file)
@@ -325,10 +325,12 @@ inline int channelToInt (const deUint8* value, TextureFormat::ChannelType type)
                case TextureFormat::SIGNED_INT8:                return (int)*((const deInt8*)value);
                case TextureFormat::SIGNED_INT16:               return (int)*((const deInt16*)value);
                case TextureFormat::SIGNED_INT32:               return (int)*((const deInt32*)value);
+               case TextureFormat::SIGNED_INT64:               return (int)*((const deInt64*)value);
                case TextureFormat::UNSIGNED_INT8:              return (int)*((const deUint8*)value);
                case TextureFormat::UNSIGNED_INT16:             return (int)*((const deUint16*)value);
                case TextureFormat::UNSIGNED_INT24:             return (int)readUint24(value);
                case TextureFormat::UNSIGNED_INT32:             return (int)*((const deUint32*)value);
+               case TextureFormat::UNSIGNED_INT64:             return (int)*((const deUint64*)value);
                case TextureFormat::HALF_FLOAT:                 return (int)deFloat16To32(*(const deFloat16*)value);
                case TextureFormat::FLOAT:                              return (int)*((const float*)value);
                case TextureFormat::FLOAT64:                    return (int)*((const double*)value);
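Note that channelToInt deliberately narrows: the new 64-bit cases cast the
full channel value to int, so a value outside int range keeps only its low
32 bits on common two's-complement targets. A standalone illustration
(hypothetical helper, not CTS code; memcpy stands in for the raw pointer
dereference used above):

    #include <cstdint>
    #include <cstring>

    int channel64ToIntSketch (const uint8_t* value)
    {
        int64_t v;
        std::memcpy(&v, value, sizeof(v)); // read the 64-bit channel
        return (int)v;                     // 0x100000001 -> 1 after narrowing
    }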
@@ -450,10 +452,12 @@ void intToChannel (deUint8* dst, int src, TextureFormat::ChannelType type)
                case TextureFormat::SIGNED_INT8:                *((deInt8*)dst)                 = convertSat<deInt8>    (src);                          break;
                case TextureFormat::SIGNED_INT16:               *((deInt16*)dst)                = convertSat<deInt16>   (src);                          break;
                case TextureFormat::SIGNED_INT32:               *((deInt32*)dst)                = convertSat<deInt32>   (src);                          break;
+               case TextureFormat::SIGNED_INT64:               *((deInt64*)dst)                = convertSat<deInt64>   ((deInt64)src);         break;
                case TextureFormat::UNSIGNED_INT8:              *((deUint8*)dst)                = convertSat<deUint8>   ((deUint32)src);        break;
                case TextureFormat::UNSIGNED_INT16:             *((deUint16*)dst)               = convertSat<deUint16>  ((deUint32)src);        break;
                case TextureFormat::UNSIGNED_INT24:             writeUint24(dst,                  convertSatUint24              ((deUint32)src));       break;
                case TextureFormat::UNSIGNED_INT32:             *((deUint32*)dst)               = convertSat<deUint32>  ((deUint32)src);        break;
+               case TextureFormat::UNSIGNED_INT64:             *((deUint64*)dst)               = convertSat<deUint64>  ((deUint64)src);        break;
                case TextureFormat::HALF_FLOAT:                 *((deFloat16*)dst)              = deFloat32To16((float)src);                            break;
                case TextureFormat::FLOAT:                              *((float*)dst)                  = (float)src;                                                           break;
                case TextureFormat::FLOAT64:                    *((double*)dst)                 = (double)src;                                                          break;
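On the write path the convertSat calls can never actually saturate here,
since widening an int to a 64-bit channel is lossless; the new cases reduce
to plain stores. A round-trip sketch under that assumption (standalone, not
CTS code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main (void)
    {
        uint8_t storage[8];
        const int src = -123456789;

        const int64_t wide = (int64_t)src;           // intToChannel: widen and store
        std::memcpy(storage, &wide, sizeof(wide));

        int64_t back;                                // channelToInt: load and narrow
        std::memcpy(&back, storage, sizeof(back));
        assert((int)back == src);                    // lossless round trip
        return 0;
    }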