Tests for global atomic operations
author	Ricardo Garcia <rgarcia@igalia.com>
Wed, 10 Jun 2020 14:35:59 +0000 (16:35 +0200)
committer	Alexander Galazin <Alexander.Galazin@arm.com>
Fri, 3 Jul 2020 11:58:38 +0000 (07:58 -0400)
This commit adds new tests to check that atomic operations work as
expected when used with global shared memory variables in compute
shaders (Workgroup storage in SPIR-V).
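
For reference, the compute shader generated for the new *_shared
variants has roughly the following shape. This is a simplified GLSL
sketch: the element count N, the int data type and the atomicAdd
operation are illustrative, some arrays of the real struct are omitted,
and the actual template is built in vktAtomicOperationTests.cpp below.

    #version 450

    #define N 32 // illustrative; the test substitutes its own ${N}

    struct AtomicStruct
    {
        int inoutValues[N/2];
        int inputValues[N];
        int outputValues[N];
        int index;
    };

    layout (local_size_x = N) in;
    layout (set = 0, binding = 0) buffer AtomicBuffer { AtomicStruct buf; } result;

    shared AtomicStruct buf;

    void main (void)
    {
        // Invocation zero loads the inputs from the SSBO into Workgroup storage.
        if (gl_LocalInvocationIndex == 0u)
            buf = result.buf;
        barrier();

        // Every invocation applies the tested atomic to the shared copy.
        int idx = atomicAdd(buf.index, 1);
        buf.outputValues[idx] = atomicAdd(buf.inoutValues[idx % (N/2)], buf.inputValues[idx]);

        // Invocation zero copies the results back so the host can verify them.
        barrier();
        if (gl_LocalInvocationIndex == 0u)
            result.buf = buf;
    }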

In addition, this commit fixes a missing pipeline barrier needed to
make device writes available to the host domain before invalidating the
mapped memory.
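
At the API level, the fix amounts to the following pattern. This is a
minimal sketch using raw Vulkan calls instead of the CTS helper
wrappers that appear in the diff; cmdBuffer, buffer, memory and device
are assumed to exist, and the command buffer is assumed to be submitted
and waited on between recording the barrier and invalidating.

    // Make all previous device writes to the buffer available to the host domain.
    VkBufferMemoryBarrier bufferBarrier = {};
    bufferBarrier.sType               = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    bufferBarrier.srcAccessMask       = VK_ACCESS_MEMORY_WRITE_BIT;
    bufferBarrier.dstAccessMask       = VK_ACCESS_HOST_READ_BIT;
    bufferBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    bufferBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    bufferBarrier.buffer              = buffer;
    bufferBarrier.offset              = 0ull;
    bufferBarrier.size                = VK_WHOLE_SIZE;

    vkCmdPipelineBarrier(cmdBuffer,
                         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // srcStageMask: any device work
                         VK_PIPELINE_STAGE_HOST_BIT,         // dstStageMask: host access
                         0u, 0u, nullptr, 1u, &bufferBarrier, 0u, nullptr);

    // After the queue submission has completed, invalidate the mapped range so
    // the writes become visible to host reads (a no-op on host-coherent memory).
    VkMappedMemoryRange range = {};
    range.sType  = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = memory;
    range.offset = 0ull;
    range.size   = VK_WHOLE_SIZE;
    vkInvalidateMappedMemoryRanges(device, 1u, &range);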

New tests:
dEQP-VK.glsl.atomic_operations.*_shared

Affected tests:
dEQP-VK.glsl.atomic_operations.*

Components: Vulkan
VK-GL-CTS issue: 2400

Change-Id: Id5e59c9acb1acbee284f98813091d4a6f31ee169

android/cts/master/vk-master-2020-03-01.txt
android/cts/master/vk-master.txt
external/vulkancts/modules/vulkan/shaderexecutor/vktAtomicOperationTests.cpp
external/vulkancts/modules/vulkan/shaderexecutor/vktShaderExecutor.cpp
external/vulkancts/modules/vulkan/shaderexecutor/vktShaderExecutor.hpp
external/vulkancts/mustpass/master/vk-default.txt

android/cts/master/vk-master-2020-03-01.txt
index 5cbcfbf..bfa7d9c 100644
@@ -127339,6 +127339,38 @@ dEQP-VK.glsl.opaque_type_indexing.atomic_counter.uniform_tess_eval
 dEQP-VK.glsl.opaque_type_indexing.atomic_counter.dynamically_uniform_geometry
 dEQP-VK.glsl.opaque_type_indexing.atomic_counter.dynamically_uniform_tess_ctrl
 dEQP-VK.glsl.opaque_type_indexing.atomic_counter.dynamically_uniform_tess_eval
+dEQP-VK.glsl.atomic_operations.exchange_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.add_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.add_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.add_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.min_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.min_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.min_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.max_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.max_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.max_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.and_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.and_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.and_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.or_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.or_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.or_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.xor_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.xor_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute_shared
 dEQP-VK.glsl.shader_clock.vertex.clockARB
 dEQP-VK.glsl.shader_clock.vertex.clock2x32ARB
 dEQP-VK.glsl.shader_clock.vertex.clockRealtimeEXT
android/cts/master/vk-master.txt
index 75a3600..d9fecb1 100644
@@ -404278,192 +404278,224 @@ dEQP-VK.glsl.atomic_operations.exchange_signed_geometry
 dEQP-VK.glsl.atomic_operations.exchange_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.exchange_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.exchange_signed_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_vertex
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_fragment
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_geometry
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.add_signed_vertex
 dEQP-VK.glsl.atomic_operations.add_signed_fragment
 dEQP-VK.glsl.atomic_operations.add_signed_geometry
 dEQP-VK.glsl.atomic_operations.add_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.add_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.add_signed_compute
+dEQP-VK.glsl.atomic_operations.add_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.add_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.add_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.add_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.add_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.add_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.add_unsigned_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.add_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.add_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.add_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.add_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.add_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.min_signed_vertex
 dEQP-VK.glsl.atomic_operations.min_signed_fragment
 dEQP-VK.glsl.atomic_operations.min_signed_geometry
 dEQP-VK.glsl.atomic_operations.min_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.min_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.min_signed_compute
+dEQP-VK.glsl.atomic_operations.min_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.min_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.min_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.min_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.min_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.min_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.min_unsigned_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.min_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.min_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.min_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.min_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.min_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.max_signed_vertex
 dEQP-VK.glsl.atomic_operations.max_signed_fragment
 dEQP-VK.glsl.atomic_operations.max_signed_geometry
 dEQP-VK.glsl.atomic_operations.max_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.max_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.max_signed_compute
+dEQP-VK.glsl.atomic_operations.max_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.max_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.max_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.max_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.max_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.max_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.max_unsigned_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.max_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.max_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.max_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.max_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.max_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.and_signed_vertex
 dEQP-VK.glsl.atomic_operations.and_signed_fragment
 dEQP-VK.glsl.atomic_operations.and_signed_geometry
 dEQP-VK.glsl.atomic_operations.and_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.and_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.and_signed_compute
+dEQP-VK.glsl.atomic_operations.and_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.and_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.and_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.and_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.and_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.and_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.and_unsigned_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.and_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.and_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.and_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.and_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.and_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.or_signed_vertex
 dEQP-VK.glsl.atomic_operations.or_signed_fragment
 dEQP-VK.glsl.atomic_operations.or_signed_geometry
 dEQP-VK.glsl.atomic_operations.or_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.or_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.or_signed_compute
+dEQP-VK.glsl.atomic_operations.or_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.or_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.or_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.or_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.or_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.or_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.or_unsigned_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.or_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.or_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.or_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.or_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.or_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.xor_signed_vertex
 dEQP-VK.glsl.atomic_operations.xor_signed_fragment
 dEQP-VK.glsl.atomic_operations.xor_signed_geometry
 dEQP-VK.glsl.atomic_operations.xor_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.xor_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.xor_signed_compute
+dEQP-VK.glsl.atomic_operations.xor_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.xor_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.xor_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.xor_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.xor_unsigned_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute_shared
 dEQP-VK.glsl.shader_clock.vertex.clockARB
 dEQP-VK.glsl.shader_clock.vertex.clock2x32ARB
 dEQP-VK.glsl.shader_clock.vertex.clockRealtimeEXT
external/vulkancts/modules/vulkan/shaderexecutor/vktAtomicOperationTests.cpp
index 62b2cdb..0047359 100644
@@ -28,6 +28,9 @@
 #include "vkRefUtil.hpp"
 #include "vkMemUtil.hpp"
 #include "vkQueryUtil.hpp"
+#include "vkObjUtil.hpp"
+#include "vkBarrierUtil.hpp"
+#include "vkCmdUtil.hpp"
 #include "vktTestGroupUtil.hpp"
 
 #include "tcuTestLog.hpp"
@@ -55,6 +58,28 @@ using std::vector;
 
 using namespace vk;
 
+// Helper class to indicate the shader type and whether it should use shared global memory.
+class AtomicShaderType
+{
+public:
+       AtomicShaderType (glu::ShaderType type, bool sharedGlobalMemory = false)
+               : m_type                                (type)
+               , m_sharedGlobalMemory  (sharedGlobalMemory)
+       {
+               // Shared global memory can only be set to true with compute shaders.
+               DE_ASSERT(!sharedGlobalMemory || type == glu::SHADERTYPE_COMPUTE);
+       }
+
+       glu::ShaderType getType                                         (void) const    { return m_type; }
+       bool                    useSharedGlobalMemory           (void) const    { return m_sharedGlobalMemory; }
+       // This allows using an AtomicShaderType whenever a glu::ShaderType is required.
+                                       operator glu::ShaderType        (void) const    { return getType();     }
+
+private:
+       glu::ShaderType m_type;
+       bool                    m_sharedGlobalMemory;
+};
+
 // Buffer helper
 class Buffer
 {
@@ -69,6 +94,8 @@ public:
 private:
        const DeviceInterface&          m_vkd;
        const VkDevice                          m_device;
+       const VkQueue                           m_queue;
+       const deUint32                          m_queueIndex;
        const Unique<VkBuffer>          m_buffer;
        const UniquePtr<Allocation>     m_allocation;
 };
@@ -103,6 +130,8 @@ MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice
 Buffer::Buffer (Context& context, VkBufferUsageFlags usage, size_t size)
        : m_vkd                 (context.getDeviceInterface())
        , m_device              (context.getDevice())
+       , m_queue               (context.getUniversalQueue())
+       , m_queueIndex  (context.getUniversalQueueFamilyIndex())
        , m_buffer              (createBuffer                   (context.getDeviceInterface(),
                                                                                         context.getDevice(),
                                                                                         (VkDeviceSize)size,
@@ -121,6 +150,16 @@ void Buffer::flush (void)
 
 void Buffer::invalidate (void)
 {
+       const auto      cmdPool                 = vk::makeCommandPool(m_vkd, m_device, m_queueIndex);
+       const auto      cmdBufferPtr    = vk::allocateCommandBuffer(m_vkd, m_device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+       const auto      cmdBuffer               = cmdBufferPtr.get();
+       const auto      bufferBarrier   = vk::makeBufferMemoryBarrier(VK_ACCESS_MEMORY_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, m_buffer.get(), 0ull, VK_WHOLE_SIZE);
+
+       beginCommandBuffer(m_vkd, cmdBuffer);
+       m_vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, nullptr, 1u, &bufferBarrier, 0u, nullptr);
+       endCommandBuffer(m_vkd, cmdBuffer);
+       submitCommandsAndWait(m_vkd, m_device, m_queue, cmdBuffer);
+
        invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
 }
 
@@ -414,7 +453,7 @@ void TestBuffer<T>::checkOperation (const BufferData<T>&    original,
                                break;
                };
 
-               const T resIo                   = result.inout[elementNdx];
+               const T resIo           = result.inout[elementNdx];
                const T resOutput0      = result.output[elementNdx];
                const T resOutput1      = result.output[elementNdx + NUM_ELEMENTS / 2];
 
@@ -443,7 +482,7 @@ class AtomicOperationCaseInstance : public TestInstance
 public:
                                                                        AtomicOperationCaseInstance             (Context&                       context,
                                                                                                                                         const ShaderSpec&      shaderSpec,
-                                                                                                                                        glu::ShaderType        shaderType,
+                                                                                                                                        AtomicShaderType       shaderType,
                                                                                                                                         DataType                       dataType,
                                                                                                                                         AtomicOperation        atomicOp);
 
@@ -451,7 +490,7 @@ public:
 
 private:
        const ShaderSpec&                               m_shaderSpec;
-       glu::ShaderType                                 m_shaderType;
+       AtomicShaderType                                m_shaderType;
        const DataType                                  m_dataType;
        AtomicOperation                                 m_atomicOp;
 
@@ -459,7 +498,7 @@ private:
 
 AtomicOperationCaseInstance::AtomicOperationCaseInstance (Context&                             context,
                                                                                                                  const ShaderSpec&             shaderSpec,
-                                                                                                                 glu::ShaderType               shaderType,
+                                                                                                                 AtomicShaderType              shaderType,
                                                                                                                  DataType                              dataType,
                                                                                                                  AtomicOperation               atomicOp)
        : TestInstance  (context)
@@ -485,9 +524,13 @@ AtomicOperationCaseInstance::AtomicOperationCaseInstance (Context&                         context,
 
                context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
 
-               if (shaderAtomicInt64Features.shaderBufferInt64Atomics == VK_FALSE)
+               if (!m_shaderType.useSharedGlobalMemory() && shaderAtomicInt64Features.shaderBufferInt64Atomics == VK_FALSE)
+               {
+                       TCU_THROW(NotSupportedError, "VkShaderAtomicInt64: 64-bit integer atomic operations not supported for buffers");
+               }
+               if (m_shaderType.useSharedGlobalMemory() && shaderAtomicInt64Features.shaderSharedInt64Atomics == VK_FALSE)
                {
-                       TCU_THROW(NotSupportedError, "VkShaderAtomicInt64: 64-bit unsigned and signed integer atomic operations not supported");
+                       TCU_THROW(NotSupportedError, "VkShaderAtomicInt64: 64-bit integer atomic operations not supported for shared memory");
                }
        }
 }
@@ -550,6 +593,7 @@ tcu::TestStatus AtomicOperationCaseInstance::iterate(void)
        {
                { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u }
        };
+
        const VkDescriptorPoolCreateInfo poolInfo =
        {
                VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
@@ -592,7 +636,6 @@ tcu::TestStatus AtomicOperationCaseInstance::iterate(void)
                (const VkBufferView*)DE_NULL
        };
 
-
        vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
 
        // Storage for output varying data.
@@ -605,8 +648,10 @@ tcu::TestStatus AtomicOperationCaseInstance::iterate(void)
                outputPtr[i] = &outputs[i];
        }
 
-       UniquePtr<ShaderExecutor> executor(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
-       executor->execute(NUM_ELEMENTS, DE_NULL, &outputPtr[0], *extraResourcesSet);
+       const int                                       numWorkGroups   = (m_shaderType.useSharedGlobalMemory() ? 1 : static_cast<int>(NUM_ELEMENTS));
+       UniquePtr<ShaderExecutor>       executor                (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
+
+       executor->execute(numWorkGroups, DE_NULL, &outputPtr[0], *extraResourcesSet);
        buffer.invalidate();
 
        tcu::ResultCollector resultCollector(log);
@@ -623,7 +668,7 @@ public:
                                                        AtomicOperationCase             (tcu::TestContext&              testCtx,
                                                                                                         const char*                    name,
                                                                                                         const char*                    description,
-                                                                                                        glu::ShaderType                type,
+                                                                                                        AtomicShaderType               type,
                                                                                                         DataType                               dataType,
                                                                                                         AtomicOperation                atomicOp);
        virtual                                 ~AtomicOperationCase    (void);
@@ -638,7 +683,7 @@ private:
 
        void                                    createShaderSpec();
        ShaderSpec                              m_shaderSpec;
-       const glu::ShaderType   m_shaderType;
+       const AtomicShaderType  m_shaderType;
        const DataType                  m_dataType;
        const AtomicOperation   m_atomicOp;
 };
@@ -646,7 +691,7 @@ private:
 AtomicOperationCase::AtomicOperationCase (tcu::TestContext&    testCtx,
                                                                                  const char*           name,
                                                                                  const char*           description,
-                                                                                 glu::ShaderType       shaderType,
+                                                                                 AtomicShaderType      shaderType,
                                                                                  DataType                      dataType,
                                                                                  AtomicOperation       atomicOp)
        : TestCase                      (testCtx, name, description)
@@ -669,38 +714,77 @@ TestInstance* AtomicOperationCase::createInstance (Context& ctx) const
 
 void AtomicOperationCase::createShaderSpec (void)
 {
-       const tcu::StringTemplate shaderTemplateGlobal(
-               "${EXTENSIONS}\n"
-               "layout (set = ${SETIDX}, binding = 0) buffer AtomicBuffer\n"
-               "{\n"
-               "    ${DATATYPE} inoutValues[${N}/2];\n"
-               "    ${DATATYPE} inputValues[${N}];\n"
-               "    ${DATATYPE} compareValues[${N}];\n"
-               "    ${DATATYPE} outputValues[${N}];\n"
-               "    ${DATATYPE} invocationHitCount[${N}];\n"
-               "    int index;\n"
-               "} buf;\n");
+       // Global declarations.
+       std::ostringstream shaderTemplateGlobalStream;
+
+       shaderTemplateGlobalStream
+               << "${EXTENSIONS:opt}\n"
+               << "\n"
+               << "struct AtomicStruct\n"
+               << "{\n"
+               << "    ${DATATYPE} inoutValues[${N}/2];\n"
+               << "    ${DATATYPE} inputValues[${N}];\n"
+               << "    ${DATATYPE} compareValues[${N}];\n"
+               << "    ${DATATYPE} outputValues[${N}];\n"
+               << "    ${DATATYPE} invocationHitCount[${N}];\n"
+               << "    int index;\n"
+               << "};\n"
+               << "\n"
+               << "layout (set = ${SETIDX}, binding = 0) buffer AtomicBuffer {\n"
+               << "    AtomicStruct buf;\n"
+               << "} ${RESULT_BUFFER_NAME};\n"
+               << "\n"
+               ;
+
+       // When using global shared memory in the compute variant, invocations will use a shared global structure instead of a
+       // descriptor set as the sources and results of each tested operation.
+       if (m_shaderType.useSharedGlobalMemory())
+       {
+               shaderTemplateGlobalStream
+                       << "shared AtomicStruct buf;\n"
+                       << "\n"
+                       ;
+       }
 
-       std::map<std::string, std::string> specializations;
-       if ((m_dataType == DATA_TYPE_INT64) || (m_dataType == DATA_TYPE_UINT64))
+       const auto                                      shaderTemplateGlobalString      = shaderTemplateGlobalStream.str();
+       const tcu::StringTemplate       shaderTemplateGlobal            (shaderTemplateGlobalString);
+
+       // Shader body for the non-vertex case.
+       std::ostringstream nonVertexShaderTemplateStream;
+
+       if (m_shaderType.useSharedGlobalMemory())
        {
-               specializations["EXTENSIONS"] = "#extension GL_ARB_gpu_shader_int64 : enable\n"
-                                                                               "#extension GL_EXT_shader_atomic_int64 : enable\n";
+               // Invocation zero will initialize the shared structure from the descriptor set.
+               nonVertexShaderTemplateStream
+                       << "if (gl_LocalInvocationIndex == 0u)\n"
+                       << "{\n"
+                       << "    buf = ${RESULT_BUFFER_NAME}.buf;\n"
+                       << "}\n"
+                       << "barrier();\n"
+                       ;
        }
-       else
+
+       nonVertexShaderTemplateStream
+               << "int idx = atomicAdd(buf.index, 1);\n"
+               << "buf.outputValues[idx] = ${ATOMICOP}(buf.inoutValues[idx % (${N}/2)], ${COMPARE_ARG}buf.inputValues[idx]);\n"
+               ;
+
+       if (m_shaderType.useSharedGlobalMemory())
        {
-               specializations["EXTENSIONS"] = "";
+               // Invocation zero will copy results back to the descriptor set.
+               nonVertexShaderTemplateStream
+                       << "barrier();\n"
+                       << "if (gl_LocalInvocationIndex == 0u)\n"
+                       << "{\n"
+                       << "    ${RESULT_BUFFER_NAME}.buf = buf;\n"
+                       << "}\n"
+                       ;
        }
-       specializations["DATATYPE"] = dataType2Str(m_dataType);
-       specializations["ATOMICOP"] = atomicOp2Str(m_atomicOp);
-       specializations["SETIDX"] = de::toString((int)EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX);
-       specializations["N"] = de::toString((int)NUM_ELEMENTS);
-       specializations["COMPARE_ARG"] = m_atomicOp == ATOMIC_OP_COMP_SWAP ? "buf.compareValues[idx], " : "";
 
-       const tcu::StringTemplate nonVertexShaderTemplateSrc(
-               "int idx = atomicAdd(buf.index, 1);\n"
-               "buf.outputValues[idx] = ${ATOMICOP}(buf.inoutValues[idx % (${N}/2)], ${COMPARE_ARG}buf.inputValues[idx]);\n");
+       const auto                                      nonVertexShaderTemplateStreamStr        = nonVertexShaderTemplateStream.str();
+       const tcu::StringTemplate       nonVertexShaderTemplateSrc                      (nonVertexShaderTemplateStreamStr);
 
+       // Shader body for the vertex case.
        const tcu::StringTemplate vertexShaderTemplateSrc(
                "int idx = gl_VertexIndex;\n"
                "if (atomicAdd(buf.invocationHitCount[idx], 1) == 0)\n"
@@ -708,11 +792,33 @@ void AtomicOperationCase::createShaderSpec (void)
                "    buf.outputValues[idx] = ${ATOMICOP}(buf.inoutValues[idx % (${N}/2)], ${COMPARE_ARG}buf.inputValues[idx]);\n"
                "}\n");
 
+       // Specializations.
+       std::map<std::string, std::string> specializations;
+       if ((m_dataType == DATA_TYPE_INT64) || (m_dataType == DATA_TYPE_UINT64))
+       {
+               specializations["EXTENSIONS"] = "#extension GL_ARB_gpu_shader_int64 : enable\n"
+                                                                               "#extension GL_EXT_shader_atomic_int64 : enable\n";
+       }
+       specializations["DATATYPE"] = dataType2Str(m_dataType);
+       specializations["ATOMICOP"] = atomicOp2Str(m_atomicOp);
+       specializations["SETIDX"] = de::toString((int)EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX);
+       specializations["N"] = de::toString((int)NUM_ELEMENTS);
+       specializations["COMPARE_ARG"] = ((m_atomicOp == ATOMIC_OP_COMP_SWAP) ? "buf.compareValues[idx], " : "");
+       specializations["RESULT_BUFFER_NAME"] = (m_shaderType.useSharedGlobalMemory() ? "result" : "");
+
+       // Shader spec.
        m_shaderSpec.outputs.push_back(Symbol("outData", glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
-       m_shaderSpec.globalDeclarations = shaderTemplateGlobal.specialize(specializations);
-       m_shaderSpec.glslVersion = glu::GLSL_VERSION_450;
-       m_shaderSpec.source = m_shaderType == glu::SHADERTYPE_VERTEX ?
-               vertexShaderTemplateSrc.specialize(specializations) : nonVertexShaderTemplateSrc.specialize(specializations);
+       m_shaderSpec.glslVersion                = glu::GLSL_VERSION_450;
+       m_shaderSpec.globalDeclarations = shaderTemplateGlobal.specialize(specializations);
+       m_shaderSpec.source                             = ((m_shaderType.getType() == glu::SHADERTYPE_VERTEX)
+                                                                               ? vertexShaderTemplateSrc.specialize(specializations)
+                                                                               : nonVertexShaderTemplateSrc.specialize(specializations));
+
+       if (m_shaderType.useSharedGlobalMemory())
+       {
+               // When using global shared memory, use a single workgroup and an appropriate number of local invocations.
+               m_shaderSpec.localSizeX = static_cast<int>(NUM_ELEMENTS);
+       }
 }
 
 void addAtomicOperationTests (tcu::TestCaseGroup* atomicOperationTestsGroup)
@@ -721,16 +827,17 @@ void addAtomicOperationTests (tcu::TestCaseGroup* atomicOperationTestsGroup)
 
        static const struct
        {
-               glu::ShaderType type;
-               const char*             name;
+               AtomicShaderType        type;
+               const char*                     name;
        } shaderTypes[] =
        {
-               { glu::SHADERTYPE_VERTEX,                                       "vertex"        },
-               { glu::SHADERTYPE_FRAGMENT,                                     "fragment"      },
-               { glu::SHADERTYPE_GEOMETRY,                                     "geometry"      },
-               { glu::SHADERTYPE_TESSELLATION_CONTROL,         "tess_ctrl"     },
-               { glu::SHADERTYPE_TESSELLATION_EVALUATION,      "tess_eval"     },
-               { glu::SHADERTYPE_COMPUTE,                                      "compute"       }
+               { glu::SHADERTYPE_VERTEX,                                                       "vertex"                        },
+               { glu::SHADERTYPE_FRAGMENT,                                                     "fragment"                      },
+               { glu::SHADERTYPE_GEOMETRY,                                                     "geometry"                      },
+               { glu::SHADERTYPE_TESSELLATION_CONTROL,                         "tess_ctrl"                     },
+               { glu::SHADERTYPE_TESSELLATION_EVALUATION,                      "tess_eval"                     },
+               { glu::SHADERTYPE_COMPUTE,                                                      "compute"                       },
+               { AtomicShaderType(glu::SHADERTYPE_COMPUTE, true),      "compute_shared"        },
        };
 
        static const struct
external/vulkancts/modules/vulkan/shaderexecutor/vktShaderExecutor.cpp
index 0aa37d1..76d6de0 100644
@@ -2615,7 +2615,7 @@ std::string ComputeShaderExecutor::generateComputeShader (const ShaderSpec& spec
                if (!spec.globalDeclarations.empty())
                        src << spec.globalDeclarations << "\n";
 
-               src << "layout(local_size_x = 1) in;\n"
+               src << "layout(local_size_x = " << spec.localSizeX << ") in;\n"
                        << "\n";
 
                declareBufferBlocks(src, spec);
external/vulkancts/modules/vulkan/shaderexecutor/vktShaderExecutor.hpp
index dc1a5e0..7174a25 100644
@@ -68,11 +68,13 @@ struct ShaderSpec
        vk::ShaderBuildOptions  buildOptions;
        bool                                    packFloat16Bit;
        SpirVCaseT                              spirvCase;
+       int                                             localSizeX;                     // May be used for compute shaders.
 
        ShaderSpec (void)
                : glslVersion           (glu::GLSL_VERSION_450)
                , packFloat16Bit        (false)
                , spirvCase                     (SPIRV_CASETYPE_NONE)
+               , localSizeX            (1)
        {}
 };
 
external/vulkancts/mustpass/master/vk-default.txt
index 03692a6..6238ce3 100644
@@ -404183,192 +404183,224 @@ dEQP-VK.glsl.atomic_operations.exchange_signed_geometry
 dEQP-VK.glsl.atomic_operations.exchange_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.exchange_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.exchange_signed_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_vertex
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_fragment
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_geometry
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.add_signed_vertex
 dEQP-VK.glsl.atomic_operations.add_signed_fragment
 dEQP-VK.glsl.atomic_operations.add_signed_geometry
 dEQP-VK.glsl.atomic_operations.add_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.add_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.add_signed_compute
+dEQP-VK.glsl.atomic_operations.add_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.add_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.add_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.add_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.add_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.add_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.add_unsigned_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.add_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.add_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.add_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.add_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.add_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.min_signed_vertex
 dEQP-VK.glsl.atomic_operations.min_signed_fragment
 dEQP-VK.glsl.atomic_operations.min_signed_geometry
 dEQP-VK.glsl.atomic_operations.min_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.min_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.min_signed_compute
+dEQP-VK.glsl.atomic_operations.min_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.min_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.min_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.min_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.min_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.min_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.min_unsigned_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.min_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.min_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.min_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.min_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.min_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.max_signed_vertex
 dEQP-VK.glsl.atomic_operations.max_signed_fragment
 dEQP-VK.glsl.atomic_operations.max_signed_geometry
 dEQP-VK.glsl.atomic_operations.max_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.max_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.max_signed_compute
+dEQP-VK.glsl.atomic_operations.max_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.max_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.max_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.max_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.max_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.max_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.max_unsigned_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.max_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.max_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.max_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.max_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.max_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.and_signed_vertex
 dEQP-VK.glsl.atomic_operations.and_signed_fragment
 dEQP-VK.glsl.atomic_operations.and_signed_geometry
 dEQP-VK.glsl.atomic_operations.and_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.and_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.and_signed_compute
+dEQP-VK.glsl.atomic_operations.and_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.and_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.and_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.and_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.and_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.and_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.and_unsigned_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.and_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.and_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.and_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.and_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.and_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.or_signed_vertex
 dEQP-VK.glsl.atomic_operations.or_signed_fragment
 dEQP-VK.glsl.atomic_operations.or_signed_geometry
 dEQP-VK.glsl.atomic_operations.or_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.or_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.or_signed_compute
+dEQP-VK.glsl.atomic_operations.or_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.or_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.or_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.or_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.or_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.or_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.or_unsigned_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.or_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.or_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.or_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.or_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.or_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.xor_signed_vertex
 dEQP-VK.glsl.atomic_operations.xor_signed_fragment
 dEQP-VK.glsl.atomic_operations.xor_signed_geometry
 dEQP-VK.glsl.atomic_operations.xor_signed_tess_ctrl
 dEQP-VK.glsl.atomic_operations.xor_signed_tess_eval
 dEQP-VK.glsl.atomic_operations.xor_signed_compute
+dEQP-VK.glsl.atomic_operations.xor_signed_compute_shared
 dEQP-VK.glsl.atomic_operations.xor_unsigned_vertex
 dEQP-VK.glsl.atomic_operations.xor_unsigned_fragment
 dEQP-VK.glsl.atomic_operations.xor_unsigned_geometry
 dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_ctrl
 dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_eval
 dEQP-VK.glsl.atomic_operations.xor_unsigned_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned_compute_shared
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_vertex
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_fragment
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_geometry
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute_shared
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_vertex
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_fragment
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_geometry
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_ctrl
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_eval
 dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute_shared
 dEQP-VK.glsl.shader_clock.vertex.clockARB
 dEQP-VK.glsl.shader_clock.vertex.clock2x32ARB
 dEQP-VK.glsl.shader_clock.vertex.clockRealtimeEXT