dEQP-VK.glsl.opaque_type_indexing.atomic_counter.dynamically_uniform_geometry
dEQP-VK.glsl.opaque_type_indexing.atomic_counter.dynamically_uniform_tess_ctrl
dEQP-VK.glsl.opaque_type_indexing.atomic_counter.dynamically_uniform_tess_eval
+dEQP-VK.glsl.atomic_operations.exchange_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.add_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.add_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.add_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.min_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.min_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.min_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.max_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.max_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.max_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.and_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.and_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.and_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.or_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.or_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.or_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.xor_signed_compute_shared
+dEQP-VK.glsl.atomic_operations.xor_unsigned_compute_shared
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute_shared
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute_shared
dEQP-VK.glsl.shader_clock.vertex.clockARB
dEQP-VK.glsl.shader_clock.vertex.clock2x32ARB
dEQP-VK.glsl.shader_clock.vertex.clockRealtimeEXT
dEQP-VK.glsl.atomic_operations.exchange_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_signed_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_signed_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed_compute_shared
dEQP-VK.glsl.atomic_operations.exchange_unsigned_vertex
dEQP-VK.glsl.atomic_operations.exchange_unsigned_fragment
dEQP-VK.glsl.atomic_operations.exchange_unsigned_geometry
dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.comp_swap_signed_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_signed_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_signed_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_signed_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute_shared
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.add_signed_vertex
dEQP-VK.glsl.atomic_operations.add_signed_fragment
dEQP-VK.glsl.atomic_operations.add_signed_geometry
dEQP-VK.glsl.atomic_operations.add_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_signed_tess_eval
dEQP-VK.glsl.atomic_operations.add_signed_compute
+dEQP-VK.glsl.atomic_operations.add_signed_compute_shared
dEQP-VK.glsl.atomic_operations.add_unsigned_vertex
dEQP-VK.glsl.atomic_operations.add_unsigned_fragment
dEQP-VK.glsl.atomic_operations.add_unsigned_geometry
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.add_unsigned_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.add_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.add_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.add_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.add_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.add_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.min_signed_vertex
dEQP-VK.glsl.atomic_operations.min_signed_fragment
dEQP-VK.glsl.atomic_operations.min_signed_geometry
dEQP-VK.glsl.atomic_operations.min_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_signed_tess_eval
dEQP-VK.glsl.atomic_operations.min_signed_compute
+dEQP-VK.glsl.atomic_operations.min_signed_compute_shared
dEQP-VK.glsl.atomic_operations.min_unsigned_vertex
dEQP-VK.glsl.atomic_operations.min_unsigned_fragment
dEQP-VK.glsl.atomic_operations.min_unsigned_geometry
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.min_unsigned_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.min_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.min_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.min_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.min_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.min_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.max_signed_vertex
dEQP-VK.glsl.atomic_operations.max_signed_fragment
dEQP-VK.glsl.atomic_operations.max_signed_geometry
dEQP-VK.glsl.atomic_operations.max_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_signed_tess_eval
dEQP-VK.glsl.atomic_operations.max_signed_compute
+dEQP-VK.glsl.atomic_operations.max_signed_compute_shared
dEQP-VK.glsl.atomic_operations.max_unsigned_vertex
dEQP-VK.glsl.atomic_operations.max_unsigned_fragment
dEQP-VK.glsl.atomic_operations.max_unsigned_geometry
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.max_unsigned_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.max_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.max_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.max_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.max_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.max_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.and_signed_vertex
dEQP-VK.glsl.atomic_operations.and_signed_fragment
dEQP-VK.glsl.atomic_operations.and_signed_geometry
dEQP-VK.glsl.atomic_operations.and_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_signed_tess_eval
dEQP-VK.glsl.atomic_operations.and_signed_compute
+dEQP-VK.glsl.atomic_operations.and_signed_compute_shared
dEQP-VK.glsl.atomic_operations.and_unsigned_vertex
dEQP-VK.glsl.atomic_operations.and_unsigned_fragment
dEQP-VK.glsl.atomic_operations.and_unsigned_geometry
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.and_unsigned_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.and_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.and_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.and_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.and_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.and_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.or_signed_vertex
dEQP-VK.glsl.atomic_operations.or_signed_fragment
dEQP-VK.glsl.atomic_operations.or_signed_geometry
dEQP-VK.glsl.atomic_operations.or_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_signed_tess_eval
dEQP-VK.glsl.atomic_operations.or_signed_compute
+dEQP-VK.glsl.atomic_operations.or_signed_compute_shared
dEQP-VK.glsl.atomic_operations.or_unsigned_vertex
dEQP-VK.glsl.atomic_operations.or_unsigned_fragment
dEQP-VK.glsl.atomic_operations.or_unsigned_geometry
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.or_unsigned_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.or_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.or_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.or_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.or_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.or_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.xor_signed_vertex
dEQP-VK.glsl.atomic_operations.xor_signed_fragment
dEQP-VK.glsl.atomic_operations.xor_signed_geometry
dEQP-VK.glsl.atomic_operations.xor_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_signed_tess_eval
dEQP-VK.glsl.atomic_operations.xor_signed_compute
+dEQP-VK.glsl.atomic_operations.xor_signed_compute_shared
dEQP-VK.glsl.atomic_operations.xor_unsigned_vertex
dEQP-VK.glsl.atomic_operations.xor_unsigned_fragment
dEQP-VK.glsl.atomic_operations.xor_unsigned_geometry
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.xor_unsigned_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.xor_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.xor_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.xor_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute_shared
dEQP-VK.glsl.shader_clock.vertex.clockARB
dEQP-VK.glsl.shader_clock.vertex.clock2x32ARB
dEQP-VK.glsl.shader_clock.vertex.clockRealtimeEXT
#include "vkRefUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkQueryUtil.hpp"
+#include "vkObjUtil.hpp"
+#include "vkBarrierUtil.hpp"
+#include "vkCmdUtil.hpp"
#include "vktTestGroupUtil.hpp"
#include "tcuTestLog.hpp"
using namespace vk;
+// Helper class to indicate the shader type and if it should use shared global memory.
+class AtomicShaderType
+{
+public:
+ // Wraps a glu::ShaderType; when sharedGlobalMemory is true the compute
+ // variant operates on a workgroup-shared copy of the data instead of the
+ // descriptor-set buffer directly.
+ AtomicShaderType (glu::ShaderType type, bool sharedGlobalMemory = false)
+ : m_type (type)
+ , m_sharedGlobalMemory (sharedGlobalMemory)
+ {
+ // Shared global memory can only be set to true with compute shaders.
+ DE_ASSERT(!sharedGlobalMemory || type == glu::SHADERTYPE_COMPUTE);
+ }
+
+ glu::ShaderType getType (void) const { return m_type; }
+ bool useSharedGlobalMemory (void) const { return m_sharedGlobalMemory; }
+ // This allows using an AtomicShaderType whenever a glu::ShaderType is required.
+ operator glu::ShaderType (void) const { return getType(); }
+
+private:
+ glu::ShaderType m_type;
+ bool m_sharedGlobalMemory;
+};
+
// Buffer helper
class Buffer
{
private:
const DeviceInterface& m_vkd;
const VkDevice m_device;
+ const VkQueue m_queue;
+ const deUint32 m_queueIndex;
const Unique<VkBuffer> m_buffer;
const UniquePtr<Allocation> m_allocation;
};
Buffer::Buffer (Context& context, VkBufferUsageFlags usage, size_t size)
: m_vkd (context.getDeviceInterface())
, m_device (context.getDevice())
+ , m_queue (context.getUniversalQueue())
+ , m_queueIndex (context.getUniversalQueueFamilyIndex())
, m_buffer (createBuffer (context.getDeviceInterface(),
context.getDevice(),
(VkDeviceSize)size,
void Buffer::invalidate (void)
{
+ // Before the host reads the buffer, submit a one-shot command buffer whose
+ // sole job is a memory barrier from (all stages, memory writes) to
+ // (host stage, host reads), and wait for it, so device writes are made
+ // available and visible to the host prior to invalidating the mapping.
+ const auto cmdPool = vk::makeCommandPool(m_vkd, m_device, m_queueIndex);
+ const auto cmdBufferPtr = vk::allocateCommandBuffer(m_vkd, m_device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+ const auto cmdBuffer = cmdBufferPtr.get();
+ const auto bufferBarrier = vk::makeBufferMemoryBarrier(VK_ACCESS_MEMORY_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, m_buffer.get(), 0ull, VK_WHOLE_SIZE);
+
+ beginCommandBuffer(m_vkd, cmdBuffer);
+ m_vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, nullptr, 1u, &bufferBarrier, 0u, nullptr);
+ endCommandBuffer(m_vkd, cmdBuffer);
+ submitCommandsAndWait(m_vkd, m_device, m_queue, cmdBuffer);
+
invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
}
break;
};
- const T resIo = result.inout[elementNdx];
+ const T resIo = result.inout[elementNdx];
const T resOutput0 = result.output[elementNdx];
const T resOutput1 = result.output[elementNdx + NUM_ELEMENTS / 2];
public:
AtomicOperationCaseInstance (Context& context,
const ShaderSpec& shaderSpec,
- glu::ShaderType shaderType,
+ AtomicShaderType shaderType,
DataType dataType,
AtomicOperation atomicOp);
private:
const ShaderSpec& m_shaderSpec;
- glu::ShaderType m_shaderType;
+ AtomicShaderType m_shaderType;
const DataType m_dataType;
AtomicOperation m_atomicOp;
AtomicOperationCaseInstance::AtomicOperationCaseInstance (Context& context,
const ShaderSpec& shaderSpec,
- glu::ShaderType shaderType,
+ AtomicShaderType shaderType,
DataType dataType,
AtomicOperation atomicOp)
: TestInstance (context)
context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
- if (shaderAtomicInt64Features.shaderBufferInt64Atomics == VK_FALSE)
+ if (!m_shaderType.useSharedGlobalMemory() && shaderAtomicInt64Features.shaderBufferInt64Atomics == VK_FALSE)
+ {
+ TCU_THROW(NotSupportedError, "VkShaderAtomicInt64: 64-bit integer atomic operations not supported for buffers");
+ }
+ if (m_shaderType.useSharedGlobalMemory() && shaderAtomicInt64Features.shaderSharedInt64Atomics == VK_FALSE)
{
- TCU_THROW(NotSupportedError, "VkShaderAtomicInt64: 64-bit unsigned and signed integer atomic operations not supported");
+ TCU_THROW(NotSupportedError, "VkShaderAtomicInt64: 64-bit integer atomic operations not supported for shared memory");
}
}
}
{
{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u }
};
+
const VkDescriptorPoolCreateInfo poolInfo =
{
VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
(const VkBufferView*)DE_NULL
};
-
vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
// Storage for output varying data.
outputPtr[i] = &outputs[i];
}
- UniquePtr<ShaderExecutor> executor(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
- executor->execute(NUM_ELEMENTS, DE_NULL, &outputPtr[0], *extraResourcesSet);
+ const int numWorkGroups = (m_shaderType.useSharedGlobalMemory() ? 1 : static_cast<int>(NUM_ELEMENTS));
+ UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
+
+ executor->execute(numWorkGroups, DE_NULL, &outputPtr[0], *extraResourcesSet);
buffer.invalidate();
tcu::ResultCollector resultCollector(log);
AtomicOperationCase (tcu::TestContext& testCtx,
const char* name,
const char* description,
- glu::ShaderType type,
+ AtomicShaderType type,
DataType dataType,
AtomicOperation atomicOp);
virtual ~AtomicOperationCase (void);
void createShaderSpec();
ShaderSpec m_shaderSpec;
- const glu::ShaderType m_shaderType;
+ const AtomicShaderType m_shaderType;
const DataType m_dataType;
const AtomicOperation m_atomicOp;
};
AtomicOperationCase::AtomicOperationCase (tcu::TestContext& testCtx,
const char* name,
const char* description,
- glu::ShaderType shaderType,
+ AtomicShaderType shaderType,
DataType dataType,
AtomicOperation atomicOp)
: TestCase (testCtx, name, description)
void AtomicOperationCase::createShaderSpec (void)
{
- const tcu::StringTemplate shaderTemplateGlobal(
- "${EXTENSIONS}\n"
- "layout (set = ${SETIDX}, binding = 0) buffer AtomicBuffer\n"
- "{\n"
- " ${DATATYPE} inoutValues[${N}/2];\n"
- " ${DATATYPE} inputValues[${N}];\n"
- " ${DATATYPE} compareValues[${N}];\n"
- " ${DATATYPE} outputValues[${N}];\n"
- " ${DATATYPE} invocationHitCount[${N}];\n"
- " int index;\n"
- "} buf;\n");
+ // Global declarations.
+ std::ostringstream shaderTemplateGlobalStream;
+
+ shaderTemplateGlobalStream
+ << "${EXTENSIONS:opt}\n"
+ << "\n"
+ << "struct AtomicStruct\n"
+ << "{\n"
+ << " ${DATATYPE} inoutValues[${N}/2];\n"
+ << " ${DATATYPE} inputValues[${N}];\n"
+ << " ${DATATYPE} compareValues[${N}];\n"
+ << " ${DATATYPE} outputValues[${N}];\n"
+ << " ${DATATYPE} invocationHitCount[${N}];\n"
+ << " int index;\n"
+ << "};\n"
+ << "\n"
+ << "layout (set = ${SETIDX}, binding = 0) buffer AtomicBuffer {\n"
+ << " AtomicStruct buf;\n"
+ << "} ${RESULT_BUFFER_NAME};\n"
+ << "\n"
+ ;
+
+ // When using global shared memory in the compute variant, invocations will use a shared global structure instead of a
+ // descriptor set as the sources and results of each tested operation.
+ if (m_shaderType.useSharedGlobalMemory())
+ {
+ shaderTemplateGlobalStream
+ << "shared AtomicStruct buf;\n"
+ << "\n"
+ ;
+ }
- std::map<std::string, std::string> specializations;
- if ((m_dataType == DATA_TYPE_INT64) || (m_dataType == DATA_TYPE_UINT64))
+ const auto shaderTemplateGlobalString = shaderTemplateGlobalStream.str();
+ const tcu::StringTemplate shaderTemplateGlobal (shaderTemplateGlobalString);
+
+ // Shader body for the non-vertex case.
+ std::ostringstream nonVertexShaderTemplateStream;
+
+ if (m_shaderType.useSharedGlobalMemory())
{
- specializations["EXTENSIONS"] = "#extension GL_ARB_gpu_shader_int64 : enable\n"
- "#extension GL_EXT_shader_atomic_int64 : enable\n";
+ // Invocation zero will initialize the shared structure from the descriptor set.
+ nonVertexShaderTemplateStream
+ << "if (gl_LocalInvocationIndex == 0u)\n"
+ << "{\n"
+ << " buf = ${RESULT_BUFFER_NAME}.buf;\n"
+ << "}\n"
+ << "barrier();\n"
+ ;
}
- else
+
+ // Every invocation reserves a unique result slot via atomicAdd on the shared
+ // index and applies the tested atomic operation exactly once.
+ nonVertexShaderTemplateStream
+ << "int idx = atomicAdd(buf.index, 1);\n"
+ << "buf.outputValues[idx] = ${ATOMICOP}(buf.inoutValues[idx % (${N}/2)], ${COMPARE_ARG}buf.inputValues[idx]);\n"
+ ;
+
+ if (m_shaderType.useSharedGlobalMemory())
{
- specializations["EXTENSIONS"] = "";
+ // Invocation zero will copy results back to the descriptor set.
+ nonVertexShaderTemplateStream
+ << "barrier();\n"
+ << "if (gl_LocalInvocationIndex == 0u)\n"
+ << "{\n"
+ << " ${RESULT_BUFFER_NAME}.buf = buf;\n"
+ << "}\n"
+ ;
}
- specializations["DATATYPE"] = dataType2Str(m_dataType);
- specializations["ATOMICOP"] = atomicOp2Str(m_atomicOp);
- specializations["SETIDX"] = de::toString((int)EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX);
- specializations["N"] = de::toString((int)NUM_ELEMENTS);
- specializations["COMPARE_ARG"] = m_atomicOp == ATOMIC_OP_COMP_SWAP ? "buf.compareValues[idx], " : "";
- const tcu::StringTemplate nonVertexShaderTemplateSrc(
- "int idx = atomicAdd(buf.index, 1);\n"
- "buf.outputValues[idx] = ${ATOMICOP}(buf.inoutValues[idx % (${N}/2)], ${COMPARE_ARG}buf.inputValues[idx]);\n");
+ const auto nonVertexShaderTemplateStreamStr = nonVertexShaderTemplateStream.str();
+ const tcu::StringTemplate nonVertexShaderTemplateSrc (nonVertexShaderTemplateStreamStr);
+ // Shader body for the vertex case.
const tcu::StringTemplate vertexShaderTemplateSrc(
"int idx = gl_VertexIndex;\n"
"if (atomicAdd(buf.invocationHitCount[idx], 1) == 0)\n"
" buf.outputValues[idx] = ${ATOMICOP}(buf.inoutValues[idx % (${N}/2)], ${COMPARE_ARG}buf.inputValues[idx]);\n"
"}\n");
+ // Specializations.
+ std::map<std::string, std::string> specializations;
+ if ((m_dataType == DATA_TYPE_INT64) || (m_dataType == DATA_TYPE_UINT64))
+ {
+ specializations["EXTENSIONS"] = "#extension GL_ARB_gpu_shader_int64 : enable\n"
+ "#extension GL_EXT_shader_atomic_int64 : enable\n";
+ }
+ specializations["DATATYPE"] = dataType2Str(m_dataType);
+ specializations["ATOMICOP"] = atomicOp2Str(m_atomicOp);
+ specializations["SETIDX"] = de::toString((int)EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX);
+ specializations["N"] = de::toString((int)NUM_ELEMENTS);
+ specializations["COMPARE_ARG"] = ((m_atomicOp == ATOMIC_OP_COMP_SWAP) ? "buf.compareValues[idx], " : "");
+ // An empty instance name leaves the SSBO block anonymous, so the non-shared
+ // variants access member "buf" directly; the shared variant names the block
+ // "result" and copies between result.buf and the shared "buf".
+ specializations["RESULT_BUFFER_NAME"] = (m_shaderType.useSharedGlobalMemory() ? "result" : "");
+
+ // Shader spec.
m_shaderSpec.outputs.push_back(Symbol("outData", glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
- m_shaderSpec.globalDeclarations = shaderTemplateGlobal.specialize(specializations);
- m_shaderSpec.glslVersion = glu::GLSL_VERSION_450;
- m_shaderSpec.source = m_shaderType == glu::SHADERTYPE_VERTEX ?
- vertexShaderTemplateSrc.specialize(specializations) : nonVertexShaderTemplateSrc.specialize(specializations);
+ m_shaderSpec.glslVersion = glu::GLSL_VERSION_450;
+ m_shaderSpec.globalDeclarations = shaderTemplateGlobal.specialize(specializations);
+ m_shaderSpec.source = ((m_shaderType.getType() == glu::SHADERTYPE_VERTEX)
+ ? vertexShaderTemplateSrc.specialize(specializations)
+ : nonVertexShaderTemplateSrc.specialize(specializations));
+
+ if (m_shaderType.useSharedGlobalMemory())
+ {
+ // When using global shared memory, use a single workgroup and an appropriate number of local invocations.
+ m_shaderSpec.localSizeX = static_cast<int>(NUM_ELEMENTS);
+ }
}
void addAtomicOperationTests (tcu::TestCaseGroup* atomicOperationTestsGroup)
static const struct
{
- glu::ShaderType type;
- const char* name;
+ AtomicShaderType type;
+ const char* name;
} shaderTypes[] =
{
- { glu::SHADERTYPE_VERTEX, "vertex" },
- { glu::SHADERTYPE_FRAGMENT, "fragment" },
- { glu::SHADERTYPE_GEOMETRY, "geometry" },
- { glu::SHADERTYPE_TESSELLATION_CONTROL, "tess_ctrl" },
- { glu::SHADERTYPE_TESSELLATION_EVALUATION, "tess_eval" },
- { glu::SHADERTYPE_COMPUTE, "compute" }
+ { glu::SHADERTYPE_VERTEX, "vertex" },
+ { glu::SHADERTYPE_FRAGMENT, "fragment" },
+ { glu::SHADERTYPE_GEOMETRY, "geometry" },
+ { glu::SHADERTYPE_TESSELLATION_CONTROL, "tess_ctrl" },
+ { glu::SHADERTYPE_TESSELLATION_EVALUATION, "tess_eval" },
+ { glu::SHADERTYPE_COMPUTE, "compute" },
+ { AtomicShaderType(glu::SHADERTYPE_COMPUTE, true), "compute_shared" },
};
static const struct
if (!spec.globalDeclarations.empty())
src << spec.globalDeclarations << "\n";
- src << "layout(local_size_x = 1) in;\n"
+ src << "layout(local_size_x = " << spec.localSizeX << ") in;\n"
<< "\n";
declareBufferBlocks(src, spec);
vk::ShaderBuildOptions buildOptions;
bool packFloat16Bit;
SpirVCaseT spirvCase;
+ int localSizeX; // May be used for compute shaders.
ShaderSpec (void)
: glslVersion (glu::GLSL_VERSION_450)
, packFloat16Bit (false)
, spirvCase (SPIRV_CASETYPE_NONE)
+ , localSizeX (1)
{}
};
dEQP-VK.glsl.atomic_operations.exchange_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_signed_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_signed_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed_compute_shared
dEQP-VK.glsl.atomic_operations.exchange_unsigned_vertex
dEQP-VK.glsl.atomic_operations.exchange_unsigned_fragment
dEQP-VK.glsl.atomic_operations.exchange_unsigned_geometry
dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.comp_swap_signed_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_signed_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_signed_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_signed_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed_compute_shared
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.add_signed_vertex
dEQP-VK.glsl.atomic_operations.add_signed_fragment
dEQP-VK.glsl.atomic_operations.add_signed_geometry
dEQP-VK.glsl.atomic_operations.add_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_signed_tess_eval
dEQP-VK.glsl.atomic_operations.add_signed_compute
+dEQP-VK.glsl.atomic_operations.add_signed_compute_shared
dEQP-VK.glsl.atomic_operations.add_unsigned_vertex
dEQP-VK.glsl.atomic_operations.add_unsigned_fragment
dEQP-VK.glsl.atomic_operations.add_unsigned_geometry
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.add_unsigned_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.add_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.add_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.add_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.add_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.add_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.min_signed_vertex
dEQP-VK.glsl.atomic_operations.min_signed_fragment
dEQP-VK.glsl.atomic_operations.min_signed_geometry
dEQP-VK.glsl.atomic_operations.min_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_signed_tess_eval
dEQP-VK.glsl.atomic_operations.min_signed_compute
+dEQP-VK.glsl.atomic_operations.min_signed_compute_shared
dEQP-VK.glsl.atomic_operations.min_unsigned_vertex
dEQP-VK.glsl.atomic_operations.min_unsigned_fragment
dEQP-VK.glsl.atomic_operations.min_unsigned_geometry
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.min_unsigned_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.min_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.min_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.min_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.min_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.min_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.max_signed_vertex
dEQP-VK.glsl.atomic_operations.max_signed_fragment
dEQP-VK.glsl.atomic_operations.max_signed_geometry
dEQP-VK.glsl.atomic_operations.max_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_signed_tess_eval
dEQP-VK.glsl.atomic_operations.max_signed_compute
+dEQP-VK.glsl.atomic_operations.max_signed_compute_shared
dEQP-VK.glsl.atomic_operations.max_unsigned_vertex
dEQP-VK.glsl.atomic_operations.max_unsigned_fragment
dEQP-VK.glsl.atomic_operations.max_unsigned_geometry
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.max_unsigned_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.max_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.max_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.max_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.max_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.max_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.and_signed_vertex
dEQP-VK.glsl.atomic_operations.and_signed_fragment
dEQP-VK.glsl.atomic_operations.and_signed_geometry
dEQP-VK.glsl.atomic_operations.and_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_signed_tess_eval
dEQP-VK.glsl.atomic_operations.and_signed_compute
+dEQP-VK.glsl.atomic_operations.and_signed_compute_shared
dEQP-VK.glsl.atomic_operations.and_unsigned_vertex
dEQP-VK.glsl.atomic_operations.and_unsigned_fragment
dEQP-VK.glsl.atomic_operations.and_unsigned_geometry
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.and_unsigned_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.and_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.and_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.and_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.and_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.and_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.or_signed_vertex
dEQP-VK.glsl.atomic_operations.or_signed_fragment
dEQP-VK.glsl.atomic_operations.or_signed_geometry
dEQP-VK.glsl.atomic_operations.or_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_signed_tess_eval
dEQP-VK.glsl.atomic_operations.or_signed_compute
+dEQP-VK.glsl.atomic_operations.or_signed_compute_shared
dEQP-VK.glsl.atomic_operations.or_unsigned_vertex
dEQP-VK.glsl.atomic_operations.or_unsigned_fragment
dEQP-VK.glsl.atomic_operations.or_unsigned_geometry
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.or_unsigned_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.or_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.or_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.or_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.or_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.or_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute_shared
dEQP-VK.glsl.atomic_operations.xor_signed_vertex
dEQP-VK.glsl.atomic_operations.xor_signed_fragment
dEQP-VK.glsl.atomic_operations.xor_signed_geometry
dEQP-VK.glsl.atomic_operations.xor_signed_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_signed_tess_eval
dEQP-VK.glsl.atomic_operations.xor_signed_compute
+dEQP-VK.glsl.atomic_operations.xor_signed_compute_shared
dEQP-VK.glsl.atomic_operations.xor_unsigned_vertex
dEQP-VK.glsl.atomic_operations.xor_unsigned_fragment
dEQP-VK.glsl.atomic_operations.xor_unsigned_geometry
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.xor_unsigned_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned_compute_shared
dEQP-VK.glsl.atomic_operations.xor_signed64bit_vertex
dEQP-VK.glsl.atomic_operations.xor_signed64bit_fragment
dEQP-VK.glsl.atomic_operations.xor_signed64bit_geometry
dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_eval
dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute_shared
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_vertex
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_fragment
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_geometry
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_eval
dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute_shared
dEQP-VK.glsl.shader_clock.vertex.clockARB
dEQP-VK.glsl.shader_clock.vertex.clock2x32ARB
dEQP-VK.glsl.shader_clock.vertex.clockRealtimeEXT