dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.comp_swap_signed_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_signed_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_signed_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.add_signed_vertex
dEQP-VK.glsl.atomic_operations.add_signed_fragment
dEQP-VK.glsl.atomic_operations.add_signed_geometry
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.add_unsigned_compute
+dEQP-VK.glsl.atomic_operations.add_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.add_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.add_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.add_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.min_signed_vertex
dEQP-VK.glsl.atomic_operations.min_signed_fragment
dEQP-VK.glsl.atomic_operations.min_signed_geometry
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.min_unsigned_compute
+dEQP-VK.glsl.atomic_operations.min_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.min_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.min_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.min_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.max_signed_vertex
dEQP-VK.glsl.atomic_operations.max_signed_fragment
dEQP-VK.glsl.atomic_operations.max_signed_geometry
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.max_unsigned_compute
+dEQP-VK.glsl.atomic_operations.max_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.max_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.max_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.max_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.and_signed_vertex
dEQP-VK.glsl.atomic_operations.and_signed_fragment
dEQP-VK.glsl.atomic_operations.and_signed_geometry
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.and_unsigned_compute
+dEQP-VK.glsl.atomic_operations.and_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.and_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.and_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.and_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.or_signed_vertex
dEQP-VK.glsl.atomic_operations.or_signed_fragment
dEQP-VK.glsl.atomic_operations.or_signed_geometry
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.or_unsigned_compute
+dEQP-VK.glsl.atomic_operations.or_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.or_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.or_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.or_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.xor_signed_vertex
dEQP-VK.glsl.atomic_operations.xor_signed_fragment
dEQP-VK.glsl.atomic_operations.xor_signed_geometry
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.xor_unsigned_compute
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute
dEQP-VK.renderpass.suballocation.simple.color
dEQP-VK.renderpass.suballocation.simple.depth
dEQP-VK.renderpass.suballocation.simple.stencil
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = 1000161003,
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = 1000161004,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = 1000177000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = 1000180000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000,
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = 1000196000,
};
enum VkSystemAllocationScope
return VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT;
}
+template<> VkStructureType getStructureType<VkPhysicalDeviceShaderAtomicInt64FeaturesKHR> (void)
+{
+ return VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR;
+}
+
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceInlineUniformBlockPropertiesEXT& value);
std::ostream& operator<< (std::ostream& s, const VkWriteDescriptorSetInlineUniformBlockEXT& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorPoolInlineUniformBlockCreateInfoEXT& value);
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceShaderAtomicInt64FeaturesKHR& value);
case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT: return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT";
case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT: return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: return "VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR";
default: return DE_NULL;
}
}
s << '}';
return s;
}
+
+std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceShaderAtomicInt64FeaturesKHR& value)
+{
+ s << "VkPhysicalDeviceShaderAtomicInt64FeaturesKHR = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\tshaderBufferInt64Atomics = " << value.shaderBufferInt64Atomics << '\n';
+ s << "\tshaderSharedInt64Atomics = " << value.shaderSharedInt64Atomics << '\n';
+ s << '}';
+ return s;
+}
deUint32 maxInlineUniformBlockBindings;
};
+struct VkPhysicalDeviceShaderAtomicInt64FeaturesKHR
+{
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderBufferInt64Atomics;
+ VkBool32 shaderSharedInt64Atomics;
+};
+
"VK_KHR_multiview",
"VK_KHR_maintenance3",
"VK_KHR_draw_indirect_count",
- "VK_KHR_create_renderpass2"
+ "VK_KHR_create_renderpass2",
+ "VK_KHR_driver_properties",
+ "VK_KHR_shader_atomic_int64",
};
checkKhrExtensions(results, extensions, DE_LENGTH_OF_ARRAY(s_allowedDeviceKhrExtensions), s_allowedDeviceKhrExtensions);
NUM_ELEMENTS = 32
};
-class AtomicOperationCaseInstance : public TestInstance
+enum DataType
+{
+ DATA_TYPE_INT32 = 0,
+ DATA_TYPE_UINT32,
+ DATA_TYPE_INT64,
+ DATA_TYPE_UINT64,
+
+ DATA_TYPE_LAST
+};
+
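+// GLSL type name substituted for ${DATATYPE} in the shader source; the 64-bit names rely on the int64 extensions enabled in createShaderSpec().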
+std::string dataType2Str(DataType type)
+{
+ static const char* const s_names[] =
+ {
+ "int",
+ "uint",
+ "int64_t",
+ "uint64_t",
+ };
+ return de::getSizedArrayElement<DATA_TYPE_LAST>(s_names, type);
+}
+
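+// Type-erased interface to the test's SSBO contents so the test instance can allocate, fill and verify the buffer without knowing the element width.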
+class BufferInterface
{
public:
- AtomicOperationCaseInstance (Context& context,
- const ShaderSpec& shaderSpec,
- glu::ShaderType shaderType,
- bool sign,
- AtomicOperation atomicOp);
- virtual ~AtomicOperationCaseInstance (void);
+ virtual void setBuffer(void* ptr) = 0;
- virtual tcu::TestStatus iterate (void);
+ virtual size_t bufferSize() = 0;
-private:
- const ShaderSpec& m_shaderSpec;
- glu::ShaderType m_shaderType;
- bool m_sign;
- AtomicOperation m_atomicOp;
+ virtual void fillWithTestData(de::Random &rnd) = 0;
+
+ virtual void checkResults(tcu::ResultCollector& resultCollector) = 0;
+
+ virtual ~BufferInterface() {};
+};
+
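+// Concrete buffer for a single data type; dataTypeT is deInt32, deUint32, deInt64 or deUint64 (see createTestBuffer below).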
+template<typename dataTypeT>
+class TestBuffer : public BufferInterface
+{
+public:
- struct BufferInterface
+ TestBuffer(AtomicOperation atomicOp)
+ : m_atomicOp(atomicOp)
+ {}
+
+ template<typename T>
+ struct BufferData
{
// Use half the number of elements for inout to cause overlap between atomic operations.
// Each inout element at index i will have two atomic operations using input from
// indices i and i + NUM_ELEMENTS / 2.
deInt32 index;
- deUint32 inout[NUM_ELEMENTS / 2];
- deUint32 input[NUM_ELEMENTS];
- deUint32 compare[NUM_ELEMENTS];
- deUint32 output[NUM_ELEMENTS];
+ T inout[NUM_ELEMENTS / 2];
+ T input[NUM_ELEMENTS];
+ T compare[NUM_ELEMENTS];
+ T output[NUM_ELEMENTS];
};
+ virtual void setBuffer(void* ptr)
+ {
+ m_ptr = static_cast<BufferData<dataTypeT>*>(ptr);
+ }
+
+ virtual size_t bufferSize()
+ {
+ return sizeof(BufferData<dataTypeT>);
+ }
+
+ virtual void fillWithTestData(de::Random &rnd)
+ {
+ dataTypeT pattern;
+ deMemset(&pattern, 0xcd, sizeof(dataTypeT));
+
+ for (int i = 0; i < NUM_ELEMENTS / 2; i++)
+ {
+ m_ptr->inout[i] = static_cast<dataTypeT>(rnd.getUint64());
+ // The first half of compare elements match with every even index.
+ // The second half matches with odd indices. This causes the
+ // overlapping operations to only select one.
+ m_ptr->compare[i] = m_ptr->inout[i] + (i % 2);
+ m_ptr->compare[i + NUM_ELEMENTS / 2] = m_ptr->inout[i] + 1 - (i % 2);
+ }
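+ // Inputs get fresh random values; outputs are pre-filled with the 0xcd byte pattern so any element the shader never writes stays recognizable.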
+ for (int i = 0; i < NUM_ELEMENTS; i++)
+ {
+ m_ptr->input[i] = static_cast<dataTypeT>(rnd.getUint64());
+ m_ptr->output[i] = pattern;
+ }
+ m_ptr->index = 0;
+
+ // Take a copy to be used when calculating expected values.
+ m_original = *m_ptr;
+ }
+
+ virtual void checkResults(tcu::ResultCollector& resultCollector)
+ {
+ checkOperation(m_original, *m_ptr, resultCollector);
+ }
+
template<typename T>
struct Expected
{
m_output[1] = output1;
}
- bool compare (deUint32 inout, deUint32 output0, deUint32 output1)
+ bool compare (T inout, T output0, T output1)
{
return (deMemCmp((const void*)&m_inout, (const void*)&inout, sizeof(inout)) == 0
&& deMemCmp((const void*)&m_output[0], (const void*)&output0, sizeof(output0)) == 0
&& deMemCmp((const void*)&m_output[1], (const void*)&output1, sizeof(output1)) == 0);
}
};
- template<typename T> void checkOperation (const BufferInterface& original,
- const BufferInterface& result,
- tcu::ResultCollector& resultCollector);
+ void checkOperation (const BufferData<dataTypeT>& original,
+ const BufferData<dataTypeT>& result,
+ tcu::ResultCollector& resultCollector);
-};
+ const AtomicOperation m_atomicOp;
-AtomicOperationCaseInstance::AtomicOperationCaseInstance (Context& context,
- const ShaderSpec& shaderSpec,
- glu::ShaderType shaderType,
- bool sign,
- AtomicOperation atomicOp)
- : TestInstance (context)
- , m_shaderSpec (shaderSpec)
- , m_shaderType (shaderType)
- , m_sign (sign)
- , m_atomicOp (atomicOp)
-{
-}
+ BufferData<dataTypeT>* m_ptr;
+ BufferData<dataTypeT> m_original;
+
+};
-AtomicOperationCaseInstance::~AtomicOperationCaseInstance (void)
+static BufferInterface* createTestBuffer(DataType type, AtomicOperation atomicOp)
{
+ switch (type)
+ {
+ case DATA_TYPE_INT32:
+ return new TestBuffer<deInt32>(atomicOp);
+ case DATA_TYPE_UINT32:
+ return new TestBuffer<deUint32>(atomicOp);
+ case DATA_TYPE_INT64:
+ return new TestBuffer<deInt64>(atomicOp);
+ case DATA_TYPE_UINT64:
+ return new TestBuffer<deUint64>(atomicOp);
+ default:
+ DE_ASSERT(false);
+ return DE_NULL;
+ }
}
// Use template to handle both signed and unsigned cases. SPIR-V should
// have separate operations for both.
template<typename T>
-void AtomicOperationCaseInstance::checkOperation (const BufferInterface& original,
- const BufferInterface& result,
- tcu::ResultCollector& resultCollector)
+void TestBuffer<T>::checkOperation (const BufferData<T>& original,
+ const BufferData<T>& result,
+ tcu::ResultCollector& resultCollector)
{
// originalInout = original inout
// input0 = input at index i
break;
};
- const deUint32 resIo = result.inout[elementNdx];
- const deUint32 resOutput0 = result.output[elementNdx];
- const deUint32 resOutput1 = result.output[elementNdx + NUM_ELEMENTS / 2];
+ const T resIo = result.inout[elementNdx];
+ const T resOutput0 = result.output[elementNdx];
+ const T resOutput1 = result.output[elementNdx + NUM_ELEMENTS / 2];
+
if (!exp[0].compare(resIo, resOutput0, resOutput1) && !exp[1].compare(resIo, resOutput0, resOutput1))
{
}
}
-tcu::TestStatus AtomicOperationCaseInstance::iterate (void)
+
+class AtomicOperationCaseInstance : public TestInstance
{
- //Check stores and atomic operation support.
- switch (m_shaderType)
- {
- case glu::SHADERTYPE_VERTEX:
- case glu::SHADERTYPE_TESSELLATION_CONTROL:
- case glu::SHADERTYPE_TESSELLATION_EVALUATION:
- case glu::SHADERTYPE_GEOMETRY:
- if(!m_context.getDeviceFeatures().vertexPipelineStoresAndAtomics)
- TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in Vertex, Tessellation, and Geometry shader.");
- break;
- case glu::SHADERTYPE_FRAGMENT:
- if(!m_context.getDeviceFeatures().fragmentStoresAndAtomics)
- TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in fragment shader.");
- break;
- case glu::SHADERTYPE_COMPUTE:
- break;
- default:
- DE_FATAL("Unsupported shader type");
- }
+public:
+ AtomicOperationCaseInstance (Context& context,
+ const ShaderSpec& shaderSpec,
+ glu::ShaderType shaderType,
+ DataType dataType,
+ AtomicOperation atomicOp);
+
+ virtual tcu::TestStatus iterate (void);
+
+private:
+ const ShaderSpec& m_shaderSpec;
+ glu::ShaderType m_shaderType;
+ const DataType m_dataType;
+ AtomicOperation m_atomicOp;
- tcu::TestLog& log = m_context.getTestContext().getLog();
- const DeviceInterface& vkd = m_context.getDeviceInterface();
- const VkDevice device = m_context.getDevice();
- de::Random rnd (0x62a15e34);
- Buffer buffer (m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, sizeof(BufferInterface));
- BufferInterface* ptr = (BufferInterface*)buffer.getHostPtr();
+};
- for (int i = 0; i < NUM_ELEMENTS / 2; i++)
+AtomicOperationCaseInstance::AtomicOperationCaseInstance (Context& context,
+ const ShaderSpec& shaderSpec,
+ glu::ShaderType shaderType,
+ DataType dataType,
+ AtomicOperation atomicOp)
+ : TestInstance (context)
+ , m_shaderSpec (shaderSpec)
+ , m_shaderType (shaderType)
+ , m_dataType (dataType)
+ , m_atomicOp (atomicOp)
+{
+ if ((m_dataType == DATA_TYPE_INT64) || (m_dataType == DATA_TYPE_UINT64))
{
- ptr->inout[i] = rnd.getUint32();
- // The first half of compare elements match with every even index.
- // The second half matches with odd indices. This causes the
- // overlapping operations to only select one.
- ptr->compare[i] = ptr->inout[i] + (i % 2);
- ptr->compare[i + NUM_ELEMENTS / 2] = ptr->inout[i] + 1 - (i % 2);
+ if (!isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_shader_atomic_int64"))
+ TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_shader_atomic_int64");
+
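+ // Query the 64-bit atomic feature bits through vkGetPhysicalDeviceFeatures2; the test writes to a storage buffer, so shaderBufferInt64Atomics is required.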
+ VkPhysicalDeviceShaderAtomicInt64FeaturesKHR shaderAtomicInt64Features;
+ deMemset(&shaderAtomicInt64Features, 0x0, sizeof(VkPhysicalDeviceShaderAtomicInt64FeaturesKHR));
+ shaderAtomicInt64Features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR;
+ shaderAtomicInt64Features.pNext = DE_NULL;
+
+ VkPhysicalDeviceFeatures2 features;
+ deMemset(&features, 0x0, sizeof(VkPhysicalDeviceFeatures2));
+ features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+ features.pNext = &shaderAtomicInt64Features;
+
+ context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+
+ if (shaderAtomicInt64Features.shaderBufferInt64Atomics == VK_FALSE)
+ {
+ TCU_THROW(NotSupportedError, "VkShaderAtomicInt64: 64-bit unsigned and signed integer atomic operations not supported");
+ }
}
- for (int i = 0; i < NUM_ELEMENTS; i++)
+}
+
+tcu::TestStatus AtomicOperationCaseInstance::iterate(void)
+{
+ //Check stores and atomic operation support.
+ switch (m_shaderType)
{
- ptr->input[i] = rnd.getUint32();
- ptr->output[i] = 0xcdcdcdcd;
+ case glu::SHADERTYPE_VERTEX:
+ case glu::SHADERTYPE_TESSELLATION_CONTROL:
+ case glu::SHADERTYPE_TESSELLATION_EVALUATION:
+ case glu::SHADERTYPE_GEOMETRY:
+ if (!m_context.getDeviceFeatures().vertexPipelineStoresAndAtomics)
+ TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in Vertex, Tessellation, and Geometry shader.");
+ break;
+ case glu::SHADERTYPE_FRAGMENT:
+ if (!m_context.getDeviceFeatures().fragmentStoresAndAtomics)
+ TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in fragment shader.");
+ break;
+ case glu::SHADERTYPE_COMPUTE:
+ break;
+ default:
+ DE_FATAL("Unsupported shader type");
}
- ptr->index = 0;
- // Take a copy to be used when calculating expected values.
- BufferInterface original = *ptr;
+ de::UniquePtr<BufferInterface> testBuffer (createTestBuffer(m_dataType, m_atomicOp));
+ tcu::TestLog& log = m_context.getTestContext().getLog();
+ const DeviceInterface& vkd = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ de::Random rnd (0x62a15e34);
+ Buffer buffer (m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, testBuffer->bufferSize());
+
+ testBuffer->setBuffer(buffer.getHostPtr());
+ testBuffer->fillWithTestData(rnd);
buffer.flush();
tcu::ResultCollector resultCollector(log);
// Check the results of the atomic operation
- if (m_sign)
- checkOperation<deInt32>(original, *ptr, resultCollector);
- else
- checkOperation<deUint32>(original, *ptr, resultCollector);
+ testBuffer->checkResults(resultCollector);
return tcu::TestStatus(resultCollector.getResult(), resultCollector.getMessage());
}
const char* name,
const char* description,
glu::ShaderType type,
- bool sign,
+ DataType dataType,
AtomicOperation atomicOp);
virtual ~AtomicOperationCase (void);
void createShaderSpec();
ShaderSpec m_shaderSpec;
const glu::ShaderType m_shaderType;
- const bool m_sign;
+ const DataType m_dataType;
const AtomicOperation m_atomicOp;
};
const char* name,
const char* description,
glu::ShaderType shaderType,
- bool sign,
+ DataType dataType,
AtomicOperation atomicOp)
: TestCase (testCtx, name, description)
, m_shaderType (shaderType)
- , m_sign (sign)
+ , m_dataType (dataType)
, m_atomicOp (atomicOp)
{
createShaderSpec();
TestInstance* AtomicOperationCase::createInstance (Context& ctx) const
{
- return new AtomicOperationCaseInstance(ctx, m_shaderSpec, m_shaderType, m_sign, m_atomicOp);
+ return new AtomicOperationCaseInstance(ctx, m_shaderSpec, m_shaderType, m_dataType, m_atomicOp);
}
void AtomicOperationCase::createShaderSpec (void)
{
const tcu::StringTemplate shaderTemplateGlobal(
- "layout (set = ${SETIDX}, binding = 0, std430) buffer AtomicBuffer\n"
+ "${EXTENSIONS}\n"
+ "layout (set = ${SETIDX}, binding = 0) buffer AtomicBuffer\n"
"{\n"
- " highp int index;\n"
- " highp ${DATATYPE} inoutValues[${N}/2];\n"
- " highp ${DATATYPE} inputValues[${N}];\n"
- " highp ${DATATYPE} compareValues[${N}];\n"
- " highp ${DATATYPE} outputValues[${N}];\n"
+ " int index;\n"
+ " ${DATATYPE} inoutValues[${N}/2];\n"
+ " ${DATATYPE} inputValues[${N}];\n"
+ " ${DATATYPE} compareValues[${N}];\n"
+ " ${DATATYPE} outputValues[${N}];\n"
"} buf;\n");
std::map<std::string, std::string> specializations;
- specializations["DATATYPE"] = m_sign ? "int" : "uint";
+ if ((m_dataType == DATA_TYPE_INT64) || (m_dataType == DATA_TYPE_UINT64))
+ {
+ specializations["EXTENSIONS"] = "#extension GL_ARB_gpu_shader_int64 : enable\n"
+ "#extension GL_EXT_shader_atomic_int64 : enable\n";
+ }
+ else
+ {
+ specializations["EXTENSIONS"] = "";
+ }
+ specializations["DATATYPE"] = dataType2Str(m_dataType);
specializations["ATOMICOP"] = atomicOp2Str(m_atomicOp);
specializations["SETIDX"] = de::toString((int)EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX);
specializations["N"] = de::toString((int)NUM_ELEMENTS);
m_shaderSpec.outputs.push_back(Symbol("outData", glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
m_shaderSpec.globalDeclarations = shaderTemplateGlobal.specialize(specializations);
m_shaderSpec.source = shaderTemplateSrc.specialize(specializations);
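+ // All cases now build as desktop GLSL 4.50; the corresponding #version change (310 es -> 450) appears later in this patch.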
+ m_shaderSpec.glslVersion = glu::GLSL_VERSION_450;
}
void addAtomicOperationTests (tcu::TestCaseGroup* atomicOperationTestsGroup)
static const struct
{
- bool value;
+ DataType dataType;
const char* name;
const char* description;
} dataSign[] =
{
- { true, "signed", "Tests using signed data (int)" },
- { false, "unsigned", "Tests using unsigned data (uint)" }
+ { DATA_TYPE_INT32, "signed", "Tests using signed data (int)" },
+ { DATA_TYPE_UINT32, "unsigned", "Tests using unsigned data (uint)" },
+ { DATA_TYPE_INT64, "signed64bit", "Tests using 64 bit signed data (int64)" },
+ { DATA_TYPE_UINT64, "unsigned64bit", "Tests using 64 bit unsigned data (uint64)" }
};
static const struct
{
const std::string description = std::string("Tests atomic operation ") + atomicOp2Str(atomicOp[opNdx].value) + std::string(".");
std::string name = std::string(atomicOp[opNdx].name) + "_" + std::string(dataSign[signNdx].name) + "_" + std::string(shaderTypes[shaderTypeNdx].name);
- atomicOperationTestsGroup->addChild(new AtomicOperationCase(testCtx, name.c_str(), description.c_str(), shaderTypes[shaderTypeNdx].type, dataSign[signNdx].value, atomicOp[opNdx].value));
+ atomicOperationTestsGroup->addChild(new AtomicOperationCase(testCtx, name.c_str(), description.c_str(), shaderTypes[shaderTypeNdx].type, dataSign[signNdx].dataType, atomicOp[opNdx].value));
}
}
}
{
std::ostringstream src;
- src <<"#version 310 es\n";
+ src <<"#version 450\n";
if (!shaderSpec.globalDeclarations.empty())
src << shaderSpec.globalDeclarations << "\n";
dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.comp_swap_signed_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_signed_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_signed_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.add_signed_vertex
dEQP-VK.glsl.atomic_operations.add_signed_fragment
dEQP-VK.glsl.atomic_operations.add_signed_geometry
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.add_unsigned_compute
+dEQP-VK.glsl.atomic_operations.add_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.add_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.add_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.add_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.min_signed_vertex
dEQP-VK.glsl.atomic_operations.min_signed_fragment
dEQP-VK.glsl.atomic_operations.min_signed_geometry
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.min_unsigned_compute
+dEQP-VK.glsl.atomic_operations.min_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.min_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.min_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.min_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.max_signed_vertex
dEQP-VK.glsl.atomic_operations.max_signed_fragment
dEQP-VK.glsl.atomic_operations.max_signed_geometry
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.max_unsigned_compute
+dEQP-VK.glsl.atomic_operations.max_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.max_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.max_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.max_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.and_signed_vertex
dEQP-VK.glsl.atomic_operations.and_signed_fragment
dEQP-VK.glsl.atomic_operations.and_signed_geometry
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.and_unsigned_compute
+dEQP-VK.glsl.atomic_operations.and_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.and_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.and_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.and_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.or_signed_vertex
dEQP-VK.glsl.atomic_operations.or_signed_fragment
dEQP-VK.glsl.atomic_operations.or_signed_geometry
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.or_unsigned_compute
+dEQP-VK.glsl.atomic_operations.or_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.or_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.or_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.or_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.xor_signed_vertex
dEQP-VK.glsl.atomic_operations.xor_signed_fragment
dEQP-VK.glsl.atomic_operations.xor_signed_geometry
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.xor_unsigned_compute
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute
dEQP-VK.renderpass.suballocation.simple.color
dEQP-VK.renderpass.suballocation.simple.depth
dEQP-VK.renderpass.suballocation.simple.stencil
dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.exchange_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.exchange_unsigned_compute
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.exchange_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.exchange_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.comp_swap_signed_vertex
dEQP-VK.glsl.atomic_operations.comp_swap_signed_fragment
dEQP-VK.glsl.atomic_operations.comp_swap_signed_geometry
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.comp_swap_unsigned_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.comp_swap_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.comp_swap_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.add_signed_vertex
dEQP-VK.glsl.atomic_operations.add_signed_fragment
dEQP-VK.glsl.atomic_operations.add_signed_geometry
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.add_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.add_unsigned_compute
+dEQP-VK.glsl.atomic_operations.add_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.add_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.add_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.add_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.add_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.add_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.min_signed_vertex
dEQP-VK.glsl.atomic_operations.min_signed_fragment
dEQP-VK.glsl.atomic_operations.min_signed_geometry
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.min_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.min_unsigned_compute
+dEQP-VK.glsl.atomic_operations.min_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.min_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.min_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.min_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.min_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.min_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.max_signed_vertex
dEQP-VK.glsl.atomic_operations.max_signed_fragment
dEQP-VK.glsl.atomic_operations.max_signed_geometry
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.max_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.max_unsigned_compute
+dEQP-VK.glsl.atomic_operations.max_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.max_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.max_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.max_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.max_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.max_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.and_signed_vertex
dEQP-VK.glsl.atomic_operations.and_signed_fragment
dEQP-VK.glsl.atomic_operations.and_signed_geometry
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.and_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.and_unsigned_compute
+dEQP-VK.glsl.atomic_operations.and_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.and_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.and_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.and_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.and_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.and_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.or_signed_vertex
dEQP-VK.glsl.atomic_operations.or_signed_fragment
dEQP-VK.glsl.atomic_operations.or_signed_geometry
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.or_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.or_unsigned_compute
+dEQP-VK.glsl.atomic_operations.or_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.or_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.or_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.or_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.or_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.or_unsigned64bit_compute
dEQP-VK.glsl.atomic_operations.xor_signed_vertex
dEQP-VK.glsl.atomic_operations.xor_signed_fragment
dEQP-VK.glsl.atomic_operations.xor_signed_geometry
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_ctrl
dEQP-VK.glsl.atomic_operations.xor_unsigned_tess_eval
dEQP-VK.glsl.atomic_operations.xor_unsigned_compute
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_vertex
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_fragment
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_geometry
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.xor_signed64bit_compute
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_vertex
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_fragment
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_geometry
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_ctrl
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_tess_eval
+dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute
dEQP-VK.renderpass.suballocation.simple.color
dEQP-VK.renderpass.suballocation.simple.depth
dEQP-VK.renderpass.suballocation.simple.stencil
name = name.replace("WIN_32_", "WIN32_")
name = name.replace("8_BIT_", "8BIT_")
name = name.replace("16_BIT_", "16BIT_")
+ name = name.replace("INT_64_", "INT64_")
name = name.replace("D_3_D_12_", "D3D12_")
name = name.replace("IOSSURFACE_", "IOS_SURFACE_")
name = name.replace("MAC_OS", "MACOS_")
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = 1000161003,
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = 1000161004,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = 1000177000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = 1000180000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000,
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = 1000196000,
VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO,
VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO,
VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1),
uint32_t maxInlineUniformBlockBindings;
} VkDescriptorPoolInlineUniformBlockCreateInfoEXT;
+#define VK_KHR_shader_atomic_int64 1
+#define VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION 1
+#define VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME "VK_KHR_shader_atomic_int64"
+
+typedef struct VkPhysicalDeviceShaderAtomicInt64FeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderBufferInt64Atomics;
+ VkBool32 shaderSharedInt64Atomics;
+} VkPhysicalDeviceShaderAtomicInt64FeaturesKHR;
#ifdef __cplusplus
}