Vulkan leaves it to implementations to determine the layout of shared memory.
This test ensures that the fields in the shared memory won't end up
overlapping each other.
This commit also does a minor clean up for the framework core and adds
util functions for comparing types which are common for this and the SSBO tests.
VK-GL-CTS issue: 2372
New Tests:
dEQP-VK.memory_model.shared.*
Components: Vulkan
Change-Id: I4ac0a03f91a5ce699fe03f3c43943fb8eac88850
external/vulkancts/modules/vulkan/memory/vktMemoryTests.cpp \
external/vulkancts/modules/vulkan/memory_model/vktMemoryModelMessagePassing.cpp \
external/vulkancts/modules/vulkan/memory_model/vktMemoryModelPadding.cpp \
+ external/vulkancts/modules/vulkan/memory_model/vktMemoryModelSharedLayout.cpp \
+ external/vulkancts/modules/vulkan/memory_model/vktMemoryModelSharedLayoutCase.cpp \
external/vulkancts/modules/vulkan/modifiers/vktModifiersTests.cpp \
external/vulkancts/modules/vulkan/multiview/vktMultiViewRenderPassUtil.cpp \
external/vulkancts/modules/vulkan/multiview/vktMultiViewRenderTests.cpp \
external/vulkancts/modules/vulkan/ubo/vktUniformBlockTests.cpp \
external/vulkancts/modules/vulkan/util/vktDrawUtil.cpp \
external/vulkancts/modules/vulkan/util/vktExternalMemoryUtil.cpp \
+ external/vulkancts/modules/vulkan/util/vktTypeComparisonUtil.cpp \
external/vulkancts/modules/vulkan/vktCustomInstancesDevices.cpp \
external/vulkancts/modules/vulkan/vktInfoTests.cpp \
external/vulkancts/modules/vulkan/vktShaderLibrary.cpp \
dEQP-VK.memory_model.write_after_read.ext.f64.noncoherent.atomic_atomic.atomicrmw.subgroup.payload_local.physbuffer.guard_local.physbuffer.comp
dEQP-VK.memory_model.write_after_read.ext.f64.noncoherent.atomic_atomic.atomicrmw.subgroup.payload_local.physbuffer.guard_local.physbuffer.vert
dEQP-VK.memory_model.write_after_read.ext.f64.noncoherent.atomic_atomic.atomicrmw.subgroup.payload_local.physbuffer.guard_local.physbuffer.frag
+dEQP-VK.memory_model.shared.scalar_types.0
+dEQP-VK.memory_model.shared.scalar_types.1
+dEQP-VK.memory_model.shared.scalar_types.2
+dEQP-VK.memory_model.shared.scalar_types.3
+dEQP-VK.memory_model.shared.scalar_types.4
+dEQP-VK.memory_model.shared.scalar_types.5
+dEQP-VK.memory_model.shared.scalar_types.6
+dEQP-VK.memory_model.shared.scalar_types.7
+dEQP-VK.memory_model.shared.scalar_types.8
+dEQP-VK.memory_model.shared.scalar_types.9
+dEQP-VK.memory_model.shared.vector_types.0
+dEQP-VK.memory_model.shared.vector_types.1
+dEQP-VK.memory_model.shared.vector_types.2
+dEQP-VK.memory_model.shared.vector_types.3
+dEQP-VK.memory_model.shared.vector_types.4
+dEQP-VK.memory_model.shared.vector_types.5
+dEQP-VK.memory_model.shared.vector_types.6
+dEQP-VK.memory_model.shared.vector_types.7
+dEQP-VK.memory_model.shared.vector_types.8
+dEQP-VK.memory_model.shared.vector_types.9
+dEQP-VK.memory_model.shared.basic_types.0
+dEQP-VK.memory_model.shared.basic_types.1
+dEQP-VK.memory_model.shared.basic_types.2
+dEQP-VK.memory_model.shared.basic_types.3
+dEQP-VK.memory_model.shared.basic_types.4
+dEQP-VK.memory_model.shared.basic_types.5
+dEQP-VK.memory_model.shared.basic_types.6
+dEQP-VK.memory_model.shared.basic_types.7
+dEQP-VK.memory_model.shared.basic_types.8
+dEQP-VK.memory_model.shared.basic_types.9
+dEQP-VK.memory_model.shared.basic_arrays.0
+dEQP-VK.memory_model.shared.basic_arrays.1
+dEQP-VK.memory_model.shared.basic_arrays.2
+dEQP-VK.memory_model.shared.basic_arrays.3
+dEQP-VK.memory_model.shared.basic_arrays.4
+dEQP-VK.memory_model.shared.basic_arrays.5
+dEQP-VK.memory_model.shared.basic_arrays.6
+dEQP-VK.memory_model.shared.basic_arrays.7
+dEQP-VK.memory_model.shared.basic_arrays.8
+dEQP-VK.memory_model.shared.basic_arrays.9
+dEQP-VK.memory_model.shared.arrays_of_arrays.0
+dEQP-VK.memory_model.shared.arrays_of_arrays.1
+dEQP-VK.memory_model.shared.arrays_of_arrays.2
+dEQP-VK.memory_model.shared.arrays_of_arrays.3
+dEQP-VK.memory_model.shared.arrays_of_arrays.4
+dEQP-VK.memory_model.shared.arrays_of_arrays.5
+dEQP-VK.memory_model.shared.arrays_of_arrays.6
+dEQP-VK.memory_model.shared.arrays_of_arrays.7
+dEQP-VK.memory_model.shared.arrays_of_arrays.8
+dEQP-VK.memory_model.shared.arrays_of_arrays.9
+dEQP-VK.memory_model.shared.nested_structs.0
+dEQP-VK.memory_model.shared.nested_structs.1
+dEQP-VK.memory_model.shared.nested_structs.2
+dEQP-VK.memory_model.shared.nested_structs.3
+dEQP-VK.memory_model.shared.nested_structs.4
+dEQP-VK.memory_model.shared.nested_structs.5
+dEQP-VK.memory_model.shared.nested_structs.6
+dEQP-VK.memory_model.shared.nested_structs.7
+dEQP-VK.memory_model.shared.nested_structs.8
+dEQP-VK.memory_model.shared.nested_structs.9
+dEQP-VK.memory_model.shared.nested_structs_arrays.0
+dEQP-VK.memory_model.shared.nested_structs_arrays.1
+dEQP-VK.memory_model.shared.nested_structs_arrays.2
+dEQP-VK.memory_model.shared.nested_structs_arrays.3
+dEQP-VK.memory_model.shared.nested_structs_arrays.4
+dEQP-VK.memory_model.shared.nested_structs_arrays.5
+dEQP-VK.memory_model.shared.nested_structs_arrays.6
+dEQP-VK.memory_model.shared.nested_structs_arrays.7
+dEQP-VK.memory_model.shared.nested_structs_arrays.8
+dEQP-VK.memory_model.shared.nested_structs_arrays.9
+dEQP-VK.memory_model.shared.16bit.scalar_types.0
+dEQP-VK.memory_model.shared.16bit.scalar_types.1
+dEQP-VK.memory_model.shared.16bit.scalar_types.2
+dEQP-VK.memory_model.shared.16bit.scalar_types.3
+dEQP-VK.memory_model.shared.16bit.scalar_types.4
+dEQP-VK.memory_model.shared.16bit.scalar_types.5
+dEQP-VK.memory_model.shared.16bit.scalar_types.6
+dEQP-VK.memory_model.shared.16bit.scalar_types.7
+dEQP-VK.memory_model.shared.16bit.scalar_types.8
+dEQP-VK.memory_model.shared.16bit.scalar_types.9
+dEQP-VK.memory_model.shared.16bit.vector_types.0
+dEQP-VK.memory_model.shared.16bit.vector_types.1
+dEQP-VK.memory_model.shared.16bit.vector_types.2
+dEQP-VK.memory_model.shared.16bit.vector_types.3
+dEQP-VK.memory_model.shared.16bit.vector_types.4
+dEQP-VK.memory_model.shared.16bit.vector_types.5
+dEQP-VK.memory_model.shared.16bit.vector_types.6
+dEQP-VK.memory_model.shared.16bit.vector_types.7
+dEQP-VK.memory_model.shared.16bit.vector_types.8
+dEQP-VK.memory_model.shared.16bit.vector_types.9
+dEQP-VK.memory_model.shared.16bit.basic_types.0
+dEQP-VK.memory_model.shared.16bit.basic_types.1
+dEQP-VK.memory_model.shared.16bit.basic_types.2
+dEQP-VK.memory_model.shared.16bit.basic_types.3
+dEQP-VK.memory_model.shared.16bit.basic_types.4
+dEQP-VK.memory_model.shared.16bit.basic_types.5
+dEQP-VK.memory_model.shared.16bit.basic_types.6
+dEQP-VK.memory_model.shared.16bit.basic_types.7
+dEQP-VK.memory_model.shared.16bit.basic_types.8
+dEQP-VK.memory_model.shared.16bit.basic_types.9
+dEQP-VK.memory_model.shared.16bit.basic_arrays.0
+dEQP-VK.memory_model.shared.16bit.basic_arrays.1
+dEQP-VK.memory_model.shared.16bit.basic_arrays.2
+dEQP-VK.memory_model.shared.16bit.basic_arrays.3
+dEQP-VK.memory_model.shared.16bit.basic_arrays.4
+dEQP-VK.memory_model.shared.16bit.basic_arrays.5
+dEQP-VK.memory_model.shared.16bit.basic_arrays.6
+dEQP-VK.memory_model.shared.16bit.basic_arrays.7
+dEQP-VK.memory_model.shared.16bit.basic_arrays.8
+dEQP-VK.memory_model.shared.16bit.basic_arrays.9
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.0
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.1
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.2
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.3
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.4
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.5
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.6
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.7
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.8
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.9
+dEQP-VK.memory_model.shared.16bit.nested_structs.0
+dEQP-VK.memory_model.shared.16bit.nested_structs.1
+dEQP-VK.memory_model.shared.16bit.nested_structs.2
+dEQP-VK.memory_model.shared.16bit.nested_structs.3
+dEQP-VK.memory_model.shared.16bit.nested_structs.4
+dEQP-VK.memory_model.shared.16bit.nested_structs.5
+dEQP-VK.memory_model.shared.16bit.nested_structs.6
+dEQP-VK.memory_model.shared.16bit.nested_structs.7
+dEQP-VK.memory_model.shared.16bit.nested_structs.8
+dEQP-VK.memory_model.shared.16bit.nested_structs.9
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.0
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.1
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.2
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.3
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.4
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.5
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.6
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.7
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.8
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.9
+dEQP-VK.memory_model.shared.8bit.scalar_types.0
+dEQP-VK.memory_model.shared.8bit.scalar_types.1
+dEQP-VK.memory_model.shared.8bit.scalar_types.2
+dEQP-VK.memory_model.shared.8bit.scalar_types.3
+dEQP-VK.memory_model.shared.8bit.scalar_types.4
+dEQP-VK.memory_model.shared.8bit.scalar_types.5
+dEQP-VK.memory_model.shared.8bit.scalar_types.6
+dEQP-VK.memory_model.shared.8bit.scalar_types.7
+dEQP-VK.memory_model.shared.8bit.scalar_types.8
+dEQP-VK.memory_model.shared.8bit.scalar_types.9
+dEQP-VK.memory_model.shared.8bit.vector_types.0
+dEQP-VK.memory_model.shared.8bit.vector_types.1
+dEQP-VK.memory_model.shared.8bit.vector_types.2
+dEQP-VK.memory_model.shared.8bit.vector_types.3
+dEQP-VK.memory_model.shared.8bit.vector_types.4
+dEQP-VK.memory_model.shared.8bit.vector_types.5
+dEQP-VK.memory_model.shared.8bit.vector_types.6
+dEQP-VK.memory_model.shared.8bit.vector_types.7
+dEQP-VK.memory_model.shared.8bit.vector_types.8
+dEQP-VK.memory_model.shared.8bit.vector_types.9
+dEQP-VK.memory_model.shared.8bit.basic_types.0
+dEQP-VK.memory_model.shared.8bit.basic_types.1
+dEQP-VK.memory_model.shared.8bit.basic_types.2
+dEQP-VK.memory_model.shared.8bit.basic_types.3
+dEQP-VK.memory_model.shared.8bit.basic_types.4
+dEQP-VK.memory_model.shared.8bit.basic_types.5
+dEQP-VK.memory_model.shared.8bit.basic_types.6
+dEQP-VK.memory_model.shared.8bit.basic_types.7
+dEQP-VK.memory_model.shared.8bit.basic_types.8
+dEQP-VK.memory_model.shared.8bit.basic_types.9
+dEQP-VK.memory_model.shared.8bit.basic_arrays.0
+dEQP-VK.memory_model.shared.8bit.basic_arrays.1
+dEQP-VK.memory_model.shared.8bit.basic_arrays.2
+dEQP-VK.memory_model.shared.8bit.basic_arrays.3
+dEQP-VK.memory_model.shared.8bit.basic_arrays.4
+dEQP-VK.memory_model.shared.8bit.basic_arrays.5
+dEQP-VK.memory_model.shared.8bit.basic_arrays.6
+dEQP-VK.memory_model.shared.8bit.basic_arrays.7
+dEQP-VK.memory_model.shared.8bit.basic_arrays.8
+dEQP-VK.memory_model.shared.8bit.basic_arrays.9
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.0
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.1
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.2
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.3
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.4
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.5
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.6
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.7
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.8
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.9
+dEQP-VK.memory_model.shared.8bit.nested_structs.0
+dEQP-VK.memory_model.shared.8bit.nested_structs.1
+dEQP-VK.memory_model.shared.8bit.nested_structs.2
+dEQP-VK.memory_model.shared.8bit.nested_structs.3
+dEQP-VK.memory_model.shared.8bit.nested_structs.4
+dEQP-VK.memory_model.shared.8bit.nested_structs.5
+dEQP-VK.memory_model.shared.8bit.nested_structs.6
+dEQP-VK.memory_model.shared.8bit.nested_structs.7
+dEQP-VK.memory_model.shared.8bit.nested_structs.8
+dEQP-VK.memory_model.shared.8bit.nested_structs.9
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.0
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.1
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.2
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.3
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.4
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.5
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.6
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.7
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.8
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.9
dEQP-VK.memory_model.transitive.noncoherent.atomic_atomic.payload_local.physbuffer.guard_local.physbuffer.nontransvis
dEQP-VK.memory_model.transitive.noncoherent.atomic_atomic.payload_local.physbuffer.guard_local.physbuffer.transvis
dEQP-VK.memory_model.padding.test
+dEQP-VK.memory_model.shared.scalar_types.0
+dEQP-VK.memory_model.shared.scalar_types.1
+dEQP-VK.memory_model.shared.scalar_types.2
+dEQP-VK.memory_model.shared.scalar_types.3
+dEQP-VK.memory_model.shared.scalar_types.4
+dEQP-VK.memory_model.shared.scalar_types.5
+dEQP-VK.memory_model.shared.scalar_types.6
+dEQP-VK.memory_model.shared.scalar_types.7
+dEQP-VK.memory_model.shared.scalar_types.8
+dEQP-VK.memory_model.shared.scalar_types.9
+dEQP-VK.memory_model.shared.vector_types.0
+dEQP-VK.memory_model.shared.vector_types.1
+dEQP-VK.memory_model.shared.vector_types.2
+dEQP-VK.memory_model.shared.vector_types.3
+dEQP-VK.memory_model.shared.vector_types.4
+dEQP-VK.memory_model.shared.vector_types.5
+dEQP-VK.memory_model.shared.vector_types.6
+dEQP-VK.memory_model.shared.vector_types.7
+dEQP-VK.memory_model.shared.vector_types.8
+dEQP-VK.memory_model.shared.vector_types.9
+dEQP-VK.memory_model.shared.basic_types.0
+dEQP-VK.memory_model.shared.basic_types.1
+dEQP-VK.memory_model.shared.basic_types.2
+dEQP-VK.memory_model.shared.basic_types.3
+dEQP-VK.memory_model.shared.basic_types.4
+dEQP-VK.memory_model.shared.basic_types.5
+dEQP-VK.memory_model.shared.basic_types.6
+dEQP-VK.memory_model.shared.basic_types.7
+dEQP-VK.memory_model.shared.basic_types.8
+dEQP-VK.memory_model.shared.basic_types.9
+dEQP-VK.memory_model.shared.basic_arrays.0
+dEQP-VK.memory_model.shared.basic_arrays.1
+dEQP-VK.memory_model.shared.basic_arrays.2
+dEQP-VK.memory_model.shared.basic_arrays.3
+dEQP-VK.memory_model.shared.basic_arrays.4
+dEQP-VK.memory_model.shared.basic_arrays.5
+dEQP-VK.memory_model.shared.basic_arrays.6
+dEQP-VK.memory_model.shared.basic_arrays.7
+dEQP-VK.memory_model.shared.basic_arrays.8
+dEQP-VK.memory_model.shared.basic_arrays.9
+dEQP-VK.memory_model.shared.arrays_of_arrays.0
+dEQP-VK.memory_model.shared.arrays_of_arrays.1
+dEQP-VK.memory_model.shared.arrays_of_arrays.2
+dEQP-VK.memory_model.shared.arrays_of_arrays.3
+dEQP-VK.memory_model.shared.arrays_of_arrays.4
+dEQP-VK.memory_model.shared.arrays_of_arrays.5
+dEQP-VK.memory_model.shared.arrays_of_arrays.6
+dEQP-VK.memory_model.shared.arrays_of_arrays.7
+dEQP-VK.memory_model.shared.arrays_of_arrays.8
+dEQP-VK.memory_model.shared.arrays_of_arrays.9
+dEQP-VK.memory_model.shared.nested_structs.0
+dEQP-VK.memory_model.shared.nested_structs.1
+dEQP-VK.memory_model.shared.nested_structs.2
+dEQP-VK.memory_model.shared.nested_structs.3
+dEQP-VK.memory_model.shared.nested_structs.4
+dEQP-VK.memory_model.shared.nested_structs.5
+dEQP-VK.memory_model.shared.nested_structs.6
+dEQP-VK.memory_model.shared.nested_structs.7
+dEQP-VK.memory_model.shared.nested_structs.8
+dEQP-VK.memory_model.shared.nested_structs.9
+dEQP-VK.memory_model.shared.nested_structs_arrays.0
+dEQP-VK.memory_model.shared.nested_structs_arrays.1
+dEQP-VK.memory_model.shared.nested_structs_arrays.2
+dEQP-VK.memory_model.shared.nested_structs_arrays.3
+dEQP-VK.memory_model.shared.nested_structs_arrays.4
+dEQP-VK.memory_model.shared.nested_structs_arrays.5
+dEQP-VK.memory_model.shared.nested_structs_arrays.6
+dEQP-VK.memory_model.shared.nested_structs_arrays.7
+dEQP-VK.memory_model.shared.nested_structs_arrays.8
+dEQP-VK.memory_model.shared.nested_structs_arrays.9
+dEQP-VK.memory_model.shared.16bit.scalar_types.0
+dEQP-VK.memory_model.shared.16bit.scalar_types.1
+dEQP-VK.memory_model.shared.16bit.scalar_types.2
+dEQP-VK.memory_model.shared.16bit.scalar_types.3
+dEQP-VK.memory_model.shared.16bit.scalar_types.4
+dEQP-VK.memory_model.shared.16bit.scalar_types.5
+dEQP-VK.memory_model.shared.16bit.scalar_types.6
+dEQP-VK.memory_model.shared.16bit.scalar_types.7
+dEQP-VK.memory_model.shared.16bit.scalar_types.8
+dEQP-VK.memory_model.shared.16bit.scalar_types.9
+dEQP-VK.memory_model.shared.16bit.vector_types.0
+dEQP-VK.memory_model.shared.16bit.vector_types.1
+dEQP-VK.memory_model.shared.16bit.vector_types.2
+dEQP-VK.memory_model.shared.16bit.vector_types.3
+dEQP-VK.memory_model.shared.16bit.vector_types.4
+dEQP-VK.memory_model.shared.16bit.vector_types.5
+dEQP-VK.memory_model.shared.16bit.vector_types.6
+dEQP-VK.memory_model.shared.16bit.vector_types.7
+dEQP-VK.memory_model.shared.16bit.vector_types.8
+dEQP-VK.memory_model.shared.16bit.vector_types.9
+dEQP-VK.memory_model.shared.16bit.basic_types.0
+dEQP-VK.memory_model.shared.16bit.basic_types.1
+dEQP-VK.memory_model.shared.16bit.basic_types.2
+dEQP-VK.memory_model.shared.16bit.basic_types.3
+dEQP-VK.memory_model.shared.16bit.basic_types.4
+dEQP-VK.memory_model.shared.16bit.basic_types.5
+dEQP-VK.memory_model.shared.16bit.basic_types.6
+dEQP-VK.memory_model.shared.16bit.basic_types.7
+dEQP-VK.memory_model.shared.16bit.basic_types.8
+dEQP-VK.memory_model.shared.16bit.basic_types.9
+dEQP-VK.memory_model.shared.16bit.basic_arrays.0
+dEQP-VK.memory_model.shared.16bit.basic_arrays.1
+dEQP-VK.memory_model.shared.16bit.basic_arrays.2
+dEQP-VK.memory_model.shared.16bit.basic_arrays.3
+dEQP-VK.memory_model.shared.16bit.basic_arrays.4
+dEQP-VK.memory_model.shared.16bit.basic_arrays.5
+dEQP-VK.memory_model.shared.16bit.basic_arrays.6
+dEQP-VK.memory_model.shared.16bit.basic_arrays.7
+dEQP-VK.memory_model.shared.16bit.basic_arrays.8
+dEQP-VK.memory_model.shared.16bit.basic_arrays.9
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.0
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.1
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.2
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.3
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.4
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.5
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.6
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.7
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.8
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.9
+dEQP-VK.memory_model.shared.16bit.nested_structs.0
+dEQP-VK.memory_model.shared.16bit.nested_structs.1
+dEQP-VK.memory_model.shared.16bit.nested_structs.2
+dEQP-VK.memory_model.shared.16bit.nested_structs.3
+dEQP-VK.memory_model.shared.16bit.nested_structs.4
+dEQP-VK.memory_model.shared.16bit.nested_structs.5
+dEQP-VK.memory_model.shared.16bit.nested_structs.6
+dEQP-VK.memory_model.shared.16bit.nested_structs.7
+dEQP-VK.memory_model.shared.16bit.nested_structs.8
+dEQP-VK.memory_model.shared.16bit.nested_structs.9
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.0
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.1
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.2
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.3
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.4
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.5
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.6
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.7
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.8
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.9
+dEQP-VK.memory_model.shared.8bit.scalar_types.0
+dEQP-VK.memory_model.shared.8bit.scalar_types.1
+dEQP-VK.memory_model.shared.8bit.scalar_types.2
+dEQP-VK.memory_model.shared.8bit.scalar_types.3
+dEQP-VK.memory_model.shared.8bit.scalar_types.4
+dEQP-VK.memory_model.shared.8bit.scalar_types.5
+dEQP-VK.memory_model.shared.8bit.scalar_types.6
+dEQP-VK.memory_model.shared.8bit.scalar_types.7
+dEQP-VK.memory_model.shared.8bit.scalar_types.8
+dEQP-VK.memory_model.shared.8bit.scalar_types.9
+dEQP-VK.memory_model.shared.8bit.vector_types.0
+dEQP-VK.memory_model.shared.8bit.vector_types.1
+dEQP-VK.memory_model.shared.8bit.vector_types.2
+dEQP-VK.memory_model.shared.8bit.vector_types.3
+dEQP-VK.memory_model.shared.8bit.vector_types.4
+dEQP-VK.memory_model.shared.8bit.vector_types.5
+dEQP-VK.memory_model.shared.8bit.vector_types.6
+dEQP-VK.memory_model.shared.8bit.vector_types.7
+dEQP-VK.memory_model.shared.8bit.vector_types.8
+dEQP-VK.memory_model.shared.8bit.vector_types.9
+dEQP-VK.memory_model.shared.8bit.basic_types.0
+dEQP-VK.memory_model.shared.8bit.basic_types.1
+dEQP-VK.memory_model.shared.8bit.basic_types.2
+dEQP-VK.memory_model.shared.8bit.basic_types.3
+dEQP-VK.memory_model.shared.8bit.basic_types.4
+dEQP-VK.memory_model.shared.8bit.basic_types.5
+dEQP-VK.memory_model.shared.8bit.basic_types.6
+dEQP-VK.memory_model.shared.8bit.basic_types.7
+dEQP-VK.memory_model.shared.8bit.basic_types.8
+dEQP-VK.memory_model.shared.8bit.basic_types.9
+dEQP-VK.memory_model.shared.8bit.basic_arrays.0
+dEQP-VK.memory_model.shared.8bit.basic_arrays.1
+dEQP-VK.memory_model.shared.8bit.basic_arrays.2
+dEQP-VK.memory_model.shared.8bit.basic_arrays.3
+dEQP-VK.memory_model.shared.8bit.basic_arrays.4
+dEQP-VK.memory_model.shared.8bit.basic_arrays.5
+dEQP-VK.memory_model.shared.8bit.basic_arrays.6
+dEQP-VK.memory_model.shared.8bit.basic_arrays.7
+dEQP-VK.memory_model.shared.8bit.basic_arrays.8
+dEQP-VK.memory_model.shared.8bit.basic_arrays.9
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.0
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.1
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.2
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.3
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.4
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.5
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.6
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.7
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.8
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.9
+dEQP-VK.memory_model.shared.8bit.nested_structs.0
+dEQP-VK.memory_model.shared.8bit.nested_structs.1
+dEQP-VK.memory_model.shared.8bit.nested_structs.2
+dEQP-VK.memory_model.shared.8bit.nested_structs.3
+dEQP-VK.memory_model.shared.8bit.nested_structs.4
+dEQP-VK.memory_model.shared.8bit.nested_structs.5
+dEQP-VK.memory_model.shared.8bit.nested_structs.6
+dEQP-VK.memory_model.shared.8bit.nested_structs.7
+dEQP-VK.memory_model.shared.8bit.nested_structs.8
+dEQP-VK.memory_model.shared.8bit.nested_structs.9
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.0
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.1
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.2
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.3
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.4
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.5
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.6
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.7
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.8
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.9
vktMemoryModelMessagePassing.cpp
vktMemoryModelPadding.hpp
vktMemoryModelPadding.cpp
+ vktMemoryModelSharedLayout.cpp
+ vktMemoryModelSharedLayout.hpp
+ vktMemoryModelSharedLayoutCase.cpp
+ vktMemoryModelSharedLayoutCase.hpp
+ ../util/vktTypeComparisonUtil.hpp
+ ../util/vktTypeComparisonUtil.cpp
)
set(DEQP_VK_DEVICE_GROUP_LIBS
#include "vktMemoryModelTests.hpp"
#include "vktMemoryModelPadding.hpp"
+#include "vktMemoryModelSharedLayout.hpp"
#include "vkBufferWithMemory.hpp"
#include "vkImageWithMemory.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
-#include "vktTestGroupUtil.hpp"
#include "vktTestCase.hpp"
#include "deDefs.h"
// Padding tests.
group->addChild(createPaddingTests(testCtx));
+ // Shared memory layout tests.
+ group->addChild(createSharedMemoryLayoutTests(testCtx));
return group.release();
}
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2021 The Khronos Group Inc.
+ * Copyright (c) 2021 Google LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Shared memory model layout tests.
+ *//*--------------------------------------------------------------------*/
+
+#include "vktMemoryModelSharedLayout.hpp"
+#include "vktMemoryModelSharedLayoutCase.hpp"
+
+#include "tcuCommandLine.hpp"
+#include "tcuTestLog.hpp"
+#include "deRandom.hpp"
+#include "deStringUtil.hpp"
+#include "vktTestCaseUtil.hpp"
+#include "vkMemUtil.hpp"
+
+namespace vkt
+{
+namespace MemoryModel
+{
+using std::string;
+using std::vector;
+
+namespace
+{
+
+// Bitmask of GLSL constructs the random case generator is allowed to emit.
+// Combinations of these flags select which type categories (vectors, matrices,
+// arrays, structs, 16/8-bit storage types) may appear in the generated shared
+// memory layout.
+enum FeatureBits
+{
+ FEATURE_VECTORS = (1 << 0),
+ FEATURE_MATRICES = (1 << 1),
+ FEATURE_ARRAYS = (1 << 2),
+ FEATURE_STRUCTS = (1 << 3),
+ FEATURE_UNUSED_VARS = (1 << 4),
+ FEATURE_UNUSED_MEMBERS = (1 << 5),
+ FEATURE_ARRAYS_OF_ARRAYS = (1 << 6),
+ FEATURE_16BIT_TYPES = (1 << 7),
+ FEATURE_8BIT_TYPES = (1 << 8),
+};
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Generates names for shared memory structs and their members.
+ * \param first The first character of the alphabet.
+ * \param last The last character of the alphabet.
+ * \param ndx The index of the name in the alphabet (1-based).
+ *
+ * If the index lies within the range [1, (last-first)+1], returns
+ * the character represented by the ASCII code 'first + ndx - 1'
+ * as a string.
+ *
+ * E.g. if "first" is 'a', "last" 'z' and "ndx" is 1, returns "a". If "ndx"
+ * is 2, returns "b" and so forth.
+ *
+ * If "ndx" is greater than the range, the function keeps dividing it by
+ * the alphabet length until the index is within the range. In each iteration,
+ * the name is prefixed with the ASCII character represented by the modulo
+ * of the index (i.e. a bijective base-26 numbering: a..z, aa, ab, ...).
+ *
+ * E.g. if "first" is 'a', "last" 'z' and "ndx" is 28, returns "ab". If "ndx"
+ * is 703, returns "aaa" and so forth.
+ *//*--------------------------------------------------------------------*/
+string genName (char first, char last, int ndx)
+{
+ string str;
+ int alphabetLen = static_cast<int>(last) - static_cast<int>(first) + 1;
+
+ // Peel off the least-significant "digit" each round (the ndx - 1 makes the
+ // numbering 1-based, so 'z' maps to 26 rather than rolling into "a0").
+ while (ndx > 0)
+ {
+ const int asciiCode = static_cast<int>(first) + ((ndx - 1) % alphabetLen);
+ str.insert(str.begin(), static_cast<char>(asciiCode));
+ ndx = ((ndx - 1) / alphabetLen);
+ }
+
+ return str;
+}
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Adds a child group of "numCases" randomly generated layout cases.
+ *
+ * Each case's seed is the sum of "baseSeed", the base seed given on the
+ * command line and the case's index within the group, so runs are
+ * reproducible for a fixed command line.
+ *//*--------------------------------------------------------------------*/
+void createRandomCaseGroup (tcu::TestCaseGroup* parentGroup, tcu::TestContext &testCtx, const char *groupName,
+ const char *description, const deUint32 features, const int numCases, deUint32 baseSeed)
+{
+ tcu::TestCaseGroup *group = new tcu::TestCaseGroup(testCtx, groupName, description);
+ // NOTE(review): raw new with no matching delete — presumably addChild()
+ // transfers ownership to the parent node (dEQP convention); confirm.
+ parentGroup->addChild(group);
+
+ baseSeed += static_cast<deUint32>(testCtx.getCommandLine().getBaseSeed());
+
+ for (int i = 0; i < numCases; i++)
+ group->addChild(new RandomSharedLayoutCase(testCtx, de::toString(i).c_str(), "", features, static_cast<deUint32>(i + baseSeed)));
+}
+} // anonymous
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Builds a randomized shared-memory layout case from "seed".
+ *
+ * Enables 16/8-bit types on the interface when the corresponding feature
+ * bits are set, generates a random number of shared memory objects and
+ * finally calls init() to finalize the case. m_maxArrayLength is 0 when
+ * FEATURE_ARRAYS is absent, which disables array generation in generateType().
+ *//*--------------------------------------------------------------------*/
+RandomSharedLayoutCase::RandomSharedLayoutCase (tcu::TestContext &testCtx, const char *name, const char *description,
+ deUint32 features, deUint32 seed)
+ : SharedLayoutCase(testCtx, name, description)
+ , m_features(features)
+ , m_maxArrayLength((features & FEATURE_ARRAYS) ? 3 : 0)
+ , m_seed(seed)
+{
+ de::Random rnd(m_seed);
+
+ m_interface.enable16BitTypes(features & FEATURE_16BIT_TYPES);
+ m_interface.enable8BitTypes(features & FEATURE_8BIT_TYPES);
+
+ // NOTE(review): the loop bound draws a fresh random number every iteration,
+ // so the object count depends on draw order as well as the bound itself —
+ // looks deliberate for variability, but confirm before refactoring.
+ for (int i = 0; i < rnd.getInt(1, m_maxSharedObjects); i++)
+ generateSharedMemoryObject(rnd);
+
+ init();
+}
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Creates the definition for one shared memory struct.
+ * \param rnd Random value generator used for deciding the type of the variable.
+ *
+ * Each struct's name starts with an upper-case S and its instance name with
+ * a lower-case s, both followed by the struct's 1-based index number. The
+ * struct receives between 2 and m_maxSharedObjectMembers randomly typed
+ * members.
+ *//*--------------------------------------------------------------------*/
+void RandomSharedLayoutCase::generateSharedMemoryObject (de::Random &rnd)
+{
+ const string name = "S" + de::toString(m_interface.getNumSharedObjects() + 1);
+ const string instanceName = "s" + de::toString(m_interface.getNumSharedObjects() + 1);
+ SharedStruct &object = m_interface.allocSharedObject(name, instanceName);
+ const int numVars = rnd.getInt(2, m_maxSharedObjectMembers);
+
+ for (int i = 0; i < numVars; i++)
+ generateSharedMemoryVar(rnd, object);
+}
+
+/*--------------------------------------------------------------------*//*!
+ * \brief Appends one randomly typed member to "object".
+ *
+ * Member names follow the bijective alphabetical scheme of genName()
+ * (a, b, ..., z, aa, ...). Nested types up to depth 3 are only requested
+ * when arrays-of-arrays or structs are enabled in m_features.
+ * topLevelArraySize records the outermost array length: 0 for an unsized
+ * array, the declared length for a sized array, and 1 for non-array types.
+ *//*--------------------------------------------------------------------*/
+void RandomSharedLayoutCase::generateSharedMemoryVar (de::Random &rnd, SharedStruct &object)
+{
+ SharedStructVar var;
+ var.name = genName('a', 'z', object.getNumMembers() + 1);
+
+ if ((m_features & FEATURE_ARRAYS_OF_ARRAYS) != 0 || (m_features & FEATURE_STRUCTS) != 0)
+ var.type = generateType(rnd, 3, true);
+ else
+ var.type = generateType(rnd, 1, true);
+
+ var.topLevelArraySize = 1;
+ if (var.type.isArrayType())
+ var.topLevelArraySize = var.type.getArraySize() == glu::VarType::UNSIZED_ARRAY ? 0 : var.type.getArraySize();
+
+ object.addMember(var);
+}
+
+glu::VarType RandomSharedLayoutCase::generateType (de::Random &rnd, int typeDepth, bool arrayOk)
+{
+ const float structWeight = 0.7f;
+ const float arrayWeight = 0.8f;
+
+ if (typeDepth > 0 && rnd.getFloat() < structWeight && (m_features & FEATURE_STRUCTS))
+ {
+ vector<glu::VarType> memberTypes;
+ const int numMembers = rnd.getInt(1, m_maxStructMembers);
+
+ // Generate members first so nested struct declarations are in correct order.
+ for (int i = 0; i < numMembers; i++)
+ memberTypes.push_back(generateType(rnd, typeDepth - 1, true));
+
+ const string name = "s" + genName('A', 'Z', m_interface.getNumStructs() + 1);
+ de::SharedPtr<glu::StructType> structType = m_interface.allocStruct(name);
+
+ DE_ASSERT(numMembers <= 'Z' - 'A');
+ for (int i = 0; i < numMembers; i++)
+ structType.get()->addMember((string("m") + static_cast<char>(('A' + i))).c_str(), memberTypes[i]);
+
+ return glu::VarType(structType.get());
+ }
+ else if (typeDepth > 0 && m_maxArrayLength > 0 && arrayOk && rnd.getFloat() < arrayWeight)
+ {
+ const int arrayLength = rnd.getInt(1, m_maxArrayLength);
+ const bool childArrayOk = (m_features & FEATURE_ARRAYS_OF_ARRAYS) != 0;
+ const glu::VarType elementType = generateType(rnd, typeDepth - 1, childArrayOk);
+
+ return glu::VarType(elementType, arrayLength);
+ }
+ else
+ {
+ const float weight8Bit = (m_features & FEATURE_8BIT_TYPES) ? 0.7f : 0.0f;
+ const float weight16Bit = (m_features & FEATURE_16BIT_TYPES) ? 0.7f : 0.0f;
+ const float weightMatrices = (m_features & FEATURE_MATRICES) ? 0.3f : 0.0f;
+
+ vector<glu::DataType> typeCandidates;
+ if (rnd.getFloat() < weight16Bit)
+ {
+ typeCandidates.push_back(glu::TYPE_UINT16);
+ typeCandidates.push_back(glu::TYPE_INT16);
+ typeCandidates.push_back(glu::TYPE_FLOAT16);
+
+ if (m_features & FEATURE_VECTORS)
+ {
+ typeCandidates.push_back(glu::TYPE_FLOAT16_VEC2);
+ typeCandidates.push_back(glu::TYPE_FLOAT16_VEC3);
+ typeCandidates.push_back(glu::TYPE_FLOAT16_VEC4);
+ typeCandidates.push_back(glu::TYPE_INT16_VEC2);
+ typeCandidates.push_back(glu::TYPE_INT16_VEC3);
+ typeCandidates.push_back(glu::TYPE_INT16_VEC4);
+ typeCandidates.push_back(glu::TYPE_UINT16_VEC2);
+ typeCandidates.push_back(glu::TYPE_UINT16_VEC3);
+ typeCandidates.push_back(glu::TYPE_UINT16_VEC4);
+ }
+ }
+ else if (rnd.getFloat() < weight8Bit)
+ {
+ typeCandidates.push_back(glu::TYPE_UINT8);
+ typeCandidates.push_back(glu::TYPE_INT8);
+
+ if (m_features & FEATURE_VECTORS)
+ {
+ typeCandidates.push_back(glu::TYPE_INT8_VEC2);
+ typeCandidates.push_back(glu::TYPE_INT8_VEC3);
+ typeCandidates.push_back(glu::TYPE_INT8_VEC4);
+ typeCandidates.push_back(glu::TYPE_UINT8_VEC2);
+ typeCandidates.push_back(glu::TYPE_UINT8_VEC3);
+ typeCandidates.push_back(glu::TYPE_UINT8_VEC4);
+ }
+ }
+ else
+ {
+ typeCandidates.push_back(glu::TYPE_FLOAT);
+ typeCandidates.push_back(glu::TYPE_INT);
+ typeCandidates.push_back(glu::TYPE_UINT);
+ typeCandidates.push_back(glu::TYPE_BOOL);
+
+ if (m_features & FEATURE_VECTORS)
+ {
+ typeCandidates.push_back(glu::TYPE_FLOAT_VEC2);
+ typeCandidates.push_back(glu::TYPE_FLOAT_VEC3);
+ typeCandidates.push_back(glu::TYPE_FLOAT_VEC4);
+ typeCandidates.push_back(glu::TYPE_INT_VEC2);
+ typeCandidates.push_back(glu::TYPE_INT_VEC3);
+ typeCandidates.push_back(glu::TYPE_INT_VEC4);
+ typeCandidates.push_back(glu::TYPE_UINT_VEC2);
+ typeCandidates.push_back(glu::TYPE_UINT_VEC3);
+ typeCandidates.push_back(glu::TYPE_UINT_VEC4);
+ typeCandidates.push_back(glu::TYPE_BOOL_VEC2);
+ typeCandidates.push_back(glu::TYPE_BOOL_VEC3);
+ typeCandidates.push_back(glu::TYPE_BOOL_VEC4);
+ }
+ }
+
+ if (rnd.getFloat() < weightMatrices)
+ {
+ typeCandidates.push_back(glu::TYPE_FLOAT_MAT2);
+ typeCandidates.push_back(glu::TYPE_FLOAT_MAT2X3);
+ typeCandidates.push_back(glu::TYPE_FLOAT_MAT3X2);
+ typeCandidates.push_back(glu::TYPE_FLOAT_MAT3);
+ typeCandidates.push_back(glu::TYPE_FLOAT_MAT3X4);
+ typeCandidates.push_back(glu::TYPE_FLOAT_MAT4X2);
+ typeCandidates.push_back(glu::TYPE_FLOAT_MAT4X3);
+ typeCandidates.push_back(glu::TYPE_FLOAT_MAT4);
+ }
+
+ glu::DataType type = rnd.choose<glu::DataType>(typeCandidates.begin(), typeCandidates.end());
+ glu::Precision precision;
+
+ if (glu::dataTypeSupportsPrecisionModifier(type))
+ {
+ const glu::Precision precisionCandidates[] = { glu::PRECISION_LOWP, glu::PRECISION_MEDIUMP, glu::PRECISION_HIGHP};
+ precision = rnd.choose<glu::Precision>(&precisionCandidates[0],
+ &precisionCandidates[DE_LENGTH_OF_ARRAY(precisionCandidates)]);
+ }
+ else
+ precision = glu::PRECISION_LAST;
+
+ return glu::VarType(type, precision);
+ }
+}
+
+// Creates the root group for the dEQP-VK.memory_model.shared.* tests.
+// The caller takes ownership of the returned group.
+tcu::TestCaseGroup* createSharedMemoryLayoutTests (tcu::TestContext &testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> sharedMemoryLayoutGroup(new tcu::TestCaseGroup(testCtx, "shared", "Shared memory layout tests"));
+	tcu::TestCaseGroup *parentGroup = sharedMemoryLayoutGroup.get();
+	{
+		const deUint32 allBasicTypes = FEATURE_VECTORS | FEATURE_MATRICES;
+		const deUint32 unused = FEATURE_UNUSED_MEMBERS | FEATURE_UNUSED_VARS;
+
+		// Pass 0 adds the 32-bit cases directly under "shared"; passes 1 and 2
+		// add the same case groups under the "16bit" and "8bit" child groups
+		// with the corresponding type feature flag enabled.
+		for (int i = 0; i < 3; ++i)
+		{
+			if (i == 1)
+			{
+				// Ownership of the raw pointer transfers to sharedMemoryLayoutGroup via addChild().
+				parentGroup = new tcu::TestCaseGroup(testCtx, "16bit", "16bit");
+				sharedMemoryLayoutGroup->addChild(parentGroup);
+			}
+			else if (i == 2)
+			{
+				parentGroup = new tcu::TestCaseGroup(testCtx, "8bit", "8bit");
+				sharedMemoryLayoutGroup->addChild(parentGroup);
+			}
+			const deUint32 use16BitTypes = i == 1 ? FEATURE_16BIT_TYPES : 0;
+			const deUint32 use8BitTypes = i == 2 ? FEATURE_8BIT_TYPES : 0;
+
+			// NOTE(review): the two trailing integer arguments are presumably the
+			// case count and a seed offset for createRandomCaseGroup() — its
+			// definition is not in this chunk; confirm.
+			createRandomCaseGroup(parentGroup, testCtx, "scalar_types", "Scalar types only",
+								use8BitTypes | use16BitTypes | unused, 10, 0);
+			createRandomCaseGroup(parentGroup, testCtx, "vector_types", "Scalar and vector types only",
+								use8BitTypes | use16BitTypes | unused | FEATURE_VECTORS, 10, 25);
+			createRandomCaseGroup(parentGroup, testCtx, "basic_types", "All basic types",
+								use8BitTypes | use16BitTypes | unused | allBasicTypes, 10, 50);
+			createRandomCaseGroup(parentGroup, testCtx, "basic_arrays", "Arrays",
+								use8BitTypes | use16BitTypes | unused | allBasicTypes | FEATURE_ARRAYS, 10, 50);
+			createRandomCaseGroup(parentGroup, testCtx, "arrays_of_arrays", "Arrays of arrays",
+								use8BitTypes | use16BitTypes | unused | allBasicTypes | FEATURE_ARRAYS |
+								FEATURE_ARRAYS_OF_ARRAYS, 10, 950);
+			createRandomCaseGroup(parentGroup, testCtx, "nested_structs", "Nested structs",
+								use8BitTypes | use16BitTypes | unused | allBasicTypes | FEATURE_STRUCTS, 10, 100);
+			createRandomCaseGroup(parentGroup, testCtx, "nested_structs_arrays", "Nested structs, arrays",
+								use8BitTypes | use16BitTypes | unused | allBasicTypes | FEATURE_STRUCTS |
+								FEATURE_ARRAYS | FEATURE_ARRAYS_OF_ARRAYS, 10, 150);
+		}
+	}
+
+	return sharedMemoryLayoutGroup.release();
+}
+
+} // MemoryModel
+} // vkt
--- /dev/null
+#ifndef _VKTMEMORYMODELSHAREDLAYOUT_HPP
+#define _VKTMEMORYMODELSHAREDLAYOUT_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2021 The Khronos Group Inc.
+ * Copyright (c) 2021 Google LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Shared memory layout tests.
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace MemoryModel
+{
+
+tcu::TestCaseGroup* createSharedMemoryLayoutTests (tcu::TestContext& testCtx);
+
+} // MemoryModel
+} // vkt
+
+#endif // _VKTMEMORYMODELSHAREDLAYOUT_HPP
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2021 The Khronos Group Inc.
+ * Copyright (c) 2021 Google LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Shared memory layout test case.
+ *//*--------------------------------------------------------------------*/
+
+#include <vkDefs.hpp>
+#include "deRandom.hpp"
+#include "gluContextInfo.hpp"
+#include "gluVarTypeUtil.hpp"
+#include "tcuTestLog.hpp"
+
+#include "vkBuilderUtil.hpp"
+#include "vkMemUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkRefUtil.hpp"
+#include "vkRef.hpp"
+#include "vkTypeUtil.hpp"
+#include "vkCmdUtil.hpp"
+
+#include "vktMemoryModelSharedLayoutCase.hpp"
+#include "util/vktTypeComparisonUtil.hpp"
+
+namespace vkt
+{
+namespace MemoryModel
+{
+
+using tcu::TestLog;
+using std::string;
+using std::vector;
+using glu::VarType;
+using glu::StructMember;
+
+namespace
+{
+	// Recursively flattens 'type' into (basic type, array size) records so that
+	// one literal value can later be generated per scalar element of the variable.
+	void computeReferenceLayout (const VarType& type, vector<SharedStructVarEntry>& entries)
+	{
+		if (type.isBasicType())
+			entries.push_back(SharedStructVarEntry(type.getBasicType(), 1));
+		else if (type.isArrayType())
+		{
+			const VarType &elemType = type.getElementType();
+
+			// Array of scalars, vectors or matrices: one entry covers the whole array.
+			if (elemType.isBasicType())
+				entries.push_back(SharedStructVarEntry(elemType.getBasicType(), type.getArraySize()));
+			else
+			{
+				// Array of structs or arrays: emit the element's entries once per element.
+				DE_ASSERT(elemType.isStructType() || elemType.isArrayType());
+				for (int i = 0; i < type.getArraySize(); i++)
+					computeReferenceLayout(type.getElementType(), entries);
+			}
+		}
+		else
+		{
+			// Struct: flatten every member in declaration order.
+			DE_ASSERT(type.isStructType());
+			for (const auto& member : *type.getStructPtr())
+				computeReferenceLayout(member.getType(), entries);
+		}
+	}
+
+	// Flattens a shared-struct member variable into var.entries.
+	void computeReferenceLayout (SharedStructVar& var)
+	{
+		// Top-level arrays need special care.
+		// Only a single element's entries are recorded here; value generation is
+		// repeated var.topLevelArraySize times later (see SharedLayoutCase::delayedInit).
+		if (var.type.isArrayType())
+			computeReferenceLayout(var.type.getElementType(), var.entries);
+		else
+			computeReferenceLayout(var.type, var.entries);
+	}
+
+	// Appends to 'values' one random GLSL literal per scalar component of
+	// 'entry', iterating array elements, then matrix columns, then vector
+	// components — the same order generateSharedMemoryWrites() consumes them in.
+	void generateValue (const SharedStructVarEntry& entry, de::Random& rnd, vector<string>& values)
+	{
+		const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
+		const int scalarSize = glu::getDataTypeScalarSize(entry.type);
+		const int arraySize = entry.arraySize;
+		const bool isMatrix = glu::isDataTypeMatrix(entry.type);
+		const int numVecs = isMatrix ? glu::getDataTypeMatrixNumColumns(entry.type) : 1;
+		const int vecSize = scalarSize / numVecs;
+
+		DE_ASSERT(scalarSize % numVecs == 0);
+		DE_ASSERT(arraySize >= 0);
+
+		string generatedValue;
+		for (int elemNdx = 0; elemNdx < arrayNdxLimitComment(elemNdx), elemNdx < arraySize; elemNdx++)
+		{
+			for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
+			{
+				for (int compNdx = 0; compNdx < vecSize; compNdx++)
+				{
+					switch (scalarType)
+					{
+					case glu::TYPE_INT:
+					case glu::TYPE_INT8:
+					case glu::TYPE_INT16:
+						// Grouped labels: [-9, 9] fits into all of the int types above.
+						generatedValue = de::toString(rnd.getInt(-9, 9));
+						break;
+					case glu::TYPE_UINT:
+					case glu::TYPE_UINT8:
+					case glu::TYPE_UINT16:
+						// Grouped labels: [0, 9] with a "u" suffix fits all uint types above.
+						generatedValue = de::toString(rnd.getInt(0, 9)).append("u");
+						break;
+					case glu::TYPE_FLOAT:
+					case glu::TYPE_FLOAT16:
+						// Grouped labels: small whole-number floats are exactly
+						// representable in both float and float16.
+						generatedValue = de::floatToString(static_cast<float>(rnd.getInt(-9, 9)), 1);
+						break;
+					case glu::TYPE_BOOL:
+						generatedValue = rnd.getBool() ? "true" : "false";
+						break;
+					default:
+						DE_ASSERT(false);
+					}
+
+					values.push_back(generatedValue);
+				}
+			}
+		}
+	}
+
+	// Builds the GLSL access string for a leaf of 'var', e.g. ".mA[2].mB".
+	// The leading "." is intentional: callers prepend the shared instance name.
+	string getStructMemberName (const SharedStructVar& var, const glu::TypeComponentVector& accessPath)
+	{
+		std::ostringstream name;
+
+		name << "." << var.name;
+
+		for (auto pathComp = accessPath.begin(); pathComp != accessPath.end(); pathComp++)
+		{
+			if (pathComp->type == glu::VarTypeComponent::STRUCT_MEMBER)
+			{
+				// Resolve the struct type at this point of the path to look up the member name.
+				const VarType curType = glu::getVarType(var.type, accessPath.begin(), pathComp);
+				const glu::StructType *structPtr = curType.getStructPtr();
+
+				name << "." << structPtr->getMember(pathComp->index).getName();
+			}
+			else if (pathComp->type == glu::VarTypeComponent::ARRAY_ELEMENT)
+				name << "[" << pathComp->index << "]";
+			else
+				DE_ASSERT(false);
+		}
+
+		return name.str();
+	}
+} // anonymous
+
+// Creates a new named struct type owned by this interface and returns a shared
+// pointer to it; the struct is later declared in the generated shader source.
+NamedStructSP ShaderInterface::allocStruct (const string& name)
+{
+	m_structs.emplace_back(new glu::StructType(name.c_str()));
+	return m_structs.back();
+}
+
+// Creates a new shared memory struct with the given type and instance names.
+// The returned reference stays valid only until the next allocation.
+SharedStruct& ShaderInterface::allocSharedObject (const string& name, const string& instanceName)
+{
+	m_sharedMemoryObjects.emplace_back(name, instanceName);
+	return m_sharedMemoryObjects.back();
+}
+
+// Emits the GLSL "compare_*" helper functions needed to compare every basic
+// type used by the shared memory objects, including transitive dependencies
+// (e.g. comparing a matrix requires the vector and scalar comparators).
+void generateCompareFuncs (std::ostream &str, const ShaderInterface &interface)
+{
+	std::set<glu::DataType> types;
+	std::set<glu::DataType> compareFuncs;
+
+	// Collect unique basic types.
+	for (const auto& sharedObj : interface.getSharedObjects())
+		for (const auto& var : sharedObj)
+			vkt::typecomputil::collectUniqueBasicTypes(types, var.type);
+
+	// Set of compare functions required.
+	for (const auto& type : types)
+		vkt::typecomputil::getCompareDependencies(compareFuncs, type);
+
+	// Emit in glu::DataType enum order so each function appears exactly once.
+	for (int type = 0; type < glu::TYPE_LAST; ++type)
+		if (compareFuncs.find(glu::DataType(type)) != compareFuncs.end())
+			str << vkt::typecomputil::getCompareFuncForType(glu::DataType(type));
+}
+
+// Recursively walks 'var' via 'accessPath' and emits one GLSL statement per
+// basic-type leaf. With compare == false the statement assigns the next
+// value(s) from 'valueIter' into the shared object member; with compare ==
+// true it emits "allOk = allOk && compare_<type>(<expected>, <actual>);".
+// 'valueIter' is advanced by one element per scalar component consumed, so the
+// same iteration order as generateValue() must be preserved.
+void generateSharedMemoryWrites (std::ostream &src, const SharedStruct &object,
+								 const SharedStructVar &var, const glu::SubTypeAccess &accessPath,
+								 vector<string>::const_iterator &valueIter, bool compare)
+{
+	const VarType curType = accessPath.getType();
+
+	if (curType.isArrayType())
+	{
+		// Recurse into every array element.
+		const int arraySize = curType.getArraySize();
+		for (int i = 0; i < arraySize; i++)
+			generateSharedMemoryWrites(src, object, var, accessPath.element(i), valueIter, compare);
+	}
+	else if (curType.isStructType())
+	{
+		// Recurse into every struct member.
+		const int numMembers = curType.getStructPtr()->getNumMembers();
+		for (int i = 0; i < numMembers; i++)
+			generateSharedMemoryWrites(src, object, var, accessPath.member(i), valueIter, compare);
+	}
+	else
+	{
+		DE_ASSERT(curType.isBasicType());
+
+		const glu::DataType basicType = curType.getBasicType();
+		const string typeName = glu::getDataTypeName(basicType);
+		const string sharedObjectVarName = object.getInstanceName();
+		const string structMember = getStructMemberName(var, accessPath.getPath());
+		const glu::DataType promoteType = vkt::typecomputil::getPromoteType(basicType);
+
+		int numElements = glu::getDataTypeScalarSize(basicType);
+		if (glu::isDataTypeMatrix(basicType))
+			numElements = glu::getDataTypeMatrixNumColumns(basicType) * glu::getDataTypeMatrixNumRows(basicType);
+
+		// A constructor call is needed whenever the literal type differs from the
+		// member type (8-/16-bit types) or more than one component is written.
+		// Using one flag for both the opening and closing parenthesis keeps the
+		// two from drifting apart (previously the close duplicated the condition
+		// as "if (a) else if (b)").
+		const bool needsConstructor = (basicType != promoteType || numElements > 1);
+
+		if (compare)
+		{
+			src << "\tallOk = allOk && compare_" << typeName << "(";
+			// Comparison functions use 32-bit values. Convert 8/16-bit scalar and vector types if necessary.
+			// E.g. uint8_t becomes int.
+			if (needsConstructor)
+				src << glu::getDataTypeName(promoteType) << "(";
+		}
+		else
+		{
+			src << "\t" << sharedObjectVarName << structMember << " = ";
+			// If multiple literals or a 8/16-bit literal is assigned, the variable must be
+			// initialized with the constructor.
+			if (needsConstructor)
+				src << glu::getDataTypeName(basicType) << "(";
+		}
+
+		for (int i = 0; i < numElements; i++)
+			src << (i != 0 ? ", " : "") << *valueIter++;
+
+		// Close the constructor if one was opened above.
+		if (needsConstructor)
+			src << ")";
+
+		// Write the variable in the shared memory as the next argument for the comparison function.
+		// Initialize it as a new 32-bit variable in the case it's a 8-bit or a 16-bit variable.
+		if (compare)
+		{
+			if (basicType != promoteType)
+				src << ", " << glu::getDataTypeName(promoteType) << "(" << sharedObjectVarName
+					<< structMember
+					<< "))";
+			else
+				src << ", " << sharedObjectVarName << structMember << ")";
+		}
+
+		src << ";\n";
+	}
+}
+
+// Generates the complete GLSL compute shader for a case: type declarations,
+// shared variable declarations, per-member initialization writes, a barrier,
+// and comparisons that increment the SSBO counter "passed" iff every value
+// reads back intact (i.e. no shared-memory fields overlapped each other).
+string generateComputeShader (ShaderInterface &interface)
+{
+	std::ostringstream src;
+
+	src << "#version 450\n";
+
+	if (interface.is16BitTypesEnabled())
+		src << "#extension GL_EXT_shader_explicit_arithmetic_types : enable\n";
+	if (interface.is8BitTypesEnabled())
+		src << "#extension GL_EXT_shader_explicit_arithmetic_types_int8 : enable\n";
+
+	// Single invocation: the test exercises layout, not concurrency.
+	src << "layout(local_size_x = 1) in;\n";
+	src << "\n";
+
+	// Result buffer read back by SharedLayoutCaseInstance::iterate().
+	src << "layout(std140, binding = 0) buffer block { highp uint passed; };\n";
+
+	// Output definitions for the struct fields of the shared memory objects.
+	std::vector<NamedStructSP>& namedStructs = interface.getStructs();
+
+	for (const auto& s: namedStructs)
+		src << glu::declare(s.get()) << ";\n";
+
+	// Output definitions for the shared memory structs.
+	for (auto& sharedObj : interface.getSharedObjects())
+	{
+		src << "struct " << sharedObj.getName() << " {\n";
+
+		for (auto& var : sharedObj)
+			src << "\t" << glu::declare(var.type, var.name, 1) << ";\n";
+
+		src << "};\n";
+	}
+
+	// Comparison utilities.
+	src << "\n";
+	generateCompareFuncs(src, interface);
+
+	// One "shared" instance per shared memory object.
+	src << "\n";
+	for (auto& sharedObj : interface.getSharedObjects())
+		src << "shared " << sharedObj.getName() << " " << sharedObj.getInstanceName() << ";\n";
+
+	src << "\n";
+	src << "void main (void) {\n";
+
+	// Phase 1: write every generated value into shared memory.
+	for (auto& sharedObj : interface.getSharedObjects())
+	{
+		for (const auto& var : sharedObj)
+		{
+			vector<string>::const_iterator valueIter = var.entryValues.begin();
+			generateSharedMemoryWrites(src, sharedObj, var, glu::SubTypeAccess(var.type), valueIter, false);
+		}
+	}
+
+	// Make all writes visible before reading the values back.
+	src << "\n";
+	src << "\tbarrier();\n";
+	src << "\tmemoryBarrier();\n";
+	src << "\tbool allOk = true;\n";
+
+	// Phase 2: compare every field against the value written to it.
+	for (auto& sharedObj : interface.getSharedObjects())
+	{
+		for (const auto& var : sharedObj)
+		{
+			vector<string>::const_iterator valueIter = var.entryValues.begin();
+			generateSharedMemoryWrites(src, sharedObj, var, glu::SubTypeAccess(var.type), valueIter, true);
+		}
+	}
+
+	src << "\tif (allOk)\n"
+		<< "\t\tpassed++;\n"
+		<< "\n";
+
+	src << "}\n";
+
+	return src.str();
+}
+
+// Throws NotSupportedError unless the device supports the 8-/16-bit arithmetic
+// types this case's generated shader uses.
+void SharedLayoutCase::checkSupport(Context& context) const
+{
+	if ((m_interface.is16BitTypesEnabled() || m_interface.is8BitTypesEnabled())
+		&& !context.isDeviceFunctionalitySupported("VK_KHR_shader_float16_int8"))
+		TCU_THROW(NotSupportedError, "VK_KHR_shader_float16_int8 extension for 16-/8-bit types not supported");
+
+	const vk::VkPhysicalDeviceVulkan12Features features = context.getDeviceVulkan12Features();
+	// NOTE(review): 16-bit cases also generate int16/uint16 variables, yet only
+	// shaderFloat16 (and shaderInt8 for 8-bit) is checked here — confirm whether
+	// VkPhysicalDeviceFeatures::shaderInt16 must also be required.
+	if (m_interface.is16BitTypesEnabled() && !features.shaderFloat16)
+		TCU_THROW(NotSupportedError, "16-bit types not supported");
+	if (m_interface.is8BitTypesEnabled() && !features.shaderInt8)
+		TCU_THROW(NotSupportedError, "8-bit types not supported");
+}
+
+// Runs the precompiled "compute" shader once (a single 1x1x1 dispatch) with a
+// 4-byte host-visible SSBO bound at binding 0, then reads back the "passed"
+// counter: the shader increments it exactly once iff every shared-memory field
+// compared equal to the value written to it.
+tcu::TestStatus SharedLayoutCaseInstance::iterate (void)
+{
+	const vk::DeviceInterface &vk = m_context.getDeviceInterface();
+	const vk::VkDevice device = m_context.getDevice();
+	const vk::VkQueue queue = m_context.getUniversalQueue();
+	const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
+	// Single uint counter ("passed") — see generateComputeShader().
+	const deUint32 bufferSize = 4;
+
+	// Create descriptor set
+	const vk::VkBufferCreateInfo params =
+	{
+		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// sType
+		DE_NULL,									// pNext
+		0u,											// flags
+		bufferSize,									// size
+		vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,		// usage
+		vk::VK_SHARING_MODE_EXCLUSIVE,				// sharingMode
+		1u,											// queueFamilyCount
+		&queueFamilyIndex							// pQueueFamilyIndices
+	};
+
+	vk::Move<vk::VkBuffer> buffer (vk::createBuffer(vk, device, &params));
+
+	de::MovePtr<vk::Allocation> bufferAlloc (vk::bindBuffer (m_context.getDeviceInterface(), m_context.getDevice(),
+		m_context.getDefaultAllocator(), *buffer, vk::MemoryRequirement::HostVisible));
+
+	// Zero the counter before the dispatch and flush so the device sees it.
+	deMemset(bufferAlloc->getHostPtr(), 0, bufferSize);
+	flushMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), bufferSize);
+
+	vk::DescriptorSetLayoutBuilder setLayoutBuilder;
+	vk::DescriptorPoolBuilder poolBuilder;
+
+	setLayoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
+
+	poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, deUint32(1));
+
+	const vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout (setLayoutBuilder.build(vk, device));
+	const vk::Unique<vk::VkDescriptorPool> descriptorPool (poolBuilder.build(vk, device,
+		vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+
+	const vk::VkDescriptorSetAllocateInfo allocInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,	// VkStructureType sType;
+		DE_NULL,											// const void* pNext;
+		*descriptorPool,									// VkDescriptorPool descriptorPool;
+		1u,													// deUint32 descriptorSetCount;
+		&descriptorSetLayout.get(),							// const VkDescriptorSetLayout *pSetLayouts;
+	};
+
+	const vk::Unique<vk::VkDescriptorSet> descriptorSet (allocateDescriptorSet(vk, device, &allocInfo));
+	const vk::VkDescriptorBufferInfo descriptorInfo = makeDescriptorBufferInfo(*buffer, 0ull, bufferSize);
+
+	vk::DescriptorSetUpdateBuilder setUpdateBuilder;
+	std::vector<vk::VkDescriptorBufferInfo> descriptors;
+
+	setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
+		vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descriptorInfo);
+
+	setUpdateBuilder.update(vk, device);
+
+	const vk::VkPipelineLayoutCreateInfo pipelineLayoutParams =
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	// VkStructureType sType;
+		DE_NULL,											// const void* pNext;
+		(vk::VkPipelineLayoutCreateFlags) 0,				// VkPipelineLayoutCreateFlags flags;
+		1u,													// deUint32 descriptorSetCount;
+		&*descriptorSetLayout,								// const VkDescriptorSetLayout* pSetLayouts;
+		0u,													// deUint32 pushConstantRangeCount;
+		DE_NULL												// const VkPushConstantRange* pPushConstantRanges;
+	};
+	vk::Move<vk::VkPipelineLayout> pipelineLayout (createPipelineLayout(vk, device, &pipelineLayoutParams));
+
+	// "compute" was added to the binary collection by initPrograms().
+	vk::Move<vk::VkShaderModule> shaderModule (createShaderModule(vk, device, m_context.getBinaryCollection().get("compute"), 0));
+	const vk::VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
+	{
+		vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType sType;
+		DE_NULL,													// const void* pNext;
+		(vk::VkPipelineShaderStageCreateFlags) 0,					// VkPipelineShaderStageCreateFlags flags;
+		vk::VK_SHADER_STAGE_COMPUTE_BIT,							// VkShaderStage stage;
+		*shaderModule,												// VkShaderModule module;
+		"main",														// const char* pName;
+		DE_NULL,													// const VkSpecializationInfo* pSpecializationInfo;
+	};
+	const vk::VkComputePipelineCreateInfo pipelineCreateInfo =
+	{
+		vk::VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,	// VkStructureType sType;
+		DE_NULL,											// const void* pNext;
+		0,													// VkPipelineCreateFlags flags;
+		pipelineShaderStageParams,							// VkPipelineShaderStageCreateInfo stage;
+		*pipelineLayout,									// VkPipelineLayout layout;
+		DE_NULL,											// VkPipeline basePipelineHandle;
+		0,													// deInt32 basePipelineIndex;
+	};
+
+	vk::Move<vk::VkPipeline> pipeline (createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo));
+	vk::Move<vk::VkCommandPool> cmdPool (createCommandPool(vk, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
+	vk::Move<vk::VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+
+	beginCommandBuffer(vk, *cmdBuffer, 0u);
+
+	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
+
+	vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout,
+		0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
+
+	// Single workgroup is enough; the shader's local size is 1.
+	vk.cmdDispatch(*cmdBuffer, 1, 1, 1);
+
+	endCommandBuffer(vk, *cmdBuffer);
+
+	submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
+
+	// Read back passed data
+	bool counterOk;
+	const int refCount = 1;
+	int resCount = 0;
+
+	// Invalidate the host mapping before reading what the device wrote.
+	invalidateAlloc(vk, device, *bufferAlloc);
+
+	resCount = *(static_cast<const int *>(bufferAlloc->getHostPtr()));
+
+	counterOk = (refCount == resCount);
+	if (!counterOk)
+		m_context.getTestContext().getLog() << TestLog::Message << "Error: passed = " << resCount
+			<< ", expected " << refCount << TestLog::EndMessage;
+
+	// Validate result
+	if (counterOk)
+		return tcu::TestStatus::pass("Counter value OK");
+
+	return tcu::TestStatus::fail("Counter value incorrect");
+}
+
+// Registers the compute shader generated by delayedInit() under the name
+// "compute", which SharedLayoutCaseInstance::iterate() retrieves.
+void SharedLayoutCase::initPrograms (vk::SourceCollections &programCollection) const
+{
+	// delayedInit() must have run first and produced the shader source.
+	DE_ASSERT(!m_computeShaderSrc.empty());
+	programCollection.glslSources.add("compute") << glu::ComputeSource(m_computeShaderSrc);
+}
+
+// Creates the per-run instance; the framework takes ownership of the pointer.
+TestInstance* SharedLayoutCase::createInstance (Context &context) const
+{
+	return new SharedLayoutCaseInstance(context);
+}
+
+// Flattens every shared variable's type into reference-layout entries, then
+// generates deterministic pseudo-random literal values for each scalar element
+// (seeded from the case name), and finally builds the compute shader source.
+void SharedLayoutCase::delayedInit (void)
+{
+
+	for (auto& sharedObj : m_interface.getSharedObjects())
+		for (auto &var : sharedObj)
+			computeReferenceLayout(var);
+
+	// Hash the case name so every case gets a stable, distinct value stream.
+	deUint32 seed = deStringHash(getName()) ^ 0xad2f7214;
+	de::Random rnd (seed);
+
+	// computeReferenceLayout() records only one element's entries for top-level
+	// arrays; repeat value generation once per top-level element here.
+	for (auto& sharedObj : m_interface.getSharedObjects())
+		for (auto &var : sharedObj)
+			for (int i = 0; i < var.topLevelArraySize; i++)
+				for (auto &entry : var.entries)
+					generateValue(entry, rnd, var.entryValues);
+
+	m_computeShaderSrc = generateComputeShader(m_interface);
+}
+
+} // MemoryModel
+} // vkt
--- /dev/null
+#ifndef _VKTMEMORYMODELSHAREDLAYOUTCASE_HPP
+#define _VKTMEMORYMODELSHAREDLAYOUTCASE_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2021 The Khronos Group Inc.
+ * Copyright (c) 2021 Google LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Shared memory layout tests.
+ *//*--------------------------------------------------------------------*/
+
+#include "vktTestCase.hpp"
+#include "tcuDefs.hpp"
+#include "gluShaderUtil.hpp"
+#include "gluVarType.hpp"
+
+#include "deRandom.hpp"
+#include "deSharedPtr.hpp"
+
+#include <vector>
+
+namespace vkt
+{
+namespace MemoryModel
+{
+typedef de::SharedPtr<glu::StructType> NamedStructSP;
+
+// One flattened (basic type, array length) record of a shared-struct member;
+// produced by computeReferenceLayout() and consumed by generateValue().
+struct SharedStructVarEntry
+{
+	SharedStructVarEntry(glu::DataType type_, int arraySize_)
+		: type(type_), arraySize(arraySize_) {}
+
+	// Basic (scalar/vector/matrix) type of the flattened element.
+	glu::DataType type;
+	// Number of consecutive elements of this type (1 for non-array leaves).
+	int arraySize;
+};
+
+// One member variable of a shared memory struct, together with its flattened
+// reference layout and the literal values the shader writes into it.
+struct SharedStructVar
+{
+	std::string name;
+	glu::VarType type;
+	int arraySize;
+	// For top-level array types: element count. The flattened entries are
+	// recorded once and values are generated topLevelArraySize times over.
+	int topLevelArraySize;
+	// Flattened (type, count) records — see computeReferenceLayout().
+	std::vector<SharedStructVarEntry> entries;
+
+	// Contains all the values assigned to the variable.
+	std::vector<std::string> entryValues;
+};
+
+// A struct instance placed in shared memory: a GLSL type name, an instance
+// name, and an ordered list of member variables (iterable via begin()/end()).
+class SharedStruct
+{
+public:
+	typedef std::vector<SharedStructVar>::iterator iterator;
+	typedef std::vector<SharedStructVar>::const_iterator const_iterator;
+
+	// Take the names by const reference: passing "const std::string" by value
+	// forced two needless copies per construction.
+	SharedStruct (const std::string& name, const std::string& instanceName)
+		: m_name(name), m_instanceName(instanceName) {}
+
+	// Return by const reference; callers only read or copy these strings.
+	const std::string& getName (void) const { return m_name; }
+	const std::string& getInstanceName (void) const { return m_instanceName; }
+
+	void addMember (SharedStructVar var) { m_members.push_back(var); }
+	// const-qualified so it is callable on const objects.
+	int getNumMembers (void) const { return static_cast<int>(m_members.size()); }
+
+	inline iterator begin (void) { return m_members.begin(); }
+	inline const_iterator begin (void) const { return m_members.begin(); }
+	inline iterator end (void) { return m_members.end(); }
+	inline const_iterator end (void) const { return m_members.end(); }
+
+private:
+	// Shared struct name
+	std::string m_name;
+
+	// Shared struct instance name
+	std::string m_instanceName;
+
+	// Contains the members of this struct.
+	std::vector<SharedStructVar> m_members;
+};
+
+// Owns every named struct type and shared memory object generated for a case,
+// and records whether the case uses the optional 8-/16-bit type features.
+class ShaderInterface
+{
+public:
+	ShaderInterface (void) {}
+	~ShaderInterface (void) {}
+
+	SharedStruct& allocSharedObject (const std::string& name, const std::string& instanceName);
+	NamedStructSP allocStruct (const std::string& name);
+
+	std::vector<NamedStructSP>& getStructs (void) { return m_structs; }
+	int getNumStructs (void) { return static_cast<int>(m_structs.size()); }
+
+	int getNumSharedObjects (void) const { return static_cast<int>(m_sharedMemoryObjects.size()); }
+	std::vector<SharedStruct>& getSharedObjects (void) { return m_sharedMemoryObjects; }
+	const std::vector<SharedStruct>& getSharedObjects (void) const { return m_sharedMemoryObjects; }
+
+	void enable8BitTypes (bool enabled) { m_8BitTypesEnabled = enabled; }
+	void enable16BitTypes (bool enabled) { m_16BitTypesEnabled = enabled; }
+	bool is8BitTypesEnabled (void) const { return m_8BitTypesEnabled; }
+	bool is16BitTypesEnabled (void) const { return m_16BitTypesEnabled; }
+private:
+	ShaderInterface (const ShaderInterface&);
+	ShaderInterface& operator= (const ShaderInterface&);
+
+	std::vector<NamedStructSP> m_structs;
+	std::vector<SharedStruct> m_sharedMemoryObjects;
+	// In-class initializers: the flags were previously left uninitialized by
+	// the default constructor, yet is8/16BitTypesEnabled() is read in
+	// checkSupport() and generateComputeShader() — an indeterminate-value read
+	// whenever the enable*() setters were never called.
+	bool m_8BitTypesEnabled = false;
+	bool m_16BitTypesEnabled = false;
+};
+
+// Per-run instance: dispatches the precompiled compute shader once and checks
+// the "passed" counter (see iterate() in the .cpp file).
+class SharedLayoutCaseInstance : public TestInstance
+{
+public:
+	SharedLayoutCaseInstance(Context& context)
+		: TestInstance(context) {}
+	virtual ~SharedLayoutCaseInstance(void) {}
+	virtual tcu::TestStatus iterate(void);
+};
+
+// Base class for shared memory layout cases. Subclasses populate m_interface;
+// delayedInit() then generates values and the compute shader source, and
+// initPrograms() registers the shader for compilation.
+class SharedLayoutCase : public vkt::TestCase
+{
+public:
+	SharedLayoutCase (tcu::TestContext& testCtx, const char* name, const char* description)
+		: TestCase(testCtx, name, description) {}
+	virtual ~SharedLayoutCase (void) {}
+	virtual void delayedInit (void);
+	virtual void initPrograms (vk::SourceCollections& programCollection) const;
+	virtual TestInstance* createInstance (Context& context) const;
+	virtual void checkSupport (Context& context) const;
+
+protected:
+	// Shared memory objects and struct types for this case; filled by subclasses.
+	ShaderInterface m_interface;
+	// GLSL compute shader source; produced by delayedInit().
+	std::string m_computeShaderSrc;
+
+private:
+	// Non-copyable (pre-C++11 "declared but not defined" idiom).
+	SharedLayoutCase (const SharedLayoutCase&);
+	SharedLayoutCase& operator= (const SharedLayoutCase&);
+};
+
+// SharedLayoutCase whose shared memory objects are generated pseudo-randomly
+// from a FEATURE_* mask and a seed.
+class RandomSharedLayoutCase : public SharedLayoutCase
+{
+public:
+	RandomSharedLayoutCase (tcu::TestContext& testCtx, const char* name, const char* description,
+		deUint32 features, deUint32 seed);
+
+private:
+	void generateSharedMemoryObject (de::Random& rnd);
+	void generateSharedMemoryVar (de::Random& rnd, SharedStruct& object);
+	// Generates a random type up to 'typeDepth' levels of nesting; 'arrayOk'
+	// gates whether an array type may be produced at this level.
+	glu::VarType generateType (de::Random& rnd, int typeDepth, bool arrayOk);
+
+	// FEATURE_* flag mask controlling which type constructs may be generated.
+	deUint32 m_features;
+	// NOTE(review): initialization of m_maxArrayLength is not visible in this
+	// chunk — presumably set in the constructor from 'features'; confirm.
+	int m_maxArrayLength;
+	deUint32 m_seed;
+
+	// Upper bounds for the randomly generated shader interface.
+	const int m_maxSharedObjects = 3;
+	const int m_maxSharedObjectMembers = 4;
+	const int m_maxStructMembers = 3;
+};
+
+} // MemoryModel
+} // vkt
+
+#endif // _VKTMEMORYMODELSHAREDLAYOUTCASE_HPP
vktSSBOLayoutTests.hpp
vktSSBOCornerCase.cpp
vktSSBOCornerCase.hpp
+ ../util/vktTypeComparisonUtil.cpp
+ ../util/vktTypeComparisonUtil.hpp
)
set(DEQP_VK_SSBO_LIBS
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"
+#include "util/vktTypeComparisonUtil.hpp"
+
namespace vkt
{
namespace ssbo
// Shader generator.
-const char* getCompareFuncForType (glu::DataType type)
-{
- switch (type)
- {
- case glu::TYPE_FLOAT: return "bool compare_float (highp float a, highp float b) { return abs(a - b) < 0.05; }\n";
- case glu::TYPE_FLOAT_VEC2: return "bool compare_vec2 (highp vec2 a, highp vec2 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y); }\n";
- case glu::TYPE_FLOAT_VEC3: return "bool compare_vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y)&&compare_float(a.z, b.z); }\n";
- case glu::TYPE_FLOAT_VEC4: return "bool compare_vec4 (highp vec4 a, highp vec4 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y)&&compare_float(a.z, b.z)&&compare_float(a.w, b.w); }\n";
- case glu::TYPE_FLOAT_MAT2: return "bool compare_mat2 (highp mat2 a, highp mat2 b) { return compare_vec2(a[0], b[0])&&compare_vec2(a[1], b[1]); }\n";
- case glu::TYPE_FLOAT_MAT2X3: return "bool compare_mat2x3 (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])&&compare_vec3(a[1], b[1]); }\n";
- case glu::TYPE_FLOAT_MAT2X4: return "bool compare_mat2x4 (highp mat2x4 a, highp mat2x4 b){ return compare_vec4(a[0], b[0])&&compare_vec4(a[1], b[1]); }\n";
- case glu::TYPE_FLOAT_MAT3X2: return "bool compare_mat3x2 (highp mat3x2 a, highp mat3x2 b){ return compare_vec2(a[0], b[0])&&compare_vec2(a[1], b[1])&&compare_vec2(a[2], b[2]); }\n";
- case glu::TYPE_FLOAT_MAT3: return "bool compare_mat3 (highp mat3 a, highp mat3 b) { return compare_vec3(a[0], b[0])&&compare_vec3(a[1], b[1])&&compare_vec3(a[2], b[2]); }\n";
- case glu::TYPE_FLOAT_MAT3X4: return "bool compare_mat3x4 (highp mat3x4 a, highp mat3x4 b){ return compare_vec4(a[0], b[0])&&compare_vec4(a[1], b[1])&&compare_vec4(a[2], b[2]); }\n";
- case glu::TYPE_FLOAT_MAT4X2: return "bool compare_mat4x2 (highp mat4x2 a, highp mat4x2 b){ return compare_vec2(a[0], b[0])&&compare_vec2(a[1], b[1])&&compare_vec2(a[2], b[2])&&compare_vec2(a[3], b[3]); }\n";
- case glu::TYPE_FLOAT_MAT4X3: return "bool compare_mat4x3 (highp mat4x3 a, highp mat4x3 b){ return compare_vec3(a[0], b[0])&&compare_vec3(a[1], b[1])&&compare_vec3(a[2], b[2])&&compare_vec3(a[3], b[3]); }\n";
- case glu::TYPE_FLOAT_MAT4: return "bool compare_mat4 (highp mat4 a, highp mat4 b) { return compare_vec4(a[0], b[0])&&compare_vec4(a[1], b[1])&&compare_vec4(a[2], b[2])&&compare_vec4(a[3], b[3]); }\n";
- case glu::TYPE_INT: return "bool compare_int (highp int a, highp int b) { return a == b; }\n";
- case glu::TYPE_INT_VEC2: return "bool compare_ivec2 (highp ivec2 a, highp ivec2 b) { return a == b; }\n";
- case glu::TYPE_INT_VEC3: return "bool compare_ivec3 (highp ivec3 a, highp ivec3 b) { return a == b; }\n";
- case glu::TYPE_INT_VEC4: return "bool compare_ivec4 (highp ivec4 a, highp ivec4 b) { return a == b; }\n";
- case glu::TYPE_UINT: return "bool compare_uint (highp uint a, highp uint b) { return a == b; }\n";
- case glu::TYPE_UINT_VEC2: return "bool compare_uvec2 (highp uvec2 a, highp uvec2 b) { return a == b; }\n";
- case glu::TYPE_UINT_VEC3: return "bool compare_uvec3 (highp uvec3 a, highp uvec3 b) { return a == b; }\n";
- case glu::TYPE_UINT_VEC4: return "bool compare_uvec4 (highp uvec4 a, highp uvec4 b) { return a == b; }\n";
- case glu::TYPE_BOOL: return "bool compare_bool (bool a, bool b) { return a == b; }\n";
- case glu::TYPE_BOOL_VEC2: return "bool compare_bvec2 (bvec2 a, bvec2 b) { return a == b; }\n";
- case glu::TYPE_BOOL_VEC3: return "bool compare_bvec3 (bvec3 a, bvec3 b) { return a == b; }\n";
- case glu::TYPE_BOOL_VEC4: return "bool compare_bvec4 (bvec4 a, bvec4 b) { return a == b; }\n";
- case glu::TYPE_FLOAT16: return "bool compare_float16_t(highp float a, highp float b) { return abs(a - b) < 0.05; }\n";
- case glu::TYPE_FLOAT16_VEC2: return "bool compare_f16vec2 (highp vec2 a, highp vec2 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y); }\n";
- case glu::TYPE_FLOAT16_VEC3: return "bool compare_f16vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y)&&compare_float(a.z, b.z); }\n";
- case glu::TYPE_FLOAT16_VEC4: return "bool compare_f16vec4 (highp vec4 a, highp vec4 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y)&&compare_float(a.z, b.z)&&compare_float(a.w, b.w); }\n";
- case glu::TYPE_INT8: return "bool compare_int8_t (highp int a, highp int b) { return a == b; }\n";
- case glu::TYPE_INT8_VEC2: return "bool compare_i8vec2 (highp ivec2 a, highp ivec2 b) { return a == b; }\n";
- case glu::TYPE_INT8_VEC3: return "bool compare_i8vec3 (highp ivec3 a, highp ivec3 b) { return a == b; }\n";
- case glu::TYPE_INT8_VEC4: return "bool compare_i8vec4 (highp ivec4 a, highp ivec4 b) { return a == b; }\n";
- case glu::TYPE_UINT8: return "bool compare_uint8_t (highp uint a, highp uint b) { return a == b; }\n";
- case glu::TYPE_UINT8_VEC2: return "bool compare_u8vec2 (highp uvec2 a, highp uvec2 b) { return a == b; }\n";
- case glu::TYPE_UINT8_VEC3: return "bool compare_u8vec3 (highp uvec3 a, highp uvec3 b) { return a == b; }\n";
- case glu::TYPE_UINT8_VEC4: return "bool compare_u8vec4 (highp uvec4 a, highp uvec4 b) { return a == b; }\n";
- case glu::TYPE_INT16: return "bool compare_int16_t (highp int a, highp int b) { return a == b; }\n";
- case glu::TYPE_INT16_VEC2: return "bool compare_i16vec2 (highp ivec2 a, highp ivec2 b) { return a == b; }\n";
- case glu::TYPE_INT16_VEC3: return "bool compare_i16vec3 (highp ivec3 a, highp ivec3 b) { return a == b; }\n";
- case glu::TYPE_INT16_VEC4: return "bool compare_i16vec4 (highp ivec4 a, highp ivec4 b) { return a == b; }\n";
- case glu::TYPE_UINT16: return "bool compare_uint16_t (highp uint a, highp uint b) { return a == b; }\n";
- case glu::TYPE_UINT16_VEC2: return "bool compare_u16vec2 (highp uvec2 a, highp uvec2 b) { return a == b; }\n";
- case glu::TYPE_UINT16_VEC3: return "bool compare_u16vec3 (highp uvec3 a, highp uvec3 b) { return a == b; }\n";
- case glu::TYPE_UINT16_VEC4: return "bool compare_u16vec4 (highp uvec4 a, highp uvec4 b) { return a == b; }\n";
- default:
- DE_ASSERT(false);
- return DE_NULL;
- }
-}
-
-void getCompareDependencies (std::set<glu::DataType>& compareFuncs, glu::DataType basicType)
-{
- switch (basicType)
- {
- case glu::TYPE_FLOAT_VEC2:
- case glu::TYPE_FLOAT_VEC3:
- case glu::TYPE_FLOAT_VEC4:
- case glu::TYPE_FLOAT16_VEC2:
- case glu::TYPE_FLOAT16_VEC3:
- case glu::TYPE_FLOAT16_VEC4:
- compareFuncs.insert(glu::TYPE_FLOAT);
- compareFuncs.insert(basicType);
- break;
-
- case glu::TYPE_FLOAT_MAT2:
- case glu::TYPE_FLOAT_MAT2X3:
- case glu::TYPE_FLOAT_MAT2X4:
- case glu::TYPE_FLOAT_MAT3X2:
- case glu::TYPE_FLOAT_MAT3:
- case glu::TYPE_FLOAT_MAT3X4:
- case glu::TYPE_FLOAT_MAT4X2:
- case glu::TYPE_FLOAT_MAT4X3:
- case glu::TYPE_FLOAT_MAT4:
- compareFuncs.insert(glu::TYPE_FLOAT);
- compareFuncs.insert(glu::getDataTypeFloatVec(glu::getDataTypeMatrixNumRows(basicType)));
- compareFuncs.insert(basicType);
- break;
-
- default:
- compareFuncs.insert(basicType);
- break;
- }
-}
-
-void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const VarType& type)
-{
- if (type.isStructType())
- {
- for (StructType::ConstIterator iter = type.getStructPtr()->begin(); iter != type.getStructPtr()->end(); ++iter)
- collectUniqueBasicTypes(basicTypes, iter->getType());
- }
- else if (type.isArrayType())
- collectUniqueBasicTypes(basicTypes, type.getElementType());
- else
- {
- DE_ASSERT(type.isBasicType());
- basicTypes.insert(type.getBasicType());
- }
-}
-
+// Collects the unique basic types used by the members of bufferBlock,
+// delegating the per-member recursion to the shared utility in
+// vktTypeComparisonUtil.
 void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const BufferBlock& bufferBlock)
 {
 	for (BufferBlock::const_iterator iter = bufferBlock.begin(); iter != bufferBlock.end(); ++iter)
-		collectUniqueBasicTypes(basicTypes, iter->getType());
+		vkt::typecomputil::collectUniqueBasicTypes(basicTypes, iter->getType());
 }
void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const ShaderInterface& interface)
// Set of compare functions required
for (std::set<glu::DataType>::const_iterator iter = types.begin(); iter != types.end(); ++iter)
{
- getCompareDependencies(compareFuncs, *iter);
+ vkt::typecomputil::getCompareDependencies(compareFuncs, *iter);
}
for (int type = 0; type < glu::TYPE_LAST; ++type)
{
if (compareFuncs.find(glu::DataType(type)) != compareFuncs.end())
- str << getCompareFuncForType(glu::DataType(type));
+ str << vkt::typecomputil::getCompareFuncForType(glu::DataType(type));
}
}
return str;
}
-glu::DataType getPromoteType(glu::DataType type)
-{
- switch (type)
- {
- case glu::TYPE_UINT8: return glu::TYPE_UINT;
- case glu::TYPE_UINT8_VEC2: return glu::TYPE_UINT_VEC2;
- case glu::TYPE_UINT8_VEC3: return glu::TYPE_UINT_VEC3;
- case glu::TYPE_UINT8_VEC4: return glu::TYPE_UINT_VEC4;
- case glu::TYPE_INT8: return glu::TYPE_INT;
- case glu::TYPE_INT8_VEC2: return glu::TYPE_INT_VEC2;
- case glu::TYPE_INT8_VEC3: return glu::TYPE_INT_VEC3;
- case glu::TYPE_INT8_VEC4: return glu::TYPE_INT_VEC4;
- case glu::TYPE_UINT16: return glu::TYPE_UINT;
- case glu::TYPE_UINT16_VEC2: return glu::TYPE_UINT_VEC2;
- case glu::TYPE_UINT16_VEC3: return glu::TYPE_UINT_VEC3;
- case glu::TYPE_UINT16_VEC4: return glu::TYPE_UINT_VEC4;
- case glu::TYPE_INT16: return glu::TYPE_INT;
- case glu::TYPE_INT16_VEC2: return glu::TYPE_INT_VEC2;
- case glu::TYPE_INT16_VEC3: return glu::TYPE_INT_VEC3;
- case glu::TYPE_INT16_VEC4: return glu::TYPE_INT_VEC4;
- case glu::TYPE_FLOAT16: return glu::TYPE_FLOAT;
- case glu::TYPE_FLOAT16_VEC2: return glu::TYPE_FLOAT_VEC2;
- case glu::TYPE_FLOAT16_VEC3: return glu::TYPE_FLOAT_VEC3;
- case glu::TYPE_FLOAT16_VEC4: return glu::TYPE_FLOAT_VEC4;
- default: return type;
- }
-}
-
void generateDeclaration (std::ostream& src, const BufferVar& bufferVar, int indentLevel)
{
// \todo [pyry] Qualifiers
const size_t compSize = getDataTypeByteSize(scalarType);
if (scalarSize > 1)
- src << glu::getDataTypeName(getPromoteType(basicType)) << "(";
+ src << glu::getDataTypeName(vkt::typecomputil::getPromoteType(basicType)) << "(";
for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
{
else
{
const char* castName = "";
- glu::DataType promoteType = getPromoteType(basicType);
+ glu::DataType promoteType = vkt::typecomputil::getPromoteType(basicType);
if (basicType != promoteType)
castName = glu::getDataTypeName(promoteType);
const void* valuePtr = (const deUint8*)blockPtr.ptr + computeOffset(varLayout, accessPath.getPath());
const char* castName = "";
- glu::DataType promoteType = getPromoteType(basicType);
+ glu::DataType promoteType = vkt::typecomputil::getPromoteType(basicType);
if (basicType != promoteType)
castName = glu::getDataTypeName((!isMatrix || matrixStoreFlag == STORE_FULL_MATRIX) ? basicType : glu::getDataTypeMatrixColumnType(basicType));
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2021 The Khronos Group Inc.
+ * Copyright (c) 2021 Google LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Utility functions for generating comparison code for values with different types.
+ *//*--------------------------------------------------------------------*/
+
+#include "gluShaderUtil.hpp"
+#include "gluVarTypeUtil.hpp"
+#include <set>
+
+namespace vkt
+{
+namespace typecomputil
+{
+
+// Returns the GLSL source of the compare_* helper for the given basic type.
+// Floating-point comparisons use an absolute tolerance of 0.05; integer,
+// unsigned and boolean comparisons are exact (==).  The 8-bit and 16-bit
+// variants take promoted highp 32-bit parameters (cf. getPromoteType()).
+// Asserts on unsupported types.
+const char* getCompareFuncForType (glu::DataType type)
+{
+	switch (type)
+	{
+	case glu::TYPE_FLOAT:
+		return "bool compare_float (highp float a, highp float b) { return abs(a - b) < 0.05; }\n";
+	case glu::TYPE_FLOAT_VEC2:
+		return "bool compare_vec2 (highp vec2 a, highp vec2 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y); }\n";
+	case glu::TYPE_FLOAT_VEC3:
+		return "bool compare_vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y)&&compare_float(a.z, b.z); }\n";
+	case glu::TYPE_FLOAT_VEC4:
+		return "bool compare_vec4 (highp vec4 a, highp vec4 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y)&&compare_float(a.z, b.z)&&compare_float(a.w, b.w); }\n";
+	case glu::TYPE_FLOAT_MAT2:
+		return "bool compare_mat2 (highp mat2 a, highp mat2 b) { return compare_vec2(a[0], b[0])&&compare_vec2(a[1], b[1]); }\n";
+	case glu::TYPE_FLOAT_MAT2X3:
+		return "bool compare_mat2x3 (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])&&compare_vec3(a[1], b[1]); }\n";
+	case glu::TYPE_FLOAT_MAT2X4:
+		return "bool compare_mat2x4 (highp mat2x4 a, highp mat2x4 b){ return compare_vec4(a[0], b[0])&&compare_vec4(a[1], b[1]); }\n";
+	case glu::TYPE_FLOAT_MAT3X2:
+		return "bool compare_mat3x2 (highp mat3x2 a, highp mat3x2 b){ return compare_vec2(a[0], b[0])&&compare_vec2(a[1], b[1])&&compare_vec2(a[2], b[2]); }\n";
+	case glu::TYPE_FLOAT_MAT3:
+		return "bool compare_mat3 (highp mat3 a, highp mat3 b) { return compare_vec3(a[0], b[0])&&compare_vec3(a[1], b[1])&&compare_vec3(a[2], b[2]); }\n";
+	case glu::TYPE_FLOAT_MAT3X4:
+		return "bool compare_mat3x4 (highp mat3x4 a, highp mat3x4 b){ return compare_vec4(a[0], b[0])&&compare_vec4(a[1], b[1])&&compare_vec4(a[2], b[2]); }\n";
+	case glu::TYPE_FLOAT_MAT4X2:
+		return "bool compare_mat4x2 (highp mat4x2 a, highp mat4x2 b){ return compare_vec2(a[0], b[0])&&compare_vec2(a[1], b[1])&&compare_vec2(a[2], b[2])&&compare_vec2(a[3], b[3]); }\n";
+	case glu::TYPE_FLOAT_MAT4X3:
+		return "bool compare_mat4x3 (highp mat4x3 a, highp mat4x3 b){ return compare_vec3(a[0], b[0])&&compare_vec3(a[1], b[1])&&compare_vec3(a[2], b[2])&&compare_vec3(a[3], b[3]); }\n";
+	case glu::TYPE_FLOAT_MAT4:
+		return "bool compare_mat4 (highp mat4 a, highp mat4 b) { return compare_vec4(a[0], b[0])&&compare_vec4(a[1], b[1])&&compare_vec4(a[2], b[2])&&compare_vec4(a[3], b[3]); }\n";
+	case glu::TYPE_INT:
+		return "bool compare_int (highp int a, highp int b) { return a == b; }\n";
+	case glu::TYPE_INT_VEC2:
+		return "bool compare_ivec2 (highp ivec2 a, highp ivec2 b) { return a == b; }\n";
+	case glu::TYPE_INT_VEC3:
+		return "bool compare_ivec3 (highp ivec3 a, highp ivec3 b) { return a == b; }\n";
+	case glu::TYPE_INT_VEC4:
+		return "bool compare_ivec4 (highp ivec4 a, highp ivec4 b) { return a == b; }\n";
+	case glu::TYPE_UINT:
+		return "bool compare_uint (highp uint a, highp uint b) { return a == b; }\n";
+	case glu::TYPE_UINT_VEC2:
+		return "bool compare_uvec2 (highp uvec2 a, highp uvec2 b) { return a == b; }\n";
+	case glu::TYPE_UINT_VEC3:
+		return "bool compare_uvec3 (highp uvec3 a, highp uvec3 b) { return a == b; }\n";
+	case glu::TYPE_UINT_VEC4:
+		return "bool compare_uvec4 (highp uvec4 a, highp uvec4 b) { return a == b; }\n";
+	case glu::TYPE_BOOL:
+		return "bool compare_bool (bool a, bool b) { return a == b; }\n";
+	case glu::TYPE_BOOL_VEC2:
+		return "bool compare_bvec2 (bvec2 a, bvec2 b) { return a == b; }\n";
+	case glu::TYPE_BOOL_VEC3:
+		return "bool compare_bvec3 (bvec3 a, bvec3 b) { return a == b; }\n";
+	case glu::TYPE_BOOL_VEC4:
+		return "bool compare_bvec4 (bvec4 a, bvec4 b) { return a == b; }\n";
+	case glu::TYPE_FLOAT16:
+		return "bool compare_float16_t(highp float a, highp float b) { return abs(a - b) < 0.05; }\n";
+	case glu::TYPE_FLOAT16_VEC2:
+		return "bool compare_f16vec2 (highp vec2 a, highp vec2 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y); }\n";
+	case glu::TYPE_FLOAT16_VEC3:
+		return "bool compare_f16vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y)&&compare_float(a.z, b.z); }\n";
+	case glu::TYPE_FLOAT16_VEC4:
+		return "bool compare_f16vec4 (highp vec4 a, highp vec4 b) { return compare_float(a.x, b.x)&&compare_float(a.y, b.y)&&compare_float(a.z, b.z)&&compare_float(a.w, b.w); }\n";
+	case glu::TYPE_INT8:
+		return "bool compare_int8_t (highp int a, highp int b) { return a == b; }\n";
+	case glu::TYPE_INT8_VEC2:
+		return "bool compare_i8vec2 (highp ivec2 a, highp ivec2 b) { return a == b; }\n";
+	case glu::TYPE_INT8_VEC3:
+		return "bool compare_i8vec3 (highp ivec3 a, highp ivec3 b) { return a == b; }\n";
+	case glu::TYPE_INT8_VEC4:
+		return "bool compare_i8vec4 (highp ivec4 a, highp ivec4 b) { return a == b; }\n";
+	case glu::TYPE_UINT8:
+		return "bool compare_uint8_t (highp uint a, highp uint b) { return a == b; }\n";
+	case glu::TYPE_UINT8_VEC2:
+		return "bool compare_u8vec2 (highp uvec2 a, highp uvec2 b) { return a == b; }\n";
+	case glu::TYPE_UINT8_VEC3:
+		return "bool compare_u8vec3 (highp uvec3 a, highp uvec3 b) { return a == b; }\n";
+	case glu::TYPE_UINT8_VEC4:
+		return "bool compare_u8vec4 (highp uvec4 a, highp uvec4 b) { return a == b; }\n";
+	case glu::TYPE_INT16:
+		return "bool compare_int16_t (highp int a, highp int b) { return a == b; }\n";
+	case glu::TYPE_INT16_VEC2:
+		return "bool compare_i16vec2 (highp ivec2 a, highp ivec2 b) { return a == b; }\n";
+	case glu::TYPE_INT16_VEC3:
+		return "bool compare_i16vec3 (highp ivec3 a, highp ivec3 b) { return a == b; }\n";
+	case glu::TYPE_INT16_VEC4:
+		return "bool compare_i16vec4 (highp ivec4 a, highp ivec4 b) { return a == b; }\n";
+	case glu::TYPE_UINT16:
+		return "bool compare_uint16_t (highp uint a, highp uint b) { return a == b; }\n";
+	case glu::TYPE_UINT16_VEC2:
+		return "bool compare_u16vec2 (highp uvec2 a, highp uvec2 b) { return a == b; }\n";
+	case glu::TYPE_UINT16_VEC3:
+		return "bool compare_u16vec3 (highp uvec3 a, highp uvec3 b) { return a == b; }\n";
+	case glu::TYPE_UINT16_VEC4:
+		return "bool compare_u16vec4 (highp uvec4 a, highp uvec4 b) { return a == b; }\n";
+	default:
+		DE_ASSERT(false);
+		return DE_NULL;
+	}
+}
+
+// Inserts into compareFuncs every type whose compare_* helper is needed to
+// compare basicType: float/f16 vector compares call the scalar float compare,
+// and float matrix compares additionally call the compare of their column
+// vector type.  Scalar and exact-compare types need only themselves.
+void getCompareDependencies (std::set<glu::DataType> &compareFuncs, glu::DataType basicType)
+{
+	switch (basicType)
+	{
+	case glu::TYPE_FLOAT_VEC2:
+	case glu::TYPE_FLOAT_VEC3:
+	case glu::TYPE_FLOAT_VEC4:
+	case glu::TYPE_FLOAT16_VEC2:
+	case glu::TYPE_FLOAT16_VEC3:
+	case glu::TYPE_FLOAT16_VEC4:
+		compareFuncs.insert(glu::TYPE_FLOAT);
+		compareFuncs.insert(basicType);
+		break;
+
+	case glu::TYPE_FLOAT_MAT2:
+	case glu::TYPE_FLOAT_MAT2X3:
+	case glu::TYPE_FLOAT_MAT2X4:
+	case glu::TYPE_FLOAT_MAT3X2:
+	case glu::TYPE_FLOAT_MAT3:
+	case glu::TYPE_FLOAT_MAT3X4:
+	case glu::TYPE_FLOAT_MAT4X2:
+	case glu::TYPE_FLOAT_MAT4X3:
+	case glu::TYPE_FLOAT_MAT4:
+		compareFuncs.insert(glu::TYPE_FLOAT);
+		// Column vector size equals the matrix row count.
+		compareFuncs.insert(glu::getDataTypeFloatVec(glu::getDataTypeMatrixNumRows(basicType)));
+		compareFuncs.insert(basicType);
+		break;
+
+	default:
+		compareFuncs.insert(basicType);
+		break;
+	}
+}
+
+// Recursively walks 'type', descending into struct members and array element
+// types, and inserts every basic (leaf) type encountered into basicTypes.
+// Asserts if a leaf is reached that is not a basic type.
+void collectUniqueBasicTypes (std::set<glu::DataType> &basicTypes, const glu::VarType &type)
+{
+	if (type.isStructType())
+	{
+		for (const auto &iter: *type.getStructPtr())
+			collectUniqueBasicTypes(basicTypes, iter.getType());
+	}
+	else if (type.isArrayType())
+		collectUniqueBasicTypes(basicTypes, type.getElementType());
+	else
+	{
+		DE_ASSERT(type.isBasicType());
+		basicTypes.insert(type.getBasicType());
+	}
+}
+
+// Returns the 32-bit type that the given 8-bit or 16-bit scalar/vector type
+// promotes to (e.g. uint8_t -> uint, f16vec2 -> vec2).  Types that need no
+// promotion are returned unchanged.
+glu::DataType getPromoteType (glu::DataType type)
+{
+	switch (type)
+	{
+	case glu::TYPE_UINT8:
+		return glu::TYPE_UINT;
+	case glu::TYPE_UINT8_VEC2:
+		return glu::TYPE_UINT_VEC2;
+	case glu::TYPE_UINT8_VEC3:
+		return glu::TYPE_UINT_VEC3;
+	case glu::TYPE_UINT8_VEC4:
+		return glu::TYPE_UINT_VEC4;
+	case glu::TYPE_INT8:
+		return glu::TYPE_INT;
+	case glu::TYPE_INT8_VEC2:
+		return glu::TYPE_INT_VEC2;
+	case glu::TYPE_INT8_VEC3:
+		return glu::TYPE_INT_VEC3;
+	case glu::TYPE_INT8_VEC4:
+		return glu::TYPE_INT_VEC4;
+	case glu::TYPE_UINT16:
+		return glu::TYPE_UINT;
+	case glu::TYPE_UINT16_VEC2:
+		return glu::TYPE_UINT_VEC2;
+	case glu::TYPE_UINT16_VEC3:
+		return glu::TYPE_UINT_VEC3;
+	case glu::TYPE_UINT16_VEC4:
+		return glu::TYPE_UINT_VEC4;
+	case glu::TYPE_INT16:
+		return glu::TYPE_INT;
+	case glu::TYPE_INT16_VEC2:
+		return glu::TYPE_INT_VEC2;
+	case glu::TYPE_INT16_VEC3:
+		return glu::TYPE_INT_VEC3;
+	case glu::TYPE_INT16_VEC4:
+		return glu::TYPE_INT_VEC4;
+	case glu::TYPE_FLOAT16:
+		return glu::TYPE_FLOAT;
+	case glu::TYPE_FLOAT16_VEC2:
+		return glu::TYPE_FLOAT_VEC2;
+	case glu::TYPE_FLOAT16_VEC3:
+		return glu::TYPE_FLOAT_VEC3;
+	case glu::TYPE_FLOAT16_VEC4:
+		return glu::TYPE_FLOAT_VEC4;
+	default:
+		return type;
+	}
+}
+} // typecomputil
+} // vkt
--- /dev/null
+#ifndef _VKTTYPECOMPARISONUTIL_HPP
+#define _VKTTYPECOMPARISONUTIL_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2021 The Khronos Group Inc.
+ * Copyright (c) 2021 Google LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Utility functions for generating comparison code for values with different types.
+ *//*--------------------------------------------------------------------*/
+
+#include <gluShaderUtil.hpp>
+#include "gluVarTypeUtil.hpp"
+#include <set>
+
+namespace vkt
+{
+namespace typecomputil
+{
+// Returns the GLSL source of the compare_* helper for the given basic type.
+const char*		getCompareFuncForType	(glu::DataType type);
+// Inserts into compareFuncs all types whose compare helpers basicType's helper calls.
+void			getCompareDependencies	(std::set<glu::DataType>& compareFuncs, glu::DataType basicType);
+// Recursively collects the unique basic types contained in 'type' (structs/arrays included).
+void			collectUniqueBasicTypes	(std::set<glu::DataType>& basicTypes, const glu::VarType& type);
+// Maps 8-bit and 16-bit types to their promoted 32-bit counterparts; others unchanged.
+glu::DataType	getPromoteType			(glu::DataType type);
+} // typecomputil
+} // vkt
+
+#endif // _VKTTYPECOMPARISONUTIL_HPP
return vk::allocateCommandBuffer(context.getDeviceInterface(), context.getDevice(), cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
}
-MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs)
-{
- const vk::DeviceInterface& vkd = context.getDeviceInterface();
- const vk::VkMemoryRequirements bufReqs = vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
- MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(bufReqs, memReqs);
-
- vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
-
- return memory;
-}
-
vk::VkFormat getRenderTargetFormat (DataType dataType)
{
switch (dataType)
, m_spec (spec)
, m_posNdxBuffer (createBuffer(context, (vk::VkDeviceSize)TOTAL_POS_NDX_SIZE, vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT|vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT))
- , m_posNdxMem (allocateAndBindMemory(context, *m_posNdxBuffer, vk::MemoryRequirement::HostVisible))
+ , m_posNdxMem (vk::bindBuffer (context.getDeviceInterface(), context.getDevice(),m_context.getDefaultAllocator(), *m_posNdxBuffer, vk::MemoryRequirement::HostVisible))
, m_inputLayout (computeStd430Layout(spec.values.inputs))
, m_inputBuffer (m_inputLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_inputLayout.size, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) : Move<vk::VkBuffer>())
- , m_inputMem (m_inputLayout.size > 0 ? allocateAndBindMemory(context, *m_inputBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
+ , m_inputMem (m_inputLayout.size > 0 ? vk::bindBuffer (context.getDeviceInterface(), context.getDevice(),m_context.getDefaultAllocator(), *m_inputBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
, m_referenceLayout (computeStd140Layout(spec.values.outputs))
, m_referenceBuffer (m_referenceLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_referenceLayout.size, vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : Move<vk::VkBuffer>())
- , m_referenceMem (m_referenceLayout.size > 0 ? allocateAndBindMemory(context, *m_referenceBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
+	, m_referenceMem	(m_referenceLayout.size > 0 ? vk::bindBuffer (context.getDeviceInterface(), context.getDevice(),m_context.getDefaultAllocator(), *m_referenceBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())	// guard must match the buffer being bound (was m_inputLayout.size)
, m_uniformLayout (computeStd140Layout(spec.values.uniforms))
, m_uniformBuffer (m_uniformLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_uniformLayout.size, vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : Move<vk::VkBuffer>())
- , m_uniformMem (m_uniformLayout.size > 0 ? allocateAndBindMemory(context, *m_uniformBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
+ , m_uniformMem (m_uniformLayout.size > 0 ? vk::bindBuffer (context.getDeviceInterface(), context.getDevice(),m_context.getDefaultAllocator(), *m_uniformBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
, m_rtFormat (getRenderTargetFormat(spec.outputFormat))
, m_outputCount (((deUint32)m_spec.values.outputs.size() == 0 || m_spec.outputType == glu::sl::OUTPUT_RESULT) ? 1 : (deUint32)m_spec.values.outputs.size())
m_rtView[outNdx] = createAttachmentView(context, *m_rtImage[outNdx], m_rtFormat);
m_readImageBuffer[outNdx] = createBuffer(context, (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * tcu::getPixelSize(vk::mapVkFormat(m_rtFormat))), vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT);
- m_readImageMem[outNdx] = allocateAndBindMemory(context, *m_readImageBuffer[outNdx], vk::MemoryRequirement::HostVisible);
+ m_readImageMem[outNdx] = vk::bindBuffer (context.getDeviceInterface(), context.getDevice(),m_context.getDefaultAllocator(), *m_readImageBuffer[outNdx], vk::MemoryRequirement::HostVisible);
}
m_framebuffer = createFramebuffer(context, *m_renderPass, m_rtView, m_outputCount, RENDER_WIDTH, RENDER_HEIGHT);
}
dEQP-VK.memory_model.transitive.noncoherent.atomic_atomic.payload_local.physbuffer.guard_local.physbuffer.nontransvis
dEQP-VK.memory_model.transitive.noncoherent.atomic_atomic.payload_local.physbuffer.guard_local.physbuffer.transvis
dEQP-VK.memory_model.padding.test
+dEQP-VK.memory_model.shared.scalar_types.0
+dEQP-VK.memory_model.shared.scalar_types.1
+dEQP-VK.memory_model.shared.scalar_types.2
+dEQP-VK.memory_model.shared.scalar_types.3
+dEQP-VK.memory_model.shared.scalar_types.4
+dEQP-VK.memory_model.shared.scalar_types.5
+dEQP-VK.memory_model.shared.scalar_types.6
+dEQP-VK.memory_model.shared.scalar_types.7
+dEQP-VK.memory_model.shared.scalar_types.8
+dEQP-VK.memory_model.shared.scalar_types.9
+dEQP-VK.memory_model.shared.vector_types.0
+dEQP-VK.memory_model.shared.vector_types.1
+dEQP-VK.memory_model.shared.vector_types.2
+dEQP-VK.memory_model.shared.vector_types.3
+dEQP-VK.memory_model.shared.vector_types.4
+dEQP-VK.memory_model.shared.vector_types.5
+dEQP-VK.memory_model.shared.vector_types.6
+dEQP-VK.memory_model.shared.vector_types.7
+dEQP-VK.memory_model.shared.vector_types.8
+dEQP-VK.memory_model.shared.vector_types.9
+dEQP-VK.memory_model.shared.basic_types.0
+dEQP-VK.memory_model.shared.basic_types.1
+dEQP-VK.memory_model.shared.basic_types.2
+dEQP-VK.memory_model.shared.basic_types.3
+dEQP-VK.memory_model.shared.basic_types.4
+dEQP-VK.memory_model.shared.basic_types.5
+dEQP-VK.memory_model.shared.basic_types.6
+dEQP-VK.memory_model.shared.basic_types.7
+dEQP-VK.memory_model.shared.basic_types.8
+dEQP-VK.memory_model.shared.basic_types.9
+dEQP-VK.memory_model.shared.basic_arrays.0
+dEQP-VK.memory_model.shared.basic_arrays.1
+dEQP-VK.memory_model.shared.basic_arrays.2
+dEQP-VK.memory_model.shared.basic_arrays.3
+dEQP-VK.memory_model.shared.basic_arrays.4
+dEQP-VK.memory_model.shared.basic_arrays.5
+dEQP-VK.memory_model.shared.basic_arrays.6
+dEQP-VK.memory_model.shared.basic_arrays.7
+dEQP-VK.memory_model.shared.basic_arrays.8
+dEQP-VK.memory_model.shared.basic_arrays.9
+dEQP-VK.memory_model.shared.arrays_of_arrays.0
+dEQP-VK.memory_model.shared.arrays_of_arrays.1
+dEQP-VK.memory_model.shared.arrays_of_arrays.2
+dEQP-VK.memory_model.shared.arrays_of_arrays.3
+dEQP-VK.memory_model.shared.arrays_of_arrays.4
+dEQP-VK.memory_model.shared.arrays_of_arrays.5
+dEQP-VK.memory_model.shared.arrays_of_arrays.6
+dEQP-VK.memory_model.shared.arrays_of_arrays.7
+dEQP-VK.memory_model.shared.arrays_of_arrays.8
+dEQP-VK.memory_model.shared.arrays_of_arrays.9
+dEQP-VK.memory_model.shared.nested_structs.0
+dEQP-VK.memory_model.shared.nested_structs.1
+dEQP-VK.memory_model.shared.nested_structs.2
+dEQP-VK.memory_model.shared.nested_structs.3
+dEQP-VK.memory_model.shared.nested_structs.4
+dEQP-VK.memory_model.shared.nested_structs.5
+dEQP-VK.memory_model.shared.nested_structs.6
+dEQP-VK.memory_model.shared.nested_structs.7
+dEQP-VK.memory_model.shared.nested_structs.8
+dEQP-VK.memory_model.shared.nested_structs.9
+dEQP-VK.memory_model.shared.nested_structs_arrays.0
+dEQP-VK.memory_model.shared.nested_structs_arrays.1
+dEQP-VK.memory_model.shared.nested_structs_arrays.2
+dEQP-VK.memory_model.shared.nested_structs_arrays.3
+dEQP-VK.memory_model.shared.nested_structs_arrays.4
+dEQP-VK.memory_model.shared.nested_structs_arrays.5
+dEQP-VK.memory_model.shared.nested_structs_arrays.6
+dEQP-VK.memory_model.shared.nested_structs_arrays.7
+dEQP-VK.memory_model.shared.nested_structs_arrays.8
+dEQP-VK.memory_model.shared.nested_structs_arrays.9
+dEQP-VK.memory_model.shared.16bit.scalar_types.0
+dEQP-VK.memory_model.shared.16bit.scalar_types.1
+dEQP-VK.memory_model.shared.16bit.scalar_types.2
+dEQP-VK.memory_model.shared.16bit.scalar_types.3
+dEQP-VK.memory_model.shared.16bit.scalar_types.4
+dEQP-VK.memory_model.shared.16bit.scalar_types.5
+dEQP-VK.memory_model.shared.16bit.scalar_types.6
+dEQP-VK.memory_model.shared.16bit.scalar_types.7
+dEQP-VK.memory_model.shared.16bit.scalar_types.8
+dEQP-VK.memory_model.shared.16bit.scalar_types.9
+dEQP-VK.memory_model.shared.16bit.vector_types.0
+dEQP-VK.memory_model.shared.16bit.vector_types.1
+dEQP-VK.memory_model.shared.16bit.vector_types.2
+dEQP-VK.memory_model.shared.16bit.vector_types.3
+dEQP-VK.memory_model.shared.16bit.vector_types.4
+dEQP-VK.memory_model.shared.16bit.vector_types.5
+dEQP-VK.memory_model.shared.16bit.vector_types.6
+dEQP-VK.memory_model.shared.16bit.vector_types.7
+dEQP-VK.memory_model.shared.16bit.vector_types.8
+dEQP-VK.memory_model.shared.16bit.vector_types.9
+dEQP-VK.memory_model.shared.16bit.basic_types.0
+dEQP-VK.memory_model.shared.16bit.basic_types.1
+dEQP-VK.memory_model.shared.16bit.basic_types.2
+dEQP-VK.memory_model.shared.16bit.basic_types.3
+dEQP-VK.memory_model.shared.16bit.basic_types.4
+dEQP-VK.memory_model.shared.16bit.basic_types.5
+dEQP-VK.memory_model.shared.16bit.basic_types.6
+dEQP-VK.memory_model.shared.16bit.basic_types.7
+dEQP-VK.memory_model.shared.16bit.basic_types.8
+dEQP-VK.memory_model.shared.16bit.basic_types.9
+dEQP-VK.memory_model.shared.16bit.basic_arrays.0
+dEQP-VK.memory_model.shared.16bit.basic_arrays.1
+dEQP-VK.memory_model.shared.16bit.basic_arrays.2
+dEQP-VK.memory_model.shared.16bit.basic_arrays.3
+dEQP-VK.memory_model.shared.16bit.basic_arrays.4
+dEQP-VK.memory_model.shared.16bit.basic_arrays.5
+dEQP-VK.memory_model.shared.16bit.basic_arrays.6
+dEQP-VK.memory_model.shared.16bit.basic_arrays.7
+dEQP-VK.memory_model.shared.16bit.basic_arrays.8
+dEQP-VK.memory_model.shared.16bit.basic_arrays.9
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.0
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.1
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.2
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.3
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.4
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.5
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.6
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.7
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.8
+dEQP-VK.memory_model.shared.16bit.arrays_of_arrays.9
+dEQP-VK.memory_model.shared.16bit.nested_structs.0
+dEQP-VK.memory_model.shared.16bit.nested_structs.1
+dEQP-VK.memory_model.shared.16bit.nested_structs.2
+dEQP-VK.memory_model.shared.16bit.nested_structs.3
+dEQP-VK.memory_model.shared.16bit.nested_structs.4
+dEQP-VK.memory_model.shared.16bit.nested_structs.5
+dEQP-VK.memory_model.shared.16bit.nested_structs.6
+dEQP-VK.memory_model.shared.16bit.nested_structs.7
+dEQP-VK.memory_model.shared.16bit.nested_structs.8
+dEQP-VK.memory_model.shared.16bit.nested_structs.9
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.0
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.1
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.2
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.3
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.4
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.5
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.6
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.7
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.8
+dEQP-VK.memory_model.shared.16bit.nested_structs_arrays.9
+dEQP-VK.memory_model.shared.8bit.scalar_types.0
+dEQP-VK.memory_model.shared.8bit.scalar_types.1
+dEQP-VK.memory_model.shared.8bit.scalar_types.2
+dEQP-VK.memory_model.shared.8bit.scalar_types.3
+dEQP-VK.memory_model.shared.8bit.scalar_types.4
+dEQP-VK.memory_model.shared.8bit.scalar_types.5
+dEQP-VK.memory_model.shared.8bit.scalar_types.6
+dEQP-VK.memory_model.shared.8bit.scalar_types.7
+dEQP-VK.memory_model.shared.8bit.scalar_types.8
+dEQP-VK.memory_model.shared.8bit.scalar_types.9
+dEQP-VK.memory_model.shared.8bit.vector_types.0
+dEQP-VK.memory_model.shared.8bit.vector_types.1
+dEQP-VK.memory_model.shared.8bit.vector_types.2
+dEQP-VK.memory_model.shared.8bit.vector_types.3
+dEQP-VK.memory_model.shared.8bit.vector_types.4
+dEQP-VK.memory_model.shared.8bit.vector_types.5
+dEQP-VK.memory_model.shared.8bit.vector_types.6
+dEQP-VK.memory_model.shared.8bit.vector_types.7
+dEQP-VK.memory_model.shared.8bit.vector_types.8
+dEQP-VK.memory_model.shared.8bit.vector_types.9
+dEQP-VK.memory_model.shared.8bit.basic_types.0
+dEQP-VK.memory_model.shared.8bit.basic_types.1
+dEQP-VK.memory_model.shared.8bit.basic_types.2
+dEQP-VK.memory_model.shared.8bit.basic_types.3
+dEQP-VK.memory_model.shared.8bit.basic_types.4
+dEQP-VK.memory_model.shared.8bit.basic_types.5
+dEQP-VK.memory_model.shared.8bit.basic_types.6
+dEQP-VK.memory_model.shared.8bit.basic_types.7
+dEQP-VK.memory_model.shared.8bit.basic_types.8
+dEQP-VK.memory_model.shared.8bit.basic_types.9
+dEQP-VK.memory_model.shared.8bit.basic_arrays.0
+dEQP-VK.memory_model.shared.8bit.basic_arrays.1
+dEQP-VK.memory_model.shared.8bit.basic_arrays.2
+dEQP-VK.memory_model.shared.8bit.basic_arrays.3
+dEQP-VK.memory_model.shared.8bit.basic_arrays.4
+dEQP-VK.memory_model.shared.8bit.basic_arrays.5
+dEQP-VK.memory_model.shared.8bit.basic_arrays.6
+dEQP-VK.memory_model.shared.8bit.basic_arrays.7
+dEQP-VK.memory_model.shared.8bit.basic_arrays.8
+dEQP-VK.memory_model.shared.8bit.basic_arrays.9
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.0
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.1
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.2
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.3
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.4
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.5
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.6
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.7
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.8
+dEQP-VK.memory_model.shared.8bit.arrays_of_arrays.9
+dEQP-VK.memory_model.shared.8bit.nested_structs.0
+dEQP-VK.memory_model.shared.8bit.nested_structs.1
+dEQP-VK.memory_model.shared.8bit.nested_structs.2
+dEQP-VK.memory_model.shared.8bit.nested_structs.3
+dEQP-VK.memory_model.shared.8bit.nested_structs.4
+dEQP-VK.memory_model.shared.8bit.nested_structs.5
+dEQP-VK.memory_model.shared.8bit.nested_structs.6
+dEQP-VK.memory_model.shared.8bit.nested_structs.7
+dEQP-VK.memory_model.shared.8bit.nested_structs.8
+dEQP-VK.memory_model.shared.8bit.nested_structs.9
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.0
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.1
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.2
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.3
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.4
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.5
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.6
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.7
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.8
+dEQP-VK.memory_model.shared.8bit.nested_structs_arrays.9