external/vulkancts/modules/vulkan/query_pool/vktQueryPoolStatisticsTests.cpp \
external/vulkancts/modules/vulkan/query_pool/vktQueryPoolTests.cpp \
external/vulkancts/modules/vulkan/rasterization/vktRasterizationTests.cpp \
+ external/vulkancts/modules/vulkan/robustness/vktRobustnessBufferAccessTests.cpp \
+ external/vulkancts/modules/vulkan/robustness/vktRobustnessTests.cpp \
+ external/vulkancts/modules/vulkan/robustness/vktRobustnessUtil.cpp \
+ external/vulkancts/modules/vulkan/robustness/vktRobustnessVertexAccessTests.cpp \
external/vulkancts/modules/vulkan/shaderexecutor/vktOpaqueTypeIndexingTests.cpp \
external/vulkancts/modules/vulkan/shaderexecutor/vktShaderBuiltinPrecisionTests.cpp \
external/vulkancts/modules/vulkan/shaderexecutor/vktShaderBuiltinTests.cpp \
$(deqp_dir)/external/vulkancts/modules/vulkan/pipeline \
$(deqp_dir)/external/vulkancts/modules/vulkan/query_pool \
$(deqp_dir)/external/vulkancts/modules/vulkan/rasterization \
+ $(deqp_dir)/external/vulkancts/modules/vulkan/robustness \
$(deqp_dir)/external/vulkancts/modules/vulkan/shaderexecutor \
$(deqp_dir)/external/vulkancts/modules/vulkan/shaderrender \
$(deqp_dir)/external/vulkancts/modules/vulkan/sparse_resources \
dEQP-VK.geometry.emit.triangle_strip_emit_2_end_2
dEQP-VK.geometry.emit.triangle_strip_emit_3_end_2
dEQP-VK.geometry.emit.triangle_strip_emit_3_end_2_emit_3_end_0
+dEQP-VK.robustness.buffer_access.vertex.mat4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.vertex.mat4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.vertex.mat4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.fragment.mat4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.fragment.mat4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.fragment.mat4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.compute.mat4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.compute.mat4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.compute.mat4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.compute.texel_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.compute.texel_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.vertex_access.r32_uint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_uint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32_uint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_uint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_uint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_uint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32_sint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw_indexed.triangle_out_of_bounds
add_subdirectory(fragment_ops)
add_subdirectory(geometry)
add_subdirectory(texture)
+add_subdirectory(robustness)
include_directories(
api
fragment_ops
texture
geometry
+ robustness
)
set(DEQP_VK_SRCS
deqp-vk-fragment-ops
deqp-vk-texture
deqp-vk-geometry
+ deqp-vk-robustness
)
if (DE_COMPILER_IS_MSC AND (DE_PTR_SIZE EQUAL 4))
--- /dev/null
+
+include_directories(
+ ..
+ )
+
+set(DEQP_VK_ROBUSTNESS_SRCS
+ vktRobustnessTests.cpp
+ vktRobustnessTests.hpp
+ vktRobustnessBufferAccessTests.cpp
+ vktRobustnessBufferAccessTests.hpp
+ vktRobustnessUtil.cpp
+ vktRobustnessUtil.hpp
+ vktRobustnessVertexAccessTests.cpp
+ vktRobustnessVertexAccessTests.hpp
+ )
+
+set(DEQP_VK_ROBUSTNESS_LIBS
+ tcutil
+ vkutil
+ )
+
+add_library(deqp-vk-robustness STATIC ${DEQP_VK_ROBUSTNESS_SRCS})
+target_link_libraries(deqp-vk-robustness ${DEQP_VK_ROBUSTNESS_LIBS})
+
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ * Copyright (c) 2016 Imagination Technologies Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Robust buffer access tests for uniform/storage buffers and
+ * uniform/storage texel buffers.
+ *//*--------------------------------------------------------------------*/
+
+#include "vktRobustnessBufferAccessTests.hpp"
+#include "vktRobustnessUtil.hpp"
+#include "vktTestCaseUtil.hpp"
+#include "vkBuilderUtil.hpp"
+#include "vkImageUtil.hpp"
+#include "vkPrograms.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkRef.hpp"
+#include "vkRefUtil.hpp"
+#include "vkTypeUtil.hpp"
+#include "tcuTestLog.hpp"
+
+#include <limits>
+#include <sstream>
+
+namespace vkt
+{
+namespace robustness
+{
+
+using namespace vk;
+
+enum ShaderType
+{
+ SHADER_TYPE_MATRIX_COPY,
+ SHADER_TYPE_VECTOR_COPY,
+ SHADER_TYPE_SCALAR_COPY,
+ SHADER_TYPE_TEXEL_COPY,
+
+ SHADER_TYPE_COUNT
+};
+
+enum BufferAccessType
+{
+ BUFFER_ACCESS_TYPE_READ,
+ BUFFER_ACCESS_TYPE_READ_FROM_STORAGE,
+ BUFFER_ACCESS_TYPE_WRITE,
+};
+
+static VkDeviceSize min (VkDeviceSize a, VkDeviceSize b)
+{
+ return (a < b) ? a : b;
+}
+
+class RobustBufferAccessTest : public vkt::TestCase
+{
+public:
+ static const deUint32 s_testArraySize;
+ static const deUint32 s_numberOfBytesAccessed;
+
+ RobustBufferAccessTest (tcu::TestContext& testContext,
+ const std::string& name,
+ const std::string& description,
+ VkShaderStageFlags shaderStage,
+ ShaderType shaderType,
+ VkFormat bufferFormat);
+
+ virtual ~RobustBufferAccessTest (void) {}
+
+private:
+ static void genBufferShaderAccess (ShaderType shaderType,
+ VkFormat bufferFormat,
+ bool readFromStorage,
+ std::ostringstream& bufferDefinition,
+ std::ostringstream& bufferUse);
+
+ static void genTexelBufferShaderAccess (VkFormat bufferFormat,
+ std::ostringstream& bufferDefinition,
+ std::ostringstream& bufferUse,
+ bool readFromStorage);
+
+protected:
+ static void initBufferAccessPrograms (SourceCollections& programCollection,
+ VkShaderStageFlags shaderStage,
+ ShaderType shaderType,
+ VkFormat bufferFormat,
+ bool readFromStorage);
+
+ const VkShaderStageFlags m_shaderStage;
+ const ShaderType m_shaderType;
+ const VkFormat m_bufferFormat;
+};
+
+class RobustBufferReadTest : public RobustBufferAccessTest
+{
+public:
+ RobustBufferReadTest (tcu::TestContext& testContext,
+ const std::string& name,
+ const std::string& description,
+ VkShaderStageFlags shaderStage,
+ ShaderType shaderType,
+ VkFormat bufferFormat,
+ VkDeviceSize readAccessRange,
+ bool readFromStorage,
+ bool accessOutOfBackingMemory);
+
+ virtual ~RobustBufferReadTest (void) {}
+
+ virtual void initPrograms (SourceCollections& programCollection) const;
+ virtual TestInstance* createInstance (Context& context) const;
+
+private:
+ const bool m_readFromStorage;
+ const VkDeviceSize m_readAccessRange;
+ const bool m_accessOutOfBackingMemory;
+};
+
+class RobustBufferWriteTest : public RobustBufferAccessTest
+{
+public:
+ RobustBufferWriteTest (tcu::TestContext& testContext,
+ const std::string& name,
+ const std::string& description,
+ VkShaderStageFlags shaderStage,
+ ShaderType shaderType,
+ VkFormat bufferFormat,
+ VkDeviceSize writeAccessRange,
+ bool accessOutOfBackingMemory);
+
+ virtual ~RobustBufferWriteTest (void) {}
+
+ virtual void initPrograms (SourceCollections& programCollection) const;
+ virtual TestInstance* createInstance (Context& context) const;
+
+private:
+ const VkDeviceSize m_writeAccessRange;
+ const bool m_accessOutOfBackingMemory;
+};
+
+class BufferAccessInstance : public vkt::TestInstance
+{
+public:
+ BufferAccessInstance (Context& context,
+ Move<VkDevice> device,
+ ShaderType shaderType,
+ VkShaderStageFlags shaderStage,
+ VkFormat bufferFormat,
+ BufferAccessType bufferAccessType,
+ VkDeviceSize inBufferAccessRange,
+ VkDeviceSize outBufferAccessRange,
+ bool accessOutOfBackingMemory);
+
+ virtual ~BufferAccessInstance (void) {}
+
+ virtual tcu::TestStatus iterate (void);
+
+ virtual bool verifyResult (void);
+
+private:
+ bool isExpectedValueFromInBuffer (VkDeviceSize offsetInBytes, const void* valuePtr, VkDeviceSize valueSize);
+ bool isOutBufferValueUnchanged (VkDeviceSize offsetInBytes, VkDeviceSize valueSize);
+
+protected:
+ Move<VkDevice> m_device;
+ de::MovePtr<TestEnvironment> m_testEnvironment;
+
+ const ShaderType m_shaderType;
+ const VkShaderStageFlags m_shaderStage;
+
+ const VkFormat m_bufferFormat;
+ const BufferAccessType m_bufferAccessType;
+
+ const VkDeviceSize m_inBufferAccessRange;
+ Move<VkBuffer> m_inBuffer;
+ de::MovePtr<Allocation> m_inBufferAlloc;
+ VkDeviceSize m_inBufferAllocSize;
+ VkDeviceSize m_inBufferMaxAccessRange;
+
+ const VkDeviceSize m_outBufferAccessRange;
+ Move<VkBuffer> m_outBuffer;
+ de::MovePtr<Allocation> m_outBufferAlloc;
+ VkDeviceSize m_outBufferAllocSize;
+ VkDeviceSize m_outBufferMaxAccessRange;
+
+ Move<VkBuffer> m_indicesBuffer;
+ de::MovePtr<Allocation> m_indicesBufferAlloc;
+
+ Move<VkDescriptorPool> m_descriptorPool;
+ Move<VkDescriptorSetLayout> m_descriptorSetLayout;
+ Move<VkDescriptorSet> m_descriptorSet;
+
+ Move<VkFence> m_fence;
+ VkQueue m_queue;
+
+ // Used when m_shaderStage == VK_SHADER_STAGE_VERTEX_BIT
+ Move<VkBuffer> m_vertexBuffer;
+ de::MovePtr<Allocation> m_vertexBufferAlloc;
+
+ // Used when m_shaderType == SHADER_TYPE_TEXEL_COPY
+ Move<VkBufferView> m_inTexelBufferView;
+ Move<VkBufferView> m_outTexelBufferView;
+
+ const bool m_accessOutOfBackingMemory;
+};
+
+class BufferReadInstance: public BufferAccessInstance
+{
+public:
+ BufferReadInstance (Context& context,
+ Move<VkDevice> device,
+ ShaderType shaderType,
+ VkShaderStageFlags shaderStage,
+ VkFormat bufferFormat,
+ bool readFromStorage,
+ VkDeviceSize inBufferAccessRange,
+ bool accessOutOfBackingMemory);
+
+ virtual ~BufferReadInstance (void) {}
+
+private:
+};
+
+class BufferWriteInstance: public BufferAccessInstance
+{
+public:
+ BufferWriteInstance (Context& context,
+ Move<VkDevice> device,
+ ShaderType shaderType,
+ VkShaderStageFlags shaderStage,
+ VkFormat bufferFormat,
+ VkDeviceSize writeBufferAccessRange,
+ bool accessOutOfBackingMemory);
+
+ virtual ~BufferWriteInstance (void) {}
+};
+
+// RobustBufferAccessTest
+
+const deUint32 RobustBufferAccessTest::s_testArraySize = 1024;
+const deUint32 RobustBufferAccessTest::s_numberOfBytesAccessed = (deUint32)(16 * sizeof(float)); // size of mat4
+
+RobustBufferAccessTest::RobustBufferAccessTest (tcu::TestContext& testContext,
+ const std::string& name,
+ const std::string& description,
+ VkShaderStageFlags shaderStage,
+ ShaderType shaderType,
+ VkFormat bufferFormat)
+ : vkt::TestCase (testContext, name, description)
+ , m_shaderStage (shaderStage)
+ , m_shaderType (shaderType)
+ , m_bufferFormat (bufferFormat)
+{
+ DE_ASSERT(m_shaderStage == VK_SHADER_STAGE_VERTEX_BIT || m_shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT || m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT);
+}
+
+void RobustBufferAccessTest::genBufferShaderAccess (ShaderType shaderType,
+ VkFormat bufferFormat,
+ bool readFromStorage,
+ std::ostringstream& bufferDefinition,
+ std::ostringstream& bufferUse)
+{
+ if (isFloatFormat(bufferFormat))
+ {
+ bufferDefinition <<
+ "layout(binding = 0, " << (readFromStorage ? "std430" : "std140") << ") " << (readFromStorage ? "buffer" : "uniform") << " InBuffer\n"
+ "{\n"
+ " mat4 inMatrix[" << s_testArraySize << "];\n"
+ "};\n\n";
+
+ bufferDefinition <<
+ "layout(binding = 1, std430) buffer OutBuffer\n"
+ "{\n"
+ " mat4 outMatrix[" << s_testArraySize << "];\n"
+ "};\n\n";
+
+ bufferDefinition <<
+ "layout(binding = 2, std140) uniform Indices\n"
+ "{\n"
+ " int inIndex;\n"
+ " int outIndex;\n"
+ "};\n\n";
+
+ switch (shaderType)
+ {
+ case SHADER_TYPE_MATRIX_COPY:
+ bufferUse <<
+ " mat4 tmp = inMatrix[inIndex];\n"
+ " outMatrix[outIndex] = tmp;\n";
+ break;
+
+ case SHADER_TYPE_VECTOR_COPY:
+ bufferUse <<
+ " outMatrix[outIndex][0] = inMatrix[inIndex][0];\n"
+ " outMatrix[outIndex][1] = inMatrix[inIndex][1];\n"
+ " outMatrix[outIndex][2] = inMatrix[inIndex][2];\n"
+ " outMatrix[outIndex][3] = inMatrix[inIndex][3];\n";
+ break;
+
+ case SHADER_TYPE_SCALAR_COPY:
+ bufferUse <<
+ " outMatrix[outIndex][0][0] = inMatrix[inIndex][0][0];\n"
+ " outMatrix[outIndex][0][1] = inMatrix[inIndex][0][1];\n"
+ " outMatrix[outIndex][0][2] = inMatrix[inIndex][0][2];\n"
+ " outMatrix[outIndex][0][3] = inMatrix[inIndex][0][3];\n"
+
+ " outMatrix[outIndex][1][0] = inMatrix[inIndex][1][0];\n"
+ " outMatrix[outIndex][1][1] = inMatrix[inIndex][1][1];\n"
+ " outMatrix[outIndex][1][2] = inMatrix[inIndex][1][2];\n"
+ " outMatrix[outIndex][1][3] = inMatrix[inIndex][1][3];\n"
+
+ " outMatrix[outIndex][2][0] = inMatrix[inIndex][2][0];\n"
+ " outMatrix[outIndex][2][1] = inMatrix[inIndex][2][1];\n"
+ " outMatrix[outIndex][2][2] = inMatrix[inIndex][2][2];\n"
+ " outMatrix[outIndex][2][3] = inMatrix[inIndex][2][3];\n"
+
+ " outMatrix[outIndex][3][0] = inMatrix[inIndex][3][0];\n"
+ " outMatrix[outIndex][3][1] = inMatrix[inIndex][3][1];\n"
+ " outMatrix[outIndex][3][2] = inMatrix[inIndex][3][2];\n"
+ " outMatrix[outIndex][3][3] = inMatrix[inIndex][3][3];\n";
+ break;
+
+ default:
+ DE_ASSERT(false);
+ }
+ }
+ else
+ {
+ std::string typePrefixStr;
+
+ if (isUintFormat(bufferFormat))
+ {
+ typePrefixStr = "u";
+ }
+ else if (isIntFormat(bufferFormat))
+ {
+ typePrefixStr = "i";
+ }
+ else
+ {
+ DE_ASSERT(false);
+ }
+
+ bufferDefinition <<
+ "layout(binding = 0, " << (readFromStorage ? "std430" : "std140") << ") " << (readFromStorage ? "buffer readonly" : "uniform") << " InBuffer\n"
+ "{\n"
+ " " << typePrefixStr << "vec4 inVecs[" << s_testArraySize << "][4];\n"
+ "};\n\n";
+
+ bufferDefinition <<
+ "layout(binding = 1, std430) buffer OutBuffer\n"
+ "{\n"
+ " " << typePrefixStr << "vec4 outVecs[" << s_testArraySize << "][4];\n"
+ "};\n\n";
+
+ bufferDefinition <<
+ "layout(binding = 2, std140) uniform Indices\n"
+ "{\n"
+ " int inIndex;\n"
+ " int outIndex;\n"
+ "};\n\n";
+
+ switch (shaderType)
+ {
+ case SHADER_TYPE_MATRIX_COPY:
+ // Shader type not supported for integer types.
+ DE_ASSERT(false);
+ break;
+
+ case SHADER_TYPE_VECTOR_COPY:
+ bufferUse <<
+ " outVecs[outIndex][0] = inVecs[inIndex][0];\n"
+ " outVecs[outIndex][1] = inVecs[inIndex][1];\n"
+ " outVecs[outIndex][2] = inVecs[inIndex][2];\n"
+ " outVecs[outIndex][3] = inVecs[inIndex][3];\n";
+ break;
+
+ case SHADER_TYPE_SCALAR_COPY:
+ bufferUse <<
+ " outVecs[outIndex][0][0] = inVecs[inIndex][0][0];\n"
+ " outVecs[outIndex][0][1] = inVecs[inIndex][0][1];\n"
+ " outVecs[outIndex][0][2] = inVecs[inIndex][0][2];\n"
+ " outVecs[outIndex][0][3] = inVecs[inIndex][0][3];\n"
+
+ " outVecs[outIndex][1][0] = inVecs[inIndex][1][0];\n"
+ " outVecs[outIndex][1][1] = inVecs[inIndex][1][1];\n"
+ " outVecs[outIndex][1][2] = inVecs[inIndex][1][2];\n"
+ " outVecs[outIndex][1][3] = inVecs[inIndex][1][3];\n"
+
+ " outVecs[outIndex][2][0] = inVecs[inIndex][2][0];\n"
+ " outVecs[outIndex][2][1] = inVecs[inIndex][2][1];\n"
+ " outVecs[outIndex][2][2] = inVecs[inIndex][2][2];\n"
+ " outVecs[outIndex][2][3] = inVecs[inIndex][2][3];\n"
+
+ " outVecs[outIndex][3][0] = inVecs[inIndex][3][0];\n"
+ " outVecs[outIndex][3][1] = inVecs[inIndex][3][1];\n"
+ " outVecs[outIndex][3][2] = inVecs[inIndex][3][2];\n"
+ " outVecs[outIndex][3][3] = inVecs[inIndex][3][3];\n";
+ break;
+
+ default:
+ DE_ASSERT(false);
+ }
+ }
+}
+
+void RobustBufferAccessTest::genTexelBufferShaderAccess (VkFormat bufferFormat,
+ std::ostringstream& bufferDefinition,
+ std::ostringstream& bufferUse,
+ bool readFromStorage)
+{
+ const char* layoutTypeStr;
+ const char* inTexelBufferTypeStr;
+ const char* outTexelBufferTypeStr;
+ const deUint32 texelSize = mapVkFormat(bufferFormat).getPixelSize();
+
+ if (isFloatFormat(bufferFormat))
+ {
+ layoutTypeStr = "rgba32f";
+ inTexelBufferTypeStr = readFromStorage ? "imageBuffer" : "samplerBuffer";
+ outTexelBufferTypeStr = "imageBuffer";
+ }
+ else if (isUintFormat(bufferFormat))
+ {
+ layoutTypeStr = "rgba32ui";
+ inTexelBufferTypeStr = readFromStorage ? "uimageBuffer" : "usamplerBuffer";
+ outTexelBufferTypeStr = "uimageBuffer";
+ }
+ else if (isIntFormat(bufferFormat))
+ {
+ layoutTypeStr = "rgba32i";
+ inTexelBufferTypeStr = readFromStorage ? "iimageBuffer" : "isamplerBuffer";
+ outTexelBufferTypeStr = "iimageBuffer";
+ }
+ else if (bufferFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+ {
+ layoutTypeStr = "rgb10_a2";
+ inTexelBufferTypeStr = readFromStorage ? "imageBuffer" : "samplerBuffer";
+ outTexelBufferTypeStr = "imageBuffer";
+ }
+ else
+ {
+ TCU_THROW(NotSupportedError, (std::string("Unsupported format: ") + getFormatName(bufferFormat)).c_str());
+ }
+
+ bufferDefinition << "layout(set = 0, binding = 0" << ((readFromStorage) ? (std::string(", ") + layoutTypeStr) : "") << ") uniform highp "
+ << ((readFromStorage) ? "readonly " : "") << inTexelBufferTypeStr << " inImage;\n";
+
+ bufferDefinition << "layout(set = 0, binding = 1, " << layoutTypeStr << ") uniform highp writeonly " << outTexelBufferTypeStr << " outImage;\n";
+
+ bufferDefinition <<
+ "layout(binding = 2, std140) uniform Offsets\n"
+ "{\n"
+ " int inOffset;\n"
+ " int outOffset;\n"
+ "};\n\n";
+
+ bufferUse << " for (int i = 0; i < " << (s_numberOfBytesAccessed / texelSize) << "; i++)\n"
+ << " {\n"
+ << " imageStore(outImage, outOffset + i, " << (readFromStorage ? "imageLoad" : "texelFetch") << "(inImage, inOffset + i));\n"
+ << " }\n";
+}
+
+void RobustBufferAccessTest::initBufferAccessPrograms (SourceCollections& programCollection,
+ VkShaderStageFlags shaderStage,
+ ShaderType shaderType,
+ VkFormat bufferFormat,
+ bool readFromStorage)
+{
+ std::ostringstream bufferDefinition;
+ std::ostringstream bufferUse;
+
+ if (shaderType != SHADER_TYPE_TEXEL_COPY)
+ {
+ genBufferShaderAccess(shaderType, bufferFormat, readFromStorage, bufferDefinition, bufferUse);
+ }
+
+ if (shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
+ {
+ std::ostringstream computeShaderSource;
+
+ if (shaderType == SHADER_TYPE_TEXEL_COPY)
+ genTexelBufferShaderAccess(bufferFormat, bufferDefinition, bufferUse, readFromStorage);
+
+ computeShaderSource <<
+ "#version 440\n"
+ "#extension GL_EXT_texture_buffer : require\n"
+ "precision highp float;\n"
+ "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+ << bufferDefinition.str() <<
+ "void main (void)\n"
+ "{\n"
+ << bufferUse.str() <<
+ "}\n";
+
+ programCollection.glslSources.add("compute") << glu::ComputeSource(computeShaderSource.str());
+ }
+ else
+ {
+ std::ostringstream vertexShaderSource;
+ std::ostringstream fragmentShaderSource;
+
+ if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
+ {
+ if (shaderType == SHADER_TYPE_TEXEL_COPY)
+ genTexelBufferShaderAccess(bufferFormat, bufferDefinition, bufferUse, readFromStorage);
+
+ vertexShaderSource <<
+ "#version 440\n"
+ "#extension GL_EXT_texture_buffer : require\n"
+ "precision highp float;\n"
+ "layout(location = 0) in vec4 position;\n\n"
+ << bufferDefinition.str() << "\n"
+ "out gl_PerVertex {\n"
+ " vec4 gl_Position;\n"
+ "};\n\n"
+ "void main (void)\n"
+ "{\n"
+ << bufferUse.str() <<
+ " gl_Position = position;\n"
+ "}\n";
+ }
+ else
+ {
+ vertexShaderSource <<
+ "#version 440\n"
+ "precision highp float;\n"
+ "layout(location = 0) in vec4 position;\n\n"
+ "out gl_PerVertex {\n"
+ " vec4 gl_Position;\n"
+ "};\n\n"
+ "void main (void)\n"
+ "{\n"
+ " gl_Position = position;\n"
+ "}\n";
+ }
+
+ programCollection.glslSources.add("vertex") << glu::VertexSource(vertexShaderSource.str());
+
+ if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
+ {
+ if (shaderType == SHADER_TYPE_TEXEL_COPY)
+ genTexelBufferShaderAccess(bufferFormat, bufferDefinition, bufferUse, readFromStorage);
+
+ fragmentShaderSource <<
+ "#version 440\n"
+ "#extension GL_EXT_texture_buffer : require\n"
+ "precision highp float;\n"
+ "layout(location = 0) out vec4 fragColor;\n"
+ << bufferDefinition.str() <<
+ "void main (void)\n"
+ "{\n"
+ << bufferUse.str() <<
+ " fragColor = vec4(1.0);\n"
+ "}\n";
+ }
+ else
+ {
+ fragmentShaderSource <<
+ "#version 440\n"
+ "precision highp float;\n"
+ "layout(location = 0) out vec4 fragColor;\n\n"
+ "void main (void)\n"
+ "{\n"
+ " fragColor = vec4(1.0);\n"
+ "}\n";
+ }
+
+ programCollection.glslSources.add("fragment") << glu::FragmentSource(fragmentShaderSource.str());
+ }
+}
+
+// RobustBufferReadTest
+
+RobustBufferReadTest::RobustBufferReadTest (tcu::TestContext& testContext,
+ const std::string& name,
+ const std::string& description,
+ VkShaderStageFlags shaderStage,
+ ShaderType shaderType,
+ VkFormat bufferFormat,
+ VkDeviceSize readAccessRange,
+ bool readFromStorage,
+ bool accessOutOfBackingMemory)
+ : RobustBufferAccessTest (testContext, name, description, shaderStage, shaderType, bufferFormat)
+ , m_readFromStorage (readFromStorage)
+ , m_readAccessRange (readAccessRange)
+ , m_accessOutOfBackingMemory (accessOutOfBackingMemory)
+{
+}
+
+void RobustBufferReadTest::initPrograms (SourceCollections& programCollection) const
+{
+ initBufferAccessPrograms(programCollection, m_shaderStage, m_shaderType, m_bufferFormat, m_readFromStorage);
+}
+
+TestInstance* RobustBufferReadTest::createInstance (Context& context) const
+{
+ Move<VkDevice> device = createRobustBufferAccessDevice(context);
+
+ return new BufferReadInstance(context, device, m_shaderType, m_shaderStage, m_bufferFormat, m_readFromStorage, m_readAccessRange, m_accessOutOfBackingMemory);
+}
+
+// RobustBufferWriteTest
+
+RobustBufferWriteTest::RobustBufferWriteTest (tcu::TestContext& testContext,
+ const std::string& name,
+ const std::string& description,
+ VkShaderStageFlags shaderStage,
+ ShaderType shaderType,
+ VkFormat bufferFormat,
+ VkDeviceSize writeAccessRange,
+ bool accessOutOfBackingMemory)
+ : RobustBufferAccessTest (testContext, name, description, shaderStage, shaderType, bufferFormat)
+ , m_writeAccessRange (writeAccessRange)
+ , m_accessOutOfBackingMemory (accessOutOfBackingMemory)
+{
+}
+
+void RobustBufferWriteTest::initPrograms (SourceCollections& programCollection) const
+{
+ initBufferAccessPrograms(programCollection, m_shaderStage, m_shaderType, m_bufferFormat, false /* readFromStorage */);
+}
+
+TestInstance* RobustBufferWriteTest::createInstance (Context& context) const
+{
+ Move<VkDevice> device = createRobustBufferAccessDevice(context);
+
+ return new BufferWriteInstance(context, device, m_shaderType, m_shaderStage, m_bufferFormat, m_writeAccessRange, m_accessOutOfBackingMemory);
+}
+
+// BufferAccessInstance
+
+BufferAccessInstance::BufferAccessInstance (Context& context,
+ Move<VkDevice> device,
+ ShaderType shaderType,
+ VkShaderStageFlags shaderStage,
+ VkFormat bufferFormat,
+ BufferAccessType bufferAccessType,
+ VkDeviceSize inBufferAccessRange,
+ VkDeviceSize outBufferAccessRange,
+ bool accessOutOfBackingMemory)
+ : vkt::TestInstance (context)
+ , m_device (device)
+ , m_shaderType (shaderType)
+ , m_shaderStage (shaderStage)
+ , m_bufferFormat (bufferFormat)
+ , m_bufferAccessType (bufferAccessType)
+ , m_inBufferAccessRange (inBufferAccessRange)
+ , m_outBufferAccessRange (outBufferAccessRange)
+ , m_accessOutOfBackingMemory (accessOutOfBackingMemory)
+{
+ const DeviceInterface& vk = context.getDeviceInterface();
+ const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
+ const bool isTexelAccess = !!(m_shaderType == SHADER_TYPE_TEXEL_COPY);
+ const bool readFromStorage = !!(m_bufferAccessType == BUFFER_ACCESS_TYPE_READ_FROM_STORAGE);
+ SimpleAllocator memAlloc (vk, *m_device, getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()));
+ tcu::TestLog& log = m_context.getTestContext().getLog();
+
+ DE_ASSERT(RobustBufferAccessTest::s_numberOfBytesAccessed % sizeof(deUint32) == 0);
+ DE_ASSERT(inBufferAccessRange <= RobustBufferAccessTest::s_numberOfBytesAccessed);
+ DE_ASSERT(outBufferAccessRange <= RobustBufferAccessTest::s_numberOfBytesAccessed);
+
+ // Check storage support
+ if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
+ {
+ if (!context.getDeviceFeatures().vertexPipelineStoresAndAtomics)
+ {
+ TCU_THROW(NotSupportedError, "Stores not supported in vertex stage");
+ }
+ }
+ else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
+ {
+ if (!context.getDeviceFeatures().fragmentStoresAndAtomics)
+ {
+ TCU_THROW(NotSupportedError, "Stores not supported in fragment stage");
+ }
+ }
+
+ // Check format support
+ {
+ VkFormatFeatureFlags requiredFormatFeatures = 0;
+ const VkFormatProperties formatProperties = getPhysicalDeviceFormatProperties(context.getInstanceInterface(), context.getPhysicalDevice(), m_bufferFormat);
+
+ if (isTexelAccess)
+ {
+ requiredFormatFeatures = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT | VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
+ }
+
+ if ((formatProperties.bufferFeatures & requiredFormatFeatures) != requiredFormatFeatures)
+ {
+ TCU_THROW(NotSupportedError, (std::string("Format cannot be used in uniform and storage") + (isTexelAccess ? " texel" : "") + " buffers: "
+ + getFormatName(m_bufferFormat)).c_str());
+ }
+ }
+
+ // Create buffer to read data from
+ {
+ VkBufferUsageFlags inBufferUsageFlags;
+ VkMemoryRequirements inBufferMemoryReqs;
+
+ if (isTexelAccess)
+ {
+ inBufferUsageFlags = readFromStorage ? VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT : VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
+ }
+ else
+ {
+ inBufferUsageFlags = readFromStorage ? VK_BUFFER_USAGE_STORAGE_BUFFER_BIT : VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ }
+
+ const VkBufferCreateInfo inBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferCreateFlags flags;
+ m_inBufferAccessRange, // VkDeviceSize size;
+ inBufferUsageFlags, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ VK_QUEUE_FAMILY_IGNORED, // deUint32 queueFamilyIndexCount;
+ DE_NULL // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_inBuffer = createBuffer(vk, *m_device, &inBufferParams);
+
+ inBufferMemoryReqs = getBufferMemoryRequirements(vk, *m_device, *m_inBuffer);
+ m_inBufferAllocSize = inBufferMemoryReqs.size;
+ m_inBufferAlloc = memAlloc.allocate(inBufferMemoryReqs, MemoryRequirement::HostVisible);
+
+ // Size of the most restrictive bound
+ m_inBufferMaxAccessRange = min(m_inBufferAllocSize, min(inBufferParams.size, m_inBufferAccessRange));
+
+ VK_CHECK(vk.bindBufferMemory(*m_device, *m_inBuffer, m_inBufferAlloc->getMemory(), m_inBufferAlloc->getOffset()));
+ populateBufferWithTestValues(m_inBufferAlloc->getHostPtr(), m_inBufferAllocSize, m_bufferFormat);
+ flushMappedMemoryRange(vk, *m_device, m_inBufferAlloc->getMemory(), m_inBufferAlloc->getOffset(), VK_WHOLE_SIZE);
+
+ log << tcu::TestLog::Message << "inBufferAllocSize = " << m_inBufferAllocSize << tcu::TestLog::EndMessage;
+ log << tcu::TestLog::Message << "inBufferMaxAccessRange = " << m_inBufferMaxAccessRange << tcu::TestLog::EndMessage;
+ }
+
+ // Create buffer to write data into
+ {
+ VkMemoryRequirements outBufferMemoryReqs;
+ const VkBufferUsageFlags outBufferUsageFlags = (m_shaderType == SHADER_TYPE_TEXEL_COPY) ? VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT
+ : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+
+ const VkBufferCreateInfo outBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferCreateFlags flags;
+ m_outBufferAccessRange, // VkDeviceSize size;
+ outBufferUsageFlags, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ VK_QUEUE_FAMILY_IGNORED, // deUint32 queueFamilyIndexCount;
+ DE_NULL // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_outBuffer = createBuffer(vk, *m_device, &outBufferParams);
+
+ outBufferMemoryReqs = getBufferMemoryRequirements(vk, *m_device, *m_outBuffer);
+ m_outBufferAllocSize = outBufferMemoryReqs.size;
+ m_outBufferAlloc = memAlloc.allocate(outBufferMemoryReqs, MemoryRequirement::HostVisible);
+
+ // If we are requesting access out of the memory that backs the buffer, make sure the test is able to do so.
+ if (m_accessOutOfBackingMemory)
+ {
+ if (m_outBufferAllocSize >= ((RobustBufferAccessTest::s_testArraySize + 1) * RobustBufferAccessTest::s_numberOfBytesAccessed))
+ {
+ TCU_THROW(NotSupportedError, "Cannot access beyond the end of the memory that backs the buffer");
+ }
+ }
+
+ // Size of the most restrictive bound
+ m_outBufferMaxAccessRange = min(m_outBufferAllocSize, min(outBufferParams.size, m_outBufferAccessRange));
+
+ VK_CHECK(vk.bindBufferMemory(*m_device, *m_outBuffer, m_outBufferAlloc->getMemory(), m_outBufferAlloc->getOffset()));
+ deMemset(m_outBufferAlloc->getHostPtr(), 0xFF, (size_t)m_outBufferAllocSize);
+ flushMappedMemoryRange(vk, *m_device, m_outBufferAlloc->getMemory(), m_outBufferAlloc->getOffset(), VK_WHOLE_SIZE);
+
+ log << tcu::TestLog::Message << "outBufferAllocSize = " << m_outBufferAllocSize << tcu::TestLog::EndMessage;
+ log << tcu::TestLog::Message << "outBufferMaxAccessRange = " << m_outBufferMaxAccessRange << tcu::TestLog::EndMessage;
+ }
+
+ // Create buffer for indices/offsets
+ {
+ struct IndicesBuffer
+ {
+ int32_t inIndex;
+ int32_t outIndex;
+ };
+
+ IndicesBuffer indices = { 0, 0 };
+
+ const VkBufferCreateInfo indicesBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferCreateFlags flags;
+ sizeof(IndicesBuffer), // VkDeviceSize size;
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ VK_QUEUE_FAMILY_IGNORED, // deUint32 queueFamilyIndexCount;
+ DE_NULL, // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_indicesBuffer = createBuffer(vk, *m_device, &indicesBufferParams);
+ m_indicesBufferAlloc = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_device, *m_indicesBuffer), MemoryRequirement::HostVisible);
+
+ VK_CHECK(vk.bindBufferMemory(*m_device, *m_indicesBuffer, m_indicesBufferAlloc->getMemory(), m_indicesBufferAlloc->getOffset()));
+
+ if (m_accessOutOfBackingMemory)
+ {
+ if (m_bufferAccessType == BUFFER_ACCESS_TYPE_WRITE)
+ {
+ indices.outIndex = RobustBufferAccessTest::s_testArraySize - 1;
+ }
+ else
+ {
+ indices.inIndex = RobustBufferAccessTest::s_testArraySize - 1;
+ }
+ }
+
+ deMemcpy(m_indicesBufferAlloc->getHostPtr(), &indices, sizeof(IndicesBuffer));
+
+ flushMappedMemoryRange(vk, *m_device, m_indicesBufferAlloc->getMemory(), m_indicesBufferAlloc->getOffset(), VK_WHOLE_SIZE);
+
+ log << tcu::TestLog::Message << "inIndex = " << indices.inIndex << tcu::TestLog::EndMessage;
+ log << tcu::TestLog::Message << "outIndex = " << indices.outIndex << tcu::TestLog::EndMessage;
+ }
+
+ // Create descriptor data
+ {
+ VkDescriptorType inBufferDescriptorType;
+ VkDescriptorType outBufferDescriptorType;
+
+ if (isTexelAccess)
+ {
+ inBufferDescriptorType = readFromStorage ? VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ outBufferDescriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
+ }
+ else
+ {
+ inBufferDescriptorType = readFromStorage ? VK_DESCRIPTOR_TYPE_STORAGE_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ outBufferDescriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ }
+
+ DescriptorPoolBuilder descriptorPoolBuilder;
+ descriptorPoolBuilder.addType(inBufferDescriptorType, 1u);
+ descriptorPoolBuilder.addType(outBufferDescriptorType, 1u);
+ descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u);
+ m_descriptorPool = descriptorPoolBuilder.build(vk, *m_device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+
+ DescriptorSetLayoutBuilder setLayoutBuilder;
+ setLayoutBuilder.addSingleBinding(inBufferDescriptorType, VK_SHADER_STAGE_ALL);
+ setLayoutBuilder.addSingleBinding(outBufferDescriptorType, VK_SHADER_STAGE_ALL);
+ setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_ALL);
+ m_descriptorSetLayout = setLayoutBuilder.build(vk, *m_device);
+
+ const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ *m_descriptorPool, // VkDescriptorPool descriptorPool;
+ 1u, // deUint32 setLayoutCount;
+ &m_descriptorSetLayout.get() // const VkDescriptorSetLayout* pSetLayouts;
+ };
+
+ m_descriptorSet = allocateDescriptorSet(vk, *m_device, &descriptorSetAllocateInfo);
+
+ DescriptorSetUpdateBuilder setUpdateBuilder;
+
+ if (isTexelAccess)
+ {
+ const VkBufferViewCreateInfo inBufferViewCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferViewCreateFlags flags;
+ *m_inBuffer, // VkBuffer buffer;
+ m_bufferFormat, // VkFormat format;
+ 0ull, // VkDeviceSize offset;
+ m_inBufferAccessRange // VkDeviceSize range;
+ };
+ m_inTexelBufferView = createBufferView(vk, *m_device, &inBufferViewCreateInfo, DE_NULL);
+
+ const VkBufferViewCreateInfo outBufferViewCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferViewCreateFlags flags;
+ *m_outBuffer, // VkBuffer buffer;
+ m_bufferFormat, // VkFormat format;
+ 0ull, // VkDeviceSize offset;
+ m_outBufferAccessRange, // VkDeviceSize range;
+ };
+ m_outTexelBufferView = createBufferView(vk, *m_device, &outBufferViewCreateInfo, DE_NULL);
+
+ setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0), inBufferDescriptorType, &m_inTexelBufferView.get());
+ setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1), outBufferDescriptorType, &m_outTexelBufferView.get());
+ }
+ else
+ {
+ const VkDescriptorBufferInfo inBufferDescriptorInfo = makeDescriptorBufferInfo(*m_inBuffer, 0ull, m_inBufferAccessRange);
+ const VkDescriptorBufferInfo outBufferDescriptorInfo = makeDescriptorBufferInfo(*m_outBuffer, 0ull, m_outBufferAccessRange);
+
+ setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0), inBufferDescriptorType, &inBufferDescriptorInfo);
+ setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1), outBufferDescriptorType, &outBufferDescriptorInfo);
+ }
+
+ const VkDescriptorBufferInfo indicesBufferDescriptorInfo = makeDescriptorBufferInfo(*m_indicesBuffer, 0ull, 8ull);
+ setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &indicesBufferDescriptorInfo);
+
+ setUpdateBuilder.update(vk, *m_device);
+ }
+
+ // Create fence
+ {
+ const VkFenceCreateInfo fenceParams =
+ {
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u // VkFenceCreateFlags flags;
+ };
+
+ m_fence = createFence(vk, *m_device, &fenceParams);
+ }
+
+ // Get queue
+ vk.getDeviceQueue(*m_device, queueFamilyIndex, 0, &m_queue);
+
+ if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
+ {
+ m_testEnvironment = de::MovePtr<TestEnvironment>(new ComputeEnvironment(m_context, *m_device, *m_descriptorSetLayout, *m_descriptorSet));
+ }
+ else
+ {
+ using tcu::Vec4;
+
+ const VkVertexInputBindingDescription vertexInputBindingDescription =
+ {
+ 0u, // deUint32 binding;
+ sizeof(tcu::Vec4), // deUint32 strideInBytes;
+ VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
+ };
+
+ const VkVertexInputAttributeDescription vertexInputAttributeDescription =
+ {
+ 0u, // deUint32 location;
+ 0u, // deUint32 binding;
+ VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
+ 0u // deUint32 offset;
+ };
+
+ const Vec4 vertices[] =
+ {
+ Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
+ Vec4(-1.0f, 1.0f, 0.0f, 1.0f),
+ Vec4(1.0f, -1.0f, 0.0f, 1.0f),
+ };
+
+ // Create vertex buffer
+ {
+ const VkDeviceSize vertexBufferSize = (VkDeviceSize)(4u * sizeof(tcu::Vec4));
+ const VkBufferCreateInfo vertexBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferCreateFlags flags;
+ vertexBufferSize, // VkDeviceSize size;
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ VK_QUEUE_FAMILY_IGNORED, // deUint32 queueFamilyIndexCount;
+ DE_NULL // const deUint32* pQueueFamilyIndices;
+ };
+
+ DE_ASSERT(vertexBufferSize > 0);
+
+ m_vertexBuffer = createBuffer(vk, *m_device, &vertexBufferParams);
+ m_vertexBufferAlloc = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_device, *m_vertexBuffer), MemoryRequirement::HostVisible);
+
+ VK_CHECK(vk.bindBufferMemory(*m_device, *m_vertexBuffer, m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset()));
+
+ // Load vertices into vertex buffer
+ deMemcpy(m_vertexBufferAlloc->getHostPtr(), vertices, sizeof(tcu::Vec4) * DE_LENGTH_OF_ARRAY(vertices));
+ flushMappedMemoryRange(vk, *m_device, m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), VK_WHOLE_SIZE);
+ }
+
+ const GraphicsEnvironment::DrawConfig drawWithOneVertexBuffer =
+ {
+ std::vector<VkBuffer>(1, *m_vertexBuffer), // std::vector<VkBuffer> vertexBuffers;
+ DE_LENGTH_OF_ARRAY(vertices), // deUint32 vertexCount;
+ 1, // deUint32 instanceCount;
+ DE_NULL, // VkBuffer indexBuffer;
+ 0u, // deUint32 indexCount;
+ };
+
+ m_testEnvironment = de::MovePtr<TestEnvironment>(new GraphicsEnvironment(m_context,
+ *m_device,
+ *m_descriptorSetLayout,
+ *m_descriptorSet,
+ GraphicsEnvironment::VertexBindings(1, vertexInputBindingDescription),
+ GraphicsEnvironment::VertexAttributes(1, vertexInputAttributeDescription),
+ drawWithOneVertexBuffer));
+ }
+}
+
+// Verifies that the value at the given in-buffer offset matches the deterministic,
+// index-derived pattern written into the read buffer during test setup.
+// NOTE(review): must stay in sync with populateBufferWithTestValues (called in the
+// constructor, see above) -- confirm the per-format encodings there.
+bool BufferAccessInstance::isExpectedValueFromInBuffer (VkDeviceSize offsetInBytes, const void* valuePtr, VkDeviceSize valueSize)
+{
+	// Test values are laid out in 32-bit slots, so only 4-byte-aligned in-range offsets are valid.
+	DE_ASSERT(offsetInBytes % 4 == 0);
+	DE_ASSERT(offsetInBytes < m_inBufferAllocSize);
+
+	// Slot index with a bias of 2.
+	const deUint32 valueIndex = deUint32(offsetInBytes / 4) + 2;
+
+	if (isUintFormat(m_bufferFormat))
+	{
+		// Unsigned formats store the index itself.
+		return !deMemCmp(valuePtr, &valueIndex, (size_t)valueSize);
+	}
+	else if (isIntFormat(m_bufferFormat))
+	{
+		// Signed formats store the negated index.
+		const deInt32 value = -deInt32(valueIndex);
+		return !deMemCmp(valuePtr, &value, (size_t)valueSize);
+	}
+	else if (isFloatFormat(m_bufferFormat))
+	{
+		// Float formats store the index converted to float.
+		const float value = float(valueIndex);
+		return !deMemCmp(valuePtr, &value, (size_t)valueSize);
+	}
+	else if (m_bufferFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+	{
+		// Packed 2-10-10-10 format: rebuild the packed word from consecutive indices.
+		// NOTE(review): (2u << 10) - 1u is an 11-bit mask (a 10-bit channel would use
+		// (1u << 10) - 1u), so adjacent fields may overlap when OR-ed together; presumably
+		// this mirrors populateBufferWithTestValues exactly -- confirm the masks there.
+		const deUint32 r = ((valueIndex + 0) & ((2u << 10) - 1u));
+		const deUint32 g = ((valueIndex + 1) & ((2u << 10) - 1u));
+		const deUint32 b = ((valueIndex + 2) & ((2u << 10) - 1u));
+		const deUint32 a = ((valueIndex + 0) & ((2u << 2) - 1u));
+		const deUint32 abgr = (a << 30) | (b << 20) | (g << 10) | r;
+
+		return !deMemCmp(valuePtr, &abgr, (size_t)valueSize);
+	}
+	else
+	{
+		// Unsupported buffer format for this verification path.
+		DE_ASSERT(false);
+		return false;
+	}
+}
+
+// Checks whether the output buffer still holds its initial 0xFF fill pattern at the
+// given offset (the out buffer is memset to 0xFF before the shader runs).
+bool BufferAccessInstance::isOutBufferValueUnchanged (VkDeviceSize offsetInBytes, VkDeviceSize valueSize)
+{
+	// An untouched value compares equal to the all-ones pattern; valueSize is at most 4.
+	const deUint32			initialPattern	= 0xFFFFFFFFu;
+	const deUint8* const	valueBytes		= reinterpret_cast<deUint8*>(m_outBufferAlloc->getHostPtr()) + offsetInBytes;
+
+	return deMemCmp(valueBytes, &initialPattern, (size_t)valueSize) == 0;
+}
+
+// Submits the pre-recorded command buffer, waits for completion, then verifies the
+// contents of the output buffer against the robust-access rules.
+tcu::TestStatus BufferAccessInstance::iterate (void)
+{
+	const DeviceInterface&		vk			= m_context.getDeviceInterface();
+	const vk::VkCommandBuffer	cmdBuffer	= m_testEnvironment->getCommandBuffer();
+
+	// Submit command buffer
+	{
+		const VkSubmitInfo	submitInfo	=
+		{
+			VK_STRUCTURE_TYPE_SUBMIT_INFO,	// VkStructureType				sType;
+			DE_NULL,						// const void*					pNext;
+			0u,								// deUint32						waitSemaphoreCount;
+			DE_NULL,						// const VkSemaphore*			pWaitSemaphores;
+			DE_NULL,						// const VkPipelineStageFlags*	pWaitDstStageMask;
+			1u,								// deUint32						commandBufferCount;
+			&cmdBuffer,						// const VkCommandBuffer*		pCommandBuffers;
+			0u,								// deUint32						signalSemaphoreCount;
+			DE_NULL							// const VkSemaphore*			pSignalSemaphores;
+		};
+
+		VK_CHECK(vk.resetFences(*m_device, 1, &m_fence.get()));
+		VK_CHECK(vk.queueSubmit(m_queue, 1, &submitInfo, *m_fence));
+		// Block until the submitted work has finished; timeout is effectively infinite.
+		VK_CHECK(vk.waitForFences(*m_device, 1, &m_fence.get(), true, ~(0ull) /* infinity */));
+	}
+
+	// Prepare result buffer for read: make device writes visible to the host.
+	{
+		const VkMappedMemoryRange	outBufferRange	=
+		{
+			VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,	// VkStructureType	sType;
+			DE_NULL,								// const void*		pNext;
+			m_outBufferAlloc->getMemory(),			// VkDeviceMemory	mem;
+			0ull,									// VkDeviceSize		offset;
+			m_outBufferAllocSize,					// VkDeviceSize		size;
+		};
+
+		VK_CHECK(vk.invalidateMappedMemoryRanges(*m_device, 1u, &outBufferRange));
+	}
+
+	if (verifyResult())
+		return tcu::TestStatus::pass("All values OK");
+	else
+		return tcu::TestStatus::fail("Invalid value(s) found");
+}
+
+// Walks the whole out-buffer allocation in 32-bit steps and validates each value
+// against the robustBufferAccess guarantees for the access mode (read or write)
+// exercised by this test instance.
+bool BufferAccessInstance::verifyResult (void)
+{
+	std::ostringstream	logMsg;
+	tcu::TestLog&		log					= m_context.getTestContext().getLog();
+	const bool			isReadAccess		= !!(m_bufferAccessType == BUFFER_ACCESS_TYPE_READ || m_bufferAccessType == BUFFER_ACCESS_TYPE_READ_FROM_STORAGE);
+	const void*			inDataPtr			= m_inBufferAlloc->getHostPtr();
+	const void*			outDataPtr			= m_outBufferAlloc->getHostPtr();
+	bool				allOk				= true;
+	deUint32			valueNdx			= 0;
+	// For reads the in-buffer bound applies; for writes the out-buffer bound.
+	const VkDeviceSize	maxAccessRange		= isReadAccess ? m_inBufferMaxAccessRange : m_outBufferMaxAccessRange;
+
+	for (VkDeviceSize offsetInBytes = 0; offsetInBytes < m_outBufferAllocSize; offsetInBytes += 4)
+	{
+		deUint8*		outValuePtr		= (deUint8*)outDataPtr + offsetInBytes;
+		// Clamp the final (possibly partial) value to the end of the allocation.
+		const size_t	outValueSize	= (size_t)min(4, (m_outBufferAllocSize - offsetInBytes));
+
+		if (offsetInBytes >= RobustBufferAccessTest::s_numberOfBytesAccessed)
+		{
+			// The shader will only write 16 values into the result buffer. The rest of the values
+			// should remain unchanged or may be modified if we are writing out of bounds.
+			if (!isOutBufferValueUnchanged(offsetInBytes, outValueSize)
+				&& (isReadAccess || !isValueWithinBufferOrZero(inDataPtr, m_inBufferAllocSize, outValuePtr, 4)))
+			{
+				logMsg << "\nValue " << valueNdx++ << " has been modified with an unknown value: " << *((deUint32 *)outValuePtr);
+				allOk = false;
+			}
+		}
+		else
+		{
+			// Signed distance from this value to the out-of-bounds boundary (<= 0 means past it).
+			const deInt32	distanceToOutOfBounds	= (deInt32)maxAccessRange - (deInt32)offsetInBytes;
+			bool			isOutOfBoundsAccess		= false;
+
+			logMsg << "\n" << valueNdx++ << ": ";
+
+			logValue(logMsg, outValuePtr, m_bufferFormat, outValueSize);
+
+			// When accessing outside the backing memory, every accessed value is out of bounds.
+			if (m_accessOutOfBackingMemory)
+				isOutOfBoundsAccess = true;
+
+			// Check if the shader operation accessed an operand located less than 16 bytes away
+			// from the out of bounds address.
+			if (!isOutOfBoundsAccess && distanceToOutOfBounds < 16)
+			{
+				deUint32 operandSize = 0;
+
+				switch (m_shaderType)
+				{
+					case SHADER_TYPE_SCALAR_COPY:
+						operandSize = 4; // Size of scalar
+						break;
+
+					case SHADER_TYPE_VECTOR_COPY:
+						operandSize = 4 * 4; // Size of vec4
+						break;
+
+					case SHADER_TYPE_MATRIX_COPY:
+						operandSize = 4 * 16; // Size of mat4
+						break;
+
+					case SHADER_TYPE_TEXEL_COPY:
+						operandSize = mapVkFormat(m_bufferFormat).getPixelSize();
+						break;
+
+					default:
+						DE_ASSERT(false);
+				}
+
+				// Out of bounds if the operand containing this value extends past the access range.
+				isOutOfBoundsAccess = (((offsetInBytes / operandSize) + 1) * operandSize > maxAccessRange);
+			}
+
+			if (isOutOfBoundsAccess)
+			{
+				logMsg << " (out of bounds " << (isReadAccess ? "read": "write") << ")";
+
+				const bool	isValuePartiallyOutOfBounds	= ((distanceToOutOfBounds > 0) && ((deUint32)distanceToOutOfBounds < 4));
+				bool		isValidValue				= false;
+
+				if (isValuePartiallyOutOfBounds && !m_accessOutOfBackingMemory)
+				{
+					// The value is partially out of bounds: check the in-bounds and the
+					// out-of-bounds byte ranges separately.
+
+					bool	isOutOfBoundsPartOk  = true;
+					bool	isWithinBoundsPartOk = true;
+
+					if (isReadAccess)
+					{
+						isWithinBoundsPartOk	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAllocSize, outValuePtr, distanceToOutOfBounds);
+						isOutOfBoundsPartOk		= isValueWithinBufferOrZero(inDataPtr, m_inBufferAllocSize, (deUint8*)outValuePtr + distanceToOutOfBounds , outValueSize - distanceToOutOfBounds);
+					}
+					else
+					{
+						isWithinBoundsPartOk	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAllocSize, outValuePtr, distanceToOutOfBounds)
+												  || isOutBufferValueUnchanged(offsetInBytes, distanceToOutOfBounds);
+
+						isOutOfBoundsPartOk		= isValueWithinBufferOrZero(inDataPtr, m_inBufferAllocSize, (deUint8*)outValuePtr + distanceToOutOfBounds, outValueSize - distanceToOutOfBounds)
+												  || isOutBufferValueUnchanged(offsetInBytes + distanceToOutOfBounds, outValueSize - distanceToOutOfBounds);
+					}
+
+					logMsg << ", first " << distanceToOutOfBounds << " byte(s) " << (isWithinBoundsPartOk ? "OK": "wrong");
+					logMsg << ", last " << outValueSize - distanceToOutOfBounds << " byte(s) " << (isOutOfBoundsPartOk ? "OK": "wrong");
+
+					isValidValue	= isWithinBoundsPartOk && isOutOfBoundsPartOk;
+				}
+				else
+				{
+					if (isReadAccess)
+					{
+						isValidValue	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAllocSize, outValuePtr, outValueSize);
+					}
+					else
+					{
+						isValidValue	= isOutBufferValueUnchanged(offsetInBytes, outValueSize);
+
+						if (!isValidValue)
+						{
+							// Out of bounds writes may modify values within the memory ranges bound to the buffer
+							isValidValue	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAllocSize, outValuePtr, outValueSize);
+
+							if (isValidValue)
+								logMsg << ", OK, written within the memory range bound to the buffer";
+						}
+					}
+				}
+
+				if (!isValidValue)
+				{
+					// Check if we are satisfying the [0, 0, 0, x] pattern, where x may be either 0 or 1,
+					// or the maximum representable positive integer value (if the format is integer-based).
+
+					const bool	canMatchVec4Pattern	= (isReadAccess
+													&& !isValuePartiallyOutOfBounds
+													&& (m_shaderType == SHADER_TYPE_VECTOR_COPY || m_shaderType == SHADER_TYPE_TEXEL_COPY)
+													&& ((offsetInBytes / 4 + 1) % 4 == 0 || m_bufferFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32));
+					bool		matchesVec4Pattern	= false;
+
+					if (canMatchVec4Pattern)
+					{
+						if (m_bufferFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+							matchesVec4Pattern	= verifyOutOfBoundsVec4(outValuePtr, m_bufferFormat);
+						else
+							// This offset is the w component; the vec4 starts three words earlier.
+							matchesVec4Pattern	= verifyOutOfBoundsVec4(reinterpret_cast<deUint32*>(outValuePtr) - 3, m_bufferFormat);
+					}
+
+					if (!canMatchVec4Pattern || !matchesVec4Pattern)
+					{
+						logMsg << ". Failed: ";
+
+						if (isReadAccess)
+						{
+							logMsg << "expected value within the buffer range or 0";
+
+							if (canMatchVec4Pattern)
+								logMsg << ", or the [0, 0, 0, x] pattern";
+						}
+						else
+						{
+							logMsg << "written out of the range";
+						}
+
+						allOk = false;
+					}
+				}
+			}
+			else // We are within bounds
+			{
+				if (isReadAccess)
+				{
+					if (!isExpectedValueFromInBuffer(offsetInBytes, outValuePtr, 4))
+					{
+						logMsg << ", Failed: unexpected value";
+						allOk = false;
+					}
+				}
+				else
+				{
+					// Out of bounds writes may change values within the bounds.
+					// NOTE(review): the other checks in this function compare against
+					// m_inBufferAllocSize; confirm m_inBufferAccessRange is intended here.
+					if (!isValueWithinBufferOrZero(inDataPtr, m_inBufferAccessRange, outValuePtr, 4))
+					{
+						logMsg << ", Failed: unexpected value";
+						allOk = false;
+					}
+				}
+			}
+		}
+	}
+
+	log << tcu::TestLog::Message << logMsg.str() << tcu::TestLog::EndMessage;
+
+	return allOk;
+}
+
+// BufferReadInstance
+
+BufferReadInstance::BufferReadInstance (Context&			context,
+										Move<VkDevice>		device,
+										ShaderType			shaderType,
+										VkShaderStageFlags	shaderStage,
+										VkFormat			bufferFormat,
+										bool				readFromStorage,
+										VkDeviceSize		inBufferAccessRange,
+										bool				accessOutOfBackingMemory)
+
+	// Read tests vary only the in-buffer range; the out (result) buffer always covers
+	// the fixed number of bytes the verification shader writes back.
+	: BufferAccessInstance	(context, device, shaderType, shaderStage, bufferFormat,
+							 readFromStorage ? BUFFER_ACCESS_TYPE_READ_FROM_STORAGE : BUFFER_ACCESS_TYPE_READ,
+							 inBufferAccessRange,
+							 RobustBufferAccessTest::s_numberOfBytesAccessed,	// outBufferAccessRange
+							 accessOutOfBackingMemory)
+{
+}
+
+// BufferWriteInstance
+
+BufferWriteInstance::BufferWriteInstance (Context&				context,
+										  Move<VkDevice>		device,
+										  ShaderType			shaderType,
+										  VkShaderStageFlags	shaderStage,
+										  VkFormat				bufferFormat,
+										  VkDeviceSize			writeBufferAccessRange,
+										  bool					accessOutOfBackingMemory)
+
+	// Write tests vary only the out-buffer range; the in (source) buffer always covers
+	// the fixed number of bytes the shader reads.
+	: BufferAccessInstance	(context, device, shaderType, shaderStage, bufferFormat,
+							 BUFFER_ACCESS_TYPE_WRITE,
+							 RobustBufferAccessTest::s_numberOfBytesAccessed,	// inBufferAccessRange
+							 writeBufferAccessRange,
+							 accessOutOfBackingMemory)
+{
+}
+
+// Test node creation functions
+
+// Maps a shader stage bit to the lowercase name used for the per-stage test groups.
+static const char* getShaderStageName (VkShaderStageFlagBits shaderStage)
+{
+	switch (shaderStage)
+	{
+		case VK_SHADER_STAGE_VERTEX_BIT:					return "vertex";
+		case VK_SHADER_STAGE_FRAGMENT_BIT:					return "fragment";
+		case VK_SHADER_STAGE_COMPUTE_BIT:					return "compute";
+		case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:		return "tess_control";
+		case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:	return "tess_eval";
+		case VK_SHADER_STAGE_GEOMETRY_BIT:					return "geometry";
+
+		default:
+			// Unsupported stage: fail loudly in debug builds, return a null name otherwise.
+			DE_ASSERT(false);
+			return DE_NULL;
+	}
+}
+
+// Builds the stage / shader-type / format / access-mode test hierarchy under 'parentNode'.
+static void addBufferAccessTests (tcu::TestContext& testCtx, tcu::TestCaseGroup* parentNode)
+{
+	// Named buffer range; units are bytes for plain buffers and texels for texel buffers
+	// (scaled by rangeMultiplier below).
+	struct BufferRangeConfig
+	{
+		const char*		name;
+		VkDeviceSize	range;
+	};
+
+	const VkShaderStageFlagBits bufferAccessStages[] =
+	{
+		VK_SHADER_STAGE_VERTEX_BIT,
+		VK_SHADER_STAGE_FRAGMENT_BIT,
+		VK_SHADER_STAGE_COMPUTE_BIT,
+	};
+
+	const VkFormat bufferFormats[] =
+	{
+		VK_FORMAT_R32_SINT,
+		VK_FORMAT_R32_UINT,
+		VK_FORMAT_R32_SFLOAT
+	};
+
+	// Texel copies use four-component 32-bit formats plus one packed format.
+	const VkFormat texelBufferFormats[] =
+	{
+		VK_FORMAT_R32G32B32A32_SINT,
+		VK_FORMAT_R32G32B32A32_UINT,
+		VK_FORMAT_R32G32B32A32_SFLOAT,
+
+		VK_FORMAT_A2B10G10R10_UNORM_PACK32
+	};
+
+	const BufferRangeConfig bufferRangeConfigs[] =
+	{
+		{ "range_1_byte",		1ull },
+		{ "range_3_bytes",		3ull },
+		{ "range_4_bytes",		4ull },		// size of float
+		{ "range_32_bytes",		32ull },	// size of half mat4
+	};
+
+	const BufferRangeConfig texelBufferRangeConfigs[] =
+	{
+		{ "range_1_texel",		1u },
+		{ "range_3_texels",		3u },
+	};
+
+	// Indexed by ShaderType.
+	const char* shaderTypeNames[SHADER_TYPE_COUNT] =
+	{
+		"mat4_copy",
+		"vec4_copy",
+		"scalar_copy",
+		"texel_copy",
+	};
+
+	for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(bufferAccessStages); stageNdx++)
+	{
+		const VkShaderStageFlagBits			stage			= bufferAccessStages[stageNdx];
+		de::MovePtr<tcu::TestCaseGroup>		stageTests		(new tcu::TestCaseGroup(testCtx, getShaderStageName(stage), ""));
+
+		for (int shaderTypeNdx = 0; shaderTypeNdx < SHADER_TYPE_COUNT; shaderTypeNdx++)
+		{
+			const VkFormat*						formats;
+			size_t								formatsLength;
+			const BufferRangeConfig*			ranges;
+			size_t								rangesLength;
+			deUint32							rangeMultiplier;
+			de::MovePtr<tcu::TestCaseGroup>		shaderTypeTests	(new tcu::TestCaseGroup(testCtx, shaderTypeNames[shaderTypeNdx], ""));
+
+			// Texel copies use their own format and range tables.
+			if ((ShaderType)shaderTypeNdx == SHADER_TYPE_TEXEL_COPY)
+			{
+				formats			= texelBufferFormats;
+				formatsLength	= DE_LENGTH_OF_ARRAY(texelBufferFormats);
+
+				ranges			= texelBufferRangeConfigs;
+				rangesLength	= DE_LENGTH_OF_ARRAY(texelBufferRangeConfigs);
+			}
+			else
+			{
+				formats			= bufferFormats;
+				formatsLength	= DE_LENGTH_OF_ARRAY(bufferFormats);
+
+				ranges			= bufferRangeConfigs;
+				rangesLength	= DE_LENGTH_OF_ARRAY(bufferRangeConfigs);
+			}
+
+			for (size_t formatNdx = 0; formatNdx < formatsLength; formatNdx++)
+			{
+				const VkFormat	bufferFormat	= formats[formatNdx];
+
+				// Texel ranges are given in texels; convert to bytes via the texel size.
+				rangeMultiplier = ((ShaderType)shaderTypeNdx == SHADER_TYPE_TEXEL_COPY) ? mapVkFormat(bufferFormat).getPixelSize() : 1;
+
+				if (!isFloatFormat(bufferFormat) && ((ShaderType)shaderTypeNdx) == SHADER_TYPE_MATRIX_COPY)
+				{
+					// Use SHADER_TYPE_MATRIX_COPY with floating-point formats only
+					// NOTE(review): 'break' leaves the format loop entirely; with the integer
+					// formats listed first, no per-format mat4_copy groups are ever created
+					// (only the out_of_alloc group below). Confirm against the mustpass list
+					// whether 'continue' was intended instead.
+					break;
+				}
+
+				// Group name is the format name lower-cased with the "VK_FORMAT_" prefix stripped.
+				const std::string				formatName	= getFormatName(bufferFormat);
+				de::MovePtr<tcu::TestCaseGroup>	formatTests	(new tcu::TestCaseGroup(testCtx, de::toLower(formatName.substr(10)).c_str(), ""));
+
+				de::MovePtr<tcu::TestCaseGroup>	uboReadTests	(new tcu::TestCaseGroup(testCtx, "oob_uniform_read", ""));
+				de::MovePtr<tcu::TestCaseGroup>	ssboReadTests	(new tcu::TestCaseGroup(testCtx, "oob_storage_read", ""));
+				de::MovePtr<tcu::TestCaseGroup>	ssboWriteTests	(new tcu::TestCaseGroup(testCtx, "oob_storage_write", ""));
+
+				for (size_t rangeNdx = 0; rangeNdx < rangesLength; rangeNdx++)
+				{
+					const BufferRangeConfig&	rangeConfig		= ranges[rangeNdx];
+					const VkDeviceSize			rangeInBytes	= rangeConfig.range * rangeMultiplier;
+
+					uboReadTests->addChild(new RobustBufferReadTest(testCtx, rangeConfig.name, "", stage, (ShaderType)shaderTypeNdx, bufferFormat, rangeInBytes, false, false));
+					ssboReadTests->addChild(new RobustBufferReadTest(testCtx, rangeConfig.name, "", stage, (ShaderType)shaderTypeNdx, bufferFormat, rangeInBytes, true, false));
+					ssboWriteTests->addChild(new RobustBufferWriteTest(testCtx, rangeConfig.name, "", stage, (ShaderType)shaderTypeNdx, bufferFormat, rangeInBytes, false));
+
+				}
+
+				formatTests->addChild(uboReadTests.release());
+				formatTests->addChild(ssboReadTests.release());
+				formatTests->addChild(ssboWriteTests.release());
+
+				shaderTypeTests->addChild(formatTests.release());
+			}
+
+			// Read/write out of the memory that backs the buffer
+			{
+				de::MovePtr<tcu::TestCaseGroup>	outOfAllocTests	(new tcu::TestCaseGroup(testCtx, "out_of_alloc", ""));
+
+				const VkFormat format = (((ShaderType)shaderTypeNdx == SHADER_TYPE_TEXEL_COPY ) ? VK_FORMAT_R32G32B32A32_SFLOAT : VK_FORMAT_R32_SFLOAT);
+
+				outOfAllocTests->addChild(new RobustBufferReadTest(testCtx, "oob_uniform_read", "", stage, (ShaderType)shaderTypeNdx, format, 16, false, true));
+				outOfAllocTests->addChild(new RobustBufferReadTest(testCtx, "oob_storage_read", "", stage, (ShaderType)shaderTypeNdx, format, 16, true, true));
+				outOfAllocTests->addChild(new RobustBufferWriteTest(testCtx, "oob_storage_write", "", stage, (ShaderType)shaderTypeNdx, format, 16, true));
+
+				shaderTypeTests->addChild(outOfAllocTests.release());
+			}
+
+			stageTests->addChild(shaderTypeTests.release());
+		}
+		parentNode->addChild(stageTests.release());
+	}
+}
+
+// Creates the root "buffer_access" group and populates it with all robust buffer access cases.
+tcu::TestCaseGroup* createBufferAccessTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup>	group	(new tcu::TestCaseGroup(testCtx, "buffer_access", ""));
+
+	addBufferAccessTests(testCtx, group.get());
+	return group.release();
+}
+
+} // robustness
+} // vkt
--- /dev/null
+#ifndef _VKTROBUSTNESSBUFFERACCESSTESTS_HPP
+#define _VKTROBUSTNESSBUFFERACCESSTESTS_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ * Copyright (c) 2016 Imagination Technologies Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Robust buffer access tests for uniform/storage buffers and
+ * uniform/storage texel buffers.
+ *//*--------------------------------------------------------------------*/
+
+#include "vktTestCase.hpp"
+
+namespace vkt
+{
+namespace robustness
+{
+
+tcu::TestCaseGroup* createBufferAccessTests (tcu::TestContext& testCtx);
+
+} // robustness
+} // vkt
+
+#endif // _VKTROBUSTNESSBUFFERACCESSTESTS_HPP
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ * Copyright (c) 2016 Imagination Technologies Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Robust Buffer Access Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktRobustnessTests.hpp"
+#include "vktRobustnessBufferAccessTests.hpp"
+#include "vktRobustnessVertexAccessTests.hpp"
+#include "vktTestGroupUtil.hpp"
+
+namespace vkt
+{
+namespace robustness
+{
+
+// Builds the top-level "robustness" group: buffer access cases plus vertex access cases.
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup>	group	(new tcu::TestCaseGroup(testCtx, "robustness", ""));
+
+	group->addChild(createBufferAccessTests(testCtx));
+	group->addChild(createVertexAccessTests(testCtx));
+
+	return group.release();
+}
+
+} // robustness
+} // vkt
--- /dev/null
+#ifndef _VKTROBUSTNESSTESTS_HPP
+#define _VKTROBUSTNESSTESTS_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ * Copyright (c) 2016 Imagination Technologies Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Robustness Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace robustness
+{
+
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx);
+
+} // robustness
+} // vkt
+
+#endif // _VKTROBUSTNESSTESTS_HPP
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ * Copyright (c) 2016 Imagination Technologies Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Robustness Utilities
+ *//*--------------------------------------------------------------------*/
+
+#include "vktRobustnessUtil.hpp"
+#include "vkDefs.hpp"
+#include "vkImageUtil.hpp"
+#include "vkPrograms.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkRefUtil.hpp"
+#include "deMath.h"
+#include <iomanip>
+#include <limits>
+#include <sstream>
+
+namespace vkt
+{
+namespace robustness
+{
+
+using namespace vk;
+
+// Creates a logical device with the robustBufferAccess feature enabled -- the feature
+// all tests in this module exercise.
+Move<VkDevice> createRobustBufferAccessDevice (Context& context)
+{
+	const float queuePriority = 1.0f;
+
+	// Create a universal queue that supports graphics and compute
+	const VkDeviceQueueCreateInfo	queueParams =
+	{
+		VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,		// VkStructureType				sType;
+		DE_NULL,										// const void*					pNext;
+		0u,												// VkDeviceQueueCreateFlags		flags;
+		context.getUniversalQueueFamilyIndex(),			// deUint32						queueFamilyIndex;
+		1u,												// deUint32						queueCount;
+		&queuePriority									// const float*					pQueuePriorities;
+	};
+
+	// Start from the features the context reports and force robust buffer access on.
+	VkPhysicalDeviceFeatures	enabledFeatures = context.getDeviceFeatures();
+	enabledFeatures.robustBufferAccess = true;
+
+	const VkDeviceCreateInfo	deviceParams =
+	{
+		VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,			// VkStructureType					sType;
+		DE_NULL,										// const void*						pNext;
+		0u,												// VkDeviceCreateFlags				flags;
+		1u,												// deUint32							queueCreateInfoCount;
+		&queueParams,									// const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
+		0u,												// deUint32							enabledLayerCount;
+		DE_NULL,										// const char* const*				ppEnabledLayerNames;
+		0u,												// deUint32							enabledExtensionCount;
+		DE_NULL,										// const char* const*				ppEnabledExtensionNames;
+		&enabledFeatures								// const VkPhysicalDeviceFeatures*	pEnabledFeatures;
+	};
+
+	return createDevice(context.getInstanceInterface(), context.getPhysicalDevice(), &deviceParams);
+}
+
+// Approximate float equality using a fixed absolute tolerance. Note this is
+// an absolute (not relative) epsilon, which is adequate for the small test
+// values these robustness tests compare.
+bool areEqual (float a, float b)
+{
+	const float tolerance = 0.001f;
+	const float diff      = deFloatAbs(a - b);
+
+	return diff <= tolerance;
+}
+
+// Returns true iff every one of the valueSizeInBytes bytes at valuePtr is
+// zero. An empty value (size 0) is considered zero.
+bool isValueZero (const void* valuePtr, size_t valueSizeInBytes)
+{
+	const deUint8*			curByte	= reinterpret_cast<const deUint8*>(valuePtr);
+	const deUint8* const	end		= curByte + valueSizeInBytes;
+
+	while (curByte != end)
+	{
+		if (*curByte++ != 0)
+			return false;
+	}
+
+	return true;
+}
+
+// Slides a byte-granular window of valueSizeInBytes over the buffer and
+// returns true if the value occurs at ANY byte offset (not just at aligned
+// positions). Returns false when the buffer is smaller than the value.
+bool isValueWithinBuffer (const void* buffer, VkDeviceSize bufferSize, const void* valuePtr, size_t valueSizeInBytes)
+{
+	const deUint8* byteBuffer = reinterpret_cast<const deUint8*>(buffer);
+
+	// Guard: the loop bound below underflows without this check.
+	if (bufferSize < ((VkDeviceSize)valueSizeInBytes))
+		return false;
+
+	for (VkDeviceSize i = 0; i <= (bufferSize - valueSizeInBytes); i++)
+	{
+		if (!deMemCmp(&byteBuffer[i], valuePtr, valueSizeInBytes))
+			return true;
+	}
+
+	return false;
+}
+
+// Accepts a value that either appears somewhere within the buffer or is all
+// zeroes (zero being a value robust out-of-bounds reads are allowed to return).
+bool isValueWithinBufferOrZero (const void* buffer, VkDeviceSize bufferSize, const void* valuePtr, size_t valueSizeInBytes)
+{
+	return isValueWithinBuffer(buffer, bufferSize, valuePtr, valueSizeInBytes) || isValueZero(valuePtr, valueSizeInBytes);
+}
+
+// Checks that a vec4 read out of bounds matches one of the values permitted
+// for robustBufferAccess: components x/y/z must be 0 and w one of the
+// accepted fill values for the format class. Floats are compared with a
+// small absolute tolerance (areEqual). Asserts on unhandled formats.
+bool verifyOutOfBoundsVec4 (const void* vecPtr, VkFormat bufferFormat)
+{
+	if (isUintFormat(bufferFormat))
+	{
+		const deUint32* data = (deUint32*)vecPtr;
+
+		// w may be 0, 1, or the max representable unsigned value.
+		return data[0] == 0u
+			&& data[1] == 0u
+			&& data[2] == 0u
+			&& (data[3] == 0u || data[3] == 1u || data[3] == std::numeric_limits<deUint32>::max());
+	}
+	else if (isIntFormat(bufferFormat))
+	{
+		const deInt32* data = (deInt32*)vecPtr;
+
+		// w may be 0, 1, or the max representable signed value.
+		return data[0] == 0
+			&& data[1] == 0
+			&& data[2] == 0
+			&& (data[3] == 0 || data[3] == 1 || data[3] == std::numeric_limits<deInt32>::max());
+	}
+	else if (isFloatFormat(bufferFormat))
+	{
+		const float* data = (float*)vecPtr;
+
+		return areEqual(data[0], 0.0f)
+			&& areEqual(data[1], 0.0f)
+			&& areEqual(data[2], 0.0f)
+			&& (areEqual(data[3], 0.0f) || areEqual(data[3], 1.0f));
+	}
+	else if (bufferFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+	{
+		// Packed case: only r=g=b=0, a=3 (alpha == 1.0 in unorm2) is accepted
+		// here, i.e. the raw dword 0xc0000000.
+		return *((deUint32*)vecPtr) == 0xc0000000u;
+	}
+
+	DE_ASSERT(false);
+	return false;
+}
+
+// Fills the buffer with a deterministic per-scalar sequence of test values,
+// starting at 2 so that 0 and 1 -- values robust out-of-bounds reads may
+// legitimately return -- never occur as in-bounds data. The last partial
+// dword of a non-multiple-of-4 size is left untouched. Asserts on
+// unhandled formats.
+void populateBufferWithTestValues (void* buffer, VkDeviceSize size, VkFormat format)
+{
+	// Assign a sequence of 32-bit values
+	for (VkDeviceSize scalarNdx = 0; scalarNdx < size / 4; scalarNdx++)
+	{
+		const deUint32 valueIndex = (deUint32)(2 + scalarNdx); // Do not use 0 or 1
+
+		if (isUintFormat(format))
+		{
+			reinterpret_cast<deUint32*>(buffer)[scalarNdx] = valueIndex;
+		}
+		else if (isIntFormat(format))
+		{
+			reinterpret_cast<deInt32*>(buffer)[scalarNdx] = -deInt32(valueIndex);
+		}
+		else if (isFloatFormat(format))
+		{
+			reinterpret_cast<float*>(buffer)[scalarNdx] = float(valueIndex);
+		}
+		else if (format == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+		{
+			// Mask each channel to its true width: 10 bits for R/G/B and
+			// 2 bits for A. The former (2u << N) - 1u masks were one bit too
+			// wide (11/3 bits), so for large valueIndex the G value could
+			// bleed into B's bit range and B into A's after shifting.
+			const deUint32 r = ((valueIndex + 0) & ((1u << 10) - 1u));
+			const deUint32 g = ((valueIndex + 1) & ((1u << 10) - 1u));
+			const deUint32 b = ((valueIndex + 2) & ((1u << 10) - 1u));
+			const deUint32 a = ((valueIndex + 0) & ((1u << 2) - 1u));
+
+			reinterpret_cast<deUint32*>(buffer)[scalarNdx] = (a << 30) | (b << 20) | (g << 10) | r;
+		}
+		else
+		{
+			DE_ASSERT(false);
+		}
+	}
+}
+
+// Appends a human-readable rendering of a single value to the log message:
+// decoded as uint/int/float when the format class is known, otherwise as a
+// raw hex byte dump of valueSize bytes (valueSize is only used in that
+// fallback path).
+void logValue (std::ostringstream& logMsg, const void* valuePtr, VkFormat valueFormat, size_t valueSize)
+{
+	if (isUintFormat(valueFormat))
+	{
+		logMsg << *reinterpret_cast<const deUint32*>(valuePtr);
+	}
+	else if (isIntFormat(valueFormat))
+	{
+		logMsg << *reinterpret_cast<const deInt32*>(valuePtr);
+	}
+	else if (isFloatFormat(valueFormat))
+	{
+		logMsg << *reinterpret_cast<const float*>(valuePtr);
+	}
+	else
+	{
+		const deUint8*				bytePtr		= reinterpret_cast<const deUint8*>(valuePtr);
+		// Save stream flags so the hex mode does not leak to later output.
+		const std::ios::fmtflags	streamFlags	= logMsg.flags();
+
+		logMsg << std::hex;
+		for (size_t i = 0; i < valueSize; i++)
+		{
+			logMsg << " " << (deUint32)bytePtr[i];
+		}
+		logMsg.flags(streamFlags);
+	}
+}
+
+// TestEnvironment
+
+// Base environment shared by graphics and compute tests: allocates a
+// transient command pool on the universal queue family of the given device
+// and a single primary command buffer from it. The descriptor set layout and
+// set are stored for derived classes to bind; this class does not own them.
+TestEnvironment::TestEnvironment (Context&				context,
+								  VkDevice				device,
+								  VkDescriptorSetLayout	descriptorSetLayout,
+								  VkDescriptorSet		descriptorSet)
+	: m_context				(context)
+	, m_device				(device)
+	, m_descriptorSetLayout	(descriptorSetLayout)
+	, m_descriptorSet		(descriptorSet)
+{
+	const DeviceInterface& vk = context.getDeviceInterface();
+
+	// Create command pool
+	{
+		const VkCommandPoolCreateInfo commandPoolParams =
+		{
+			VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,		// VkStructureType			sType;
+			DE_NULL,										// const void*				pNext;
+			VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,			// VkCommandPoolCreateFlags	flags;
+			context.getUniversalQueueFamilyIndex()			// deUint32					queueFamilyIndex;
+		};
+
+		m_commandPool = createCommandPool(vk, m_device, &commandPoolParams);
+	}
+
+	// Create command buffer
+	{
+		const VkCommandBufferAllocateInfo commandBufferAllocateInfo =
+		{
+			VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,	// VkStructureType			sType;
+			DE_NULL,										// const void*				pNext;
+			*m_commandPool,									// VkCommandPool			commandPool;
+			VK_COMMAND_BUFFER_LEVEL_PRIMARY,				// VkCommandBufferLevel		level;
+			1u,												// deUint32					bufferCount;
+		};
+
+		m_commandBuffer = allocateCommandBuffer(vk, m_device, &commandBufferAllocateInfo);
+	}
+}
+
+// Returns the raw handle of the pre-recorded command buffer; ownership stays
+// with this environment.
+VkCommandBuffer TestEnvironment::getCommandBuffer (void)
+{
+	return m_commandBuffer.get();
+}
+
+// GraphicsEnvironment
+
+// Builds a complete, ready-to-submit graphics setup on the given (robust)
+// device: a 16x16 RGBA8 color target, single-subpass render pass,
+// framebuffer, pipeline layout over the supplied descriptor set layout, a
+// vertex+fragment pipeline using the caller's vertex bindings/attributes
+// (shaders "vertex"/"fragment" from the binary collection), and a fully
+// recorded command buffer that clears, binds, and issues the draw described
+// by drawConfig (indexed when an index buffer is supplied, plain otherwise).
+GraphicsEnvironment::GraphicsEnvironment (Context&					context,
+										  VkDevice					device,
+										  VkDescriptorSetLayout		descriptorSetLayout,
+										  VkDescriptorSet			descriptorSet,
+										  const VertexBindings&		vertexBindings,
+										  const VertexAttributes&	vertexAttributes,
+										  const DrawConfig&			drawConfig)
+
+	: TestEnvironment		(context, device, descriptorSetLayout, descriptorSet)
+	, m_renderSize			(16, 16)
+	, m_colorFormat			(VK_FORMAT_R8G8B8A8_UNORM)
+{
+	const DeviceInterface&		vk						= context.getDeviceInterface();
+	const deUint32				queueFamilyIndex		= context.getUniversalQueueFamilyIndex();
+	const VkComponentMapping	componentMappingRGBA	= { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
+	SimpleAllocator				memAlloc				(vk, m_device, getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()));
+
+	// Create color image and view
+	{
+		const VkImageCreateInfo colorImageParams =
+		{
+			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,									// VkStructureType			sType;
+			DE_NULL,																// const void*				pNext;
+			0u,																		// VkImageCreateFlags		flags;
+			VK_IMAGE_TYPE_2D,														// VkImageType				imageType;
+			m_colorFormat,															// VkFormat					format;
+			{ (deUint32)m_renderSize.x(), (deUint32)m_renderSize.y(), 1u },			// VkExtent3D				extent;
+			1u,																		// deUint32					mipLevels;
+			1u,																		// deUint32					arrayLayers;
+			VK_SAMPLE_COUNT_1_BIT,													// VkSampleCountFlagBits	samples;
+			VK_IMAGE_TILING_OPTIMAL,												// VkImageTiling			tiling;
+			VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,	// VkImageUsageFlags		usage;
+			VK_SHARING_MODE_EXCLUSIVE,												// VkSharingMode			sharingMode;
+			1u,																		// deUint32					queueFamilyIndexCount;
+			&queueFamilyIndex,														// const deUint32*			pQueueFamilyIndices;
+			VK_IMAGE_LAYOUT_UNDEFINED												// VkImageLayout			initialLayout;
+		};
+
+		m_colorImage		= createImage(vk, m_device, &colorImageParams);
+		m_colorImageAlloc	= memAlloc.allocate(getImageMemoryRequirements(vk, m_device, *m_colorImage), MemoryRequirement::Any);
+		VK_CHECK(vk.bindImageMemory(m_device, *m_colorImage, m_colorImageAlloc->getMemory(), m_colorImageAlloc->getOffset()));
+
+		const VkImageViewCreateInfo colorAttachmentViewParams =
+		{
+			VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,		// VkStructureType			sType;
+			DE_NULL,										// const void*				pNext;
+			0u,												// VkImageViewCreateFlags	flags;
+			*m_colorImage,									// VkImage					image;
+			VK_IMAGE_VIEW_TYPE_2D,							// VkImageViewType			viewType;
+			m_colorFormat,									// VkFormat					format;
+			componentMappingRGBA,							// VkComponentMapping		components;
+			{ VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u }	// VkImageSubresourceRange	subresourceRange;
+		};
+
+		m_colorAttachmentView = createImageView(vk, m_device, &colorAttachmentViewParams);
+	}
+
+	// Create render pass
+	{
+		const VkAttachmentDescription colorAttachmentDescription =
+		{
+			0u,												// VkAttachmentDescriptionFlags		flags;
+			m_colorFormat,									// VkFormat							format;
+			VK_SAMPLE_COUNT_1_BIT,							// VkSampleCountFlagBits			samples;
+			VK_ATTACHMENT_LOAD_OP_CLEAR,					// VkAttachmentLoadOp				loadOp;
+			VK_ATTACHMENT_STORE_OP_STORE,					// VkAttachmentStoreOp				storeOp;
+			VK_ATTACHMENT_LOAD_OP_DONT_CARE,				// VkAttachmentLoadOp				stencilLoadOp;
+			VK_ATTACHMENT_STORE_OP_DONT_CARE,				// VkAttachmentStoreOp				stencilStoreOp;
+			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,		// VkImageLayout					initialLayout;
+			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL		// VkImageLayout					finalLayout;
+		};
+
+		const VkAttachmentReference colorAttachmentReference =
+		{
+			0u,												// deUint32			attachment;
+			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL		// VkImageLayout	layout;
+		};
+
+		const VkSubpassDescription subpassDescription =
+		{
+			0u,												// VkSubpassDescriptionFlags	flags;
+			VK_PIPELINE_BIND_POINT_GRAPHICS,				// VkPipelineBindPoint			pipelineBindPoint;
+			0u,												// deUint32						inputAttachmentCount;
+			DE_NULL,										// const VkAttachmentReference*	pInputAttachments;
+			1u,												// deUint32						colorAttachmentCount;
+			&colorAttachmentReference,						// const VkAttachmentReference*	pColorAttachments;
+			DE_NULL,										// const VkAttachmentReference*	pResolveAttachments;
+			DE_NULL,										// const VkAttachmentReference*	pDepthStencilAttachment;
+			0u,												// deUint32						preserveAttachmentCount;
+			DE_NULL											// const VkAttachmentReference*	pPreserveAttachments;
+		};
+
+		const VkRenderPassCreateInfo renderPassParams =
+		{
+			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,		// VkStructureType					sType;
+			DE_NULL,										// const void*						pNext;
+			0u,												// VkRenderPassCreateFlags			flags;
+			1u,												// deUint32							attachmentCount;
+			&colorAttachmentDescription,					// const VkAttachmentDescription*	pAttachments;
+			1u,												// deUint32							subpassCount;
+			&subpassDescription,							// const VkSubpassDescription*		pSubpasses;
+			0u,												// deUint32							dependencyCount;
+			DE_NULL											// const VkSubpassDependency*		pDependencies;
+		};
+
+		m_renderPass = createRenderPass(vk, m_device, &renderPassParams);
+	}
+
+	// Create framebuffer
+	{
+		const VkFramebufferCreateInfo framebufferParams =
+		{
+			VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,		// VkStructureType			sType;
+			DE_NULL,										// const void*				pNext;
+			0u,												// VkFramebufferCreateFlags	flags;
+			*m_renderPass,									// VkRenderPass				renderPass;
+			1u,												// deUint32					attachmentCount;
+			&m_colorAttachmentView.get(),					// const VkImageView*		pAttachments;
+			(deUint32)m_renderSize.x(),						// deUint32					width;
+			(deUint32)m_renderSize.y(),						// deUint32					height;
+			1u												// deUint32					layers;
+		};
+
+		m_framebuffer = createFramebuffer(vk, m_device, &framebufferParams);
+	}
+
+	// Create pipeline layout
+	{
+		const VkPipelineLayoutCreateInfo pipelineLayoutParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	// VkStructureType				sType;
+			DE_NULL,										// const void*					pNext;
+			0u,												// VkPipelineLayoutCreateFlags	flags;
+			1u,												// deUint32						setLayoutCount;
+			&m_descriptorSetLayout,							// const VkDescriptorSetLayout*	pSetLayouts;
+			0u,												// deUint32						pushConstantRangeCount;
+			DE_NULL											// const VkPushConstantRange*	pPushConstantRanges;
+		};
+
+		m_pipelineLayout = createPipelineLayout(vk, m_device, &pipelineLayoutParams);
+	}
+
+	// Shaders are taken from the test's pre-built binary collection.
+	m_vertexShaderModule	= createShaderModule(vk, m_device, m_context.getBinaryCollection().get("vertex"), 0);
+	m_fragmentShaderModule	= createShaderModule(vk, m_device, m_context.getBinaryCollection().get("fragment"), 0);
+
+	// Create pipeline
+	{
+		const VkPipelineShaderStageCreateInfo renderShaderStages[2] =
+		{
+			{
+				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType					sType;
+				DE_NULL,												// const void*						pNext;
+				0u,														// VkPipelineShaderStageCreateFlags	flags;
+				VK_SHADER_STAGE_VERTEX_BIT,								// VkShaderStageFlagBits			stage;
+				*m_vertexShaderModule,									// VkShaderModule					module;
+				"main",													// const char*						pName;
+				DE_NULL													// const VkSpecializationInfo*		pSpecializationInfo;
+			},
+			{
+				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType					sType;
+				DE_NULL,												// const void*						pNext;
+				0u,														// VkPipelineShaderStageCreateFlags	flags;
+				VK_SHADER_STAGE_FRAGMENT_BIT,							// VkShaderStageFlagBits			stage;
+				*m_fragmentShaderModule,								// VkShaderModule					module;
+				"main",													// const char*						pName;
+				DE_NULL													// const VkSpecializationInfo*		pSpecializationInfo;
+			}
+		};
+
+		// Vertex input layout comes straight from the caller so each test can
+		// describe the (possibly out-of-bounds) fetches it wants to provoke.
+		const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
+			DE_NULL,													// const void*								pNext;
+			0u,															// VkPipelineVertexInputStateCreateFlags	flags;
+			(deUint32)vertexBindings.size(),							// deUint32									vertexBindingDescriptionCount;
+			vertexBindings.data(),										// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
+			(deUint32)vertexAttributes.size(),							// deUint32									vertexAttributeDescriptionCount;
+			vertexAttributes.data()										// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
+		};
+
+		const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,	// VkStructureType							sType;
+			DE_NULL,														// const void*								pNext;
+			0u,																// VkPipelineInputAssemblyStateCreateFlags	flags;
+			VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,							// VkPrimitiveTopology						topology;
+			false															// VkBool32									primitiveRestartEnable;
+		};
+
+		const VkViewport viewport =
+		{
+			0.0f,						// float	x;
+			0.0f,						// float	y;
+			(float)m_renderSize.x(),	// float	width;
+			(float)m_renderSize.y(),	// float	height;
+			0.0f,						// float	minDepth;
+			1.0f						// float	maxDepth;
+		};
+
+		const VkRect2D scissor = { { 0, 0 }, { (deUint32)m_renderSize.x(), (deUint32)m_renderSize.y() } };
+
+		const VkPipelineViewportStateCreateInfo viewportStateParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType						sType;
+			DE_NULL,												// const void*							pNext;
+			0u,														// VkPipelineViewportStateCreateFlags	flags;
+			1u,														// deUint32								viewportCount;
+			&viewport,												// const VkViewport*					pViewports;
+			1u,														// deUint32								scissorCount;
+			&scissor												// const VkRect2D*						pScissors;
+		};
+
+		const VkPipelineRasterizationStateCreateInfo rasterStateParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,	// VkStructureType							sType;
+			DE_NULL,													// const void*								pNext;
+			0u,															// VkPipelineRasterizationStateCreateFlags	flags;
+			false,														// VkBool32									depthClampEnable;
+			false,														// VkBool32									rasterizerDiscardEnable;
+			VK_POLYGON_MODE_FILL,										// VkPolygonMode							polygonMode;
+			VK_CULL_MODE_NONE,											// VkCullModeFlags							cullMode;
+			VK_FRONT_FACE_COUNTER_CLOCKWISE,							// VkFrontFace								frontFace;
+			false,														// VkBool32									depthBiasEnable;
+			0.0f,														// float									depthBiasConstantFactor;
+			0.0f,														// float									depthBiasClamp;
+			0.0f,														// float									depthBiasSlopeFactor;
+			1.0f														// float									lineWidth;
+		};
+
+		const VkPipelineColorBlendAttachmentState colorBlendAttachmentState =
+		{
+			false,														// VkBool32					blendEnable;
+			VK_BLEND_FACTOR_ONE,										// VkBlendFactor			srcColorBlendFactor;
+			VK_BLEND_FACTOR_ZERO,										// VkBlendFactor			dstColorBlendFactor;
+			VK_BLEND_OP_ADD,											// VkBlendOp				colorBlendOp;
+			VK_BLEND_FACTOR_ONE,										// VkBlendFactor			srcAlphaBlendFactor;
+			VK_BLEND_FACTOR_ZERO,										// VkBlendFactor			dstAlphaBlendFactor;
+			VK_BLEND_OP_ADD,											// VkBlendOp				alphaBlendOp;
+			VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |		// VkColorComponentFlags	colorWriteMask;
+				VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT
+		};
+
+		const VkPipelineColorBlendStateCreateInfo colorBlendStateParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,	// VkStructureType								sType;
+			DE_NULL,													// const void*									pNext;
+			0u,															// VkPipelineColorBlendStateCreateFlags			flags;
+			false,														// VkBool32										logicOpEnable;
+			VK_LOGIC_OP_COPY,											// VkLogicOp									logicOp;
+			1u,															// deUint32										attachmentCount;
+			&colorBlendAttachmentState,									// const VkPipelineColorBlendAttachmentState*	pAttachments;
+			{ 0.0f, 0.0f, 0.0f, 0.0f }									// float										blendConstants[4];
+		};
+
+		const VkPipelineMultisampleStateCreateInfo multisampleStateParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType							sType;
+			DE_NULL,													// const void*								pNext;
+			0u,															// VkPipelineMultisampleStateCreateFlags	flags;
+			VK_SAMPLE_COUNT_1_BIT,										// VkSampleCountFlagBits					rasterizationSamples;
+			false,														// VkBool32									sampleShadingEnable;
+			0.0f,														// float									minSampleShading;
+			DE_NULL,													// const VkSampleMask*						pSampleMask;
+			false,														// VkBool32									alphaToCoverageEnable;
+			false														// VkBool32									alphaToOneEnable;
+		};
+
+		// Depth/stencil tests are fully disabled; the state below is inert.
+		VkPipelineDepthStencilStateCreateInfo depthStencilStateParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,	// VkStructureType							sType;
+			DE_NULL,													// const void*								pNext;
+			0u,															// VkPipelineDepthStencilStateCreateFlags	flags;
+			false,														// VkBool32									depthTestEnable;
+			false,														// VkBool32									depthWriteEnable;
+			VK_COMPARE_OP_LESS,											// VkCompareOp								depthCompareOp;
+			false,														// VkBool32									depthBoundsTestEnable;
+			false,														// VkBool32									stencilTestEnable;
+			{															// VkStencilOpState							front;
+				VK_STENCIL_OP_ZERO,		// VkStencilOp	failOp;
+				VK_STENCIL_OP_ZERO,		// VkStencilOp	passOp;
+				VK_STENCIL_OP_ZERO,		// VkStencilOp	depthFailOp;
+				VK_COMPARE_OP_NEVER,	// VkCompareOp	compareOp;
+				0u,						// deUint32		compareMask;
+				0u,						// deUint32		writeMask;
+				0u						// deUint32		reference;
+			},
+			{															// VkStencilOpState							back;
+				VK_STENCIL_OP_ZERO,		// VkStencilOp	failOp;
+				VK_STENCIL_OP_ZERO,		// VkStencilOp	passOp;
+				VK_STENCIL_OP_ZERO,		// VkStencilOp	depthFailOp;
+				VK_COMPARE_OP_NEVER,	// VkCompareOp	compareOp;
+				0u,						// deUint32		compareMask;
+				0u,						// deUint32		writeMask;
+				0u						// deUint32		reference;
+			},
+			-1.0f,														// float									minDepthBounds;
+			+1.0f														// float									maxDepthBounds;
+		};
+
+		const VkGraphicsPipelineCreateInfo graphicsPipelineParams =
+		{
+			VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
+			DE_NULL,											// const void*										pNext;
+			0u,													// VkPipelineCreateFlags							flags;
+			2u,													// deUint32											stageCount;
+			renderShaderStages,									// const VkPipelineShaderStageCreateInfo*			pStages;
+			&vertexInputStateParams,							// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
+			&inputAssemblyStateParams,							// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
+			DE_NULL,											// const VkPipelineTessellationStateCreateInfo*		pTessellationState;
+			&viewportStateParams,								// const VkPipelineViewportStateCreateInfo*			pViewportState;
+			&rasterStateParams,									// const VkPipelineRasterizationStateCreateInfo*	pRasterizationState;
+			&multisampleStateParams,							// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
+			&depthStencilStateParams,							// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
+			&colorBlendStateParams,								// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
+			DE_NULL,											// const VkPipelineDynamicStateCreateInfo*			pDynamicState;
+			*m_pipelineLayout,									// VkPipelineLayout									layout;
+			*m_renderPass,										// VkRenderPass										renderPass;
+			0u,													// deUint32											subpass;
+			0u,													// VkPipeline										basePipelineHandle;
+			0u													// deInt32											basePipelineIndex;
+		};
+
+		m_graphicsPipeline = createGraphicsPipeline(vk, m_device, DE_NULL, &graphicsPipelineParams);
+	}
+
+	// Record commands
+	{
+		const VkCommandBufferBeginInfo commandBufferBeginInfo =
+		{
+			VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,	// VkStructureType						sType;
+			DE_NULL,										// const void*							pNext;
+			0u,												// VkCommandBufferUsageFlags			flags;
+			(const VkCommandBufferInheritanceInfo*)DE_NULL,	// const VkCommandBufferInheritanceInfo	*pInheritanceInfo;
+		};
+
+		// Clear to opaque black so any fragment written by the test shaders
+		// is detectable.
+		VkClearValue attachmentClearValue;
+		attachmentClearValue.color.float32[0] = 0.0f;
+		attachmentClearValue.color.float32[1] = 0.0f;
+		attachmentClearValue.color.float32[2] = 0.0f;
+		attachmentClearValue.color.float32[3] = 0.0f;
+
+		const VkRenderPassBeginInfo renderPassBeginInfo =
+		{
+			VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,		// VkStructureType		sType;
+			DE_NULL,										// const void*			pNext;
+			*m_renderPass,									// VkRenderPass			renderPass;
+			*m_framebuffer,									// VkFramebuffer		framebuffer;
+			{
+				{ 0, 0 },
+				{ (deUint32)m_renderSize.x(), (deUint32)m_renderSize.y() }
+			},												// VkRect2D				renderArea;
+			1,												// deUint32				clearValueCount;
+			&attachmentClearValue							// const VkClearValue*	pClearValues;
+		};
+
+		// Transition the color image to COLOR_ATTACHMENT_OPTIMAL before the
+		// render pass (the pass declares that as its initial layout).
+		const VkImageMemoryBarrier imageLayoutBarrier =
+		{
+			VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,			// VkStructureType			sType;
+			DE_NULL,										// const void*				pNext;
+			(VkAccessFlags)0,								// VkAccessFlags			srcAccessMask;
+			VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,			// VkAccessFlags			dstAccessMask;
+			VK_IMAGE_LAYOUT_UNDEFINED,						// VkImageLayout			oldLayout;
+			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,		// VkImageLayout			newLayout;
+			VK_QUEUE_FAMILY_IGNORED,						// uint32_t					srcQueueFamilyIndex;
+			VK_QUEUE_FAMILY_IGNORED,						// uint32_t					dstQueueFamilyIndex;
+			*m_colorImage,									// VkImage					image;
+			{ VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u }	// VkImageSubresourceRange	subresourceRange;
+		};
+
+		VK_CHECK(vk.beginCommandBuffer(*m_commandBuffer, &commandBufferBeginInfo));
+		{
+			vk.cmdPipelineBarrier(*m_commandBuffer,
+								  VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+								  VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+								  (VkDependencyFlags)0,
+								  0u, DE_NULL,
+								  0u, DE_NULL,
+								  1u, &imageLayoutBarrier);
+
+			vk.cmdBeginRenderPass(*m_commandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
+			{
+				// All vertex buffers are bound at offset 0.
+				const std::vector<VkDeviceSize> vertexBufferOffsets(drawConfig.vertexBuffers.size(), 0ull);
+
+				vk.cmdBindPipeline(*m_commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_graphicsPipeline);
+				vk.cmdBindDescriptorSets(*m_commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0, 1, &m_descriptorSet, 0, DE_NULL);
+				vk.cmdBindVertexBuffers(*m_commandBuffer, 0, (deUint32)drawConfig.vertexBuffers.size(), drawConfig.vertexBuffers.data(), vertexBufferOffsets.data());
+
+				// Indexed draw only when an index buffer with indices is given.
+				if (drawConfig.indexBuffer == DE_NULL || drawConfig.indexCount == 0)
+				{
+					vk.cmdDraw(*m_commandBuffer, drawConfig.vertexCount, drawConfig.instanceCount, 0, 0);
+				}
+				else
+				{
+					vk.cmdBindIndexBuffer(*m_commandBuffer, drawConfig.indexBuffer, 0, VK_INDEX_TYPE_UINT32);
+					vk.cmdDrawIndexed(*m_commandBuffer, drawConfig.indexCount, drawConfig.instanceCount, 0, 0, 0);
+				}
+			}
+			vk.cmdEndRenderPass(*m_commandBuffer);
+		}
+		VK_CHECK(vk.endCommandBuffer(*m_commandBuffer));
+	}
+}
+
+// ComputeEnvironment
+
+// Builds the compute counterpart: a pipeline layout over the supplied
+// descriptor set layout, a compute pipeline from the "compute" shader in the
+// binary collection, and a recorded command buffer that binds both and
+// dispatches a 32x32x1 grid of workgroups.
+ComputeEnvironment::ComputeEnvironment (Context&				context,
+										VkDevice				device,
+										VkDescriptorSetLayout	descriptorSetLayout,
+										VkDescriptorSet			descriptorSet)
+
+	: TestEnvironment (context, device, descriptorSetLayout, descriptorSet)
+{
+	const DeviceInterface& vk = context.getDeviceInterface();
+
+	// Create pipeline layout
+	{
+		const VkPipelineLayoutCreateInfo pipelineLayoutParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	// VkStructureType				sType;
+			DE_NULL,										// const void*					pNext;
+			0u,												// VkPipelineLayoutCreateFlags	flags;
+			1u,												// deUint32						setLayoutCount;
+			&m_descriptorSetLayout,							// const VkDescriptorSetLayout*	pSetLayouts;
+			0u,												// deUint32						pushConstantRangeCount;
+			DE_NULL											// const VkPushConstantRange*	pPushConstantRanges;
+		};
+
+		m_pipelineLayout = createPipelineLayout(vk, m_device, &pipelineLayoutParams);
+	}
+
+	// Create compute pipeline
+	{
+		m_computeShaderModule = createShaderModule(vk, m_device, m_context.getBinaryCollection().get("compute"), 0);
+
+		const VkPipelineShaderStageCreateInfo computeStageParams =
+		{
+			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType					sType;
+			DE_NULL,												// const void*						pNext;
+			0u,														// VkPipelineShaderStageCreateFlags	flags;
+			VK_SHADER_STAGE_COMPUTE_BIT,							// VkShaderStageFlagBits			stage;
+			*m_computeShaderModule,									// VkShaderModule					module;
+			"main",													// const char*						pName;
+			DE_NULL,												// const VkSpecializationInfo*		pSpecializationInfo;
+		};
+
+		const VkComputePipelineCreateInfo computePipelineParams =
+		{
+			VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,			// VkStructureType					sType;
+			DE_NULL,												// const void*						pNext;
+			0u,														// VkPipelineCreateFlags			flags;
+			computeStageParams,										// VkPipelineShaderStageCreateInfo	stage;
+			*m_pipelineLayout,										// VkPipelineLayout					layout;
+			DE_NULL,												// VkPipeline						basePipelineHandle;
+			0u														// deInt32							basePipelineIndex;
+		};
+
+		m_computePipeline = createComputePipeline(vk, m_device, DE_NULL, &computePipelineParams);
+	}
+
+	// Record commands
+	{
+		const VkCommandBufferBeginInfo commandBufferBeginInfo =
+		{
+			VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,	// VkStructureType						sType;
+			DE_NULL,										// const void*							pNext;
+			0u,												// VkCommandBufferUsageFlags			flags;
+			(const VkCommandBufferInheritanceInfo*)DE_NULL,	// const VkCommandBufferInheritanceInfo	*pInheritanceInfo;
+		};
+
+		VK_CHECK(vk.beginCommandBuffer(*m_commandBuffer, &commandBufferBeginInfo));
+		vk.cmdBindPipeline(*m_commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_computePipeline);
+		vk.cmdBindDescriptorSets(*m_commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0, 1, &m_descriptorSet, 0, DE_NULL);
+		vk.cmdDispatch(*m_commandBuffer, 32, 32, 1);
+		VK_CHECK(vk.endCommandBuffer(*m_commandBuffer));
+	}
+}
+
+} // robustness
+} // vkt
--- /dev/null
+#ifndef _VKTROBUSTNESSUTIL_HPP
+#define _VKTROBUSTNESSUTIL_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ * Copyright (c) 2016 Imagination Technologies Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Robustness Utilities
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "vkDefs.hpp"
+#include "vkRefUtil.hpp"
+#include "vktTestCase.hpp"
+#include "vkMemUtil.hpp"
+#include "deUniquePtr.hpp"
+#include "tcuVectorUtil.hpp"
+
+namespace vkt
+{
+namespace robustness
+{
+
+// Device creation with robustBufferAccess enabled.
+vk::Move<vk::VkDevice>	createRobustBufferAccessDevice	(Context& context);
+
+// Value-inspection helpers used to validate robust out-of-bounds behavior.
+bool					areEqual						(float a, float b);
+bool					isValueZero						(const void* valuePtr, size_t valueSize);
+bool					isValueWithinBuffer				(const void* buffer, vk::VkDeviceSize bufferSize, const void* valuePtr, size_t valueSizeInBytes);
+bool					isValueWithinBufferOrZero		(const void* buffer, vk::VkDeviceSize bufferSize, const void* valuePtr, size_t valueSizeInBytes);
+bool					verifyOutOfBoundsVec4			(const void* vecPtr, vk::VkFormat bufferFormat);
+
+// Test-data generation and logging helpers.
+void					populateBufferWithTestValues	(void* buffer, vk::VkDeviceSize size, vk::VkFormat format);
+void					logValue						(std::ostringstream& logMsg, const void* valuePtr, vk::VkFormat valueFormat, size_t valueSize);
+
+// Base class owning the command pool/buffer shared by the graphics and
+// compute test environments. Derived constructors record the command buffer;
+// tests retrieve it via getCommandBuffer() and submit it themselves.
+class TestEnvironment
+{
+public:
+							TestEnvironment		(Context&					context,
+												 vk::VkDevice				device,
+												 vk::VkDescriptorSetLayout	descriptorSetLayout,
+												 vk::VkDescriptorSet		descriptorSet);
+
+	virtual					~TestEnvironment	(void) {}
+
+	// Returns the pre-recorded command buffer; ownership stays with this class.
+	virtual vk::VkCommandBuffer	getCommandBuffer	(void);
+
+protected:
+	Context&					m_context;
+	vk::VkDevice				m_device;				// Borrowed; not destroyed here.
+	vk::VkDescriptorSetLayout	m_descriptorSetLayout;	// Borrowed; owned by the test instance.
+	vk::VkDescriptorSet			m_descriptorSet;		// Borrowed; owned by the test instance.
+
+	vk::Move<vk::VkCommandPool>		m_commandPool;
+	vk::Move<vk::VkCommandBuffer>	m_commandBuffer;
+};
+
+// Graphics variant: sets up a small render target, render pass, pipeline and
+// a recorded draw described by DrawConfig (see the .cpp for details).
+class GraphicsEnvironment: public TestEnvironment
+{
+public:
+	typedef std::vector<vk::VkVertexInputBindingDescription>	VertexBindings;
+	typedef std::vector<vk::VkVertexInputAttributeDescription>	VertexAttributes;
+
+	// Parameters of the draw to record. When indexBuffer is DE_NULL or
+	// indexCount is 0, a non-indexed vkCmdDraw is recorded instead.
+	struct DrawConfig
+	{
+		std::vector<vk::VkBuffer>	vertexBuffers;
+		deUint32					vertexCount;
+		deUint32					instanceCount;
+
+		vk::VkBuffer				indexBuffer;
+		deUint32					indexCount;
+	};
+
+								GraphicsEnvironment		(Context&					context,
+														 vk::VkDevice				device,
+														 vk::VkDescriptorSetLayout	descriptorSetLayout,
+														 vk::VkDescriptorSet		descriptorSet,
+														 const VertexBindings&		vertexBindings,
+														 const VertexAttributes&	vertexAttributes,
+														 const DrawConfig&			drawConfig);
+
+	virtual						~GraphicsEnvironment	(void) {}
+
+private:
+	const tcu::UVec2				m_renderSize;
+	const vk::VkFormat				m_colorFormat;
+
+	vk::Move<vk::VkImage>			m_colorImage;
+	de::MovePtr<vk::Allocation>		m_colorImageAlloc;
+	vk::Move<vk::VkImageView>		m_colorAttachmentView;
+	vk::Move<vk::VkRenderPass>		m_renderPass;
+	vk::Move<vk::VkFramebuffer>		m_framebuffer;
+
+	vk::Move<vk::VkShaderModule>	m_vertexShaderModule;
+	vk::Move<vk::VkShaderModule>	m_fragmentShaderModule;
+
+	// NOTE(review): not populated by the constructor in the .cpp -- vertex
+	// buffers come from DrawConfig; these members appear unused. Confirm.
+	vk::Move<vk::VkBuffer>			m_vertexBuffer;
+	de::MovePtr<vk::Allocation>		m_vertexBufferAlloc;
+
+	vk::Move<vk::VkPipelineLayout>	m_pipelineLayout;
+	vk::Move<vk::VkPipeline>		m_graphicsPipeline;
+};
+
+// Compute variant: builds a compute pipeline from the "compute" shader and
+// records a fixed 32x32x1 dispatch.
+class ComputeEnvironment: public TestEnvironment
+{
+public:
+								ComputeEnvironment		(Context&					context,
+														 vk::VkDevice				device,
+														 vk::VkDescriptorSetLayout	descriptorSetLayout,
+														 vk::VkDescriptorSet		descriptorSet);
+
+	virtual						~ComputeEnvironment		(void) {}
+
+private:
+	vk::Move<vk::VkShaderModule>	m_computeShaderModule;
+	vk::Move<vk::VkPipelineLayout>	m_pipelineLayout;
+	vk::Move<vk::VkPipeline>		m_computePipeline;
+};
+
+} // robustness
+} // vkt
+
+#endif // _VKTROBUSTNESSUTIL_HPP
--- /dev/null
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ * Copyright (c) 2016 Imagination Technologies Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Robust Vertex Buffer Access Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktRobustnessVertexAccessTests.hpp"
+#include "vktRobustnessUtil.hpp"
+#include "vktTestCaseUtil.hpp"
+#include "vkBuilderUtil.hpp"
+#include "vkImageUtil.hpp"
+#include "vkMemUtil.hpp"
+#include "vkPrograms.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkRef.hpp"
+#include "vkRefUtil.hpp"
+#include "vkTypeUtil.hpp"
+#include "deMath.h"
+#include "deUniquePtr.hpp"
+#include <vector>
+
+namespace vkt
+{
+namespace robustness
+{
+
+using namespace vk;
+
+typedef std::vector<VkVertexInputBindingDescription> BindingList;
+typedef std::vector<VkVertexInputAttributeDescription> AttributeList;
+
+// Abstract base for robust vertex-fetch tests. Holds the input format and
+// the vertex/instance data and draw sizes; initPrograms() builds the shaders
+// while createInstance() is left pure for the draw/indexed variants below.
+class VertexAccessTest : public vkt::TestCase
+{
+public:
+						VertexAccessTest	(tcu::TestContext&		testContext,
+											 const std::string&		name,
+											 const std::string&		description,
+											 VkFormat				inputFormat,
+											 deUint32				numVertexValues,
+											 deUint32				numInstanceValues,
+											 deUint32				numVertices,
+											 deUint32				numInstances);
+
+	virtual				~VertexAccessTest	(void) {}
+
+	void				initPrograms		(SourceCollections& programCollection) const;
+	TestInstance*		createInstance		(Context& context) const = 0;
+
+protected:
+	const VkFormat		m_inputFormat;			// Format of the vertex attribute data.
+	const deUint32		m_numVertexValues;		// Scalars in the per-vertex-rate buffer.
+	const deUint32		m_numInstanceValues;	// Scalars in the per-instance-rate buffer.
+	const deUint32		m_numVertices;
+	const deUint32		m_numInstances;
+
+};
+
+// Non-indexed draw variant: out-of-bounds fetches are provoked purely by the
+// vertex/instance counts versus the buffer sizes passed to the base class.
+class DrawAccessTest : public VertexAccessTest
+{
+public:
+						DrawAccessTest		(tcu::TestContext&		testContext,
+											 const std::string&		name,
+											 const std::string&		description,
+											 VkFormat				inputFormat,
+											 deUint32				numVertexValues,
+											 deUint32				numInstanceValues,
+											 deUint32				numVertices,
+											 deUint32				numInstances);
+
+	virtual				~DrawAccessTest		(void) {}
+	TestInstance*		createInstance		(Context& context) const;
+
+protected:
+};
+
+// Indexed draw variant: out-of-bounds fetches are provoked by index values,
+// selected by one of the predefined index configurations below.
+class DrawIndexedAccessTest : public VertexAccessTest
+{
+public:
+	// Which indices in the index buffer point past the vertex data.
+	enum IndexConfig
+	{
+		INDEX_CONFIG_LAST_INDEX_OUT_OF_BOUNDS,	// Only the final index is OOB.
+		INDEX_CONFIG_INDICES_OUT_OF_BOUNDS,		// Several scattered indices are OOB.
+		INDEX_CONFIG_TRIANGLE_OUT_OF_BOUNDS,	// A whole triangle's indices are OOB.
+
+		INDEX_CONFIG_COUNT
+	};
+
+	// One fixed index list per configuration (defined in the .cpp).
+	const static std::vector<deUint32> s_indexConfigs[INDEX_CONFIG_COUNT];
+
+						DrawIndexedAccessTest	(tcu::TestContext&		testContext,
+												 const std::string&		name,
+												 const std::string&		description,
+												 VkFormat				inputFormat,
+												 IndexConfig			indexConfig);
+
+	virtual				~DrawIndexedAccessTest	(void) {}
+	TestInstance*		createInstance			(Context& context) const;
+
+protected:
+	const IndexConfig	m_indexConfig;
+};
+
+// Instance base class: owns all Vulkan objects (vertex/index/result buffers,
+// descriptor set, fence) and runs one draw whose attribute fetches are then
+// checked against robustBufferAccess guarantees in verifyResult().
+class VertexAccessInstance : public vkt::TestInstance
+{
+public:
+ VertexAccessInstance (Context& context,
+ Move<VkDevice> device,
+ VkFormat inputFormat,
+ deUint32 numVertexValues,
+ deUint32 numInstanceValues,
+ deUint32 numVertices,
+ deUint32 numInstances,
+ const std::vector<deUint32> indices);
+
+ virtual ~VertexAccessInstance (void) {}
+ virtual tcu::TestStatus iterate (void);
+ virtual bool verifyResult (void);
+
+private:
+ // Fixed: parameter was misspelled "valueIndexa"; the definition uses "valueIndex".
+ bool isValueWithinVertexBufferOrZero (void* vertexBuffer, VkDeviceSize vertexBufferSize, const void* value, deUint32 valueIndex);
+
+protected:
+ static bool isExpectedValueFromVertexBuffer (const void* vertexBuffer, deUint32 vertexIndex, VkFormat vertexFormat, const void* value);
+ static VkDeviceSize getBufferSizeInBytes (deUint32 numScalars, VkFormat format);
+
+ // Fills the vertex-number buffer; mapping differs for plain vs indexed draws.
+ virtual void initVertexIds (deUint32 *indicesPtr, size_t indexCount) = 0;
+ // Maps a vertex number to the buffer element it fetches.
+ virtual deUint32 getIndex (deUint32 vertexNum) const = 0;
+
+ Move<VkDevice> m_device;
+
+ const VkFormat m_inputFormat;
+ const deUint32 m_numVertexValues;
+ const deUint32 m_numInstanceValues;
+ const deUint32 m_numVertices;
+ const deUint32 m_numInstances;
+ AttributeList m_vertexInputAttributes;
+ BindingList m_vertexInputBindings;
+
+ Move<VkBuffer> m_vertexRateBuffer;
+ VkDeviceSize m_vertexRateBufferSize;
+ de::MovePtr<Allocation> m_vertexRateBufferAlloc;
+ VkDeviceSize m_vertexRateBufferAllocSize;
+
+ Move<VkBuffer> m_instanceRateBuffer;
+ VkDeviceSize m_instanceRateBufferSize;
+ de::MovePtr<Allocation> m_instanceRateBufferAlloc;
+ VkDeviceSize m_instanceRateBufferAllocSize;
+
+ Move<VkBuffer> m_vertexNumBuffer;
+ VkDeviceSize m_vertexNumBufferSize;
+ de::MovePtr<Allocation> m_vertexNumBufferAlloc;
+
+ Move<VkBuffer> m_indexBuffer;
+ VkDeviceSize m_indexBufferSize;
+ de::MovePtr<Allocation> m_indexBufferAlloc;
+
+ Move<VkBuffer> m_outBuffer; // SSBO
+ VkDeviceSize m_outBufferSize;
+ de::MovePtr<Allocation> m_outBufferAlloc;
+
+ Move<VkDescriptorPool> m_descriptorPool;
+ Move<VkDescriptorSetLayout> m_descriptorSetLayout;
+ Move<VkDescriptorSet> m_descriptorSet;
+
+ Move<VkFence> m_fence;
+ VkQueue m_queue;
+
+ de::MovePtr<GraphicsEnvironment> m_graphicsTestEnvironment;
+};
+
+// Instance for plain (non-indexed) draws: vertex number i fetches element i.
+class DrawAccessInstance : public VertexAccessInstance
+{
+public:
+ DrawAccessInstance (Context& context,
+ Move<VkDevice> device,
+ VkFormat inputFormat,
+ deUint32 numVertexValues,
+ deUint32 numInstanceValues,
+ deUint32 numVertices,
+ deUint32 numInstances);
+
+ virtual ~DrawAccessInstance (void) {}
+
+protected:
+ virtual void initVertexIds (deUint32 *indicesPtr, size_t indexCount);
+ virtual deUint32 getIndex (deUint32 vertexNum) const;
+};
+
+// Instance for indexed draws: vertex number n fetches element m_indices[n].
+class DrawIndexedAccessInstance : public VertexAccessInstance
+{
+public:
+ DrawIndexedAccessInstance (Context& context,
+ Move<VkDevice> device,
+ VkFormat inputFormat,
+ deUint32 numVertexValues,
+ deUint32 numInstanceValues,
+ deUint32 numVertices,
+ deUint32 numInstances,
+ const std::vector<deUint32>& indices);
+
+ virtual ~DrawIndexedAccessInstance (void) {}
+
+protected:
+ virtual void initVertexIds (deUint32 *indicesPtr, size_t indexCount);
+ virtual deUint32 getIndex (deUint32 vertexNum) const;
+
+ const std::vector<deUint32> m_indices; // index buffer contents used by the draw
+};
+
+// VertexAccessTest
+
+// Stores the test parameters; no Vulkan work happens until createInstance().
+VertexAccessTest::VertexAccessTest (tcu::TestContext& testContext,
+ const std::string& name,
+ const std::string& description,
+ VkFormat inputFormat,
+ deUint32 numVertexValues,
+ deUint32 numInstanceValues,
+ deUint32 numVertices,
+ deUint32 numInstances)
+
+ : vkt::TestCase (testContext, name, description)
+ , m_inputFormat (inputFormat)
+ , m_numVertexValues (numVertexValues)
+ , m_numInstanceValues (numInstanceValues)
+ , m_numVertices (numVertices)
+ , m_numInstances (numInstances)
+{
+}
+
+// Builds the vertex shader: three identical attributes (locations 0..2) plus a
+// per-vertex "vertexNum" attribute (location 3). Each fetched scalar is written
+// to a fixed SSBO slot derived from instance id, vertex number and channel, so
+// the host can later compare the fetched values against the source buffers.
+void VertexAccessTest::initPrograms (SourceCollections& programCollection) const
+{
+ std::ostringstream attributeDeclaration;
+ std::ostringstream attributeUse;
+
+ std::ostringstream vertexShaderSource;
+ std::ostringstream fragmentShaderSource;
+
+ std::ostringstream attributeTypeStr;
+ const int numChannels = getNumUsedChannels(mapVkFormat(m_inputFormat).order);
+ const deUint32 numScalarsPerVertex = numChannels * 3; // Use 3 identical attributes
+ deUint32 numValues = 0;
+
+ // Pick the GLSL attribute type matching the input format's base type and width.
+ if (numChannels == 1)
+ {
+ if (isUintFormat(m_inputFormat))
+ attributeTypeStr << "uint";
+ else if (isIntFormat(m_inputFormat))
+ attributeTypeStr << "int";
+ else
+ attributeTypeStr << "float";
+ }
+ else
+ {
+ if (isUintFormat(m_inputFormat))
+ attributeTypeStr << "uvec";
+ else if (isIntFormat(m_inputFormat))
+ attributeTypeStr << "ivec";
+ else
+ attributeTypeStr << "vec";
+
+ attributeTypeStr << numChannels;
+ }
+
+ // Declare attr0..attr2 and emit one SSBO store per channel of each attribute.
+ for (int attrNdx = 0; attrNdx < 3; attrNdx++)
+ {
+ attributeDeclaration << "layout(location = " << attrNdx << ") in " << attributeTypeStr.str() << " attr" << attrNdx << ";\n";
+
+ for (int chanNdx = 0; chanNdx < numChannels; chanNdx++)
+ {
+ attributeUse << "\toutData[(gl_InstanceIndex * " << numScalarsPerVertex * m_numVertices
+ << ") + (vertexNum * " << numScalarsPerVertex << " + " << numValues++ << ")] = attr" << attrNdx;
+
+ if (numChannels == 1)
+ attributeUse << ";\n";
+ else
+ attributeUse << "[" << chanNdx << "];\n";
+ }
+ }
+
+ attributeDeclaration << "layout(location = 3) in int vertexNum;\n";
+
+ attributeUse << "\n";
+
+ // SSBO element type matches the attributes' scalar base type.
+ const char *outType = "";
+ if (isUintFormat(m_inputFormat))
+ outType = "uint";
+ else if (isIntFormat(m_inputFormat))
+ outType = "int";
+ else
+ outType = "float";
+
+ vertexShaderSource <<
+ "#version 310 es\n"
+ "precision highp float;\n"
+ << attributeDeclaration.str() <<
+ "layout(set = 0, binding = 0, std430) buffer outBuffer\n"
+ "{\n"
+ "\t" << outType << " outData[" << (m_numVertices * numValues) * m_numInstances << "];\n"
+ "};\n\n"
+ "void main (void)\n"
+ "{\n"
+ << attributeUse.str() <<
+ "\tgl_Position = vec4(0.0, 0.0, 0.0, 1.0);\n"
+ "}\n";
+
+ programCollection.glslSources.add("vertex") << glu::VertexSource(vertexShaderSource.str());
+
+ // Trivial fragment shader; the framebuffer contents are not inspected.
+ fragmentShaderSource <<
+ "#version 310 es\n"
+ "precision highp float;\n"
+ "layout(location = 0) out vec4 fragColor;\n"
+ "void main (void)\n"
+ "{\n"
+ "\tfragColor = vec4(1.0);\n"
+ "}\n";
+
+ programCollection.glslSources.add("fragment") << glu::FragmentSource(fragmentShaderSource.str());
+}
+
+// DrawAccessTest
+
+// Forwards all parameters to the VertexAccessTest base.
+DrawAccessTest::DrawAccessTest (tcu::TestContext& testContext,
+ const std::string& name,
+ const std::string& description,
+ VkFormat inputFormat,
+ deUint32 numVertexValues,
+ deUint32 numInstanceValues,
+ deUint32 numVertices,
+ deUint32 numInstances)
+
+ : VertexAccessTest (testContext, name, description, inputFormat, numVertexValues, numInstanceValues, numVertices, numInstances)
+{
+}
+
+// Creates a device with robustBufferAccess enabled and wraps it in a
+// non-indexed-draw instance.
+TestInstance* DrawAccessTest::createInstance (Context& context) const
+{
+ Move<VkDevice> device = createRobustBufferAccessDevice(context);
+
+ return new DrawAccessInstance(context,
+ device,
+ m_inputFormat,
+ m_numVertexValues,
+ m_numInstanceValues,
+ m_numVertices,
+ m_numInstances);
+}
+
+// DrawIndexedAccessTest
+
+// Index patterns for the three IndexConfig variants. The tests draw with
+// fewer in-bounds vertices than index 100, so 100+ are out of bounds.
+const deUint32 lastIndexOutOfBounds[] =
+{
+ 0, 1, 2, 3, 4, 100, // Indices of 100 and above are out of bounds
+};
+const deUint32 indicesOutOfBounds[] =
+{
+ 0, 100, 2, 101, 3, 102, // Indices of 100 and above are out of bounds
+};
+const deUint32 triangleOutOfBounds[] =
+{
+ 100, 101, 102, 3, 4, 5, // Indices of 100 and above are out of bounds
+};
+
+// Ordered to match the IndexConfig enum values.
+const std::vector<deUint32> DrawIndexedAccessTest::s_indexConfigs[INDEX_CONFIG_COUNT] =
+{
+ std::vector<deUint32>(lastIndexOutOfBounds, lastIndexOutOfBounds + DE_LENGTH_OF_ARRAY(lastIndexOutOfBounds)),
+ std::vector<deUint32>(indicesOutOfBounds, indicesOutOfBounds + DE_LENGTH_OF_ARRAY(indicesOutOfBounds)),
+ std::vector<deUint32>(triangleOutOfBounds, triangleOutOfBounds + DE_LENGTH_OF_ARRAY(triangleOutOfBounds)),
+};
+
+// Derives the buffer sizes from the chosen index configuration: two scalars
+// per channel per index for the vertex-rate buffer, one instance.
+DrawIndexedAccessTest::DrawIndexedAccessTest (tcu::TestContext& testContext,
+ const std::string& name,
+ const std::string& description,
+ VkFormat inputFormat,
+ IndexConfig indexConfig)
+
+ : VertexAccessTest (testContext,
+ name,
+ description,
+ inputFormat,
+ getNumUsedChannels(mapVkFormat(inputFormat).order) * (deUint32)s_indexConfigs[indexConfig].size() * 2, // numVertexValues
+ getNumUsedChannels(mapVkFormat(inputFormat).order), // numInstanceValues
+ (deUint32)s_indexConfigs[indexConfig].size(), // numVertices
+ 1) // numInstances
+ , m_indexConfig (indexConfig)
+{
+}
+
+// Creates a device with robustBufferAccess enabled and wraps it in an
+// indexed-draw instance using the configured index list.
+TestInstance* DrawIndexedAccessTest::createInstance (Context& context) const
+{
+ Move<VkDevice> device = createRobustBufferAccessDevice(context);
+
+ return new DrawIndexedAccessInstance(context,
+ device,
+ m_inputFormat,
+ m_numVertexValues,
+ m_numInstanceValues,
+ m_numVertices,
+ m_numInstances,
+ s_indexConfigs[m_indexConfig]);
+}
+
+// VertexAccessInstance
+
+// Creates every Vulkan object the test needs: vertex-rate, instance-rate and
+// vertex-number vertex buffers, an optional index buffer, the result SSBO,
+// a descriptor set pointing at the SSBO, a fence, and the shared graphics
+// environment that records the draw command buffer.
+VertexAccessInstance::VertexAccessInstance (Context& context,
+ Move<VkDevice> device,
+ VkFormat inputFormat,
+ deUint32 numVertexValues,
+ deUint32 numInstanceValues,
+ deUint32 numVertices,
+ deUint32 numInstances,
+ const std::vector<deUint32> indices)
+
+ : vkt::TestInstance (context)
+ , m_device (device)
+ , m_inputFormat (inputFormat)
+ , m_numVertexValues (numVertexValues)
+ , m_numInstanceValues (numInstanceValues)
+ , m_numVertices (numVertices)
+ , m_numInstances (numInstances)
+{
+ const DeviceInterface& vk = context.getDeviceInterface();
+ const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
+ SimpleAllocator memAlloc (vk, *m_device, getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()));
+ const deUint32 formatSizeInBytes = tcu::getPixelSize(mapVkFormat(m_inputFormat));
+
+ // Check storage support
+ if (!context.getDeviceFeatures().vertexPipelineStoresAndAtomics)
+ {
+ TCU_THROW(NotSupportedError, "Stores not supported in vertex stage");
+ }
+
+ // Locations 0/1 share binding 0 (vertex rate), location 2 is instance rate,
+ // location 3 carries the vertex number used to address the result SSBO.
+ const VkVertexInputAttributeDescription attributes[] =
+ {
+ // input rate: vertex
+ {
+ 0u, // deUint32 location;
+ 0u, // deUint32 binding;
+ m_inputFormat, // VkFormat format;
+ 0u, // deUint32 offset;
+ },
+ {
+ 1u, // deUint32 location;
+ 0u, // deUint32 binding;
+ m_inputFormat, // VkFormat format;
+ formatSizeInBytes, // deUint32 offset;
+ },
+
+ // input rate: instance
+ {
+ 2u, // deUint32 location;
+ 1u, // deUint32 binding;
+ m_inputFormat, // VkFormat format;
+ 0u, // deUint32 offset;
+ },
+
+ // Attribute for vertex number
+ {
+ 3u, // deUint32 location;
+ 2u, // deUint32 binding;
+ VK_FORMAT_R32_SINT, // VkFormat format;
+ 0, // deUint32 offset;
+ },
+ };
+
+ const VkVertexInputBindingDescription bindings[] =
+ {
+ {
+ 0u, // deUint32 binding;
+ formatSizeInBytes * 2, // deUint32 stride;
+ VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate inputRate;
+ },
+ {
+ 1u, // deUint32 binding;
+ formatSizeInBytes, // deUint32 stride;
+ VK_VERTEX_INPUT_RATE_INSTANCE // VkVertexInputRate inputRate;
+ },
+ {
+ 2u, // deUint32 binding;
+ sizeof(deInt32), // deUint32 stride;
+ VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate inputRate;
+ },
+ };
+
+ m_vertexInputBindings = std::vector<VkVertexInputBindingDescription>(bindings, bindings + DE_LENGTH_OF_ARRAY(bindings));
+ m_vertexInputAttributes = std::vector<VkVertexInputAttributeDescription>(attributes, attributes + DE_LENGTH_OF_ARRAY(attributes));
+
+ // Create vertex buffer for vertex input rate
+ {
+ VkMemoryRequirements bufferMemoryReqs;
+
+ m_vertexRateBufferSize = getBufferSizeInBytes(m_numVertexValues, m_inputFormat); // All formats used in this test suite are 32-bit based.
+
+ const VkBufferCreateInfo vertexRateBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferCreateFlags flags;
+ m_vertexRateBufferSize, // VkDeviceSize size;
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 1u, // deUint32 queueFamilyIndexCount;
+ &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_vertexRateBuffer = createBuffer(vk, *m_device, &vertexRateBufferParams);
+ bufferMemoryReqs = getBufferMemoryRequirements(vk, *m_device, *m_vertexRateBuffer);
+ m_vertexRateBufferAllocSize = bufferMemoryReqs.size;
+ m_vertexRateBufferAlloc = memAlloc.allocate(bufferMemoryReqs, MemoryRequirement::HostVisible);
+
+ VK_CHECK(vk.bindBufferMemory(*m_device, *m_vertexRateBuffer, m_vertexRateBufferAlloc->getMemory(), m_vertexRateBufferAlloc->getOffset()));
+ // NOTE(review): the whole allocation (which may exceed the buffer size) is
+ // filled with test values, so out-of-bounds-but-within-allocation reads can
+ // be recognized by verifyResult().
+ populateBufferWithTestValues(m_vertexRateBufferAlloc->getHostPtr(), (deUint32)m_vertexRateBufferAllocSize, m_inputFormat);
+ flushMappedMemoryRange(vk, *m_device, m_vertexRateBufferAlloc->getMemory(), m_vertexRateBufferAlloc->getOffset(), VK_WHOLE_SIZE);
+ }
+
+ // Create vertex buffer for instance input rate
+ {
+ VkMemoryRequirements bufferMemoryReqs;
+
+ m_instanceRateBufferSize = getBufferSizeInBytes(m_numInstanceValues, m_inputFormat); // All formats used in this test suite are 32-bit based.
+
+ const VkBufferCreateInfo instanceRateBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferCreateFlags flags;
+ m_instanceRateBufferSize, // VkDeviceSize size;
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 1u, // deUint32 queueFamilyIndexCount;
+ &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_instanceRateBuffer = createBuffer(vk, *m_device, &instanceRateBufferParams);
+ bufferMemoryReqs = getBufferMemoryRequirements(vk, *m_device, *m_instanceRateBuffer);
+ m_instanceRateBufferAllocSize = bufferMemoryReqs.size;
+ m_instanceRateBufferAlloc = memAlloc.allocate(bufferMemoryReqs, MemoryRequirement::HostVisible);
+
+ VK_CHECK(vk.bindBufferMemory(*m_device, *m_instanceRateBuffer, m_instanceRateBufferAlloc->getMemory(), m_instanceRateBufferAlloc->getOffset()));
+ populateBufferWithTestValues(m_instanceRateBufferAlloc->getHostPtr(), (deUint32)m_instanceRateBufferAllocSize, m_inputFormat);
+ flushMappedMemoryRange(vk, *m_device, m_instanceRateBufferAlloc->getMemory(), m_instanceRateBufferAlloc->getOffset(), VK_WHOLE_SIZE);
+ }
+
+ // Create vertex buffer that stores the vertex number (from 0 to m_numVertices - 1)
+ {
+ m_vertexNumBufferSize = 128 * sizeof(deInt32); // Allocate enough device memory for all indices (0 to 127).
+
+ const VkBufferCreateInfo vertexNumBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferCreateFlags flags;
+ m_vertexNumBufferSize, // VkDeviceSize size;
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 1u, // deUint32 queueFamilyIndexCount;
+ &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_vertexNumBuffer = createBuffer(vk, *m_device, &vertexNumBufferParams);
+ m_vertexNumBufferAlloc = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_device, *m_vertexNumBuffer), MemoryRequirement::HostVisible);
+
+ // Contents are written later, in iterate(), via initVertexIds().
+ VK_CHECK(vk.bindBufferMemory(*m_device, *m_vertexNumBuffer, m_vertexNumBufferAlloc->getMemory(), m_vertexNumBufferAlloc->getOffset()));
+ }
+
+ // Create index buffer if required
+ if (!indices.empty())
+ {
+ m_indexBufferSize = sizeof(deUint32) * indices.size();
+
+ const VkBufferCreateInfo indexBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferCreateFlags flags;
+ m_indexBufferSize, // VkDeviceSize size;
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 1u, // deUint32 queueFamilyIndexCount;
+ &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_indexBuffer = createBuffer(vk, *m_device, &indexBufferParams);
+ m_indexBufferAlloc = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_device, *m_indexBuffer), MemoryRequirement::HostVisible);
+
+ VK_CHECK(vk.bindBufferMemory(*m_device, *m_indexBuffer, m_indexBufferAlloc->getMemory(), m_indexBufferAlloc->getOffset()));
+ deMemcpy(m_indexBufferAlloc->getHostPtr(), indices.data(), (size_t)m_indexBufferSize);
+ flushMappedMemoryRange(vk, *m_device, m_indexBufferAlloc->getMemory(), m_indexBufferAlloc->getOffset(), VK_WHOLE_SIZE);
+ }
+
+ // Create result ssbo
+ {
+ const int numChannels = getNumUsedChannels(mapVkFormat(m_inputFormat).order);
+
+ m_outBufferSize = getBufferSizeInBytes(m_numVertices * m_numInstances * numChannels * 3, VK_FORMAT_R32_UINT);
+
+ const VkBufferCreateInfo outBufferParams =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkBufferCreateFlags flags;
+ m_outBufferSize, // VkDeviceSize size;
+ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 1u, // deUint32 queueFamilyIndexCount;
+ &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
+ };
+
+ m_outBuffer = createBuffer(vk, *m_device, &outBufferParams);
+ m_outBufferAlloc = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_device, *m_outBuffer), MemoryRequirement::HostVisible);
+
+ VK_CHECK(vk.bindBufferMemory(*m_device, *m_outBuffer, m_outBufferAlloc->getMemory(), m_outBufferAlloc->getOffset()));
+ // Prefill with 0xFF so values the shader never wrote are easy to spot.
+ deMemset(m_outBufferAlloc->getHostPtr(), 0xFF, (size_t)m_outBufferSize);
+ flushMappedMemoryRange(vk, *m_device, m_outBufferAlloc->getMemory(), m_outBufferAlloc->getOffset(), VK_WHOLE_SIZE);
+ }
+
+ // Create descriptor set data
+ {
+ DescriptorPoolBuilder descriptorPoolBuilder;
+ descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u);
+ m_descriptorPool = descriptorPoolBuilder.build(vk, *m_device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+
+ DescriptorSetLayoutBuilder setLayoutBuilder;
+ setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_VERTEX_BIT);
+ m_descriptorSetLayout = setLayoutBuilder.build(vk, *m_device);
+
+ const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ *m_descriptorPool, // VkDescriptorPool desciptorPool;
+ 1u, // deUint32 setLayoutCount;
+ &m_descriptorSetLayout.get() // const VkDescriptorSetLayout* pSetLayouts;
+ };
+
+ m_descriptorSet = allocateDescriptorSet(vk, *m_device, &descriptorSetAllocateInfo);
+
+ const VkDescriptorBufferInfo outBufferDescriptorInfo = makeDescriptorBufferInfo(*m_outBuffer, 0ull, VK_WHOLE_SIZE);
+
+ DescriptorSetUpdateBuilder setUpdateBuilder;
+ setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &outBufferDescriptorInfo);
+ setUpdateBuilder.update(vk, *m_device);
+ }
+
+ // Create fence
+ {
+ const VkFenceCreateInfo fenceParams =
+ {
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u // VkFenceCreateFlags flags;
+ };
+
+ m_fence = createFence(vk, *m_device, &fenceParams);
+ }
+
+ // Get queue
+ vk.getDeviceQueue(*m_device, queueFamilyIndex, 0, &m_queue);
+
+ // Setup graphics test environment
+ {
+ GraphicsEnvironment::DrawConfig drawConfig;
+
+ drawConfig.vertexBuffers.push_back(*m_vertexRateBuffer);
+ drawConfig.vertexBuffers.push_back(*m_instanceRateBuffer);
+ drawConfig.vertexBuffers.push_back(*m_vertexNumBuffer);
+
+ drawConfig.vertexCount = m_numVertices;
+ drawConfig.instanceCount = m_numInstances;
+ // For non-indexed tests m_indexBuffer is null and m_indexBufferSize is 0,
+ // so indexCount ends up 0 as well.
+ drawConfig.indexBuffer = *m_indexBuffer;
+ drawConfig.indexCount = (deUint32)(m_indexBufferSize / sizeof(deUint32));
+
+ m_graphicsTestEnvironment = de::MovePtr<GraphicsEnvironment>(new GraphicsEnvironment(m_context,
+ *m_device,
+ *m_descriptorSetLayout,
+ *m_descriptorSet,
+ GraphicsEnvironment::VertexBindings(bindings, bindings + DE_LENGTH_OF_ARRAY(bindings)),
+ GraphicsEnvironment::VertexAttributes(attributes, attributes + DE_LENGTH_OF_ARRAY(attributes)),
+ drawConfig));
+ }
+}
+
+// Fills the vertex-number buffer, submits the pre-recorded draw, waits for
+// completion, makes the result SSBO host-visible and delegates to verifyResult().
+tcu::TestStatus VertexAccessInstance::iterate (void)
+{
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const vk::VkCommandBuffer cmdBuffer = m_graphicsTestEnvironment->getCommandBuffer();
+
+ // Initialize vertex ids
+ {
+ deUint32 *bufferPtr = reinterpret_cast<deUint32*>(m_vertexNumBufferAlloc->getHostPtr());
+ deMemset(bufferPtr, 0, (size_t)m_vertexNumBufferSize);
+
+ initVertexIds(bufferPtr, (size_t)(m_vertexNumBufferSize / sizeof(deUint32)));
+
+ flushMappedMemoryRange(vk, *m_device, m_vertexNumBufferAlloc->getMemory(), m_vertexNumBufferAlloc->getOffset(), VK_WHOLE_SIZE);
+ }
+
+ // Submit command buffer
+ {
+ const VkSubmitInfo submitInfo =
+ {
+ VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // deUint32 waitSemaphoreCount;
+ DE_NULL, // const VkSemaphore* pWaitSemaphores;
+ DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
+ 1u, // deUint32 commandBufferCount;
+ &cmdBuffer, // const VkCommandBuffer* pCommandBuffers;
+ 0u, // deUint32 signalSemaphoreCount;
+ DE_NULL // const VkSemaphore* pSignalSemaphores;
+ };
+
+ VK_CHECK(vk.resetFences(*m_device, 1, &m_fence.get()));
+ VK_CHECK(vk.queueSubmit(m_queue, 1, &submitInfo, *m_fence));
+ VK_CHECK(vk.waitForFences(*m_device, 1, &m_fence.get(), true, ~(0ull) /* infinity */));
+ }
+
+ // Prepare result buffer for read
+ {
+ // Fixed: the range must start at the sub-allocation's offset within the
+ // VkDeviceMemory (it was 0, which is only correct for dedicated allocations)
+ // and VK_WHOLE_SIZE avoids nonCoherentAtomSize alignment issues. This matches
+ // the flushMappedMemoryRange() calls elsewhere in this file.
+ const VkMappedMemoryRange outBufferRange =
+ {
+ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ m_outBufferAlloc->getMemory(), // VkDeviceMemory mem;
+ m_outBufferAlloc->getOffset(), // VkDeviceSize offset;
+ VK_WHOLE_SIZE, // VkDeviceSize size;
+ };
+
+ VK_CHECK(vk.invalidateMappedMemoryRanges(*m_device, 1u, &outBufferRange));
+ }
+
+ if (verifyResult())
+ return tcu::TestStatus::pass("All values OK");
+ else
+ return tcu::TestStatus::fail("Invalid value(s) found");
+}
+
+// Checks every scalar the shader wrote to the SSBO. In-bounds fetches must
+// equal the source buffer value; out-of-bounds fetches must be a value from
+// within the (over-)allocated buffer, zero, or the robustBufferAccess
+// [0, 0, 0, x] vec4 pattern.
+bool VertexAccessInstance::verifyResult (void)
+{
+ std::ostringstream logMsg;
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ tcu::TestLog& log = m_context.getTestContext().getLog();
+ const int numChannels = getNumUsedChannels(mapVkFormat(m_inputFormat).order);
+ const deUint32 numScalarsPerVertex = numChannels * 3; // Use 3 identical attributes
+ void* outDataPtr = m_outBufferAlloc->getHostPtr();
+ const deUint32 outValueSize = sizeof(deUint32);
+ bool allOk = true;
+
+ const VkMappedMemoryRange outBufferRange =
+ {
+ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ m_outBufferAlloc->getMemory(), // VkDeviceMemory mem;
+ m_outBufferAlloc->getOffset(), // VkDeviceSize offset;
+ m_outBufferSize, // VkDeviceSize size;
+ };
+
+ VK_CHECK(vk.invalidateMappedMemoryRanges(*m_device, 1u, &outBufferRange));
+
+ for (deUint32 valueNdx = 0; valueNdx < m_outBufferSize / outValueSize; valueNdx++)
+ {
+ deUint32 numInBufferValues;
+ void* inBufferPtr;
+ VkDeviceSize inBufferAllocSize;
+ deUint32 inBufferValueIndex;
+ bool isOutOfBoundsAccess = false;
+ // Attribute 2 (the third group of numChannels scalars) is the instance-rate one.
+ const bool isInstanceRateValue = ((valueNdx / numChannels) % 3 == 2);
+ const deUint32* outValuePtr = (deUint32*)outDataPtr + valueNdx;
+
+ if (isInstanceRateValue)
+ {
+ const deUint32 elementIndex = valueNdx / (numScalarsPerVertex * m_numVertices); // instance id
+
+ numInBufferValues = m_numInstanceValues;
+ inBufferPtr = m_instanceRateBufferAlloc->getHostPtr();
+ inBufferAllocSize = m_instanceRateBufferAllocSize;
+ // Subtract 2*numChannels to skip the two vertex-rate attributes that precede
+ // attr2 inside each per-vertex scalar group.
+ inBufferValueIndex = (getIndex(elementIndex) * numChannels) + (valueNdx % numScalarsPerVertex) - (2 * numChannels);
+ }
+ else
+ {
+ const deUint32 vertexNdx = valueNdx / numScalarsPerVertex;
+ const deUint32 instanceNdx = vertexNdx / m_numVertices;
+ const deUint32 elementIndex = valueNdx / numScalarsPerVertex; // vertex id
+
+ numInBufferValues = m_numVertexValues;
+ inBufferPtr = m_vertexRateBufferAlloc->getHostPtr();
+ inBufferAllocSize = m_vertexRateBufferAllocSize;
+ // Binding 0 packs attr0 and attr1 with stride 2*numChannels scalars; the
+ // instanceNdx term rewinds elementIndex to a per-instance vertex id.
+ inBufferValueIndex = (getIndex(elementIndex) * (numChannels * 2)) + (valueNdx % numScalarsPerVertex) - instanceNdx * (m_numVertices * numChannels * 2);
+ }
+
+ isOutOfBoundsAccess = (inBufferValueIndex >= numInBufferValues);
+
+ const deInt32 distanceToOutOfBounds = (deInt32)outValueSize * ((deInt32)numInBufferValues - (deInt32)inBufferValueIndex);
+
+ // Implementations may treat a whole attribute as out of bounds if any of its
+ // bytes are; widen the check when the fetch is within 16 bytes of the end.
+ if (!isOutOfBoundsAccess && (distanceToOutOfBounds < 16))
+ isOutOfBoundsAccess = (((inBufferValueIndex / numChannels) + 1) * numChannels > numInBufferValues);
+
+ // Log value information
+ {
+ const deUint32 attributeIndex = (valueNdx % numScalarsPerVertex) / numChannels;
+
+ // Vertex separator
+ if (valueNdx && valueNdx % numScalarsPerVertex == 0)
+ logMsg << "\n";
+
+ logMsg << "\n" << valueNdx << ": Value ";
+
+ // Result index and value
+ if (m_inputFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+ logValue(logMsg, outValuePtr, VK_FORMAT_R32_SFLOAT, 4);
+ else
+ logValue(logMsg, outValuePtr, m_inputFormat, 4);
+
+ // Attribute name
+ logMsg << "\tfrom attr" << attributeIndex;
+ if (numChannels > 1)
+ logMsg << "[" << valueNdx % numChannels << "]";
+
+ // Input rate
+ if (attributeIndex == 2)
+ logMsg << "\tinstance rate";
+ else
+ logMsg << "\tvertex rate";
+ }
+
+ if (isOutOfBoundsAccess)
+ {
+ const bool isValidValue = isValueWithinVertexBufferOrZero(inBufferPtr, inBufferAllocSize, outValuePtr, inBufferValueIndex);
+
+ logMsg << "\t(out of bounds)";
+
+ if (!isValidValue)
+ {
+ // Check if we are satisfying the [0, 0, 0, x] pattern, where x may be either 0 or 1,
+ // or the maximum representable positive integer value (if the format is integer-based).
+
+ const bool canMatchVec4Pattern = ((valueNdx % numChannels == 3) || m_inputFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32);
+ bool matchesVec4Pattern = false;
+
+ if (canMatchVec4Pattern)
+ {
+ if (m_inputFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+ matchesVec4Pattern = verifyOutOfBoundsVec4(outValuePtr - 3, VK_FORMAT_R32G32B32_SFLOAT);
+ else
+ matchesVec4Pattern = verifyOutOfBoundsVec4(outValuePtr - 3, m_inputFormat);
+ }
+
+ if (!canMatchVec4Pattern || !matchesVec4Pattern)
+ {
+ logMsg << ", Failed: expected a value within the buffer range or 0";
+
+ if (canMatchVec4Pattern)
+ logMsg << ", or the [0, 0, 0, x] pattern";
+
+ allOk = false;
+ }
+ }
+ }
+ else if (!isExpectedValueFromVertexBuffer(inBufferPtr, inBufferValueIndex, m_inputFormat, outValuePtr))
+ {
+ logMsg << ", Failed: unexpected value";
+ allOk = false;
+ }
+ }
+ log << tcu::TestLog::Message << logMsg.str() << tcu::TestLog::EndMessage;
+
+ return allOk;
+}
+
+// Returns true if the fetched value equals zero or any value stored anywhere
+// in the source buffer allocation (robustBufferAccess permits both for
+// out-of-bounds reads). For the packed 10.10.10.2 format the normalized float
+// is re-encoded and compared against the matching channel of every packed word.
+bool VertexAccessInstance::isValueWithinVertexBufferOrZero(void* vertexBuffer, VkDeviceSize vertexBufferSize, const void* value, deUint32 valueIndex)
+{
+ if (m_inputFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+ {
+ const float normValue = *reinterpret_cast<const float*>(value);
+ const deUint32 scalarIndex = valueIndex % 4;
+ const bool isAlpha = (scalarIndex == 3);
+ deUint32 encodedValue;
+
+ // Alpha is 2 bits, the color channels are 10 bits each.
+ if (isAlpha)
+ encodedValue = deMin32(deUint32(normValue * 0x3u), 0x3u);
+ else
+ encodedValue = deMin32(deUint32(normValue * 0x3FFu), 0x3FFu);
+
+ if (encodedValue == 0)
+ return true;
+
+ for (deUint32 i = 0; i < vertexBufferSize / 4; i++)
+ {
+ const deUint32 packedValue = reinterpret_cast<deUint32*>(vertexBuffer)[i];
+ deUint32 unpackedValue;
+
+ if (scalarIndex < 3)
+ unpackedValue = (packedValue >> (10 * scalarIndex)) & 0x3FFu;
+ else
+ unpackedValue = (packedValue >> 30) & 0x3u;
+
+ if (unpackedValue == encodedValue)
+ return true;
+ }
+
+ return false;
+ }
+ else
+ {
+ // 32-bit scalar formats: byte-wise scan handled by the shared helper.
+ return isValueWithinBufferOrZero(vertexBuffer, vertexBufferSize, value, sizeof(deUint32));
+ }
+}
+
+// Compares an in-bounds fetched value against the source buffer element at
+// vertexIndex, interpreting both according to the vertex format. For the
+// packed format, vertexIndex addresses individual 10/2-bit channels (4 per
+// 32-bit word) and the channel is unpacked and normalized before comparison.
+bool VertexAccessInstance::isExpectedValueFromVertexBuffer (const void* vertexBuffer, deUint32 vertexIndex, VkFormat vertexFormat, const void* value)
+{
+ if (isUintFormat(vertexFormat))
+ {
+ const deUint32* bufferPtr = reinterpret_cast<const deUint32*>(vertexBuffer);
+
+ return bufferPtr[vertexIndex] == *reinterpret_cast<const deUint32 *>(value);
+ }
+ else if (isIntFormat(vertexFormat))
+ {
+ const deInt32* bufferPtr = reinterpret_cast<const deInt32*>(vertexBuffer);
+
+ return bufferPtr[vertexIndex] == *reinterpret_cast<const deInt32 *>(value);
+ }
+ else if (isFloatFormat(vertexFormat))
+ {
+ const float* bufferPtr = reinterpret_cast<const float*>(vertexBuffer);
+
+ // Tolerance-based comparison (areEqual), not bitwise equality.
+ return areEqual(bufferPtr[vertexIndex], *reinterpret_cast<const float *>(value));
+ }
+ else if (vertexFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+ {
+ const deUint32* bufferPtr = reinterpret_cast<const deUint32*>(vertexBuffer);
+ const deUint32 packedValue = bufferPtr[vertexIndex / 4];
+ const deUint32 scalarIndex = vertexIndex % 4;
+ float normValue;
+
+ if (scalarIndex < 3)
+ normValue = float((packedValue >> (10 * scalarIndex)) & 0x3FFu) / 0x3FFu;
+ else
+ normValue = float(packedValue >> 30) / 0x3u;
+
+ return areEqual(normValue, *reinterpret_cast<const float *>(value));
+ }
+
+ // Unsupported format: fail loudly in debug builds.
+ DE_ASSERT(false);
+ return false;
+}
+
+// Converts a scalar count into a byte size for the formats this test uses.
+// Packed A2B10G10R10 carries four scalars per 32-bit word (one byte per
+// scalar); every other supported format is built from 32-bit scalars.
+VkDeviceSize VertexAccessInstance::getBufferSizeInBytes (deUint32 numScalars, VkFormat format)
+{
+ if (format == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
+ {
+ DE_ASSERT(numScalars % 4 == 0);
+ return numScalars;
+ }
+
+ if (isUintFormat(format) || isIntFormat(format) || isFloatFormat(format))
+ return numScalars * 4;
+
+ // Any other format is a programming error in the test suite.
+ DE_ASSERT(false);
+ return 0;
+}
+
+// DrawAccessInstance
+
+// Non-indexed variant: passes an empty index list so the base class skips
+// index buffer creation.
+DrawAccessInstance::DrawAccessInstance (Context& context,
+ Move<VkDevice> device,
+ VkFormat inputFormat,
+ deUint32 numVertexValues,
+ deUint32 numInstanceValues,
+ deUint32 numVertices,
+ deUint32 numInstances)
+ : VertexAccessInstance (context,
+ device,
+ inputFormat,
+ numVertexValues,
+ numInstanceValues,
+ numVertices,
+ numInstances,
+ std::vector<deUint32>()) // No index buffer
+{
+}
+
+// Plain (non-indexed) draws fetch element i for vertex i, so the vertex-number
+// buffer is just the identity mapping 0, 1, 2, ...
+void DrawAccessInstance::initVertexIds (deUint32 *indicesPtr, size_t indexCount)
+{
+	deUint32 nextId = 0;
+
+	for (size_t slot = 0; slot < indexCount; slot++)
+		indicesPtr[slot] = nextId++;
+}
+
+// Without an index buffer, vertex number n fetches buffer element n.
+deUint32 DrawAccessInstance::getIndex (deUint32 vertexNum) const
+{
+ return vertexNum;
+}
+
+// DrawIndexedAccessInstance
+
+// Indexed variant: forwards the index list to the base class (which creates
+// the index buffer) and keeps a copy for getIndex()/initVertexIds().
+DrawIndexedAccessInstance::DrawIndexedAccessInstance (Context& context,
+ Move<VkDevice> device,
+ VkFormat inputFormat,
+ deUint32 numVertexValues,
+ deUint32 numInstanceValues,
+ deUint32 numVertices,
+ deUint32 numInstances,
+ const std::vector<deUint32>& indices)
+ : VertexAccessInstance (context,
+ device,
+ inputFormat,
+ numVertexValues,
+ numInstanceValues,
+ numVertices,
+ numInstances,
+ indices)
+ , m_indices (indices)
+{
+}
+
+// Inverse mapping: vertex-number buffer slot m_indices[i] is given value i, so
+// when the GPU fetches index m_indices[i] it reads back the draw position i.
+// Out-of-bounds indices (e.g. 100+) still land inside the 128-entry buffer.
+void DrawIndexedAccessInstance::initVertexIds (deUint32 *indicesPtr, size_t indexCount)
+{
+ // indexCount is only read by the assert below; DE_UNREF silences the
+ // unused-parameter warning in release builds where DE_ASSERT compiles out.
+ DE_UNREF(indexCount);
+
+ for (deUint32 i = 0; i < m_indices.size(); i++)
+ {
+ DE_ASSERT(m_indices[i] < indexCount);
+
+ indicesPtr[m_indices[i]] = i;
+ }
+}
+
+// Vertex number n (as written by initVertexIds) fetched buffer element m_indices[n].
+deUint32 DrawIndexedAccessInstance::getIndex (deUint32 vertexNum) const
+{
+ DE_ASSERT(vertexNum < (deUint32)m_indices.size());
+
+ return m_indices[vertexNum];
+}
+
+// Test node creation functions
+
+// Builds the "draw" group for one vertex format: each config sizes the source
+// buffers smaller than what the draw will fetch, forcing out-of-bounds reads.
+static tcu::TestCaseGroup* createDrawTests (tcu::TestContext& testCtx, VkFormat format)
+{
+ struct TestConfig
+ {
+ std::string name;
+ std::string description;
+ VkFormat inputFormat;
+ deUint32 numVertexValues;
+ deUint32 numInstanceValues;
+ deUint32 numVertices;
+ deUint32 numInstances;
+ };
+
+ const deUint32 numChannels = getNumUsedChannels(mapVkFormat(format).order);
+
+ const TestConfig testConfigs[] =
+ {
+ // name description format numVertexValues numInstanceValues numVertices numInstances
+ { "vertex_out_of_bounds", "Create data for 6 vertices, draw 9 vertices", format, numChannels * 2 * 6, numChannels, 9, 1 },
+ { "vertex_incomplete", "Create data for half a vertex, draw 3 vertices", format, numChannels, numChannels, 3, 1 },
+ { "instance_out_of_bounds", "Create data for 1 instance, draw 3 instances", format, numChannels * 2 * 9, numChannels, 3, 3 },
+ };
+
+ de::MovePtr<tcu::TestCaseGroup> drawTests (new tcu::TestCaseGroup(testCtx, "draw", ""));
+
+ for (int i = 0; i < DE_LENGTH_OF_ARRAY(testConfigs); i++)
+ {
+ const TestConfig &config = testConfigs[i];
+
+ drawTests->addChild(new DrawAccessTest(testCtx, config.name, config.description, config.inputFormat,
+ config.numVertexValues, config.numInstanceValues,
+ config.numVertices, config.numInstances));
+ }
+
+ return drawTests.release();
+}
+
+// Builds the "draw_indexed" test group for one vertex input format: indexed
+// draws whose index buffers reference vertices past the end of the attribute
+// data, in the patterns named by DrawIndexedAccessTest::IndexConfig.
+static tcu::TestCaseGroup* createDrawIndexedTests (tcu::TestContext& testCtx, VkFormat format)
+{
+ struct TestConfig
+ {
+ std::string name;
+ std::string description;
+ VkFormat inputFormat;
+ DrawIndexedAccessTest::IndexConfig indexConfig;
+ };
+
+ const TestConfig testConfigs[] =
+ {
+ // name description format indexConfig
+ { "last_index_out_of_bounds", "Only last index is out of bounds", format, DrawIndexedAccessTest::INDEX_CONFIG_LAST_INDEX_OUT_OF_BOUNDS },
+ { "indices_out_of_bounds", "Random indices out of bounds", format, DrawIndexedAccessTest::INDEX_CONFIG_INDICES_OUT_OF_BOUNDS },
+ { "triangle_out_of_bounds", "First triangle is out of bounds", format, DrawIndexedAccessTest::INDEX_CONFIG_TRIANGLE_OUT_OF_BOUNDS },
+ };
+
+ de::MovePtr<tcu::TestCaseGroup> drawTests (new tcu::TestCaseGroup(testCtx, "draw_indexed", ""));
+
+ for (int i = 0; i < DE_LENGTH_OF_ARRAY(testConfigs); i++)
+ {
+ const TestConfig &config = testConfigs[i];
+
+ drawTests->addChild(new DrawIndexedAccessTest(testCtx, config.name, config.description, config.inputFormat, config.indexConfig));
+ }
+
+ return drawTests.release();
+}
+
+// Adds one sub-group per tested vertex input format to parentGroup, each
+// containing the "draw" and "draw_indexed" groups for that format.
+static void addVertexFormatTests (tcu::TestContext& testCtx, tcu::TestCaseGroup* parentGroup)
+{
+ const VkFormat vertexFormats[] =
+ {
+ VK_FORMAT_R32_UINT,
+ VK_FORMAT_R32_SINT,
+ VK_FORMAT_R32_SFLOAT,
+ VK_FORMAT_R32G32_UINT,
+ VK_FORMAT_R32G32_SINT,
+ VK_FORMAT_R32G32_SFLOAT,
+ VK_FORMAT_R32G32B32_UINT,
+ VK_FORMAT_R32G32B32_SINT,
+ VK_FORMAT_R32G32B32_SFLOAT,
+ VK_FORMAT_R32G32B32A32_UINT,
+ VK_FORMAT_R32G32B32A32_SINT,
+ VK_FORMAT_R32G32B32A32_SFLOAT,
+
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32
+ };
+
+ for (int i = 0; i < DE_LENGTH_OF_ARRAY(vertexFormats); i++)
+ {
+ // substr(10) strips the "VK_FORMAT_" prefix; the remainder is lowercased
+ // to form the group name (e.g. "r32g32_sfloat").
+ const std::string formatName = getFormatName(vertexFormats[i]);
+ de::MovePtr<tcu::TestCaseGroup> formatGroup (new tcu::TestCaseGroup(testCtx, de::toLower(formatName.substr(10)).c_str(), ""));
+
+ formatGroup->addChild(createDrawTests(testCtx, vertexFormats[i]));
+ formatGroup->addChild(createDrawIndexedTests(testCtx, vertexFormats[i]));
+
+ parentGroup->addChild(formatGroup.release());
+ }
+}
+
+// Entry point for this file: creates the root "vertex_access" group holding
+// all robust vertex buffer access tests. Caller takes ownership.
+tcu::TestCaseGroup* createVertexAccessTests (tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> vertexAccessTests (new tcu::TestCaseGroup(testCtx, "vertex_access", ""));
+
+ addVertexFormatTests(testCtx, vertexAccessTests.get());
+
+ return vertexAccessTests.release();
+}
+
+} // robustness
+} // vkt
--- /dev/null
+#ifndef _VKTROBUSTNESSVERTEXACCESSTESTS_HPP
+#define _VKTROBUSTNESSVERTEXACCESSTESTS_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ * Copyright (c) 2016 Imagination Technologies Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *	  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Robust Vertex Buffer Access Tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "vkDefs.hpp"
+#include "vktTestCase.hpp"
+
+namespace vkt
+{
+namespace robustness
+{
+
+// Factory for the "vertex_access" test group; caller takes ownership of the
+// returned group.
+tcu::TestCaseGroup* createVertexAccessTests (tcu::TestContext& testCtx);
+
+} // robustness
+} // vkt
+
+#endif // _VKTROBUSTNESSVERTEXACCESSTESTS_HPP
#include "vktFragmentOperationsTests.hpp"
#include "vktTextureTests.hpp"
#include "vktGeometryTests.hpp"
+#include "vktRobustnessTests.hpp"
#include <vector>
#include <sstream>
addChild(FragmentOperations::createTests(m_testCtx));
addChild(texture::createTests (m_testCtx));
addChild(geometry::createTests (m_testCtx));
+ addChild(robustness::createTests (m_testCtx));
}
} // vkt
dEQP-VK.geometry.emit.triangle_strip_emit_2_end_2
dEQP-VK.geometry.emit.triangle_strip_emit_3_end_2
dEQP-VK.geometry.emit.triangle_strip_emit_3_end_2_emit_3_end_0
+dEQP-VK.robustness.buffer_access.vertex.mat4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.vertex.mat4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.vertex.mat4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.vertex.vec4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.vertex.scalar_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_uint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.vertex.texel_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.fragment.mat4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.fragment.mat4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.fragment.mat4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.fragment.vec4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.fragment.scalar_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_uint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.fragment.texel_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.compute.mat4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.compute.mat4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.compute.mat4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.compute.vec4_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_uint.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_uniform_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_uniform_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_uniform_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_uniform_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_read.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_read.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_read.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_read.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_write.range_1_byte
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_write.range_3_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_write.range_4_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.r32_sfloat.oob_storage_write.range_32_bytes
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.compute.scalar_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_uint.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.r32g32b32a32_sfloat.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_uniform_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_read.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_1_texel
+dEQP-VK.robustness.buffer_access.compute.texel_copy.a2b10g10r10_unorm_pack32.oob_storage_write.range_3_texels
+dEQP-VK.robustness.buffer_access.compute.texel_copy.out_of_alloc.oob_uniform_read
+dEQP-VK.robustness.buffer_access.compute.texel_copy.out_of_alloc.oob_storage_read
+dEQP-VK.robustness.buffer_access.compute.texel_copy.out_of_alloc.oob_storage_write
+dEQP-VK.robustness.vertex_access.r32_uint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_uint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32_uint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_uint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_uint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_uint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32_sint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32_sfloat.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_uint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32_sfloat.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_uint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32_sfloat.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_uint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sint.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.r32g32b32a32_sfloat.draw_indexed.triangle_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw.vertex_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw.vertex_incomplete
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw.instance_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw_indexed.last_index_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw_indexed.indices_out_of_bounds
+dEQP-VK.robustness.vertex_access.a2b10g10r10_unorm_pack32.draw_indexed.triangle_out_of_bounds