From: Vikram Kushwaha
Date: Tue, 11 Jul 2017 19:31:50 +0000 (-0700)
Subject: Add tests for sparse buffers/images with device groups
X-Git-Tag: upstream/1.3.5~2565^2~144
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7c0af777b06f12a3f0bbf703f71850303726e3df;p=platform%2Fupstream%2FVK-GL-CTS.git

Add tests for sparse buffers/images with device groups

New tests:
dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.*
dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.*
dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.*
dEQP-VK.sparse_resources.buffer.ubo.device_group_*
dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_*
dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_*
dEQP-VK.sparse_resources.device_group_image_sparse_binding.*
dEQP-VK.sparse_resources.device_group_image_sparse_residency.*
dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.*
dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.*

Components: Vulkan

VK-GL-CTS issue: 110

Change-Id: Ifa59ac69871be5663437e18fc82b3b09a072678e
---

diff --git a/android/cts/master/vk-master.txt b/android/cts/master/vk-master.txt
index 37ec163..c4fb6c5 100755
--- a/android/cts/master/vk-master.txt
+++ b/android/cts/master/vk-master.txt
@@ -237021,35 +237021,70 @@ dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_16
 dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_17
 dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_20
 dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_24
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_10
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_12
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_16
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_17
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_20
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_24
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_10
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_12
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_16
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_17
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_20
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_24
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_10
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_12
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_16
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_17
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_20
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_24
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_10
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_12
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_16
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_17
 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_20
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_24 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_10 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_12 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_16 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_17 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_20 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_24 dEQP-VK.sparse_resources.buffer.ubo.sparse_binding dEQP-VK.sparse_resources.buffer.ubo.sparse_binding_aliased dEQP-VK.sparse_resources.buffer.ubo.sparse_residency dEQP-VK.sparse_resources.buffer.ubo.sparse_residency_aliased dEQP-VK.sparse_resources.buffer.ubo.sparse_residency_non_resident_strict +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_binding +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_binding_aliased +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency_aliased +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency_non_resident_strict dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_binding dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_binding_aliased dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_residency dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_residency_aliased +dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_binding +dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_binding_aliased +dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_residency +dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_residency_aliased dEQP-VK.sparse_resources.buffer.index_buffer.sparse_binding dEQP-VK.sparse_resources.buffer.index_buffer.sparse_binding_aliased dEQP-VK.sparse_resources.buffer.index_buffer.sparse_residency dEQP-VK.sparse_resources.buffer.index_buffer.sparse_residency_aliased +dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_binding +dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_binding_aliased +dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_residency +dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_residency_aliased dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_binding dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_binding_aliased dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_residency dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_residency_aliased +dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_binding +dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_binding_aliased +dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_residency +dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_residency_aliased dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.512_1_1 dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.1024_1_1 dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.11_1_1 @@ -237176,6 +237211,132 @@ dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba16ui.137_137_3 dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.256_256_6 dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.128_128_8 dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.137_137_3 
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.1024_128_1 
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.11_137_3 
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.137_137_3 dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.512_256_1 dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.1024_128_1 dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.11_137_1 @@ -237311,6 +237472,141 @@ 
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba16ui.11_137_3 dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.512_256_16 dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.1024_128_8 dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.11_137_3 
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.128_128_1 
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.512_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.512_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.512_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.512_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.512_256_16 
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.512_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.512_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.512_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.512_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.11_137_3 dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.512_256_1 dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.1024_128_1 dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.11_137_1 @@ -237401,6 +237697,96 @@ dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba16ui.11_137_3 dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.256_256_16 dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.1024_128_8 dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.512_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.1024_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.11_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.512_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.1024_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.11_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.512_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.1024_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.11_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.11_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.11_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.512_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.11_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.512_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.512_256_6 
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.512_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.256_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.128_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.137_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.256_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.128_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.137_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.256_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.128_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.137_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.256_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.137_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.256_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.137_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.256_256_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.137_137_1 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.256_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.128_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.137_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.256_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.128_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.137_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.256_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.128_128_8 
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.137_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.256_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.137_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.256_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.137_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.256_256_6 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.137_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.256_256_16 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.256_256_16 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.256_256_16 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.256_256_16 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.256_256_16 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.11_137_3 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.256_256_16 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.11_137_3 dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.512_256_1 dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.128_128_1 dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.503_137_1 @@ -237521,6 +237907,126 @@ dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.256_256_16 dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.128_128_8 dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.503_137_3 dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.503_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.11_37_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.128_128_1 
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.503_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.11_37_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.503_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.11_37_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.503_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.11_37_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.503_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.11_37_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.503_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.11_37_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.512_256_6 
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.11_11_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.11_11_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.11_11_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.11_11_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.11_11_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.11_11_1 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.11_11_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.11_11_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.11_11_3 
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.11_11_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.11_11_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.256_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.11_11_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.256_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.256_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.256_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.256_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.256_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.11_37_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.256_256_16 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.503_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.11_37_3 dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.512_256_1 dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.128_128_1 dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.503_137_1 diff --git 
a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.cpp
index cfd080a..bd4a517 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.cpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.cpp
@@ -22,6 +22,7 @@
 *//*--------------------------------------------------------------------*/

 #include "vktSparseResourcesBase.hpp"
+#include "vktSparseResourcesTestsUtil.hpp"
 #include "vkMemUtil.hpp"
 #include "vkRefUtil.hpp"
 #include "vkTypeUtil.hpp"
@@ -43,8 +44,6 @@ struct QueueFamilyQueuesCount
     deUint32 queueCount;
 };

-static const deUint32 NO_MATCH_FOUND = ~0u;
-
 deUint32 findMatchingQueueFamilyIndex (const std::vector<VkQueueFamilyProperties>& queueFamilyProperties,
                                        const VkQueueFlags                          queueFlags,
                                        const deUint32                              startIndex)
@@ -66,9 +65,41 @@ void SparseResourcesBaseInstance::createDeviceSupportingQueues(const QueueRequir
     typedef std::map<deUint32, QueueFamilyQueuesCount>  SelectedQueuesMap;
     typedef std::map<deUint32, std::vector<float> >     QueuePrioritiesMap;

-    const InstanceInterface& instance       = m_context.getInstanceInterface();
-    const VkPhysicalDevice   physicalDevice = m_context.getPhysicalDevice();
+    std::vector<VkPhysicalDeviceGroupPropertiesKHR> devGroupProperties;
+    std::vector<const char*>                        deviceExtensions;
+    VkDeviceGroupDeviceCreateInfoKHR                deviceGroupInfo =
+    {
+        VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR,  //stype
+        DE_NULL,                                                //pNext
+        0,                                                      //physicalDeviceCount
+        DE_NULL                                                 //physicalDevices
+    };
+    m_physicalDevices.push_back(m_context.getPhysicalDevice());
+
+    // If requested, create an instance with device groups
+    if (m_useDeviceGroups)
+    {
+        const std::vector<std::string> requiredExtensions(1, "VK_KHR_device_group_creation");
+        m_deviceGroupInstance = createInstanceWithExtensions(m_context.getPlatformInterface(), requiredExtensions);
+        devGroupProperties    = enumeratePhysicalDeviceGroupsKHR(m_context.getInstanceInterface(), m_deviceGroupInstance.get());
+        m_numPhysicalDevices  = devGroupProperties[m_deviceGroupIdx].physicalDeviceCount;
+
+        m_physicalDevices.clear();
+        for (size_t physDeviceID = 0; physDeviceID < m_numPhysicalDevices; physDeviceID++)
+        {
+            m_physicalDevices.push_back(devGroupProperties[m_deviceGroupIdx].physicalDevices[physDeviceID]);
+        }
+        if (m_numPhysicalDevices < 2)
+            TCU_THROW(NotSupportedError, "Sparse binding device group tests not supported with 1 physical device");
+
+        deviceGroupInfo.physicalDeviceCount = devGroupProperties[m_deviceGroupIdx].physicalDeviceCount;
+        deviceGroupInfo.pPhysicalDevices    = devGroupProperties[m_deviceGroupIdx].physicalDevices;
+
+        deviceExtensions.push_back("VK_KHR_device_group");
+    }
+    InstanceDriver instance (m_context.getPlatformInterface(), m_useDeviceGroups ? m_deviceGroupInstance.get() : m_context.getInstance());
+    const VkPhysicalDevice physicalDevice = getPhysicalDevice();

     deUint32 queueFamilyPropertiesCount = 0u;
     instance.getPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueFamilyPropertiesCount, DE_NULL);
@@ -142,16 +173,16 @@ void SparseResourcesBaseInstance::createDeviceSupportingQueues(const QueueRequir
     const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instance, physicalDevice);
     const VkDeviceCreateInfo       deviceInfo     =
     {
-        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,                       // VkStructureType                  sType;
-        DE_NULL,                                                    // const void*                      pNext;
-        (VkDeviceCreateFlags)0,                                     // VkDeviceCreateFlags              flags;
-        static_cast<deUint32>(queueInfos.size()),                   // uint32_t                         queueCreateInfoCount;
-        &queueInfos[0],                                             // const VkDeviceQueueCreateInfo*   pQueueCreateInfos;
-        0u,                                                         // uint32_t                         enabledLayerCount;
-        DE_NULL,                                                    // const char* const*               ppEnabledLayerNames;
-        0u,                                                         // uint32_t                         enabledExtensionCount;
-        DE_NULL,                                                    // const char* const*               ppEnabledExtensionNames;
-        &deviceFeatures,                                            // const VkPhysicalDeviceFeatures*  pEnabledFeatures;
+        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,                       // VkStructureType                  sType;
+        m_useDeviceGroups ? &deviceGroupInfo : DE_NULL,             // const void*                      pNext;
+        (VkDeviceCreateFlags)0,                                     // VkDeviceCreateFlags              flags;
+        static_cast<deUint32>(queueInfos.size()),                   // uint32_t                         queueCreateInfoCount;
+        &queueInfos[0],                                             // const VkDeviceQueueCreateInfo*   pQueueCreateInfos;
+        0u,                                                         // uint32_t                         enabledLayerCount;
+        DE_NULL,                                                    // const char* const*               ppEnabledLayerNames;
+        deUint32(deviceExtensions.size()),                          // uint32_t                         enabledExtensionCount;
+        deviceExtensions.size() ? &deviceExtensions[0] : DE_NULL,   // const char* const*               ppEnabledExtensionNames;
+        &deviceFeatures,                                            // const VkPhysicalDeviceFeatures*  pEnabledFeatures;
     };

     m_logicalDevice = createDevice(instance, physicalDevice, &deviceInfo);
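For context, the device-group path above reduces to a small amount of core-Vulkan plumbing. The following is a minimal standalone sketch of the same pattern outside the CTS wrappers; it is illustrative, not CTS code. It assumes `instance` was created with VK_KHR_device_group_creation enabled, `pfnEnumerateGroups` was fetched with vkGetInstanceProcAddr, and `queueInfo` is a VkDeviceQueueCreateInfo prepared elsewhere; `createGroupedDevice` is a hypothetical helper name.

    #include <vector>
    #include <vulkan/vulkan.h>

    // Sketch: create one logical device spanning every physical device in
    // device group `groupIdx` (hypothetical helper, error handling omitted).
    VkDevice createGroupedDevice (VkInstance instance,
                                  PFN_vkEnumeratePhysicalDeviceGroupsKHR pfnEnumerateGroups,
                                  const VkDeviceQueueCreateInfo& queueInfo,
                                  uint32_t groupIdx)
    {
        uint32_t groupCount = 0u;
        pfnEnumerateGroups(instance, &groupCount, NULL);
        if (groupCount == 0u)
            return VK_NULL_HANDLE;

        std::vector<VkPhysicalDeviceGroupPropertiesKHR> groups(groupCount);
        for (uint32_t i = 0; i < groupCount; ++i)
        {
            groups[i].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR;
            groups[i].pNext = NULL;
        }
        pfnEnumerateGroups(instance, &groupCount, &groups[0]);

        const VkPhysicalDeviceGroupPropertiesKHR& group = groups[groupIdx];

        // Chaining this struct into VkDeviceCreateInfo::pNext is what makes the
        // resulting device "grouped", so later allocations and binds can name a
        // per-physical-device index within the group.
        VkDeviceGroupDeviceCreateInfoKHR groupInfo;
        groupInfo.sType               = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR;
        groupInfo.pNext               = NULL;
        groupInfo.physicalDeviceCount = group.physicalDeviceCount;
        groupInfo.pPhysicalDevices    = group.physicalDevices;

        const char* const extensions[] = { "VK_KHR_device_group" };

        VkDeviceCreateInfo deviceInfo;
        deviceInfo.sType                   = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
        deviceInfo.pNext                   = &groupInfo;
        deviceInfo.flags                   = 0;
        deviceInfo.queueCreateInfoCount    = 1u;
        deviceInfo.pQueueCreateInfos       = &queueInfo;
        deviceInfo.enabledLayerCount       = 0u;
        deviceInfo.ppEnabledLayerNames     = NULL;
        deviceInfo.enabledExtensionCount   = 1u;
        deviceInfo.ppEnabledExtensionNames = extensions;
        deviceInfo.pEnabledFeatures        = NULL;

        VkDevice device = VK_NULL_HANDLE;
        // Any member of the group may be passed as the "parent" physical device.
        vkCreateDevice(group.physicalDevices[0], &deviceInfo, NULL, &device);
        return device;
    }

The CTS code above does the same through its InstanceDriver/createDevice wrappers, and additionally rejects groups with fewer than two physical devices, since cross-device sparse binding is the point of these tests.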
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.hpp
index 15385b4..a82eeb5 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.hpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.hpp
@@ -28,6 +28,7 @@
 #include "vkRef.hpp"
 #include "vkPlatform.hpp"
 #include "deUniquePtr.hpp"
+#include "tcuCommandLine.hpp"
 #include <map>
 #include <vector>

@@ -58,18 +59,33 @@ struct QueueRequirements
 class SparseResourcesBaseInstance : public TestInstance
 {
 public:
-    SparseResourcesBaseInstance (Context &context) : TestInstance(context) {}
+    SparseResourcesBaseInstance (Context &context, bool useDeviceGroups = false)
+        : TestInstance         (context)
+        , m_numPhysicalDevices (1)
+        , m_useDeviceGroups    (useDeviceGroups)
+    {
+        const tcu::CommandLine& cmdLine = context.getTestContext().getCommandLine();
+        m_deviceGroupIdx = cmdLine.getVKDeviceGroupId() - 1;
+    }
+    bool usingDeviceGroups() { return m_useDeviceGroups; }

 protected:
     typedef std::vector<QueueRequirements> QueueRequirementsVec;

+    deUint32 m_numPhysicalDevices;
+
     void createDeviceSupportingQueues (const QueueRequirementsVec& queueRequirements);
     const Queue& getQueue (const vk::VkQueueFlags queueFlags, const deUint32 queueIndex) const;
-    const vk::DeviceInterface& getDeviceInterface (void) const { return *m_deviceDriver; }
-    vk::VkDevice               getDevice          (void) const { return *m_logicalDevice; }
-    vk::Allocator&             getAllocator       (void) { return *m_allocator; }
+    const vk::DeviceInterface& getDeviceInterface (void) const { return *m_deviceDriver; }
+    vk::VkDevice               getDevice          (void) const { return *m_logicalDevice; }
+    vk::Allocator&             getAllocator       (void) { return *m_allocator; }
+    vk::VkPhysicalDevice       getPhysicalDevice  (deUint32 i = 0) { return m_physicalDevices[i];}

 private:
+    bool                                m_useDeviceGroups;
+    deUint32                            m_deviceGroupIdx;
+    vk::Move<vk::VkInstance>            m_deviceGroupInstance;
+    std::vector<vk::VkPhysicalDevice>   m_physicalDevices;
     std::map<vk::VkQueueFlags, std::vector<Queue> > m_queues;
     de::MovePtr<vk::DeviceDriver>       m_deviceDriver;
     vk::Move<vk::VkDevice>              m_logicalDevice;
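With device-group setup owned by the base class, a derived test instance only opts in via the constructor flag and then iterates over the group members, as the aliasing test below does. A condensed sketch of that shape (class name is illustrative; QueueRequirements is assumed to expose the (flags, count) constructor used by the existing sparse tests):

    class DeviceGroupSparseTestInstance : public SparseResourcesBaseInstance
    {
    public:
        DeviceGroupSparseTestInstance (Context& context)
            : SparseResourcesBaseInstance(context, true /* useDeviceGroups */) {}

        tcu::TestStatus iterate (void)
        {
            // Request sparse-binding and compute queues; for device groups this
            // also creates the grouped logical device (and throws NotSupported
            // when the selected group has fewer than two physical devices).
            QueueRequirementsVec queueRequirements;
            queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
            queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
            createDeviceSupportingQueues(queueRequirements);

            // Pair each resource-owning device with its neighbour's memory.
            for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; ++physDevID)
            {
                const deUint32 resourceDeviceID = physDevID;
                const deUint32 memoryDeviceID   = (physDevID + 1) % m_numPhysicalDevices;
                // ... create the sparse resource, bind memory across the pair, verify ...
                DE_UNREF(resourceDeviceID);
                DE_UNREF(memoryDeviceID);
            }
            return tcu::TestStatus::pass("Passed");
        }
    };

The group itself is chosen at run time: the base constructor converts the 1-based id returned by getVKDeviceGroupId() into the 0-based m_deviceGroupIdx, so a particular group can be targeted with the matching tcuCommandLine option (presumably --deqp-vk-device-group-id, introduced alongside this change).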
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferMemoryAliasing.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferMemoryAliasing.cpp
index 38d6791..2855eb9 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferMemoryAliasing.cpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferMemoryAliasing.cpp
@@ -83,7 +83,8 @@ public:
                     const std::string&      name,
                     const std::string&      description,
                     const deUint32          bufferSize,
-                    const glu::GLSLVersion  glslVersion);
+                    const glu::GLSLVersion  glslVersion,
+                    const bool              useDeviceGroups);
 
     void            initPrograms    (SourceCollections& sourceCollections) const;
     TestInstance*   createInstance  (Context&           context) const;
@@ -91,16 +92,19 @@ public:
 private:
     const deUint32          m_bufferSizeInBytes;
     const glu::GLSLVersion  m_glslVersion;
+    const bool              m_useDeviceGroups;
 };
 
 BufferSparseMemoryAliasingCase::BufferSparseMemoryAliasingCase (tcu::TestContext&       testCtx,
                                                                 const std::string&      name,
                                                                 const std::string&      description,
                                                                 const deUint32          bufferSize,
-                                                                const glu::GLSLVersion  glslVersion)
+                                                                const glu::GLSLVersion  glslVersion,
+                                                                const bool              useDeviceGroups)
     : TestCase              (testCtx, name, description)
     , m_bufferSizeInBytes   (bufferSize)
     , m_glslVersion         (glslVersion)
+    , m_useDeviceGroups     (useDeviceGroups)
 {
 }
 
@@ -135,32 +139,29 @@ class BufferSparseMemoryAliasingInstance : public SparseResourcesBaseInstance
 {
 public:
     BufferSparseMemoryAliasingInstance  (Context&       context,
-                                         const deUint32 bufferSize);
+                                         const deUint32 bufferSize,
+                                         const bool     useDeviceGroups);
 
     tcu::TestStatus iterate             (void);
 
 private:
     const deUint32  m_bufferSizeInBytes;
+    const bool      m_useDeviceGroups;
+
 };
 
-BufferSparseMemoryAliasingInstance::BufferSparseMemoryAliasingInstance (Context&        context,
-                                                                        const deUint32  bufferSize)
-    : SparseResourcesBaseInstance   (context)
+BufferSparseMemoryAliasingInstance::BufferSparseMemoryAliasingInstance (Context&        context,
+                                                                        const deUint32  bufferSize,
+                                                                        const bool      useDeviceGroups)
+    : SparseResourcesBaseInstance   (context, useDeviceGroups)
     , m_bufferSizeInBytes           (bufferSize)
+    , m_useDeviceGroups             (useDeviceGroups)
 {
 }
 
 tcu::TestStatus BufferSparseMemoryAliasingInstance::iterate (void)
 {
     const InstanceInterface& instance = m_context.getInstanceInterface();
-    const VkPhysicalDevice   physicalDevice = m_context.getPhysicalDevice();
-
-    if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
-        TCU_THROW(NotSupportedError, "Sparse binding not supported");
-
-    if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyAliased)
-        TCU_THROW(NotSupportedError, "Sparse memory aliasing not supported");
-
     {
         // Create logical device supporting both sparse and compute operations
         QueueRequirementsVec queueRequirements;
@@ -169,245 +170,268 @@ tcu::TestStatus BufferSparseMemoryAliasingInstance::iterate (void)
         createDeviceSupportingQueues(queueRequirements);
     }
 
+    const vk::VkPhysicalDevice& physicalDevice = getPhysicalDevice();
+
+    if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
+        TCU_THROW(NotSupportedError, "Sparse binding not supported");
+
+    if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyAliased)
+        TCU_THROW(NotSupportedError, "Sparse memory aliasing not supported");
 
     const DeviceInterface&  deviceInterface = getDeviceInterface();
     const Queue&            sparseQueue     = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
     const Queue&            computeQueue    = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
 
-    VkBufferCreateInfo bufferCreateInfo =
-    {
-        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,   // VkStructureType      sType;
-        DE_NULL,                                // const void*          pNext;
-        VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
-        VK_BUFFER_CREATE_SPARSE_ALIASED_BIT,    // VkBufferCreateFlags  flags;
-        m_bufferSizeInBytes,                    // VkDeviceSize         size;
-        VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
-        VK_BUFFER_USAGE_TRANSFER_SRC_BIT,       // VkBufferUsageFlags   usage;
-        VK_SHARING_MODE_EXCLUSIVE,              // VkSharingMode        sharingMode;
-        0u,                                     // deUint32             queueFamilyIndexCount;
-        DE_NULL                                 // const deUint32*      pQueueFamilyIndices;
-    };
-
-    const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
-
-    if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
+    // Go through all physical devices
+    for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
     {
-        bufferCreateInfo.sharingMode            = VK_SHARING_MODE_CONCURRENT;
-        bufferCreateInfo.queueFamilyIndexCount  = 2u;
-        bufferCreateInfo.pQueueFamilyIndices    = queueFamilyIndices;
-    }
+        const deUint32 firstDeviceID  = physDevID;
+        const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
 
-    // Create sparse buffers
-    const Unique<VkBuffer> sparseBufferWrite(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
-    const Unique<VkBuffer> sparseBufferRead (createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
+        VkBufferCreateInfo bufferCreateInfo =
+        {
+            VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,   // VkStructureType      sType;
+            DE_NULL,                                // const void*          pNext;
+            VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
+            VK_BUFFER_CREATE_SPARSE_ALIASED_BIT,    // VkBufferCreateFlags  flags;
+            m_bufferSizeInBytes,                    // VkDeviceSize         size;
+            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+            VK_BUFFER_USAGE_TRANSFER_SRC_BIT,       // VkBufferUsageFlags   usage;
+            VK_SHARING_MODE_EXCLUSIVE,              // VkSharingMode        sharingMode;
+            0u,                                     // deUint32             queueFamilyIndexCount;
+            DE_NULL                                 // const deUint32*      pQueueFamilyIndices;
+        };
 
-    // Create sparse buffers memory bind semaphore
-    const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+        const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
 
-    const VkMemoryRequirements bufferMemRequirements = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBufferWrite);
+        if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
+        {
+            bufferCreateInfo.sharingMode            = VK_SHARING_MODE_CONCURRENT;
+            bufferCreateInfo.queueFamilyIndexCount  = 2u;
+            bufferCreateInfo.pQueueFamilyIndices    = queueFamilyIndices;
+        }
 
-    if (bufferMemRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
-        TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
+        // Create sparse buffers
+        const Unique<VkBuffer> sparseBufferWrite(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
+        const Unique<VkBuffer> sparseBufferRead(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
 
-    DE_ASSERT((bufferMemRequirements.size % bufferMemRequirements.alignment) == 0);
+        // Create sparse buffers memory bind semaphore
+        const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
 
-    const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirements, MemoryRequirement::Any);
+        const VkMemoryRequirements bufferMemRequirements = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBufferWrite);
 
-    if (memoryType == NO_MATCH_FOUND)
-        return tcu::TestStatus::fail("No matching memory type found");
+        if (bufferMemRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
+            TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
 
-    const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), bufferMemRequirements.size, memoryType, 0u);
+        DE_ASSERT((bufferMemRequirements.size % bufferMemRequirements.alignment) == 0);
 
-    Move<VkDeviceMemory> deviceMemoryPtr(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL));
+        const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirements, MemoryRequirement::Any);
 
-    {
-        const VkSparseBufferMemoryBindInfo sparseBufferMemoryBindInfo[2] =
-        {
-            makeSparseBufferMemoryBindInfo
-            (*sparseBufferWrite,    //VkBuffer                   buffer;
-             1u,                    //deUint32                   bindCount;
-             &sparseMemoryBind      //const VkSparseMemoryBind*  Binds;
-            ),
-
-            makeSparseBufferMemoryBindInfo
-            (*sparseBufferRead,     //VkBuffer                   buffer;
-             1u,                    //deUint32                   bindCount;
-             &sparseMemoryBind      //const VkSparseMemoryBind*  Binds;
-            )
-        };
-
-        const VkBindSparseInfo bindSparseInfo =
-        {
-            VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType                           sType;
-            DE_NULL,                            //const void*                               pNext;
-            0u,                                 //deUint32                                  waitSemaphoreCount;
-            DE_NULL,                            //const VkSemaphore*                        pWaitSemaphores;
-            2u,                                 //deUint32                                  bufferBindCount;
-            sparseBufferMemoryBindInfo,         //const VkSparseBufferMemoryBindInfo*       pBufferBinds;
-            0u,                                 //deUint32                                  imageOpaqueBindCount;
-            DE_NULL,                            //const VkSparseImageOpaqueMemoryBindInfo*  pImageOpaqueBinds;
-            0u,                                 //deUint32                                  imageBindCount;
-            DE_NULL,                            //const VkSparseImageMemoryBindInfo*        pImageBinds;
-            1u,                                 //deUint32                                  signalSemaphoreCount;
-            &bufferMemoryBindSemaphore.get()    //const VkSemaphore*                        pSignalSemaphores;
-        };
+        if (memoryType == NO_MATCH_FOUND)
+            return tcu::TestStatus::fail("No matching memory type found");
 
-        // Submit sparse bind commands for execution
-        VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
-    }
+        const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), bufferMemRequirements.size, memoryType, 0u);
 
-    // Create output buffer
-    const VkBufferCreateInfo        outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
-    const Unique<VkBuffer>          outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
-    const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+        Move<VkDeviceMemory> deviceMemoryPtr(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL));
 
-    // Create command buffer for compute and data transfer oparations
-    const Unique<VkCommandPool>   commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
-    const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+        {
+            const VkSparseBufferMemoryBindInfo sparseBufferMemoryBindInfo[2] =
+            {
+                makeSparseBufferMemoryBindInfo
+                (*sparseBufferWrite,    //VkBuffer                   buffer;
+                 1u,                    //deUint32                   bindCount;
+                 &sparseMemoryBind      //const VkSparseMemoryBind*  pBinds;
+                ),
+
+                makeSparseBufferMemoryBindInfo
+                (*sparseBufferRead,     //VkBuffer                   buffer;
+                 1u,                    //deUint32                   bindCount;
+                 &sparseMemoryBind      //const VkSparseMemoryBind*  pBinds;
+                )
+            };
+
+            const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+            {
+                VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR,    //VkStructureType  sType;
+                DE_NULL,                                                //const void*      pNext;
+                firstDeviceID,                                          //deUint32         resourceDeviceIndex;
+                secondDeviceID,                                         //deUint32         memoryDeviceIndex;
+            };
+
+            const VkBindSparseInfo bindSparseInfo =
+            {
+                VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,                     //VkStructureType                           sType;
+                m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL,  //const void*                               pNext;
+                0u,                                                     //deUint32                                  waitSemaphoreCount;
+                DE_NULL,                                                //const VkSemaphore*                        pWaitSemaphores;
+                2u,                                                     //deUint32                                  bufferBindCount;
+                sparseBufferMemoryBindInfo,                             //const VkSparseBufferMemoryBindInfo*       pBufferBinds;
+                0u,                                                     //deUint32                                  imageOpaqueBindCount;
+                DE_NULL,                                                //const VkSparseImageOpaqueMemoryBindInfo*  pImageOpaqueBinds;
+                0u,                                                     //deUint32                                  imageBindCount;
+                DE_NULL,                                                //const VkSparseImageMemoryBindInfo*        pImageBinds;
+                1u,                                                     //deUint32                                  signalSemaphoreCount;
+                &bufferMemoryBindSemaphore.get()                        //const VkSemaphore*                        pSignalSemaphores;
+            };
+
+            // Submit sparse bind commands for execution
+            VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
+        }
+
+        // Create output buffer
+        const VkBufferCreateInfo        outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+        const Unique<VkBuffer>          outputBuffer(createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+        const de::UniquePtr<Allocation> outputBufferAlloc(bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+
+        // Create command buffer for compute and data transfer operations
+        const Unique<VkCommandPool>   commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+        const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+
+        // Start recording commands
+        beginCommandBuffer(deviceInterface, *commandBuffer);
+
+        // Create descriptor set
+        const Unique<VkDescriptorSetLayout> descriptorSetLayout(
+            DescriptorSetLayoutBuilder()
+            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+            .build(deviceInterface, getDevice()));
+
+        // Create compute pipeline
+        const Unique<VkShaderModule>   shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
+        const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
+        const Unique<VkPipeline>       computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+
+        deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
+
+        // Create descriptor pool
+        const Unique<VkDescriptorPool> descriptorPool(
+            DescriptorPoolBuilder()
+            .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
+            .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+
+        const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
 
-    // Start recording commands
-    beginCommandBuffer(deviceInterface, *commandBuffer);
 
     {
+            const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(*sparseBufferWrite, 0u, m_bufferSizeInBytes);
 
-    // Create descriptor set
-    const Unique<VkDescriptorSetLayout> descriptorSetLayout(
-        DescriptorSetLayoutBuilder()
-        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
-        .build(deviceInterface, getDevice()));
+            DescriptorSetUpdateBuilder()
+                .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &sparseBufferInfo)
+                .update(deviceInterface, getDevice());
+        }
 
-    // Create compute pipeline
-    const Unique<VkShaderModule>   shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
-    const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
-    const Unique<VkPipeline>       computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+        deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
 
-    deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
+        {
 
-    // Create descriptor set
-    const Unique<VkDescriptorPool> descriptorPool(
-        DescriptorPoolBuilder()
-        .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
-        .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+            deUint32         numInvocationsLeft        = m_bufferSizeInBytes / SIZE_OF_UINT_IN_SHADER;
+            const tcu::UVec3 workGroupSize             = computeWorkGroupSize(numInvocationsLeft);
+            const tcu::UVec3 maxComputeWorkGroupCount  = tcu::UVec3(65535u, 65535u, 65535u);
 
-    const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
+            numInvocationsLeft -= workGroupSize.x()*workGroupSize.y()*workGroupSize.z();
 
+            const deUint32 xWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.x());
+            numInvocationsLeft = numInvocationsLeft / xWorkGroupCount + ((numInvocationsLeft % xWorkGroupCount) ? 1u : 0u);
+            const deUint32 yWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.y());
+            numInvocationsLeft = numInvocationsLeft / yWorkGroupCount + ((numInvocationsLeft % yWorkGroupCount) ? 1u : 0u);
+            const deUint32 zWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.z());
+            numInvocationsLeft = numInvocationsLeft / zWorkGroupCount + ((numInvocationsLeft % zWorkGroupCount) ? 1u : 0u);
 
     {
-        const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(*sparseBufferWrite, 0u, m_bufferSizeInBytes);
+            if (numInvocationsLeft != 1u)
+                TCU_THROW(NotSupportedError, "Buffer size is not supported");
 
-        DescriptorSetUpdateBuilder()
-            .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &sparseBufferInfo)
-            .update(deviceInterface, getDevice());
-    }
-
-    deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
+            deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
+        }
 
-    {
-        deUint32         numInvocationsLeft        = m_bufferSizeInBytes / SIZE_OF_UINT_IN_SHADER;
-        const tcu::UVec3 workGroupSize             = computeWorkGroupSize(numInvocationsLeft);
-        const tcu::UVec3 maxComputeWorkGroupCount  = tcu::UVec3(65535u, 65535u, 65535u);
-
-        numInvocationsLeft -= workGroupSize.x()*workGroupSize.y()*workGroupSize.z();
+        {
+            const VkBufferMemoryBarrier sparseBufferWriteBarrier
+                = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
+                                          VK_ACCESS_TRANSFER_READ_BIT,
+                                          *sparseBufferWrite,
+                                          0ull,
+                                          m_bufferSizeInBytes);
 
-        const deUint32 xWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.x());
-        numInvocationsLeft = numInvocationsLeft / xWorkGroupCount + ((numInvocationsLeft % xWorkGroupCount) ? 1u : 0u);
-        const deUint32 yWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.y());
-        numInvocationsLeft = numInvocationsLeft / yWorkGroupCount + ((numInvocationsLeft % yWorkGroupCount) ? 1u : 0u);
-        const deUint32 zWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.z());
-        numInvocationsLeft = numInvocationsLeft / zWorkGroupCount + ((numInvocationsLeft % zWorkGroupCount) ? 1u : 0u);
+            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferWriteBarrier, 0u, DE_NULL);
+        }
 
-        if (numInvocationsLeft != 1u)
-            TCU_THROW(NotSupportedError, "Buffer size is not supported");
+        {
+            const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSizeInBytes);
 
-        deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
-    }
+            deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBufferRead, *outputBuffer, 1u, &bufferCopy);
+        }
 
-    {
-        const VkBufferMemoryBarrier sparseBufferWriteBarrier
-            = makeBufferMemoryBarrier( VK_ACCESS_SHADER_WRITE_BIT,
-                                       VK_ACCESS_TRANSFER_READ_BIT,
-                                       *sparseBufferWrite,
-                                       0ull,
-                                       m_bufferSizeInBytes);
-
-        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferWriteBarrier, 0u, DE_NULL);
-    }
-
-    {
-        const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSizeInBytes);
+        {
+            const VkBufferMemoryBarrier outputBufferHostBarrier
+                = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
+                                          VK_ACCESS_HOST_READ_BIT,
+                                          *outputBuffer,
+                                          0ull,
+                                          m_bufferSizeInBytes);
 
-        deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBufferRead, *outputBuffer, 1u, &bufferCopy);
-    }
+            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferHostBarrier, 0u, DE_NULL);
        }
 
-    {
-        const VkBufferMemoryBarrier outputBufferHostBarrier
-            = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT,
-                                       VK_ACCESS_HOST_READ_BIT,
-                                       *outputBuffer,
-                                       0ull,
-                                       m_bufferSizeInBytes);
-
-        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferHostBarrier, 0u, DE_NULL);
-    }
+        // End recording commands
+        endCommandBuffer(deviceInterface, *commandBuffer);
 
-    // End recording commands
-    endCommandBuffer(deviceInterface, *commandBuffer);
+        // The stage at which execution is going to wait for finish of sparse binding operations
+        const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
 
-    // The stage at which execution is going to wait for finish of sparse binding operations
-    const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
+        // Submit commands for execution and wait for completion
+        // In case of device groups, submit on the physical device with the resource
+        submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(),
+                              waitStageBits, 0, DE_NULL, m_useDeviceGroups, firstDeviceID);
 
-    // Submit commands for execution and wait for completion
-    submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(), waitStageBits);
+        // Retrieve data from output buffer to host memory
+        invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSizeInBytes);
 
-    // Retrieve data from output buffer to host memory
-    invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSizeInBytes);
+        const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
 
-    const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+        // Wait for sparse queue to become idle
+        deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
 
-    // Wait for sparse queue to become idle
-    deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+        // Prepare reference data
+        std::vector<deUint8> referenceData;
+        referenceData.resize(m_bufferSizeInBytes);
 
-    // Prepare reference data
-    std::vector<deUint8> referenceData;
-    referenceData.resize(m_bufferSizeInBytes);
+        std::vector<deUint32> referenceDataBlock;
+        referenceDataBlock.resize(MODULO_DIVISOR);
 
-    std::vector<deUint32> referenceDataBlock;
-    referenceDataBlock.resize(MODULO_DIVISOR);
+        for (deUint32 valueNdx = 0; valueNdx < MODULO_DIVISOR; ++valueNdx)
+        {
+            referenceDataBlock[valueNdx] = valueNdx % MODULO_DIVISOR;
+        }
 
-    for (deUint32 valueNdx = 0; valueNdx < MODULO_DIVISOR; ++valueNdx)
-    {
-        referenceDataBlock[valueNdx] = valueNdx % MODULO_DIVISOR;
-    }
+        const deUint32 fullBlockSizeInBytes = MODULO_DIVISOR * SIZE_OF_UINT_IN_SHADER;
+        const deUint32 lastBlockSizeInBytes = m_bufferSizeInBytes % fullBlockSizeInBytes;
+        const deUint32 numberOfBlocks       = m_bufferSizeInBytes / fullBlockSizeInBytes + (lastBlockSizeInBytes ? 1u : 0u);
 
-    const deUint32 fullBlockSizeInBytes = MODULO_DIVISOR * SIZE_OF_UINT_IN_SHADER;
-    const deUint32 lastBlockSizeInBytes = m_bufferSizeInBytes % fullBlockSizeInBytes;
-    const deUint32 numberOfBlocks       = m_bufferSizeInBytes / fullBlockSizeInBytes + (lastBlockSizeInBytes ? 1u : 0u);
+        for (deUint32 blockNdx = 0; blockNdx < numberOfBlocks; ++blockNdx)
+        {
+            const deUint32 offset = blockNdx * fullBlockSizeInBytes;
+            deMemcpy(&referenceData[0] + offset, &referenceDataBlock[0], ((offset + fullBlockSizeInBytes) <= m_bufferSizeInBytes) ? fullBlockSizeInBytes : lastBlockSizeInBytes);
+        }
 
-    for (deUint32 blockNdx = 0; blockNdx < numberOfBlocks; ++blockNdx)
-    {
-        const deUint32 offset = blockNdx * fullBlockSizeInBytes;
-        deMemcpy(&referenceData[0] + offset, &referenceDataBlock[0], ((offset + fullBlockSizeInBytes) <= m_bufferSizeInBytes) ? fullBlockSizeInBytes : lastBlockSizeInBytes);
+        // Compare reference data with output data
+        if (deMemCmp(&referenceData[0], outputData, m_bufferSizeInBytes) != 0)
+            return tcu::TestStatus::fail("Failed");
     }
-
-    // Compare reference data with output data
-    if (deMemCmp(&referenceData[0], outputData, m_bufferSizeInBytes) != 0)
-        return tcu::TestStatus::fail("Failed");
-    else
-        return tcu::TestStatus::pass("Passed");
+    return tcu::TestStatus::pass("Passed");
 }
 
 TestInstance* BufferSparseMemoryAliasingCase::createInstance (Context& context) const
 {
-    return new BufferSparseMemoryAliasingInstance(context, m_bufferSizeInBytes);
+    return new BufferSparseMemoryAliasingInstance(context, m_bufferSizeInBytes, m_useDeviceGroups);
 }
 
 } // anonymous ns
 
-void addBufferSparseMemoryAliasingTests(tcu::TestCaseGroup* group)
+void addBufferSparseMemoryAliasingTests(tcu::TestCaseGroup* group, const bool useDeviceGroups)
 {
-    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, glu::GLSL_VERSION_440));
-    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, glu::GLSL_VERSION_440));
-    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, glu::GLSL_VERSION_440));
-    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, glu::GLSL_VERSION_440));
-    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, glu::GLSL_VERSION_440));
-    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, glu::GLSL_VERSION_440));
+    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, glu::GLSL_VERSION_440, useDeviceGroups));
+    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, glu::GLSL_VERSION_440, useDeviceGroups));
+    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, glu::GLSL_VERSION_440, useDeviceGroups));
+    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, glu::GLSL_VERSION_440, useDeviceGroups));
+    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, glu::GLSL_VERSION_440, useDeviceGroups));
+    group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, glu::GLSL_VERSION_440, useDeviceGroups));
 }
 
 } // sparse
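All of the device-group loops in this change follow the same pairing rule, visible above: the sparse resource is created against device i while its backing memory is taken from device (i + 1) % N, so a multi-member group exercises a true cross-device bind and a single-member group degenerates to the classic test. Stripped of the test scaffolding (condensed, hypothetical names), the pattern is:

// numPhysicalDevices == 1 makes both indices 0, so the device_group_* tests
// stay runnable on single-GPU device groups.
for (uint32_t physDevID = 0; physDevID < numPhysicalDevices; ++physDevID)
{
    VkDeviceGroupBindSparseInfoKHR devGroupBindInfo = {};
    devGroupBindInfo.sType               = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR;
    devGroupBindInfo.resourceDeviceIndex = physDevID;                              // owns the buffer
    devGroupBindInfo.memoryDeviceIndex   = (physDevID + 1) % numPhysicalDevices;   // owns the memory

    VkBindSparseInfo bindInfo = {};
    bindInfo.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
    bindInfo.pNext = &devGroupBindInfo;   // left DE_NULL in the non-device-group variants
    // ... fill in the buffer/image binds, vkQueueBindSparse(), run and verify ...
}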
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferMemoryAliasing.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferMemoryAliasing.hpp
index f714088..26ea9f5 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferMemoryAliasing.hpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferMemoryAliasing.hpp
@@ -31,7 +31,7 @@ namespace vkt
 namespace sparse
 {
 
-void addBufferSparseMemoryAliasingTests(tcu::TestCaseGroup* group);
+void addBufferSparseMemoryAliasingTests(tcu::TestCaseGroup* group, const bool useDeviceGroups);
 
 } // sparse
 } // vkt
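With the extra flag, each add*Tests() entry point can populate two sibling groups from the same table of cases. The actual wiring lives in the test hierarchy builder (not part of this hunk); a hypothetical caller would look like:

// Hypothetical registration; the group names follow the new
// dEQP-VK.sparse_resources.*.device_group_* cases listed in vk-master.txt.
de::MovePtr<tcu::TestCaseGroup> aliasedGroup            (new tcu::TestCaseGroup(testCtx, "sparse_binding_aliased", ""));
de::MovePtr<tcu::TestCaseGroup> deviceGroupAliasedGroup (new tcu::TestCaseGroup(testCtx, "device_group_sparse_binding_aliased", ""));

addBufferSparseMemoryAliasingTests(aliasedGroup.get(),            false);  // classic single-device cases
addBufferSparseMemoryAliasingTests(deviceGroupAliasedGroup.get(), true);   // new device-group cases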
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseBinding.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseBinding.cpp
index 2202d9a..5a280d0 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseBinding.cpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseBinding.cpp
@@ -58,20 +58,24 @@ public:
     BufferSparseBindingCase (tcu::TestContext&  testCtx,
                              const std::string& name,
                              const std::string& description,
-                             const deUint32     bufferSize);
+                             const deUint32     bufferSize,
+                             const bool         useDeviceGroups);
 
     TestInstance* createInstance (Context& context) const;
 
 private:
     const deUint32  m_bufferSize;
+    const bool      m_useDeviceGroups;
 };
 
 BufferSparseBindingCase::BufferSparseBindingCase (tcu::TestContext&  testCtx,
                                                   const std::string& name,
                                                   const std::string& description,
-                                                  const deUint32     bufferSize)
+                                                  const deUint32     bufferSize,
+                                                  const bool         useDeviceGroups)
     : TestCase      (testCtx, name, description)
     , m_bufferSize  (bufferSize)
+    , m_useDeviceGroups (useDeviceGroups)
 {
 }
 
@@ -79,30 +83,29 @@ class BufferSparseBindingInstance : public SparseResourcesBaseInstance
 {
 public:
     BufferSparseBindingInstance (Context&       context,
-                                 const deUint32 bufferSize);
+                                 const deUint32 bufferSize,
+                                 const bool     useDeviceGroups);
 
     tcu::TestStatus iterate (void);
 
 private:
     const deUint32  m_bufferSize;
+    const bool      m_useDeviceGroups;
 };
 
 BufferSparseBindingInstance::BufferSparseBindingInstance (Context&       context,
-                                                          const deUint32 bufferSize)
+                                                          const deUint32 bufferSize,
+                                                          const bool     useDeviceGroups)
-    : SparseResourcesBaseInstance (context)
+    : SparseResourcesBaseInstance (context, useDeviceGroups)
     , m_bufferSize                (bufferSize)
+    , m_useDeviceGroups           (useDeviceGroups)
 {
 }
 
 tcu::TestStatus BufferSparseBindingInstance::iterate (void)
 {
     const InstanceInterface& instance = m_context.getInstanceInterface();
-    const VkPhysicalDevice   physicalDevice = m_context.getPhysicalDevice();
-
-    if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
-        TCU_THROW(NotSupportedError, "Sparse binding not supported");
-
     {
         // Create logical device supporting both sparse and compute operations
         QueueRequirementsVec queueRequirements;
@@ -111,213 +114,232 @@ tcu::TestStatus BufferSparseBindingInstance::iterate (void)
         createDeviceSupportingQueues(queueRequirements);
     }
 
+    const vk::VkPhysicalDevice& physicalDevice = getPhysicalDevice();
+
+    if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
+        TCU_THROW(NotSupportedError, "Sparse binding not supported");
 
     const DeviceInterface&  deviceInterface = getDeviceInterface();
     const Queue&            sparseQueue     = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
     const Queue&            computeQueue    = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
 
-    VkBufferCreateInfo bufferCreateInfo;
+    // Go through all physical devices
+    for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
+    {
+        const deUint32 firstDeviceID  = physDevID;
+        const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
 
-    bufferCreateInfo.sType                  = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;  // VkStructureType      sType;
-    bufferCreateInfo.pNext                  = DE_NULL;                               // const void*          pNext;
-    bufferCreateInfo.flags                  = VK_BUFFER_CREATE_SPARSE_BINDING_BIT;   // VkBufferCreateFlags  flags;
-    bufferCreateInfo.size                   = m_bufferSize;                          // VkDeviceSize         size;
-    bufferCreateInfo.usage                  = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
-                                              VK_BUFFER_USAGE_TRANSFER_DST_BIT;      // VkBufferUsageFlags   usage;
-    bufferCreateInfo.sharingMode            = VK_SHARING_MODE_EXCLUSIVE;             // VkSharingMode        sharingMode;
-    bufferCreateInfo.queueFamilyIndexCount  = 0u;                                    // deUint32             queueFamilyIndexCount;
-    bufferCreateInfo.pQueueFamilyIndices    = DE_NULL;                               // const deUint32*      pQueueFamilyIndices;
+        VkBufferCreateInfo bufferCreateInfo;
 
-    const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
+        bufferCreateInfo.sType                  = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;  // VkStructureType      sType;
+        bufferCreateInfo.pNext                  = DE_NULL;                               // const void*          pNext;
+        bufferCreateInfo.flags                  = VK_BUFFER_CREATE_SPARSE_BINDING_BIT;   // VkBufferCreateFlags  flags;
+        bufferCreateInfo.size                   = m_bufferSize;                          // VkDeviceSize         size;
+        bufferCreateInfo.usage                  = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
+                                                  VK_BUFFER_USAGE_TRANSFER_DST_BIT;      // VkBufferUsageFlags   usage;
+        bufferCreateInfo.sharingMode            = VK_SHARING_MODE_EXCLUSIVE;             // VkSharingMode        sharingMode;
+        bufferCreateInfo.queueFamilyIndexCount  = 0u;                                    // deUint32             queueFamilyIndexCount;
+        bufferCreateInfo.pQueueFamilyIndices    = DE_NULL;                               // const deUint32*      pQueueFamilyIndices;
 
-    if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
-    {
-        bufferCreateInfo.sharingMode            = VK_SHARING_MODE_CONCURRENT;   // VkSharingMode    sharingMode;
-        bufferCreateInfo.queueFamilyIndexCount  = 2u;                           // deUint32         queueFamilyIndexCount;
-        bufferCreateInfo.pQueueFamilyIndices    = queueFamilyIndices;           // const deUint32*  pQueueFamilyIndices;
-    }
+        const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
 
-    // Create sparse buffer
-    const Unique<VkBuffer> sparseBuffer(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
+        if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
+        {
+            bufferCreateInfo.sharingMode            = VK_SHARING_MODE_CONCURRENT;   // VkSharingMode    sharingMode;
+            bufferCreateInfo.queueFamilyIndexCount  = 2u;                           // deUint32         queueFamilyIndexCount;
+            bufferCreateInfo.pQueueFamilyIndices    = queueFamilyIndices;           // const deUint32*  pQueueFamilyIndices;
+        }
 
-    // Create sparse buffer memory bind semaphore
-    const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+        // Create sparse buffer
+        const Unique<VkBuffer> sparseBuffer(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
 
-    const VkMemoryRequirements bufferMemRequirement = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBuffer);
+        // Create sparse buffer memory bind semaphore
+        const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
 
-    if (bufferMemRequirement.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
-        TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
+        const VkMemoryRequirements bufferMemRequirement = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBuffer);
 
-    DE_ASSERT((bufferMemRequirement.size % bufferMemRequirement.alignment) == 0);
+        if (bufferMemRequirement.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
+            TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
 
-    Move<VkDeviceMemory> sparseMemoryAllocation;
+        DE_ASSERT((bufferMemRequirement.size % bufferMemRequirement.alignment) == 0);
 
-    {
-        std::vector<VkSparseMemoryBind> sparseMemoryBinds;
-        const deUint32                  numSparseBinds = static_cast<deUint32>(bufferMemRequirement.size / bufferMemRequirement.alignment);
-        const deUint32                  memoryType     = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirement, MemoryRequirement::Any);
-
-        if (memoryType == NO_MATCH_FOUND)
-            return tcu::TestStatus::fail("No matching memory type found");
+        Move<VkDeviceMemory> sparseMemoryAllocation;
 
         {
-            const VkMemoryAllocateInfo allocateInfo =
+            std::vector<VkSparseMemoryBind> sparseMemoryBinds;
+            const deUint32                  numSparseBinds = static_cast<deUint32>(bufferMemRequirement.size / bufferMemRequirement.alignment);
+            const deUint32                  memoryType     = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirement, MemoryRequirement::Any);
+
+            if (memoryType == NO_MATCH_FOUND)
+                return tcu::TestStatus::fail("No matching memory type found");
+
             {
-                VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,  // VkStructureType  sType;
-                DE_NULL,                                 // const void*      pNext;
-                bufferMemRequirement.size,               // VkDeviceSize     allocationSize;
-                memoryType,                              // uint32_t         memoryTypeIndex;
+                const VkMemoryAllocateInfo allocateInfo =
+                {
+                    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,  // VkStructureType  sType;
+                    DE_NULL,                                 // const void*      pNext;
+                    bufferMemRequirement.size,               // VkDeviceSize     allocationSize;
+                    memoryType,                              // uint32_t         memoryTypeIndex;
+                };
+
+                sparseMemoryAllocation = allocateMemory(deviceInterface, getDevice(), &allocateInfo);
+            }
+
+            for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseBinds; ++sparseBindNdx)
+            {
+                const VkSparseMemoryBind sparseMemoryBind =
+                {
+                    bufferMemRequirement.alignment * sparseBindNdx,  // VkDeviceSize             resourceOffset;
+                    bufferMemRequirement.alignment,                  // VkDeviceSize             size;
+                    *sparseMemoryAllocation,                         // VkDeviceMemory           memory;
+                    bufferMemRequirement.alignment * sparseBindNdx,  // VkDeviceSize             memoryOffset;
+                    (VkSparseMemoryBindFlags)0,                      // VkSparseMemoryBindFlags  flags;
+                };
+                sparseMemoryBinds.push_back(sparseMemoryBind);
+            }
+
+            const VkSparseBufferMemoryBindInfo sparseBufferBindInfo = makeSparseBufferMemoryBindInfo(*sparseBuffer, numSparseBinds, &sparseMemoryBinds[0]);
+
+            const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+            {
+                VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR,    //VkStructureType  sType;
+                DE_NULL,                                                //const void*      pNext;
+                firstDeviceID,                                          //deUint32         resourceDeviceIndex;
+                secondDeviceID,                                         //deUint32         memoryDeviceIndex;
             };
 
-            sparseMemoryAllocation = allocateMemory(deviceInterface, getDevice(), &allocateInfo);
-        }
-
-        for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseBinds; ++sparseBindNdx)
-        {
-            const VkSparseMemoryBind sparseMemoryBind =
+            const VkBindSparseInfo bindSparseInfo =
             {
-                bufferMemRequirement.alignment * sparseBindNdx,  // VkDeviceSize             resourceOffset;
-                bufferMemRequirement.alignment,                  // VkDeviceSize             size;
-                *sparseMemoryAllocation,                         // VkDeviceMemory           memory;
-                bufferMemRequirement.alignment * sparseBindNdx,  // VkDeviceSize             memoryOffset;
-                (VkSparseMemoryBindFlags)0,                      // VkSparseMemoryBindFlags  flags;
+                VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,                     //VkStructureType                           sType;
+                m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL,  //const void*                               pNext;
+                0u,                                                     //deUint32                                  waitSemaphoreCount;
+                DE_NULL,                                                //const VkSemaphore*                        pWaitSemaphores;
+                1u,                                                     //deUint32                                  bufferBindCount;
+                &sparseBufferBindInfo,                                  //const VkSparseBufferMemoryBindInfo*       pBufferBinds;
+                0u,                                                     //deUint32                                  imageOpaqueBindCount;
+                DE_NULL,                                                //const VkSparseImageOpaqueMemoryBindInfo*  pImageOpaqueBinds;
+                0u,                                                     //deUint32                                  imageBindCount;
+                DE_NULL,                                                //const VkSparseImageMemoryBindInfo*        pImageBinds;
+                1u,                                                     //deUint32                                  signalSemaphoreCount;
+                &bufferMemoryBindSemaphore.get()                        //const VkSemaphore*                        pSignalSemaphores;
             };
-            sparseMemoryBinds.push_back(sparseMemoryBind);
+            // Submit sparse bind commands for execution
+            VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
         }
 
-        const VkSparseBufferMemoryBindInfo sparseBufferBindInfo = makeSparseBufferMemoryBindInfo(*sparseBuffer, numSparseBinds, &sparseMemoryBinds[0]);
-
-        const VkBindSparseInfo bindSparseInfo =
-        {
-            VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType                           sType;
-            DE_NULL,                            //const void*                               pNext;
-            0u,                                 //deUint32                                  waitSemaphoreCount;
-            DE_NULL,                            //const VkSemaphore*                        pWaitSemaphores;
-            1u,                                 //deUint32                                  bufferBindCount;
-            &sparseBufferBindInfo,              //const VkSparseBufferMemoryBindInfo*       pBufferBinds;
-            0u,                                 //deUint32                                  imageOpaqueBindCount;
-            DE_NULL,                            //const VkSparseImageOpaqueMemoryBindInfo*  pImageOpaqueBinds;
-            0u,                                 //deUint32                                  imageBindCount;
-            DE_NULL,                            //const VkSparseImageMemoryBindInfo*        pImageBinds;
-            1u,                                 //deUint32                                  signalSemaphoreCount;
-            &bufferMemoryBindSemaphore.get()    //const VkSemaphore*                        pSignalSemaphores;
-        };
-
-        // Submit sparse bind commands for execution
-        VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
-    }
-
-    // Create command buffer for transfer oparations
-    const Unique<VkCommandPool>   commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
-    const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+        // Create command buffer for transfer operations
+        const Unique<VkCommandPool>   commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+        const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
 
-    // Start recording transfer commands
-    beginCommandBuffer(deviceInterface, *commandBuffer);
+        // Start recording transfer commands
+        beginCommandBuffer(deviceInterface, *commandBuffer);
 
-    const VkBufferCreateInfo        inputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
-    const Unique<VkBuffer>          inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
-    const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
+        const VkBufferCreateInfo        inputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+        const Unique<VkBuffer>          inputBuffer(createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
+        const de::UniquePtr<Allocation> inputBufferAlloc(bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
 
-    std::vector<deUint8> referenceData;
-    referenceData.resize(m_bufferSize);
+        std::vector<deUint8> referenceData;
+        referenceData.resize(m_bufferSize);
 
-    for (deUint32 valueNdx = 0; valueNdx < m_bufferSize; ++valueNdx)
-    {
-        referenceData[valueNdx] = static_cast<deUint8>((valueNdx % bufferMemRequirement.alignment) + 1u);
-    }
+        for (deUint32 valueNdx = 0; valueNdx < m_bufferSize; ++valueNdx)
+        {
+            referenceData[valueNdx] = static_cast<deUint8>((valueNdx % bufferMemRequirement.alignment) + 1u);
+        }
 
-    deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], m_bufferSize);
+        deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], m_bufferSize);
 
-    flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), m_bufferSize);
+        flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), m_bufferSize);
 
-    {
-        const VkBufferMemoryBarrier inputBufferBarrier
-            = makeBufferMemoryBarrier( VK_ACCESS_HOST_WRITE_BIT,
-                                       VK_ACCESS_TRANSFER_READ_BIT,
-                                       *inputBuffer,
-                                       0u,
-                                       m_bufferSize);
-
-        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
-    }
+        {
+            const VkBufferMemoryBarrier inputBufferBarrier
+                = makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT,
+                                          VK_ACCESS_TRANSFER_READ_BIT,
+                                          *inputBuffer,
+                                          0u,
+                                          m_bufferSize);
+
+            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
+        }
 
-    {
-        const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
+        {
+            const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
 
-        deviceInterface.cmdCopyBuffer(*commandBuffer, *inputBuffer, *sparseBuffer, 1u, &bufferCopy);
-    }
+            deviceInterface.cmdCopyBuffer(*commandBuffer, *inputBuffer, *sparseBuffer, 1u, &bufferCopy);
+        }
 
-    {
-        const VkBufferMemoryBarrier sparseBufferBarrier
-            = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT,
-                                       VK_ACCESS_TRANSFER_READ_BIT,
-                                       *sparseBuffer,
-                                       0u,
-                                       m_bufferSize);
-
-        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferBarrier, 0u, DE_NULL);
-    }
+        {
+            const VkBufferMemoryBarrier sparseBufferBarrier
+                = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
+                                          VK_ACCESS_TRANSFER_READ_BIT,
+                                          *sparseBuffer,
+                                          0u,
+                                          m_bufferSize);
+
+            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferBarrier, 0u, DE_NULL);
+        }
 
-    const VkBufferCreateInfo        outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
-    const Unique<VkBuffer>          outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
-    const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+        const VkBufferCreateInfo        outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+        const Unique<VkBuffer>          outputBuffer(createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+        const de::UniquePtr<Allocation> outputBufferAlloc(bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
 
-    {
-        const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
+        {
+            const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
 
-        deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBuffer, *outputBuffer, 1u, &bufferCopy);
-    }
+            deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBuffer, *outputBuffer, 1u, &bufferCopy);
+        }
 
-    {
-        const VkBufferMemoryBarrier outputBufferBarrier
-            = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT,
-                                       VK_ACCESS_HOST_READ_BIT,
-                                       *outputBuffer,
-                                       0u,
-                                       m_bufferSize);
-
-        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
-    }
+        {
+            const VkBufferMemoryBarrier outputBufferBarrier
+                = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
+                                          VK_ACCESS_HOST_READ_BIT,
+                                          *outputBuffer,
+                                          0u,
+                                          m_bufferSize);
+
+            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
+        }
 
-    // End recording transfer commands
-    endCommandBuffer(deviceInterface, *commandBuffer);
+        // End recording transfer commands
+        endCommandBuffer(deviceInterface, *commandBuffer);
 
-    const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
+        const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
 
-    // Submit transfer commands for execution and wait for completion
-    submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(), waitStageBits);
+        // Submit transfer commands for execution and wait for completion
+        // In case of device groups, submit on the physical device with the resource
+        submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(),
+                              waitStageBits, 0, DE_NULL, m_useDeviceGroups, firstDeviceID);
 
-    // Retrieve data from output buffer to host memory
-    invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSize);
+        // Retrieve data from output buffer to host memory
+        invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSize);
 
-    const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+        const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
 
-    // Wait for sparse queue to become idle
-    deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+        // Wait for sparse queue to become idle
+        deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
 
-    // Compare output data with reference data
-    if (deMemCmp(&referenceData[0], outputData, m_bufferSize) != 0)
-        return tcu::TestStatus::fail("Failed");
-    else
-        return tcu::TestStatus::pass("Passed");
+        // Compare output data with reference data
+        if (deMemCmp(&referenceData[0], outputData, m_bufferSize) != 0)
+            return tcu::TestStatus::fail("Failed");
+    }
+    return tcu::TestStatus::pass("Passed");
 }
 
 TestInstance* BufferSparseBindingCase::createInstance (Context& context) const
 {
-    return new BufferSparseBindingInstance(context, m_bufferSize);
+    return new BufferSparseBindingInstance(context, m_bufferSize, m_useDeviceGroups);
 }
 
 } // anonymous ns
 
-void addBufferSparseBindingTests (tcu::TestCaseGroup* group)
+void addBufferSparseBindingTests (tcu::TestCaseGroup* group, const bool useDeviceGroups)
 {
-    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10));
-    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12));
-    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16));
-    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17));
-    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20));
-    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24));
+    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, useDeviceGroups));
+    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, useDeviceGroups));
+    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, useDeviceGroups));
+    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, useDeviceGroups));
+    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, useDeviceGroups));
+    group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, useDeviceGroups));
 }
 
 } // sparse
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseBinding.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseBinding.hpp
index 3a048d6..0b4b567 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseBinding.hpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseBinding.hpp
@@ -31,7 +31,7 @@ namespace vkt
 namespace sparse
 {
 
-void addBufferSparseBindingTests (tcu::TestCaseGroup* group);
+void addBufferSparseBindingTests (tcu::TestCaseGroup* group, const bool useDeviceGroups);
 
 } // sparse
 } // vkt
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseResidency.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseResidency.cpp
index b23910f..294a052 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseResidency.cpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseResidency.cpp
@@ -64,7 +64,9 @@ public:
                     const std::string&      name,
                     const std::string&      description,
                     const deUint32          bufferSize,
-                    const glu::GLSLVersion  glslVersion);
+                    const glu::GLSLVersion  glslVersion,
+                    const bool              useDeviceGroups);
+
     void            initPrograms    (SourceCollections& sourceCollections) const;
     TestInstance*   createInstance  (Context&           context) const;
 
@@ -72,16 +74,21 @@ public:
 private:
     const deUint32          m_bufferSize;
     const glu::GLSLVersion  m_glslVersion;
+    const bool              m_useDeviceGroups;
+
 };
 
 BufferSparseResidencyCase::BufferSparseResidencyCase (tcu::TestContext&       testCtx,
                                                       const std::string&      name,
                                                       const std::string&      description,
                                                       const deUint32          bufferSize,
-                                                      const glu::GLSLVersion  glslVersion)
+                                                      const glu::GLSLVersion  glslVersion,
+                                                      const bool              useDeviceGroups)
+
     : TestCase      (testCtx, name, description)
     , m_bufferSize  (bufferSize)
     , m_glslVersion (glslVersion)
+    , m_useDeviceGroups (useDeviceGroups)
 {
 }
 
@@ -119,30 +126,28 @@ class BufferSparseResidencyInstance : public SparseResourcesBaseInstance
 {
 public:
     BufferSparseResidencyInstance   (Context&       context,
-                                     const deUint32 bufferSize);
+                                     const deUint32 bufferSize,
+                                     const bool     useDeviceGroups);
 
     tcu::TestStatus iterate         (void);
 
 private:
     const deUint32  m_bufferSize;
+    const bool      m_useDeviceGroups;
 };
 
 BufferSparseResidencyInstance::BufferSparseResidencyInstance (Context&       context,
-                                                              const deUint32 bufferSize)
+                                                              const deUint32 bufferSize,
+                                                              const bool     useDeviceGroups)
-    : SparseResourcesBaseInstance (context)
+    : SparseResourcesBaseInstance (context, useDeviceGroups)
     , m_bufferSize                (bufferSize)
+    , m_useDeviceGroups           (useDeviceGroups)
 {
 }
 
 tcu::TestStatus BufferSparseResidencyInstance::iterate (void)
 {
     const InstanceInterface& instance = m_context.getInstanceInterface();
-    const VkPhysicalDevice             physicalDevice           = m_context.getPhysicalDevice();
-    const VkPhysicalDeviceProperties   physicalDeviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
-
-    if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyBuffer)
-        TCU_THROW(NotSupportedError, "Sparse partially resident buffers not supported");
-
     {
         // Create logical device supporting both sparse and compute operations
        QueueRequirementsVec queueRequirements;
@@ -151,225 +156,245 @@ tcu::TestStatus BufferSparseResidencyInstance::iterate (void)
         createDeviceSupportingQueues(queueRequirements);
     }
 
+    const VkPhysicalDevice             physicalDevice           = getPhysicalDevice();
+    const VkPhysicalDeviceProperties   physicalDeviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
+
+    if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyBuffer)
+        TCU_THROW(NotSupportedError, "Sparse partially resident buffers not supported");
 
     const DeviceInterface&  deviceInterface = getDeviceInterface();
     const Queue&            sparseQueue     = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
     const Queue&            computeQueue    = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
 
-    VkBufferCreateInfo bufferCreateInfo =
+    // Go through all physical devices
+    for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
     {
-        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,     // VkStructureType      sType;
-        DE_NULL,                                  // const void*          pNext;
-        VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
-        VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT,    // VkBufferCreateFlags  flags;
-        m_bufferSize,                             // VkDeviceSize         size;
-        VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
-        VK_BUFFER_USAGE_TRANSFER_SRC_BIT,         // VkBufferUsageFlags   usage;
-        VK_SHARING_MODE_EXCLUSIVE,                // VkSharingMode        sharingMode;
-        0u,                                       // deUint32             queueFamilyIndexCount;
-        DE_NULL                                   // const deUint32*      pQueueFamilyIndices;
-    };
-
-    const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
-
-    if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
-    {
-        bufferCreateInfo.sharingMode            = VK_SHARING_MODE_CONCURRENT;
-        bufferCreateInfo.queueFamilyIndexCount  = 2u;
-        bufferCreateInfo.pQueueFamilyIndices    = queueFamilyIndices;
-    }
-
-    // Create sparse buffer
-    const Unique<VkBuffer> sparseBuffer(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
+        const deUint32 firstDeviceID  = physDevID;
+        const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
 
-    // Create sparse buffer memory bind semaphore
-    const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
-
-    const VkMemoryRequirements bufferMemRequirements = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBuffer);
-
-    if (bufferMemRequirements.size > physicalDeviceProperties.limits.sparseAddressSpaceSize)
-        TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
+        VkBufferCreateInfo bufferCreateInfo =
+        {
+            VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,     // VkStructureType      sType;
+            DE_NULL,                                  // const void*          pNext;
+            VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
+            VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT,    // VkBufferCreateFlags  flags;
+            m_bufferSize,                             // VkDeviceSize         size;
+            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+            VK_BUFFER_USAGE_TRANSFER_SRC_BIT,         // VkBufferUsageFlags   usage;
+            VK_SHARING_MODE_EXCLUSIVE,                // VkSharingMode        sharingMode;
+            0u,                                       // deUint32             queueFamilyIndexCount;
+            DE_NULL                                   // const deUint32*      pQueueFamilyIndices;
+        };
 
-    DE_ASSERT((bufferMemRequirements.size % bufferMemRequirements.alignment) == 0);
+        const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
 
-    const deUint32              numSparseSlots = static_cast<deUint32>(bufferMemRequirements.size / bufferMemRequirements.alignment);
-    std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
+        if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
+        {
+            bufferCreateInfo.sharingMode            = VK_SHARING_MODE_CONCURRENT;
+            bufferCreateInfo.queueFamilyIndexCount  = 2u;
+            bufferCreateInfo.pQueueFamilyIndices    = queueFamilyIndices;
+        }
 
-    {
-        std::vector<VkSparseMemoryBind> sparseMemoryBinds;
-        const deUint32                  memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirements, MemoryRequirement::Any);
+        // Create sparse buffer
+        const Unique<VkBuffer> sparseBuffer(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
 
-        if (memoryType == NO_MATCH_FOUND)
-            return tcu::TestStatus::fail("No matching memory type found");
+        // Create sparse buffer memory bind semaphore
+        const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
 
-        for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; sparseBindNdx += 2)
-        {
-            const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), bufferMemRequirements.alignment, memoryType, bufferMemRequirements.alignment * sparseBindNdx);
+        const VkMemoryRequirements bufferMemRequirements = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBuffer);
 
-            deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+        if (bufferMemRequirements.size > physicalDeviceProperties.limits.sparseAddressSpaceSize)
+            TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
 
-            sparseMemoryBinds.push_back(sparseMemoryBind);
-        }
+        DE_ASSERT((bufferMemRequirements.size % bufferMemRequirements.alignment) == 0);
 
-        const VkSparseBufferMemoryBindInfo sparseBufferBindInfo = makeSparseBufferMemoryBindInfo(*sparseBuffer, static_cast<deUint32>(sparseMemoryBinds.size()), &sparseMemoryBinds[0]);
+        const deUint32              numSparseSlots = static_cast<deUint32>(bufferMemRequirements.size / bufferMemRequirements.alignment);
+        std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
 
-        const VkBindSparseInfo bindSparseInfo =
         {
-            VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType                           sType;
-            DE_NULL,                            //const void*                               pNext;
-            0u,                                 //deUint32                                  waitSemaphoreCount;
-            DE_NULL,                            //const VkSemaphore*                        pWaitSemaphores;
-            1u,                                 //deUint32                                  bufferBindCount;
-            &sparseBufferBindInfo,              //const VkSparseBufferMemoryBindInfo*       pBufferBinds;
-            0u,                                 //deUint32                                  imageOpaqueBindCount;
-            DE_NULL,                            //const VkSparseImageOpaqueMemoryBindInfo*  pImageOpaqueBinds;
-            0u,                                 //deUint32                                  imageBindCount;
-            DE_NULL,                            //const VkSparseImageMemoryBindInfo*        pImageBinds;
-            1u,                                 //deUint32                                  signalSemaphoreCount;
-            &bufferMemoryBindSemaphore.get()    //const VkSemaphore*                        pSignalSemaphores;
-        };
-
-        VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
-    }
+            std::vector<VkSparseMemoryBind> sparseMemoryBinds;
+            const deUint32                  memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirements, MemoryRequirement::Any);
+
+            if (memoryType == NO_MATCH_FOUND)
+                return tcu::TestStatus::fail("No matching memory type found");
+
+            for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; sparseBindNdx += 2)
+            {
+                const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), bufferMemRequirements.alignment, memoryType, bufferMemRequirements.alignment * sparseBindNdx);
+
+                deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+                sparseMemoryBinds.push_back(sparseMemoryBind);
+            }
+
+            const VkSparseBufferMemoryBindInfo sparseBufferBindInfo = makeSparseBufferMemoryBindInfo(*sparseBuffer, static_cast<deUint32>(sparseMemoryBinds.size()), &sparseMemoryBinds[0]);
+
+            const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+            {
+                VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR,    //VkStructureType  sType;
+                DE_NULL,                                                //const void*      pNext;
+                firstDeviceID,                                          //deUint32         resourceDeviceIndex;
+                secondDeviceID,                                         //deUint32         memoryDeviceIndex;
+            };
+            const VkBindSparseInfo bindSparseInfo =
+            {
+                VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,                     //VkStructureType                           sType;
+                m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL,  //const void*                               pNext;
+                0u,                                                     //deUint32                                  waitSemaphoreCount;
+                DE_NULL,                                                //const VkSemaphore*                        pWaitSemaphores;
+                1u,                                                     //deUint32                                  bufferBindCount;
+                &sparseBufferBindInfo,                                  //const VkSparseBufferMemoryBindInfo*       pBufferBinds;
+                0u,                                                     //deUint32                                  imageOpaqueBindCount;
+                DE_NULL,                                                //const VkSparseImageOpaqueMemoryBindInfo*  pImageOpaqueBinds;
+                0u,                                                     //deUint32                                  imageBindCount;
+                DE_NULL,                                                //const VkSparseImageMemoryBindInfo*        pImageBinds;
+                1u,                                                     //deUint32                                  signalSemaphoreCount;
+                &bufferMemoryBindSemaphore.get()                        //const VkSemaphore*                        pSignalSemaphores;
+            };
+
+            VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
+        }
 
-    // Create input buffer
-    const VkBufferCreateInfo        inputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
-    const Unique<VkBuffer>          inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
-    const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
+        // Create input buffer
+        const VkBufferCreateInfo        inputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
+        const Unique<VkBuffer>          inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
+        const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
 
-    std::vector<deUint8> referenceData;
-    referenceData.resize(m_bufferSize);
+        std::vector<deUint8> referenceData;
+        referenceData.resize(m_bufferSize);
 
-    for (deUint32 valueNdx = 0; valueNdx < m_bufferSize; ++valueNdx)
-    {
-        referenceData[valueNdx] = static_cast<deUint8>((valueNdx % bufferMemRequirements.alignment) + 1u);
-    }
+        for (deUint32 valueNdx = 0; valueNdx < m_bufferSize; ++valueNdx)
+        {
+            referenceData[valueNdx] = static_cast<deUint8>((valueNdx % bufferMemRequirements.alignment) + 1u);
+        }
 
-    deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], m_bufferSize);
+        deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], m_bufferSize);
 
-    flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), m_bufferSize);
+        flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), m_bufferSize);
 
-    // Create output buffer
-    const VkBufferCreateInfo        outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
-    const Unique<VkBuffer>          outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
-    const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+        // Create output buffer
+        const VkBufferCreateInfo        outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+        const Unique<VkBuffer>          outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+        const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
 
-    // Create command buffer for compute and data transfer oparations
-    const Unique<VkCommandPool>   commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
-    const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+        // Create command buffer for compute and data transfer operations
+        const Unique<VkCommandPool>   commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+        const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
 
-    // Start recording compute and transfer commands
-    beginCommandBuffer(deviceInterface, *commandBuffer);
+        // Start recording compute and transfer commands
+        beginCommandBuffer(deviceInterface, *commandBuffer);
 
-    // Create descriptor set
-    const Unique<VkDescriptorSetLayout> descriptorSetLayout(
-        DescriptorSetLayoutBuilder()
-        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
-        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
-        .build(deviceInterface, getDevice()));
+        // Create descriptor set
+        const Unique<VkDescriptorSetLayout> descriptorSetLayout(
+            DescriptorSetLayoutBuilder()
+            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+            .build(deviceInterface, getDevice()));
 
-    // Create compute pipeline
-    const Unique<VkShaderModule>   shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
-    const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
-    const Unique<VkPipeline>       computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+        // Create compute pipeline
+        const Unique<VkShaderModule>   shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
+        const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
+        const Unique<VkPipeline>       computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
 
-    deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
+        deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
 
-    const Unique<VkDescriptorPool> descriptorPool(
-        DescriptorPoolBuilder()
-        .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2u)
-        .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+        const Unique<VkDescriptorPool> descriptorPool(
+            DescriptorPoolBuilder()
+            .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2u)
+            .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
 
-    const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
+        const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
 
-    {
-        const VkDescriptorBufferInfo inputBufferInfo  = makeDescriptorBufferInfo(*inputBuffer, 0ull, m_bufferSize);
-        const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(*sparseBuffer, 0ull, m_bufferSize);
-
-        DescriptorSetUpdateBuilder()
-            .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inputBufferInfo)
-            .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &sparseBufferInfo)
-            .update(deviceInterface, getDevice());
-    }
+        {
+            const VkDescriptorBufferInfo inputBufferInfo  = makeDescriptorBufferInfo(*inputBuffer, 0ull, m_bufferSize);
+            const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(*sparseBuffer, 0ull, m_bufferSize);
 
-    deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
+            DescriptorSetUpdateBuilder()
+                .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inputBufferInfo)
+                .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &sparseBufferInfo)
+                .update(deviceInterface, getDevice());
+        }
 
-    {
-        const VkBufferMemoryBarrier inputBufferBarrier
-            = makeBufferMemoryBarrier( VK_ACCESS_HOST_WRITE_BIT,
-                                       VK_ACCESS_SHADER_READ_BIT,
-                                       *inputBuffer,
-                                       0ull,
-                                       m_bufferSize);
-
-        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
-    }
+        deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
 
-    deviceInterface.cmdDispatch(*commandBuffer, 1u, 1u, 1u);
+        {
+            const VkBufferMemoryBarrier inputBufferBarrier
+                = makeBufferMemoryBarrier( VK_ACCESS_HOST_WRITE_BIT,
+                                           VK_ACCESS_SHADER_READ_BIT,
+                                           *inputBuffer,
+                                           0ull,
+                                           m_bufferSize);
+
+            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
        }
 
-    {
-        const VkBufferMemoryBarrier sparseBufferBarrier
-            = makeBufferMemoryBarrier( VK_ACCESS_SHADER_WRITE_BIT,
-                                       VK_ACCESS_TRANSFER_READ_BIT,
-                                       *sparseBuffer,
-                                       0ull,
-                                       m_bufferSize);
-
-        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferBarrier, 0u, DE_NULL);
-    }
+        deviceInterface.cmdDispatch(*commandBuffer, 1u, 1u, 1u);
 
-    {
-        const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
+        {
+            const VkBufferMemoryBarrier sparseBufferBarrier
+                = makeBufferMemoryBarrier( VK_ACCESS_SHADER_WRITE_BIT,
+                                           VK_ACCESS_TRANSFER_READ_BIT,
+                                           *sparseBuffer,
+                                           0ull,
+                                           m_bufferSize);
+
+            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferBarrier, 0u, DE_NULL);
+        }
 
-        deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBuffer, *outputBuffer, 1u, &bufferCopy);
-    }
+        {
+            const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
 
-    {
-        const VkBufferMemoryBarrier outputBufferBarrier
-            = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT,
-                                       VK_ACCESS_HOST_READ_BIT,
-                                       *outputBuffer,
-                                       0ull,
-                                       m_bufferSize);
-
-        deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
-    }
+
deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBuffer, *outputBuffer, 1u, &bufferCopy); + } - // End recording compute and transfer commands - endCommandBuffer(deviceInterface, *commandBuffer); + { + const VkBufferMemoryBarrier outputBufferBarrier + = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_HOST_READ_BIT, + *outputBuffer, + 0ull, + m_bufferSize); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL); + } - const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT }; + // End recording compute and transfer commands + endCommandBuffer(deviceInterface, *commandBuffer); - // Submit transfer commands for execution and wait for completion - submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(), waitStageBits); + const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT }; - // Retrieve data from output buffer to host memory - invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSize); + // Submit transfer commands for execution and wait for completion + submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(), + waitStageBits, 0, DE_NULL, m_useDeviceGroups, firstDeviceID); - const deUint8* outputData = static_cast(outputBufferAlloc->getHostPtr()); + // Retrieve data from output buffer to host memory + invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSize); - // Wait for sparse queue to become idle - deviceInterface.queueWaitIdle(sparseQueue.queueHandle); + const deUint8* outputData = static_cast(outputBufferAlloc->getHostPtr()); - // Compare output data with reference data - for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; ++sparseBindNdx) - { - const deUint32 alignment = static_cast(bufferMemRequirements.alignment); - const deUint32 offset = alignment * sparseBindNdx; - const deUint32 size = sparseBindNdx == (numSparseSlots - 1) ? m_bufferSize % alignment : alignment; + // Wait for sparse queue to become idle + deviceInterface.queueWaitIdle(sparseQueue.queueHandle); - if (sparseBindNdx % 2u == 0u) + // Compare output data with reference data + for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; ++sparseBindNdx) { - if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0) - return tcu::TestStatus::fail("Failed"); - } - else if (physicalDeviceProperties.sparseProperties.residencyNonResidentStrict) - { - deMemset(&referenceData[offset], 0u, size); - - if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0) - return tcu::TestStatus::fail("Failed"); + const deUint32 alignment = static_cast(bufferMemRequirements.alignment); + const deUint32 offset = alignment * sparseBindNdx; + const deUint32 size = sparseBindNdx == (numSparseSlots - 1) ? 
m_bufferSize % alignment : alignment; + + if (sparseBindNdx % 2u == 0u) + { + if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0) + return tcu::TestStatus::fail("Failed"); + } + else if (physicalDeviceProperties.sparseProperties.residencyNonResidentStrict) + { + deMemset(&referenceData[offset], 0u, size); + + if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0) + return tcu::TestStatus::fail("Failed"); + } } } @@ -378,19 +403,19 @@ tcu::TestStatus BufferSparseResidencyInstance::iterate (void) TestInstance* BufferSparseResidencyCase::createInstance (Context& context) const { - return new BufferSparseResidencyInstance(context, m_bufferSize); + return new BufferSparseResidencyInstance(context, m_bufferSize, m_useDeviceGroups); } } // anonymous ns -void addBufferSparseResidencyTests(tcu::TestCaseGroup* group) +void addBufferSparseResidencyTests(tcu::TestCaseGroup* group, const bool useDeviceGroups) { - group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, glu::GLSL_VERSION_440)); - group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, glu::GLSL_VERSION_440)); - group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, glu::GLSL_VERSION_440)); - group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, glu::GLSL_VERSION_440)); - group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, glu::GLSL_VERSION_440)); - group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, glu::GLSL_VERSION_440)); + group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, glu::GLSL_VERSION_440, useDeviceGroups)); + group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, glu::GLSL_VERSION_440, useDeviceGroups)); + group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, glu::GLSL_VERSION_440, useDeviceGroups)); + group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, glu::GLSL_VERSION_440, useDeviceGroups)); + group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, glu::GLSL_VERSION_440, useDeviceGroups)); + group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, glu::GLSL_VERSION_440, useDeviceGroups)); } } // sparse diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseResidency.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseResidency.hpp index 54fae2d..0b4647e 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseResidency.hpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseResidency.hpp @@ -31,7 +31,7 @@ namespace vkt namespace sparse { -void addBufferSparseResidencyTests(tcu::TestCaseGroup* group); +void addBufferSparseResidencyTests(tcu::TestCaseGroup* group, const bool useDeviceGroups); } // sparse } // vkt diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferTests.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferTests.cpp index 22a0381..6be4613 100644 --- 
a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferTests.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferTests.cpp @@ -68,8 +68,8 @@ typedef SharedPtr > AllocationSp; enum { - RENDER_SIZE = 128, //!< framebuffer size in pixels - GRID_SIZE = RENDER_SIZE / 8, //!< number of grid tiles in a row + RENDER_SIZE = 128, //!< framebuffer size in pixels + GRID_SIZE = RENDER_SIZE / 8, //!< number of grid tiles in a row }; enum TestFlagBits @@ -78,6 +78,7 @@ enum TestFlagBits TEST_FLAG_ALIASED = 1u << 0, //!< sparseResidencyAliased TEST_FLAG_RESIDENCY = 1u << 1, //!< sparseResidencyBuffer TEST_FLAG_NON_RESIDENT_STRICT = 1u << 2, //!< residencyNonResidentStrict + TEST_FLAG_ENABLE_DEVICE_GROUPS = 1u << 3, //!< device groups are enabled }; typedef deUint32 TestFlags; @@ -584,7 +585,9 @@ public: void draw (const DeviceInterface& vk, const VkDevice device, const VkQueue queue, - const Delegate& drawDelegate) const + const Delegate& drawDelegate, + const bool useDeviceGroups, + const deUint32 deviceID) const { beginCommandBuffer(vk, *m_cmdBuffer); @@ -668,7 +671,7 @@ public: } VK_CHECK(vk.endCommandBuffer(*m_cmdBuffer)); - submitCommandsAndWait(vk, device, queue, *m_cmdBuffer); + submitCommandsAndWait(vk, device, queue, *m_cmdBuffer, 0U, DE_NULL, DE_NULL, 0U, DE_NULL, useDeviceGroups, deviceID); } private: @@ -697,7 +700,8 @@ private: Renderer& operator= (const Renderer&); }; -void bindSparseBuffer (const DeviceInterface& vk, const VkDevice device, const VkQueue sparseQueue, const VkBuffer buffer, const SparseAllocation& sparseAllocation) +void bindSparseBuffer (const DeviceInterface& vk, const VkDevice device, const VkQueue sparseQueue, const VkBuffer buffer, const SparseAllocation& sparseAllocation, + const bool useDeviceGroups, deUint32 resourceDevId, deUint32 memoryDeviceId) { const VkSparseBufferMemoryBindInfo sparseBufferMemoryBindInfo = { @@ -706,20 +710,28 @@ void bindSparseBuffer (const DeviceInterface& vk, const VkDevice device, const V &sparseAllocation.memoryBinds[0], // const VkSparseMemoryBind* pBinds; }; + const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo = + { + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType; + DE_NULL, //const void* pNext; + resourceDevId, //deUint32 resourceDeviceIndex; + memoryDeviceId, //deUint32 memoryDeviceIndex; + }; + const VkBindSparseInfo bindInfo = { - VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, // VkStructureType sType; - DE_NULL, // const void* pNext; - 0u, // uint32_t waitSemaphoreCount; - DE_NULL, // const VkSemaphore* pWaitSemaphores; - 1u, // uint32_t bufferBindCount; - &sparseBufferMemoryBindInfo, // const VkSparseBufferMemoryBindInfo* pBufferBinds; - 0u, // uint32_t imageOpaqueBindCount; - DE_NULL, // const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; - 0u, // uint32_t imageBindCount; - DE_NULL, // const VkSparseImageMemoryBindInfo* pImageBinds; - 0u, // uint32_t signalSemaphoreCount; - DE_NULL, // const VkSemaphore* pSignalSemaphores; + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, // VkStructureType sType; + useDeviceGroups ? 
&devGroupBindSparseInfo : DE_NULL, // const void* pNext; + 0u, // uint32_t waitSemaphoreCount; + DE_NULL, // const VkSemaphore* pWaitSemaphores; + 1u, // uint32_t bufferBindCount; + &sparseBufferMemoryBindInfo, // const VkSparseBufferMemoryBindInfo* pBufferBinds; + 0u, // uint32_t imageOpaqueBindCount; + DE_NULL, // const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + 0u, // uint32_t imageBindCount; + DE_NULL, // const VkSparseImageMemoryBindInfo* pImageBinds; + 0u, // uint32_t signalSemaphoreCount; + DE_NULL, // const VkSemaphore* pSignalSemaphores; }; const Unique fence(createFence(vk, device)); @@ -732,7 +744,7 @@ class SparseBufferTestInstance : public SparseResourcesBaseInstance, Renderer::D { public: SparseBufferTestInstance (Context& context, const TestFlags flags) - : SparseResourcesBaseInstance (context) + : SparseResourcesBaseInstance (context, (flags & TEST_FLAG_ENABLE_DEVICE_GROUPS) != 0) , m_aliased ((flags & TEST_FLAG_ALIASED) != 0) , m_residency ((flags & TEST_FLAG_RESIDENCY) != 0) , m_nonResidentStrict ((flags & TEST_FLAG_NON_RESIDENT_STRICT) != 0) @@ -740,7 +752,14 @@ public: , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM) , m_colorBufferSize (m_renderSize.x() * m_renderSize.y() * tcu::getPixelSize(mapVkFormat(m_colorFormat))) { - const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(m_context.getInstanceInterface(), m_context.getPhysicalDevice()); + { + QueueRequirementsVec requirements; + requirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u)); + requirements.push_back(QueueRequirements(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 1u)); + + createDeviceSupportingQueues(requirements); + } + const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(m_context.getInstanceInterface(), getPhysicalDevice()); if (!features.sparseBinding) TCU_THROW(NotSupportedError, "Missing feature: sparseBinding"); @@ -754,14 +773,6 @@ public: if (m_nonResidentStrict && !m_context.getDeviceProperties().sparseProperties.residencyNonResidentStrict) TCU_THROW(NotSupportedError, "Missing sparse property: residencyNonResidentStrict"); - { - QueueRequirementsVec requirements; - requirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u)); - requirements.push_back(QueueRequirements(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 1u)); - - createDeviceSupportingQueues(requirements); - } - const DeviceInterface& vk = getDeviceInterface(); m_sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0u); m_universalQueue = getQueue(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 0u); @@ -809,16 +820,18 @@ protected: void draw (const VkPrimitiveTopology topology, const VkDescriptorSetLayout descriptorSetLayout = DE_NULL, - Renderer::SpecializationMap specMap = Renderer::SpecializationMap()) + Renderer::SpecializationMap specMap = Renderer::SpecializationMap(), + bool useDeviceGroups = false, + deUint32 deviceID = 0) { const UniquePtr renderer(new Renderer( getDeviceInterface(), getDevice(), getAllocator(), m_universalQueue.queueFamilyIndex, descriptorSetLayout, m_context.getBinaryCollection(), "vert", "frag", *m_colorBuffer, m_renderSize, m_colorFormat, Vec4(1.0f, 0.0f, 0.0f, 1.0f), topology, specMap)); - renderer->draw(getDeviceInterface(), getDevice(), m_universalQueue.queueHandle, *this); + renderer->draw(getDeviceInterface(), getDevice(), m_universalQueue.queueHandle, *this, useDeviceGroups, deviceID); } - tcu::TestStatus verifyDrawResult (void) const + bool isResultImageCorrect (void) const { invalidateMappedMemoryRange(getDeviceInterface(), 
getDevice(), m_colorBufferAlloc->getMemory(), 0ull, m_colorBufferSize); @@ -827,10 +840,7 @@ protected: m_context.getTestContext().getLog() << tcu::LogImageSet("Result", "Result") << tcu::LogImage("color0", "", resultImage) << tcu::TestLog::EndImageSet; - if (imageHasErrorPixels(resultImage)) - return tcu::TestStatus::fail("Some buffer values were incorrect"); - else - return tcu::TestStatus::pass("Pass"); + return !imageHasErrorPixels(resultImage); } const bool m_aliased; @@ -958,180 +968,195 @@ public: MovePtr sparseAllocation; Move sparseBuffer; Move sparseBufferAliased; + bool setupDescriptors = true; - // Set up the sparse buffer + // Go through all physical devices + for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++) { - VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); - const VkDeviceSize minChunkSize = 512u; // make sure the smallest allocation is at least this big - deUint32 numMaxChunks = 0u; + const deUint32 firstDeviceID = physDevID; + const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices; - // Check how many chunks we can allocate given the alignment and size requirements of UBOs + // Set up the sparse buffer { - const UniquePtr minAllocation(SparseAllocationBuilder() - .addMemoryBind() - .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize)); - - numMaxChunks = deMaxu32(static_cast(m_context.getDeviceProperties().limits.maxUniformBufferRange / minAllocation->resourceSize), 1u); - } + VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); + const VkDeviceSize minChunkSize = 512u; // make sure the smallest allocation is at least this big + deUint32 numMaxChunks = 0u; - if (numMaxChunks < 4) - { - sparseAllocation = SparseAllocationBuilder() - .addMemoryBind() - .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize); - } - else - { - // Try to use a non-trivial memory allocation scheme to make it different from a non-sparse binding - SparseAllocationBuilder builder; - builder.addMemoryBind(); + // Check how many chunks we can allocate given the alignment and size requirements of UBOs + { + const UniquePtr minAllocation(SparseAllocationBuilder() + .addMemoryBind() + .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize)); - if (m_residency) - builder.addResourceHole(); + numMaxChunks = deMaxu32(static_cast(m_context.getDeviceProperties().limits.maxUniformBufferRange / minAllocation->resourceSize), 1u); + } - builder - .addMemoryAllocation() - .addMemoryHole() - .addMemoryBind(); + if (numMaxChunks < 4) + { + sparseAllocation = SparseAllocationBuilder() + .addMemoryBind() + .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize); + } + else + { + // Try to use a non-trivial memory allocation scheme to make it different from a non-sparse binding + SparseAllocationBuilder builder; + builder.addMemoryBind(); - if (m_aliased) - builder.addAliasedMemoryBind(0u, 0u); + if (m_residency) + builder.addResourceHole(); - sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize); - DE_ASSERT(sparseAllocation->resourceSize <= m_context.getDeviceProperties().limits.maxUniformBufferRange); - } + builder + .addMemoryAllocation() + .addMemoryHole() + .addMemoryBind(); - // Create the buffer - referenceBufferCreateInfo.size = sparseAllocation->resourceSize; - sparseBuffer = makeBuffer(vk, 
getDevice(), referenceBufferCreateInfo); - bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBuffer, *sparseAllocation); + if (m_aliased) + builder.addAliasedMemoryBind(0u, 0u); - if (m_aliased) - { - sparseBufferAliased = makeBuffer(vk, getDevice(), referenceBufferCreateInfo); - bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBufferAliased, *sparseAllocation); - } - } + sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize); + DE_ASSERT(sparseAllocation->resourceSize <= m_context.getDeviceProperties().limits.maxUniformBufferRange); + } - // Set uniform data - { - const bool hasAliasedChunk = (m_aliased && sparseAllocation->memoryBinds.size() > 1u); - const VkDeviceSize chunkSize = sparseAllocation->resourceSize / sparseAllocation->numResourceChunks; - const VkDeviceSize stagingBufferSize = sparseAllocation->resourceSize - (hasAliasedChunk ? chunkSize : 0); - const deUint32 numBufferEntries = static_cast(stagingBufferSize / sizeof(IVec4)); + // Create the buffer + referenceBufferCreateInfo.size = sparseAllocation->resourceSize; + sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo); + bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBuffer, *sparseAllocation, usingDeviceGroups(), firstDeviceID, secondDeviceID); - const Unique stagingBuffer (makeBuffer(vk, getDevice(), makeBufferCreateInfo(stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT))); - const UniquePtr stagingBufferAlloc (bindBuffer(vk, getDevice(), getAllocator(), *stagingBuffer, MemoryRequirement::HostVisible)); + if (m_aliased) + { + sparseBufferAliased = makeBuffer(vk, getDevice(), referenceBufferCreateInfo); + bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBufferAliased, *sparseAllocation, usingDeviceGroups(), firstDeviceID, secondDeviceID); + } + } + // Set uniform data { - // If aliased chunk is used, the staging buffer is smaller than the sparse buffer and we don't overwrite the last chunk - IVec4* const pData = static_cast(stagingBufferAlloc->getHostPtr()); - for (deUint32 i = 0; i < numBufferEntries; ++i) - pData[i] = IVec4(3*i ^ 127, 0, 0, 0); + const bool hasAliasedChunk = (m_aliased && sparseAllocation->memoryBinds.size() > 1u); + const VkDeviceSize chunkSize = sparseAllocation->resourceSize / sparseAllocation->numResourceChunks; + const VkDeviceSize stagingBufferSize = sparseAllocation->resourceSize - (hasAliasedChunk ? 
chunkSize : 0); + const deUint32 numBufferEntries = static_cast(stagingBufferSize / sizeof(IVec4)); - flushMappedMemoryRange(vk, getDevice(), stagingBufferAlloc->getMemory(), stagingBufferAlloc->getOffset(), stagingBufferSize); + const Unique stagingBuffer (makeBuffer(vk, getDevice(), makeBufferCreateInfo(stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT))); + const UniquePtr stagingBufferAlloc (bindBuffer(vk, getDevice(), getAllocator(), *stagingBuffer, MemoryRequirement::HostVisible)); - const VkBufferCopy copyRegion = { - 0ull, // VkDeviceSize srcOffset; - 0ull, // VkDeviceSize dstOffset; - stagingBufferSize, // VkDeviceSize size; - }; - - const Unique cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex)); - const Unique cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY)); - - beginCommandBuffer (vk, *cmdBuffer); - vk.cmdCopyBuffer (*cmdBuffer, *stagingBuffer, *sparseBuffer, 1u, ©Region); - endCommandBuffer (vk, *cmdBuffer); - - submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer); - // Once the fence is signaled, the write is also available to the aliasing buffer. + // If aliased chunk is used, the staging buffer is smaller than the sparse buffer and we don't overwrite the last chunk + IVec4* const pData = static_cast(stagingBufferAlloc->getHostPtr()); + for (deUint32 i = 0; i < numBufferEntries; ++i) + pData[i] = IVec4(3*i ^ 127, 0, 0, 0); + + flushMappedMemoryRange(vk, getDevice(), stagingBufferAlloc->getMemory(), stagingBufferAlloc->getOffset(), stagingBufferSize); + + const VkBufferCopy copyRegion = + { + 0ull, // VkDeviceSize srcOffset; + 0ull, // VkDeviceSize dstOffset; + stagingBufferSize, // VkDeviceSize size; + }; + + const Unique cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex)); + const Unique cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY)); + + beginCommandBuffer (vk, *cmdBuffer); + vk.cmdCopyBuffer (*cmdBuffer, *stagingBuffer, *sparseBuffer, 1u, ©Region); + endCommandBuffer (vk, *cmdBuffer); + + submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer, 0u, DE_NULL, DE_NULL, 0, DE_NULL, usingDeviceGroups(), firstDeviceID); + // Once the fence is signaled, the write is also available to the aliasing buffer. + } } - } - // Make sure that we don't try to access a larger range than is allowed. This only applies to a single chunk case. - const deUint32 maxBufferRange = deMinu32(static_cast(sparseAllocation->resourceSize), m_context.getDeviceProperties().limits.maxUniformBufferRange); + // Make sure that we don't try to access a larger range than is allowed. This only applies to a single chunk case. 
+ const deUint32 maxBufferRange = deMinu32(static_cast(sparseAllocation->resourceSize), m_context.getDeviceProperties().limits.maxUniformBufferRange); - // Descriptor sets - { - m_descriptorSetLayout = DescriptorSetLayoutBuilder() - .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_FRAGMENT_BIT) - .build(vk, getDevice()); + // Descriptor sets + { + // Setup only once + if (setupDescriptors) + { + m_descriptorSetLayout = DescriptorSetLayoutBuilder() + .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_FRAGMENT_BIT) + .build(vk, getDevice()); - m_descriptorPool = DescriptorPoolBuilder() - .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) - .build(vk, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u); + m_descriptorPool = DescriptorPoolBuilder() + .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) + .build(vk, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u); - m_descriptorSet = makeDescriptorSet(vk, getDevice(), *m_descriptorPool, *m_descriptorSetLayout); + m_descriptorSet = makeDescriptorSet(vk, getDevice(), *m_descriptorPool, *m_descriptorSetLayout); + setupDescriptors = false; + } - const VkBuffer buffer = (m_aliased ? *sparseBufferAliased : *sparseBuffer); - const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(buffer, 0ull, maxBufferRange); + const VkBuffer buffer = (m_aliased ? *sparseBufferAliased : *sparseBuffer); + const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(buffer, 0ull, maxBufferRange); - DescriptorSetUpdateBuilder() - .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &sparseBufferInfo) - .update(vk, getDevice()); - } + DescriptorSetUpdateBuilder() + .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &sparseBufferInfo) + .update(vk, getDevice()); + } - // Vertex data - { - const Vec4 vertexData[] = + // Vertex data { - Vec4(-1.0f, -1.0f, 0.0f, 1.0f), - Vec4(-1.0f, 1.0f, 0.0f, 1.0f), - Vec4( 1.0f, -1.0f, 0.0f, 1.0f), - Vec4( 1.0f, 1.0f, 0.0f, 1.0f), - }; + const Vec4 vertexData[] = + { + Vec4(-1.0f, -1.0f, 0.0f, 1.0f), + Vec4(-1.0f, 1.0f, 0.0f, 1.0f), + Vec4( 1.0f, -1.0f, 0.0f, 1.0f), + Vec4( 1.0f, 1.0f, 0.0f, 1.0f), + }; - const VkDeviceSize vertexBufferSize = sizeof(vertexData); + const VkDeviceSize vertexBufferSize = sizeof(vertexData); - m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT)); - m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible); + m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT)); + m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible); - deMemcpy(m_vertexBufferAlloc->getHostPtr(), &vertexData[0], vertexBufferSize); - flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize); - } + deMemcpy(m_vertexBufferAlloc->getHostPtr(), &vertexData[0], vertexBufferSize); + flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize); + } - // Draw - { - std::vector specializationData; + // Draw { - const deUint32 numBufferEntries = maxBufferRange / static_cast(sizeof(IVec4)); - const deUint32 numEntriesPerChunk = numBufferEntries / 
sparseAllocation->numResourceChunks; + std::vector specializationData; + { + const deUint32 numBufferEntries = maxBufferRange / static_cast(sizeof(IVec4)); + const deUint32 numEntriesPerChunk = numBufferEntries / sparseAllocation->numResourceChunks; - specializationData.push_back(numBufferEntries); - specializationData.push_back(numEntriesPerChunk); - } + specializationData.push_back(numBufferEntries); + specializationData.push_back(numEntriesPerChunk); + } - const VkSpecializationMapEntry specMapEntries[] = - { + const VkSpecializationMapEntry specMapEntries[] = { - 1u, // uint32_t constantID; - 0u, // uint32_t offset; - sizeof(deInt32), // size_t size; - }, + { + 1u, // uint32_t constantID; + 0u, // uint32_t offset; + sizeof(deInt32), // size_t size; + }, + { + 2u, // uint32_t constantID; + sizeof(deInt32), // uint32_t offset; + sizeof(deInt32), // size_t size; + }, + }; + + const VkSpecializationInfo specInfo = { - 2u, // uint32_t constantID; - sizeof(deInt32), // uint32_t offset; - sizeof(deInt32), // size_t size; - }, - }; + DE_LENGTH_OF_ARRAY(specMapEntries), // uint32_t mapEntryCount; + specMapEntries, // const VkSpecializationMapEntry* pMapEntries; + sizeInBytes(specializationData), // size_t dataSize; + getDataOrNullptr(specializationData), // const void* pData; + }; - const VkSpecializationInfo specInfo = - { - DE_LENGTH_OF_ARRAY(specMapEntries), // uint32_t mapEntryCount; - specMapEntries, // const VkSpecializationMapEntry* pMapEntries; - sizeInBytes(specializationData), // size_t dataSize; - getDataOrNullptr(specializationData), // const void* pData; - }; + Renderer::SpecializationMap specMap; + specMap[VK_SHADER_STAGE_FRAGMENT_BIT] = &specInfo; - Renderer::SpecializationMap specMap; - specMap[VK_SHADER_STAGE_FRAGMENT_BIT] = &specInfo; + draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, *m_descriptorSetLayout, specMap, usingDeviceGroups(), firstDeviceID); + } - draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, *m_descriptorSetLayout, specMap); + if(!isResultImageCorrect()) + return tcu::TestStatus::fail("Some buffer values were incorrect"); } - - return verifyDrawResult(); + return tcu::TestStatus::pass("Pass"); } private: @@ -1246,61 +1271,74 @@ public: referenceBufferCreateInfo.size = m_sparseAllocation->resourceSize; m_sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo); - // Bind the memory - bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *m_sparseBuffer, *m_sparseAllocation); m_perDrawBufferOffset = m_sparseAllocation->resourceSize / m_sparseAllocation->numResourceChunks; m_stagingBufferSize = 2 * m_perDrawBufferOffset; m_stagingBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(m_stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT)); m_stagingBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_stagingBuffer, MemoryRequirement::HostVisible); + + } tcu::TestStatus iterate (void) { - initializeBuffers(); - const DeviceInterface& vk = getDeviceInterface(); - // Upload to the sparse buffer + for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++) { - flushMappedMemoryRange(vk, getDevice(), m_stagingBufferAlloc->getMemory(), m_stagingBufferAlloc->getOffset(), m_stagingBufferSize); + const deUint32 firstDeviceID = physDevID; + const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices; - VkDeviceSize firstChunkOffset = 0ull; - VkDeviceSize secondChunkOffset = m_perDrawBufferOffset; + // Bind the memory + bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *m_sparseBuffer, *m_sparseAllocation, 
usingDeviceGroups(), firstDeviceID, secondDeviceID); - if (m_residency) - secondChunkOffset += m_perDrawBufferOffset; + initializeBuffers(); - if (m_aliased) - firstChunkOffset = secondChunkOffset + m_perDrawBufferOffset; - - const VkBufferCopy copyRegions[] = + // Upload to the sparse buffer { + flushMappedMemoryRange(vk, getDevice(), m_stagingBufferAlloc->getMemory(), m_stagingBufferAlloc->getOffset(), m_stagingBufferSize); + + VkDeviceSize firstChunkOffset = 0ull; + VkDeviceSize secondChunkOffset = m_perDrawBufferOffset; + + if (m_residency) + secondChunkOffset += m_perDrawBufferOffset; + + if (m_aliased) + firstChunkOffset = secondChunkOffset + m_perDrawBufferOffset; + + const VkBufferCopy copyRegions[] = { - 0ull, // VkDeviceSize srcOffset; - firstChunkOffset, // VkDeviceSize dstOffset; - m_perDrawBufferOffset, // VkDeviceSize size; - }, - { - m_perDrawBufferOffset, // VkDeviceSize srcOffset; - secondChunkOffset, // VkDeviceSize dstOffset; - m_perDrawBufferOffset, // VkDeviceSize size; - }, - }; + { + 0ull, // VkDeviceSize srcOffset; + firstChunkOffset, // VkDeviceSize dstOffset; + m_perDrawBufferOffset, // VkDeviceSize size; + }, + { + m_perDrawBufferOffset, // VkDeviceSize srcOffset; + secondChunkOffset, // VkDeviceSize dstOffset; + m_perDrawBufferOffset, // VkDeviceSize size; + }, + }; + + const Unique cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex)); + const Unique cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY)); - const Unique cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex)); - const Unique cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY)); + beginCommandBuffer (vk, *cmdBuffer); + vk.cmdCopyBuffer (*cmdBuffer, *m_stagingBuffer, *m_sparseBuffer, DE_LENGTH_OF_ARRAY(copyRegions), copyRegions); + endCommandBuffer (vk, *cmdBuffer); - beginCommandBuffer (vk, *cmdBuffer); - vk.cmdCopyBuffer (*cmdBuffer, *m_stagingBuffer, *m_sparseBuffer, DE_LENGTH_OF_ARRAY(copyRegions), copyRegions); - endCommandBuffer (vk, *cmdBuffer); + submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer, 0u, DE_NULL, DE_NULL, 0, DE_NULL, usingDeviceGroups(), firstDeviceID); + } - submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer); - } - draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST); + Renderer::SpecializationMap specMap; + draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, DE_NULL, specMap, usingDeviceGroups(), firstDeviceID); - return verifyDrawResult(); + if(!isResultImageCorrect()) + return tcu::TestStatus::fail("Some buffer values were incorrect"); + } + return tcu::TestStatus::pass("Pass"); } protected: @@ -1523,23 +1561,28 @@ void populateTestGroup (tcu::TestCaseGroup* parentGroup) TestFlags flags; } groups[] = { - { "sparse_binding", 0u }, - { "sparse_binding_aliased", TEST_FLAG_ALIASED, }, - { "sparse_residency", TEST_FLAG_RESIDENCY, }, - { "sparse_residency_aliased", TEST_FLAG_RESIDENCY | TEST_FLAG_ALIASED, }, - { "sparse_residency_non_resident_strict", TEST_FLAG_RESIDENCY | TEST_FLAG_NON_RESIDENT_STRICT, }, + { "sparse_binding", 0u, }, + { "sparse_binding_aliased", TEST_FLAG_ALIASED, }, + { "sparse_residency", TEST_FLAG_RESIDENCY, }, + { "sparse_residency_aliased", TEST_FLAG_RESIDENCY | TEST_FLAG_ALIASED, }, + { "sparse_residency_non_resident_strict", TEST_FLAG_RESIDENCY | TEST_FLAG_NON_RESIDENT_STRICT,}, }; const int numGroupsIncludingNonResidentStrict = DE_LENGTH_OF_ARRAY(groups); const int numGroupsDefaultList = 
numGroupsIncludingNonResidentStrict - 1; + std::string devGroupPrefix = "device_group_"; // Transfer { MovePtr group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "transfer", "")); { MovePtr subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding", "")); - addBufferSparseBindingTests(subGroup.get()); + addBufferSparseBindingTests(subGroup.get(), false); group->addChild(subGroup.release()); + + MovePtr subGroupDeviceGroups(new tcu::TestCaseGroup(parentGroup->getTestContext(), "device_group_sparse_binding", "")); + addBufferSparseBindingTests(subGroupDeviceGroups.get(), true); + group->addChild(subGroupDeviceGroups.release()); } parentGroup->addChild(group.release()); } @@ -1549,13 +1592,21 @@ void populateTestGroup (tcu::TestCaseGroup* parentGroup) MovePtr group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ssbo", "")); { MovePtr subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding_aliased", "")); - addBufferSparseMemoryAliasingTests(subGroup.get()); + addBufferSparseMemoryAliasingTests(subGroup.get(), false); group->addChild(subGroup.release()); + + MovePtr subGroupDeviceGroups(new tcu::TestCaseGroup(parentGroup->getTestContext(), "device_group_sparse_binding_aliased", "")); + addBufferSparseMemoryAliasingTests(subGroupDeviceGroups.get(), true); + group->addChild(subGroupDeviceGroups.release()); } { MovePtr subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_residency", "")); - addBufferSparseResidencyTests(subGroup.get()); + addBufferSparseResidencyTests(subGroup.get(), false); group->addChild(subGroup.release()); + + MovePtr subGroupDeviceGroups(new tcu::TestCaseGroup(parentGroup->getTestContext(), "device_group_sparse_residency", "")); + addBufferSparseResidencyTests(subGroupDeviceGroups.get(), true); + group->addChild(subGroupDeviceGroups.release()); } parentGroup->addChild(group.release()); } @@ -1565,8 +1616,13 @@ void populateTestGroup (tcu::TestCaseGroup* parentGroup) MovePtr group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ubo", "")); for (int groupNdx = 0u; groupNdx < numGroupsIncludingNonResidentStrict; ++groupNdx) + { group->addChild(createTestInstanceWithPrograms(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawWithUBO, groups[groupNdx].flags)); - + } + for (int groupNdx = 0u; groupNdx < numGroupsIncludingNonResidentStrict; ++groupNdx) + { + group->addChild(createTestInstanceWithPrograms(group->getTestContext(), (devGroupPrefix + groups[groupNdx].name).c_str(), "", initProgramsDrawWithUBO, groups[groupNdx].flags | TEST_FLAG_ENABLE_DEVICE_GROUPS)); + } parentGroup->addChild(group.release()); } @@ -1575,7 +1631,13 @@ void populateTestGroup (tcu::TestCaseGroup* parentGroup) MovePtr group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "vertex_buffer", "")); for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx) + { group->addChild(createTestInstanceWithPrograms(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags)); + } + for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx) + { + group->addChild(createTestInstanceWithPrograms(group->getTestContext(), (devGroupPrefix + groups[groupNdx].name).c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags | TEST_FLAG_ENABLE_DEVICE_GROUPS)); + } parentGroup->addChild(group.release()); } @@ -1585,7 +1647,13 @@ void populateTestGroup (tcu::TestCaseGroup* parentGroup) MovePtr group(new 
tcu::TestCaseGroup(parentGroup->getTestContext(), "index_buffer", "")); for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx) + { group->addChild(createTestInstanceWithPrograms(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags)); + } + for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx) + { + group->addChild(createTestInstanceWithPrograms(group->getTestContext(), (devGroupPrefix + groups[groupNdx].name).c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags | TEST_FLAG_ENABLE_DEVICE_GROUPS)); + } parentGroup->addChild(group.release()); } @@ -1595,7 +1663,13 @@ void populateTestGroup (tcu::TestCaseGroup* parentGroup) MovePtr group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "indirect_buffer", "")); for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx) + { group->addChild(createTestInstanceWithPrograms(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags)); + } + for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx) + { + group->addChild(createTestInstanceWithPrograms(group->getTestContext(), (devGroupPrefix + groups[groupNdx].name).c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags | TEST_FLAG_ENABLE_DEVICE_GROUPS)); + } parentGroup->addChild(group.release()); } diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageMemoryAliasing.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageMemoryAliasing.cpp index 1dcfbbd..acd6489 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageMemoryAliasing.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageMemoryAliasing.cpp @@ -107,13 +107,15 @@ public: const ImageType imageType, const tcu::UVec3& imageSize, const tcu::TextureFormat& format, - const glu::GLSLVersion glslVersion); + const glu::GLSLVersion glslVersion, + const bool useDeviceGroups); void initPrograms (SourceCollections& sourceCollections) const; TestInstance* createInstance (Context& context) const; private: + const bool m_useDeviceGroups; const ImageType m_imageType; const tcu::UVec3 m_imageSize; const tcu::TextureFormat m_format; @@ -126,8 +128,10 @@ ImageSparseMemoryAliasingCase::ImageSparseMemoryAliasingCase (tcu::TestContext& const ImageType imageType, const tcu::UVec3& imageSize, const tcu::TextureFormat& format, - const glu::GLSLVersion glslVersion) + const glu::GLSLVersion glslVersion, + const bool useDeviceGroups) : TestCase (testCtx, name, description) + , m_useDeviceGroups (useDeviceGroups) , m_imageType (imageType) , m_imageSize (imageSize) , m_format (format) @@ -141,11 +145,13 @@ public: ImageSparseMemoryAliasingInstance (Context& context, const ImageType imageType, const tcu::UVec3& imageSize, - const tcu::TextureFormat& format); + const tcu::TextureFormat& format, + const bool useDeviceGroups); tcu::TestStatus iterate (void); private: + const bool m_useDeviceGroups; const ImageType m_imageType; const tcu::UVec3 m_imageSize; const tcu::TextureFormat m_format; @@ -154,8 +160,10 @@ private: ImageSparseMemoryAliasingInstance::ImageSparseMemoryAliasingInstance (Context& context, const ImageType imageType, const tcu::UVec3& imageSize, - const tcu::TextureFormat& format) - : SparseResourcesBaseInstance (context) + const tcu::TextureFormat& format, + const bool useDeviceGroups) + : SparseResourcesBaseInstance (context, useDeviceGroups) + , m_useDeviceGroups 
(useDeviceGroups) , m_imageType (imageType) , m_imageSize (imageSize) , m_format (format) @@ -165,7 +173,17 @@ ImageSparseMemoryAliasingInstance::ImageSparseMemoryAliasingInstance (Context& tcu::TestStatus ImageSparseMemoryAliasingInstance::iterate (void) { const InstanceInterface& instance = m_context.getInstanceInterface(); - const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice(); + + { + // Create logical device supporting both sparse and compute queues + QueueRequirementsVec queueRequirements; + queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u)); + queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u)); + + createDeviceSupportingQueues(queueRequirements); + } + + const VkPhysicalDevice physicalDevice = getPhysicalDevice(); const tcu::UVec3 maxWorkGroupSize = tcu::UVec3(128u, 128u, 64u); const tcu::UVec3 maxWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u); const deUint32 maxWorkGroupInvocations = 128u; @@ -173,6 +191,10 @@ tcu::TestStatus ImageSparseMemoryAliasingInstance::iterate (void) VkSparseImageMemoryRequirements aspectRequirements; std::vector deviceMemUniquePtrVec; + //vsk checking these flags should be after creating m_imageType + //getting queues should be outside the loop + //see these in all image files + // Check if image size does not exceed device limits if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize)) TCU_THROW(NotSupportedError, "Image size not supported for device"); @@ -185,447 +207,454 @@ tcu::TestStatus ImageSparseMemoryAliasingInstance::iterate (void) if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType)) TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported"); - imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; - imageSparseInfo.pNext = DE_NULL; - imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | - VK_IMAGE_CREATE_SPARSE_ALIASED_BIT | - VK_IMAGE_CREATE_SPARSE_BINDING_BIT; - imageSparseInfo.imageType = mapImageType(m_imageType); - imageSparseInfo.format = mapTextureFormat(m_format); - imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize)); - imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize); - imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT; - imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL; - imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; - imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | - VK_IMAGE_USAGE_TRANSFER_SRC_BIT | - VK_IMAGE_USAGE_STORAGE_BIT; - imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; - imageSparseInfo.queueFamilyIndexCount = 0u; - imageSparseInfo.pQueueFamilyIndices = DE_NULL; - - if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY) - imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; + const DeviceInterface& deviceInterface = getDeviceInterface(); + const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0); + const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0); + // Go through all physical devices + for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++) { - // Assign maximum allowed mipmap levels to image - VkImageFormatProperties imageFormatProperties; - instance.getPhysicalDeviceImageFormatProperties(physicalDevice, - imageSparseInfo.format, - imageSparseInfo.imageType, - imageSparseInfo.tiling, - imageSparseInfo.usage, - imageSparseInfo.flags, - &imageFormatProperties); - - imageSparseInfo.mipLevels = 
getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
-	}
+	const deUint32 firstDeviceID = physDevID;
+	const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
+
+	imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+	imageSparseInfo.pNext = DE_NULL;
+	imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
+		VK_IMAGE_CREATE_SPARSE_ALIASED_BIT |
+		VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
+	imageSparseInfo.imageType = mapImageType(m_imageType);
+	imageSparseInfo.format = mapTextureFormat(m_format);
+	imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
+	imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
+	imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+	imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+	imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+	imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+		VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+		VK_IMAGE_USAGE_STORAGE_BIT;
+	imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+	imageSparseInfo.queueFamilyIndexCount = 0u;
+	imageSparseInfo.pQueueFamilyIndices = DE_NULL;
+
+	if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
+		imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
-	// Check if device supports sparse operations for image format
-	if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
-		TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
+	{
+		// Assign maximum allowed mipmap levels to image
+		VkImageFormatProperties imageFormatProperties;
+		instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
+			imageSparseInfo.format,
+			imageSparseInfo.imageType,
+			imageSparseInfo.tiling,
+			imageSparseInfo.usage,
+			imageSparseInfo.flags,
+			&imageFormatProperties);
+
+		imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
+	}
-	{
-		// Create logical device supporting both sparse and compute queues
-		QueueRequirementsVec queueRequirements;
-		queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
-		queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
+	// Check if device supports sparse operations for image format
+	if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
+		TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
-		createDeviceSupportingQueues(queueRequirements);
-	}
+	// Create sparse image
+	const Unique<VkImage> imageRead (createImage(deviceInterface, getDevice(), &imageSparseInfo));
+	const Unique<VkImage> imageWrite(createImage(deviceInterface, getDevice(), &imageSparseInfo));
-	const DeviceInterface& deviceInterface = getDeviceInterface();
-	const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
-	const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
+	// Create semaphores to synchronize sparse binding operations with other operations on the sparse images
+	const Unique<VkSemaphore> memoryBindSemaphoreTransfer(createSemaphore(deviceInterface, getDevice()));
+	const Unique<VkSemaphore> memoryBindSemaphoreCompute (createSemaphore(deviceInterface, getDevice()));
-	// Create sparse image
-	const Unique<VkImage> imageRead (createImage(deviceInterface, getDevice(), &imageSparseInfo));
-	const Unique<VkImage> imageWrite(createImage(deviceInterface, getDevice(), &imageSparseInfo));
+	const VkSemaphore imageMemoryBindSemaphores[] = { memoryBindSemaphoreTransfer.get(), memoryBindSemaphoreCompute.get() };
-	// Create semaphores to synchronize sparse binding operations with other operations on the sparse images
-	const Unique<VkSemaphore> memoryBindSemaphoreTransfer(createSemaphore(deviceInterface, getDevice()));
-	const Unique<VkSemaphore> memoryBindSemaphoreCompute (createSemaphore(deviceInterface, getDevice()));
+	{
+		std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
+		std::vector<VkSparseMemoryBind> imageReadMipTailBinds;
+		std::vector<VkSparseMemoryBind> imageWriteMipTailBinds;
-	const VkSemaphore imageMemoryBindSemaphores[] = { memoryBindSemaphoreTransfer.get(), memoryBindSemaphoreCompute.get() };
+		// Get sparse image general memory requirements
+		const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageRead);
-	{
-		std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
-		std::vector<VkSparseMemoryBind> imageReadMipTailBinds;
-		std::vector<VkSparseMemoryBind> imageWriteMipTailBinds;
+		// Check if required image memory size does not exceed device limits
+		if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
+			TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
-		// Get sparse image general memory requirements
-		const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageRead);
+		DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
-		// Check if required image memory size does not exceed device limits
-		if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
-			TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
+		// Get sparse image sparse memory requirements
+		const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageRead);
-		DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
+		DE_ASSERT(sparseMemoryRequirements.size() != 0);
-		// Get sparse image sparse memory requirements
-		const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageRead);
+		const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
-		DE_ASSERT(sparseMemoryRequirements.size() != 0);
+		if (colorAspectIndex == NO_MATCH_FOUND)
+			TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
-		const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+		aspectRequirements = sparseMemoryRequirements[colorAspectIndex];
-		if (colorAspectIndex == NO_MATCH_FOUND)
-			TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
+		const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;
+		const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
-		aspectRequirements = sparseMemoryRequirements[colorAspectIndex];
+		DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
-		const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;
-		const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
+		const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);
-		DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
+		if (memoryType == NO_MATCH_FOUND)
+			return tcu::TestStatus::fail("No matching memory type found");
-		const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);
+		// Bind memory for each layer
+		for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
+		{
+			for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
+			{
+				const VkExtent3D mipExtent = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx);
+				const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity);
+				const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
+				const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx };
-		if (memoryType == NO_MATCH_FOUND)
-			return tcu::TestStatus::fail("No matching memory type found");
+				const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
+					imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);
-		// Bind memory for each layer
-		for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
-		{
-			for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
-			{
-				const VkExtent3D mipExtent = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx);
-				const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity);
-				const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
-				const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx };
+				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+				imageResidencyMemoryBinds.push_back(imageMemoryBind);
+			}
+
+			if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
+			{
+				const VkSparseMemoryBind imageReadMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+					aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
+
+				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageReadMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
-				const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
-					imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);
+				imageReadMipTailBinds.push_back(imageReadMipTailMemoryBind);
-				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+				const VkSparseMemoryBind imageWriteMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+					aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
-				imageResidencyMemoryBinds.push_back(imageMemoryBind);
+				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageWriteMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+				imageWriteMipTailBinds.push_back(imageWriteMipTailMemoryBind);
+			}
		}
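[Editor's note: the per-layer loop above sizes each mip level's backing memory from the number of sparse blocks the level spans. A minimal sketch of that computation, on the assumption that alignedDivide is a per-component ceiling division of the mip extent by the sparse block granularity (names below are illustrative, not from this patch):

	// ceil(extent / granularity) per axis; one alignment-sized allocation per block
	inline deUint32 ceilDiv (const deUint32 value, const deUint32 divisor)
	{
		return (value + divisor - 1u) / divisor;
	}
	// e.g. a 137x137x1 level with 128x128x1 granularity spans 2*2*1 = 4 blocks,
	// so the bind above allocates alignment * 4 bytes for that subresource.]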
-		if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
+		if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
		{
			const VkSparseMemoryBind imageReadMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
-				aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
+				aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
			deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageReadMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
			imageReadMipTailBinds.push_back(imageReadMipTailMemoryBind);
			const VkSparseMemoryBind imageWriteMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
-				aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
+				aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
			deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageWriteMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
			imageWriteMipTailBinds.push_back(imageWriteMipTailMemoryBind);
		}
-		}
-
-		if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
-		{
-			const VkSparseMemoryBind imageReadMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
-				aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
-			deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageReadMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+		const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+		{
+			VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR,	//VkStructureType sType;
+			DE_NULL,						//const void* pNext;
+			firstDeviceID,						//deUint32 resourceDeviceIndex;
+			secondDeviceID,						//deUint32 memoryDeviceIndex;
+		};
-			imageReadMipTailBinds.push_back(imageReadMipTailMemoryBind);
+		VkBindSparseInfo bindSparseInfo =
+		{
+			VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,			//VkStructureType sType;
+			m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL,	//const void* pNext;
+			0u,							//deUint32 waitSemaphoreCount;
+			DE_NULL,						//const VkSemaphore* pWaitSemaphores;
+			0u,							//deUint32 bufferBindCount;
+			DE_NULL,						//const VkSparseBufferMemoryBindInfo* pBufferBinds;
+			0u,							//deUint32 imageOpaqueBindCount;
+			DE_NULL,						//const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+			0u,							//deUint32 imageBindCount;
+			DE_NULL,						//const VkSparseImageMemoryBindInfo* pImageBinds;
+			2u,							//deUint32 signalSemaphoreCount;
+			imageMemoryBindSemaphores				//const VkSemaphore* pSignalSemaphores;
+		};
+
+		VkSparseImageMemoryBindInfo imageResidencyBindInfo[2];
+		VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo[2];
+
+		if (imageResidencyMemoryBinds.size() > 0)
+		{
+			imageResidencyBindInfo[0].image = *imageRead;
+			imageResidencyBindInfo[0].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
+			imageResidencyBindInfo[0].pBinds = &imageResidencyMemoryBinds[0];
-			const VkSparseMemoryBind imageWriteMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
-				aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
+			imageResidencyBindInfo[1].image = *imageWrite;
+			imageResidencyBindInfo[1].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
+			imageResidencyBindInfo[1].pBinds = &imageResidencyMemoryBinds[0];
-			deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageWriteMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+			bindSparseInfo.imageBindCount = 2u;
+			bindSparseInfo.pImageBinds = imageResidencyBindInfo;
+		}
-			imageWriteMipTailBinds.push_back(imageWriteMipTailMemoryBind);
-		}
+		if (imageReadMipTailBinds.size() > 0)
+		{
+			imageMipTailBindInfo[0].image = *imageRead;
+			imageMipTailBindInfo[0].bindCount = static_cast<deUint32>(imageReadMipTailBinds.size());
+			imageMipTailBindInfo[0].pBinds = &imageReadMipTailBinds[0];
-		VkBindSparseInfo bindSparseInfo =
-		{
-			VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,			//VkStructureType sType;
-			DE_NULL,						//const void* pNext;
-			0u,							//deUint32 waitSemaphoreCount;
-			DE_NULL,						//const VkSemaphore* pWaitSemaphores;
-			0u,							//deUint32 bufferBindCount;
-			DE_NULL,						//const VkSparseBufferMemoryBindInfo* pBufferBinds;
-			0u,							//deUint32 imageOpaqueBindCount;
-			DE_NULL,						//const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
-			0u,							//deUint32 imageBindCount;
-			DE_NULL,						//const VkSparseImageMemoryBindInfo* pImageBinds;
-			2u,							//deUint32 signalSemaphoreCount;
-			imageMemoryBindSemaphores				//const VkSemaphore* pSignalSemaphores;
-		};
-
-		VkSparseImageMemoryBindInfo imageResidencyBindInfo[2];
-		VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo[2];
-
-		if (imageResidencyMemoryBinds.size() > 0)
-		{
-			imageResidencyBindInfo[0].image = *imageRead;
-			imageResidencyBindInfo[0].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
-			imageResidencyBindInfo[0].pBinds = &imageResidencyMemoryBinds[0];
+			imageMipTailBindInfo[1].image = *imageWrite;
+			imageMipTailBindInfo[1].bindCount = static_cast<deUint32>(imageWriteMipTailBinds.size());
+			imageMipTailBindInfo[1].pBinds = &imageWriteMipTailBinds[0];
-			imageResidencyBindInfo[1].image = *imageWrite;
-			imageResidencyBindInfo[1].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
-			imageResidencyBindInfo[1].pBinds = &imageResidencyMemoryBinds[0];
+			bindSparseInfo.imageOpaqueBindCount = 2u;
+			bindSparseInfo.pImageOpaqueBinds = imageMipTailBindInfo;
+		}
-			bindSparseInfo.imageBindCount = 2u;
-			bindSparseInfo.pImageBinds = imageResidencyBindInfo;
+		// Submit sparse bind commands for execution
+		VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
	}
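[Editor's note: VkDeviceGroupBindSparseInfoKHR is the core of the device-group variants: chained into VkBindSparseInfo::pNext, it lets a sparse bind attach memory that lives on one physical device (memoryDeviceIndex) to the resource instance of another (resourceDeviceIndex). A minimal sketch of the chaining pattern, with hypothetical index values:

	VkDeviceGroupBindSparseInfoKHR devGroupInfo = {};
	devGroupInfo.sType               = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR;
	devGroupInfo.resourceDeviceIndex = 0u;  // device whose resource instance is bound
	devGroupInfo.memoryDeviceIndex   = 1u;  // device whose memory instance backs the bind

	VkBindSparseInfo bindInfo = {};
	bindInfo.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
	bindInfo.pNext = &devGroupInfo;  // DE_NULL for the plain single-device tests
	// ...fill the bind arrays and semaphores, then vkQueueBindSparse(queue, 1u, &bindInfo, VK_NULL_HANDLE);

The tests cycle firstDeviceID over every device in the group and pick secondDeviceID as its neighbour, so a two-device group exercises both cross-device directions.]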
-		if (imageReadMipTailBinds.size() > 0)
-		{
-			imageMipTailBindInfo[0].image = *imageRead;
-			imageMipTailBindInfo[0].bindCount = static_cast<deUint32>(imageReadMipTailBinds.size());
-			imageMipTailBindInfo[0].pBinds = &imageReadMipTailBinds[0];
+	// Create command buffer for compute and transfer operations
+	const Unique<VkCommandPool> commandPool (makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+	const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
-			imageMipTailBindInfo[1].image = *imageWrite;
-			imageMipTailBindInfo[1].bindCount = static_cast<deUint32>(imageWriteMipTailBinds.size());
-			imageMipTailBindInfo[1].pBinds = &imageWriteMipTailBinds[0];
+	std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
-			bindSparseInfo.imageOpaqueBindCount = 2u;
-			bindSparseInfo.pImageOpaqueBinds = imageMipTailBindInfo;
+	{
+		deUint32 bufferOffset = 0u;
+		for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
+		{
+			bufferImageCopy[mipLevelNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipLevelNdx), imageSparseInfo.arrayLayers, mipLevelNdx, bufferOffset);
+			bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+		}
	}
-		// Submit sparse bind commands for execution
-		VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
-	}
+	// Start recording commands
+	beginCommandBuffer(deviceInterface, *commandBuffer);
-	// Create command buffer for compute and transfer operations
-	const Unique<VkCommandPool> commandPool (makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
-	const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+	const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+	const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+	const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
+	const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
-	std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
+	std::vector<deUint8> referenceData(imageSizeInBytes);
-	{
-		deUint32 bufferOffset = 0u;
	for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
	{
-		bufferImageCopy[mipLevelNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipLevelNdx), imageSparseInfo.arrayLayers, mipLevelNdx, bufferOffset);
-		bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
-	}
-	}
-
-	// Start recording commands
-	beginCommandBuffer(deviceInterface, *commandBuffer);
+		const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
+		const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
-	const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
-	const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
-	const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
-	const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
+		deMemset(&referenceData[bufferOffset], mipLevelNdx + 1u, mipLevelSizeInBytes);
+	}
-	std::vector<deUint8> referenceData(imageSizeInBytes);
+	deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
-	for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
-	{
-		const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
-		const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
+	flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
-		deMemset(&referenceData[bufferOffset], mipLevelNdx + 1u, mipLevelSizeInBytes);
-	}
-
-	deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
+	{
+		const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
+		(
+			VK_ACCESS_HOST_WRITE_BIT,
+			VK_ACCESS_TRANSFER_READ_BIT,
+			*inputBuffer,
+			0u,
+			imageSizeInBytes
+		);
+
+		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
+	}
-	flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
+	{
+		const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
+		(
+			0u,
+			VK_ACCESS_TRANSFER_WRITE_BIT,
+			VK_IMAGE_LAYOUT_UNDEFINED,
+			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+			sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+			sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+			*imageRead,
+			makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+		);
+
+		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
+	}
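[Editor's note: the two ternaries in the barrier above encode an optional queue family ownership transfer: when the sparse queue and the compute queue come from different families, the barrier names them as source and destination; when they are the same family, both indices collapse to VK_QUEUE_FAMILY_IGNORED and no transfer takes place. The pattern in isolation (sparseFamily/computeFamily are illustrative names):

	const deUint32 srcFamily = (sparseFamily != computeFamily) ? sparseFamily  : VK_QUEUE_FAMILY_IGNORED;
	const deUint32 dstFamily = (sparseFamily != computeFamily) ? computeFamily : VK_QUEUE_FAMILY_IGNORED;]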
-	{
-		const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
-		(
-			VK_ACCESS_HOST_WRITE_BIT,
-			VK_ACCESS_TRANSFER_READ_BIT,
-			*inputBuffer,
-			0u,
-			imageSizeInBytes
-		);
-
-		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
-	}
+	deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
-	{
-		const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
-		(
-			0u,
-			VK_ACCESS_TRANSFER_WRITE_BIT,
-			VK_IMAGE_LAYOUT_UNDEFINED,
-			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-			sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
-			sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
-			*imageRead,
-			makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
-		);
-
-		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
-	}
+	{
+		const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
+		(
+			VK_ACCESS_TRANSFER_WRITE_BIT,
+			VK_ACCESS_TRANSFER_READ_BIT,
+			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+			VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+			*imageRead,
+			makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+		);
+
+		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
+	}
-	deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
+	{
+		const VkImageMemoryBarrier imageSparseShaderStorageBarrier = makeImageMemoryBarrier
+		(
+			0u,
+			VK_ACCESS_SHADER_WRITE_BIT,
+			VK_IMAGE_LAYOUT_UNDEFINED,
+			VK_IMAGE_LAYOUT_GENERAL,
+			*imageWrite,
+			makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+		);
+
+		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseShaderStorageBarrier);
+	}
-	{
-		const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
-		(
-			VK_ACCESS_TRANSFER_WRITE_BIT,
-			VK_ACCESS_TRANSFER_READ_BIT,
-			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-			VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-			*imageRead,
-			makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
-		);
-
-		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
-	}
+	// Create descriptor set layout
+	const Unique<VkDescriptorSetLayout> descriptorSetLayout(
+		DescriptorSetLayoutBuilder()
+		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
+		.build(deviceInterface, getDevice()));
-	{
-		const VkImageMemoryBarrier imageSparseShaderStorageBarrier = makeImageMemoryBarrier
-		(
-			0u,
-			VK_ACCESS_SHADER_WRITE_BIT,
-			VK_IMAGE_LAYOUT_UNDEFINED,
-			VK_IMAGE_LAYOUT_GENERAL,
-			*imageWrite,
-			makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
-		);
-
-		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseShaderStorageBarrier);
-	}
+	Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
-	// Create descriptor set layout
-	const Unique<VkDescriptorSetLayout> descriptorSetLayout(
-		DescriptorSetLayoutBuilder()
-		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
-		.build(deviceInterface, getDevice()));
+	Unique<VkDescriptorPool> descriptorPool(
+		DescriptorPoolBuilder()
+		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, imageSparseInfo.mipLevels)
+		.build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, imageSparseInfo.mipLevels));
-	Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
+	typedef de::SharedPtr< Unique<VkImageView> > SharedVkImageView;
+	std::vector<SharedVkImageView> imageViews;
+	imageViews.resize(imageSparseInfo.mipLevels);
-	Unique<VkDescriptorPool> descriptorPool(
-		DescriptorPoolBuilder()
-		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, imageSparseInfo.mipLevels)
-		.build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, imageSparseInfo.mipLevels));
+	typedef de::SharedPtr< Unique<VkDescriptorSet> > SharedVkDescriptorSet;
+	std::vector<SharedVkDescriptorSet> descriptorSets;
+	descriptorSets.resize(imageSparseInfo.mipLevels);
-	typedef de::SharedPtr< Unique<VkImageView> > SharedVkImageView;
-	std::vector<SharedVkImageView> imageViews;
-	imageViews.resize(imageSparseInfo.mipLevels);
+	typedef de::SharedPtr< Unique<VkPipeline> > SharedVkPipeline;
+	std::vector<SharedVkPipeline> computePipelines;
+	computePipelines.resize(imageSparseInfo.mipLevels);
-	typedef de::SharedPtr< Unique<VkDescriptorSet> > SharedVkDescriptorSet;
-	std::vector<SharedVkDescriptorSet> descriptorSets;
-	descriptorSets.resize(imageSparseInfo.mipLevels);
+	for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
+	{
+		std::ostringstream name;
+		name << "comp" << mipLevelNdx;
-	typedef de::SharedPtr< Unique<VkPipeline> > SharedVkPipeline;
-	std::vector<SharedVkPipeline> computePipelines;
-	computePipelines.resize(imageSparseInfo.mipLevels);
+		// Create and bind compute pipeline
+		Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get(name.str()), DE_NULL));
-	for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
-	{
-		std::ostringstream name;
-		name << "comp" << mipLevelNdx;
+		computePipelines[mipLevelNdx] = makeVkSharedPtr(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+		VkPipeline computePipeline = **computePipelines[mipLevelNdx];
-		// Create and bind compute pipeline
-		Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get(name.str()), DE_NULL));
+		deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline);
-		computePipelines[mipLevelNdx] = makeVkSharedPtr(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
-		VkPipeline computePipeline = **computePipelines[mipLevelNdx];
+		// Create and bind descriptor set
+		descriptorSets[mipLevelNdx] = makeVkSharedPtr(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
+		VkDescriptorSet descriptorSet = **descriptorSets[mipLevelNdx];
-		deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline);
+		// Select which mipmap level to bind
+		const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevelNdx, 1u, 0u, imageSparseInfo.arrayLayers);
-		// Create and bind descriptor set
-		descriptorSets[mipLevelNdx] = makeVkSharedPtr(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
-		VkDescriptorSet descriptorSet = **descriptorSets[mipLevelNdx];
+		imageViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, getDevice(), *imageWrite, mapImageViewType(m_imageType), imageSparseInfo.format, subresourceRange));
+		VkImageView imageView = **imageViews[mipLevelNdx];
-		// Select which mipmap level to bind
-		const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevelNdx, 1u, 0u, imageSparseInfo.arrayLayers);
+		const VkDescriptorImageInfo sparseImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
-		imageViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, getDevice(), *imageWrite, mapImageViewType(m_imageType), imageSparseInfo.format, subresourceRange));
-		VkImageView imageView = **imageViews[mipLevelNdx];
+		DescriptorSetUpdateBuilder()
+		.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo)
+		.update(deviceInterface, getDevice());
-		const VkDescriptorImageInfo sparseImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
+		deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
-		DescriptorSetUpdateBuilder()
-		.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo)
-		.update(deviceInterface, getDevice());
+		const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
+		const deUint32 xWorkGroupSize = std::min(std::min(gridSize.x(), maxWorkGroupSize.x()), maxWorkGroupInvocations);
+		const deUint32 yWorkGroupSize = std::min(std::min(gridSize.y(), maxWorkGroupSize.y()), maxWorkGroupInvocations / xWorkGroupSize);
+		const deUint32 zWorkGroupSize = std::min(std::min(gridSize.z(), maxWorkGroupSize.z()), maxWorkGroupInvocations / (xWorkGroupSize * yWorkGroupSize));
-		deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
+		const deUint32 xWorkGroupCount = gridSize.x() / xWorkGroupSize + (gridSize.x() % xWorkGroupSize ? 1u : 0u);
+		const deUint32 yWorkGroupCount = gridSize.y() / yWorkGroupSize + (gridSize.y() % yWorkGroupSize ? 1u : 0u);
+		const deUint32 zWorkGroupCount = gridSize.z() / zWorkGroupSize + (gridSize.z() % zWorkGroupSize ? 1u : 0u);
-		const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
-		const deUint32 xWorkGroupSize = std::min(std::min(gridSize.x(), maxWorkGroupSize.x()), maxWorkGroupInvocations);
-		const deUint32 yWorkGroupSize = std::min(std::min(gridSize.y(), maxWorkGroupSize.y()), maxWorkGroupInvocations / xWorkGroupSize);
-		const deUint32 zWorkGroupSize = std::min(std::min(gridSize.z(), maxWorkGroupSize.z()), maxWorkGroupInvocations / (xWorkGroupSize * yWorkGroupSize));
+		if (maxWorkGroupCount.x() < xWorkGroupCount ||
+			maxWorkGroupCount.y() < yWorkGroupCount ||
+			maxWorkGroupCount.z() < zWorkGroupCount)
+			TCU_THROW(NotSupportedError, "Image size is not supported");
-		const deUint32 xWorkGroupCount = gridSize.x() / xWorkGroupSize + (gridSize.x() % xWorkGroupSize ? 1u : 0u);
-		const deUint32 yWorkGroupCount = gridSize.y() / yWorkGroupSize + (gridSize.y() % yWorkGroupSize ? 1u : 0u);
-		const deUint32 zWorkGroupCount = gridSize.z() / zWorkGroupSize + (gridSize.z() % zWorkGroupSize ? 1u : 0u);
+		deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
+	}
-		if (maxWorkGroupCount.x() < xWorkGroupCount ||
-			maxWorkGroupCount.y() < yWorkGroupCount ||
-			maxWorkGroupCount.z() < zWorkGroupCount)
-			TCU_THROW(NotSupportedError, "Image size is not supported");
+	{
+		const VkMemoryBarrier memoryBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
-		deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
-	}
+		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 1u, &memoryBarrier, 0u, DE_NULL, 0u, DE_NULL);
+	}
-	{
-		const VkMemoryBarrier memoryBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
+	const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+	const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+	const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
-		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 1u, &memoryBarrier, 0u, DE_NULL, 0u, DE_NULL);
-	}
+	deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
-	const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
-	const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
-	const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+	{
+		const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
+		(
+			VK_ACCESS_TRANSFER_WRITE_BIT,
+			VK_ACCESS_HOST_READ_BIT,
+			*outputBuffer,
+			0u,
+			imageSizeInBytes
+		);
+
+		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
+	}
-	deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
+	// End recording commands
+	endCommandBuffer(deviceInterface, *commandBuffer);
-	{
-		const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
-		(
-			VK_ACCESS_TRANSFER_WRITE_BIT,
-			VK_ACCESS_HOST_READ_BIT,
-			*outputBuffer,
-			0u,
-			imageSizeInBytes
-		);
-
-		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
-	}
+	const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
-	// End recording commands
-	endCommandBuffer(deviceInterface, *commandBuffer);
+	// Submit commands for execution and wait for completion
+	submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 2u, imageMemoryBindSemaphores, stageBits,
+		0, DE_NULL, m_useDeviceGroups, firstDeviceID);
-	const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
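[Editor's note: the extra trailing arguments to submitCommandsAndWait are what route the submission to a single physical device of the group; the helper's new signature is not shown in this patch. The underlying Vulkan mechanism is presumably a VkDeviceGroupSubmitInfoKHR chained into VkSubmitInfo, roughly:

	deUint32 deviceMask = 1u << firstDeviceID;  // execute the command buffer on one device of the group

	VkDeviceGroupSubmitInfoKHR devGroupSubmitInfo = {};
	devGroupSubmitInfo.sType                     = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR;
	devGroupSubmitInfo.commandBufferCount        = 1u;
	devGroupSubmitInfo.pCommandBufferDeviceMasks = &deviceMask;

	VkSubmitInfo submitInfo = {};
	submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
	submitInfo.pNext = &devGroupSubmitInfo;  // then vkQueueSubmit plus a fence wait as usual]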
+	// Retrieve data from buffer to host memory
+	invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
-	// Submit commands for execution and wait for completion
-	submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 2u, imageMemoryBindSemaphores, stageBits);
+	const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
-	// Retrieve data from buffer to host memory
-	invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
+	// Wait for sparse queue to become idle
+	deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
-	const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+	for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
+	{
+		const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
+		const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
+		const tcu::ConstPixelBufferAccess pixelBuffer = tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData + bufferOffset);
-	// Wait for sparse queue to become idle
-	deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+		for (deUint32 offsetZ = 0u; offsetZ < gridSize.z(); ++offsetZ)
+		for (deUint32 offsetY = 0u; offsetY < gridSize.y(); ++offsetY)
+		for (deUint32 offsetX = 0u; offsetX < gridSize.x(); ++offsetX)
+		{
+			const deUint32 index = offsetX + (offsetY + offsetZ * gridSize.y()) * gridSize.x();
+			const tcu::UVec4 referenceValue = tcu::UVec4(index % MODULO_DIVISOR, index % MODULO_DIVISOR, index % MODULO_DIVISOR, 1u);
+			const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
-	for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
-	{
-		const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
-		const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
-		const tcu::ConstPixelBufferAccess pixelBuffer = tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData + bufferOffset);
+			if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
+				return tcu::TestStatus::fail("Failed");
+		}
+	}
-		for (deUint32 offsetZ = 0u; offsetZ < gridSize.z(); ++offsetZ)
-		for (deUint32 offsetY = 0u; offsetY < gridSize.y(); ++offsetY)
-		for (deUint32 offsetX = 0u; offsetX < gridSize.x(); ++offsetX)
+	for (deUint32 mipLevelNdx = aspectRequirements.imageMipTailFirstLod; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
	{
-			const deUint32 index = offsetX + (offsetY + offsetZ * gridSize.y()) * gridSize.x();
-			const tcu::UVec4 referenceValue = tcu::UVec4(index % MODULO_DIVISOR, index % MODULO_DIVISOR, index % MODULO_DIVISOR, 1u);
-			const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
+		const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
+		const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
-			if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
+		if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
			return tcu::TestStatus::fail("Failed");
		}
	}
-	for (deUint32 mipLevelNdx = aspectRequirements.imageMipTailFirstLod; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
-	{
-		const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
-		const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
-
-		if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
-			return tcu::TestStatus::fail("Failed");
-	}
-
	return tcu::TestStatus::pass("Passed");
 }

@@ -675,15 +704,13 @@ void ImageSparseMemoryAliasingCase::initPrograms(SourceCollections& sourceCollec

 TestInstance* ImageSparseMemoryAliasingCase::createInstance (Context& context) const
 {
-	return new ImageSparseMemoryAliasingInstance(context, m_imageType, m_imageSize, m_format);
+	return new ImageSparseMemoryAliasingInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
 }

 } // anonymous ns

-tcu::TestCaseGroup* createImageSparseMemoryAliasingTests (tcu::TestContext& testCtx)
+tcu::TestCaseGroup* createImageSparseMemoryAliasingTestsCommon(tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
 {
-	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_memory_aliasing", "Sparse Image Memory Aliasing"));
-
	static const deUint32 sizeCountPerImageType = 4u;

	struct ImageParameters
@@ -728,7 +755,7 @@ tcu::TestCaseGroup* createImageSparseMemoryAliasingTests (tcu::TestContext& test
			std::ostringstream stream;
			stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();

-			formatGroup->addChild(new ImageSparseMemoryAliasingCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440));
+			formatGroup->addChild(new ImageSparseMemoryAliasingCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440, useDeviceGroup));
		}
		imageTypeGroup->addChild(formatGroup.release());
	}
@@ -738,5 +765,17 @@ tcu::TestCaseGroup* createImageSparseMemoryAliasingTests (tcu::TestContext& test
	return testGroup.release();
 }

+tcu::TestCaseGroup* createImageSparseMemoryAliasingTests(tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_memory_aliasing", "Sparse Image Memory Aliasing"));
+	return createImageSparseMemoryAliasingTestsCommon(testCtx, testGroup);
+}
+
+tcu::TestCaseGroup* createDeviceGroupImageSparseMemoryAliasingTests(tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_image_sparse_memory_aliasing", "Sparse Image Memory Aliasing"));
+	return createImageSparseMemoryAliasingTestsCommon(testCtx, testGroup, true);
+}
+
 } // sparse
 } // vkt
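[Editor's note: the refactor just above is the recurring shape of this patch: the original group creator becomes a *TestsCommon helper taking the already-constructed group plus a useDeviceGroup flag, and two thin entry points build the single-device and device-group test trees from one case list. Schematically (Foo is a placeholder name):

	tcu::TestCaseGroup* createFooTestsCommon (tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
	{
		// ...add the same cases, each constructed with useDeviceGroup...
		return testGroup.release();
	}

This keeps the device-group tests identical to the originals except for the bind/submit plumbing.]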
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageMemoryAliasing.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageMemoryAliasing.hpp
index 0c0191a..edd89d2 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageMemoryAliasing.hpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageMemoryAliasing.hpp
@@ -32,6 +32,7 @@ namespace sparse
 {

 tcu::TestCaseGroup* createImageSparseMemoryAliasingTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createDeviceGroupImageSparseMemoryAliasingTests(tcu::TestContext& testCtx);

 } // sparse
 } // vkt
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseBinding.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseBinding.cpp
index 37b47b6..4b8e252 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseBinding.cpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseBinding.cpp
@@ -60,11 +60,13 @@ public:
		const std::string& description,
		const ImageType imageType,
		const tcu::UVec3& imageSize,
-		const tcu::TextureFormat& format);
+		const tcu::TextureFormat& format,
+		const bool useDeviceGroups = false);

	TestInstance* createInstance (Context& context) const;

 private:
+	const bool m_useDeviceGroups;
	const ImageType m_imageType;
	const tcu::UVec3 m_imageSize;
	const tcu::TextureFormat m_format;
@@ -75,8 +77,11 @@ ImageSparseBindingCase::ImageSparseBindingCase (tcu::TestContext& testCtx,
		const std::string& description,
		const ImageType imageType,
		const tcu::UVec3& imageSize,
-		const tcu::TextureFormat& format)
+		const tcu::TextureFormat& format,
+		const bool useDeviceGroups)
+
	: TestCase (testCtx, name, description)
+	, m_useDeviceGroups (useDeviceGroups)
	, m_imageType (imageType)
	, m_imageSize (imageSize)
	, m_format (format)
@@ -89,11 +94,13 @@ public:
	ImageSparseBindingInstance (Context& context,
		const ImageType imageType,
		const tcu::UVec3& imageSize,
-		const tcu::TextureFormat& format);
+		const tcu::TextureFormat& format,
+		const bool useDeviceGroups);

	tcu::TestStatus iterate (void);

 private:
+	const bool m_useDeviceGroups;
	const ImageType m_imageType;
	const tcu::UVec3 m_imageSize;
	const tcu::TextureFormat m_format;
@@ -102,8 +109,11 @@ private:
 ImageSparseBindingInstance::ImageSparseBindingInstance (Context& context,
		const ImageType imageType,
		const tcu::UVec3& imageSize,
-		const tcu::TextureFormat& format)
-	: SparseResourcesBaseInstance (context)
+		const tcu::TextureFormat& format,
+		const bool useDeviceGroups)
+
+	: SparseResourcesBaseInstance (context, useDeviceGroups)
+	, m_useDeviceGroups (useDeviceGroups)
	, m_imageType (imageType)
	, m_imageSize (imageSize)
	, m_format (format)
@@ -113,7 +123,17 @@ ImageSparseBindingInstance::ImageSparseBindingInstance (Context& context,
 tcu::TestStatus ImageSparseBindingInstance::iterate (void)
 {
	const InstanceInterface& instance = m_context.getInstanceInterface();
-	const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
+
+	{
+		// Create logical device supporting both sparse and compute queues
+		QueueRequirementsVec queueRequirements;
+		queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
+		queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
+
+		createDeviceSupportingQueues(queueRequirements);
+	}
+
+	const VkPhysicalDevice physicalDevice = getPhysicalDevice();
	VkImageCreateInfo imageSparseInfo;
	std::vector<de::SharedPtr<Unique<VkDeviceMemory> > > deviceMemUniquePtrVec;
@@ -125,229 +145,236 @@ tcu::TestStatus ImageSparseBindingInstance::iterate (void)
	if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
		TCU_THROW(NotSupportedError, "Device does not support sparse binding");

-	{
-		// Create logical device supporting both sparse and compute queues
-		QueueRequirementsVec queueRequirements;
-		queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
-		queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
-
-		createDeviceSupportingQueues(queueRequirements);
-	}
-
	const DeviceInterface& deviceInterface = getDeviceInterface();
	const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
	const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);

-	imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;			//VkStructureType sType;
-	imageSparseInfo.pNext = DE_NULL;						//const void* pNext;
-	imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT;			//VkImageCreateFlags flags;
-	imageSparseInfo.imageType = mapImageType(m_imageType);				//VkImageType imageType;
-	imageSparseInfo.format = mapTextureFormat(m_format);				//VkFormat format;
-	imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));	//VkExtent3D extent;
-	imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);		//deUint32 arrayLayers;
-	imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;				//VkSampleCountFlagBits samples;
-	imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;				//VkImageTiling tiling;
-	imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;			//VkImageLayout initialLayout;
-	imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
-		VK_IMAGE_USAGE_TRANSFER_DST_BIT;					//VkImageUsageFlags usage;
-	imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;			//VkSharingMode sharingMode;
-	imageSparseInfo.queueFamilyIndexCount = 0u;					//deUint32 queueFamilyIndexCount;
-	imageSparseInfo.pQueueFamilyIndices = DE_NULL;					//const deUint32* pQueueFamilyIndices;
-
-	if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
+	// Go through all physical devices
+	for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
	{
-		imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
-	}
+		const deUint32 firstDeviceID = physDevID;
+		const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
+
+		imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;			//VkStructureType sType;
+		imageSparseInfo.pNext = DE_NULL;						//const void* pNext;
+		imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT;			//VkImageCreateFlags flags;
+		imageSparseInfo.imageType = mapImageType(m_imageType);				//VkImageType imageType;
+		imageSparseInfo.format = mapTextureFormat(m_format);				//VkFormat format;
+		imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));	//VkExtent3D extent;
+		imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);		//deUint32 arrayLayers;
+		imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;				//VkSampleCountFlagBits samples;
+		imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;				//VkImageTiling tiling;
+		imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;			//VkImageLayout initialLayout;
+		imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+			VK_IMAGE_USAGE_TRANSFER_DST_BIT;					//VkImageUsageFlags usage;
+		imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;			//VkSharingMode sharingMode;
+		imageSparseInfo.queueFamilyIndexCount = 0u;					//deUint32 queueFamilyIndexCount;
+		imageSparseInfo.pQueueFamilyIndices = DE_NULL;					//const deUint32* pQueueFamilyIndices;
+
+		if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
+		{
+			imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+		}
-	{
-		VkImageFormatProperties imageFormatProperties;
-		instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
-			imageSparseInfo.format,
-			imageSparseInfo.imageType,
-			imageSparseInfo.tiling,
-			imageSparseInfo.usage,
-			imageSparseInfo.flags,
-			&imageFormatProperties);
-
-		imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
-	}
+		{
+			VkImageFormatProperties imageFormatProperties;
+			instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
+				imageSparseInfo.format,
+				imageSparseInfo.imageType,
+				imageSparseInfo.tiling,
+				imageSparseInfo.usage,
+				imageSparseInfo.flags,
+				&imageFormatProperties);
+
+			imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
+		}
-	// Create sparse image
-	const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));
+		// Create sparse image
+		const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));
-	// Create sparse image memory bind semaphore
-	const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+		// Create sparse image memory bind semaphore
+		const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
-	// Get sparse image general memory requirements
-	const VkMemoryRequirements imageSparseMemRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
+		// Get sparse image general memory requirements
+		const VkMemoryRequirements imageSparseMemRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
-	// Check if required image memory size does not exceed device limits
-	if (imageSparseMemRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
-		TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
+		// Check if required image memory size does not exceed device limits
+		if (imageSparseMemRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
+			TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
-	DE_ASSERT((imageSparseMemRequirements.size % imageSparseMemRequirements.alignment) == 0);
+		DE_ASSERT((imageSparseMemRequirements.size % imageSparseMemRequirements.alignment) == 0);
-	{
-		std::vector<VkSparseMemoryBind> sparseMemoryBinds;
-		const deUint32 numSparseBinds = static_cast<deUint32>(imageSparseMemRequirements.size / imageSparseMemRequirements.alignment);
-		const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageSparseMemRequirements, MemoryRequirement::Any);
+		{
+			std::vector<VkSparseMemoryBind> sparseMemoryBinds;
+			const deUint32 numSparseBinds = static_cast<deUint32>(imageSparseMemRequirements.size / imageSparseMemRequirements.alignment);
+			const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageSparseMemRequirements, MemoryRequirement::Any);
-		if (memoryType == NO_MATCH_FOUND)
-			return tcu::TestStatus::fail("No matching memory type found");
+			if (memoryType == NO_MATCH_FOUND)
+				return tcu::TestStatus::fail("No matching memory type found");
-		for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseBinds; ++sparseBindNdx)
-		{
-			const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
-				imageSparseMemRequirements.alignment, memoryType, imageSparseMemRequirements.alignment * sparseBindNdx);
+			for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseBinds; ++sparseBindNdx)
+			{
+				const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+					imageSparseMemRequirements.alignment, memoryType, imageSparseMemRequirements.alignment * sparseBindNdx);
-			deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
-			sparseMemoryBinds.push_back(sparseMemoryBind);
-		}
+				sparseMemoryBinds.push_back(sparseMemoryBind);
+			}
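[Editor's note: because sparse binding (unlike sparse residency) backs the whole image, the loop above carves the full memory requirement into alignment-sized opaque binds, one per block. The sizing arithmetic in isolation (memReqs is an illustrative name):

	// size is asserted to be a multiple of alignment, so the division is exact
	const deUint32 numSparseBinds = static_cast<deUint32>(memReqs.size / memReqs.alignment);
	// bind i then covers resourceOffset = i * memReqs.alignment with size = memReqs.alignment]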
-		const VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo = makeSparseImageOpaqueMemoryBindInfo(*imageSparse, numSparseBinds, &sparseMemoryBinds[0]);
+			const VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo = makeSparseImageOpaqueMemoryBindInfo(*imageSparse, numSparseBinds, &sparseMemoryBinds[0]);
-		const VkBindSparseInfo bindSparseInfo =
-		{
-			VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,			//VkStructureType sType;
-			DE_NULL,						//const void* pNext;
-			0u,							//deUint32 waitSemaphoreCount;
-			DE_NULL,						//const VkSemaphore* pWaitSemaphores;
-			0u,							//deUint32 bufferBindCount;
-			DE_NULL,						//const VkSparseBufferMemoryBindInfo* pBufferBinds;
-			1u,							//deUint32 imageOpaqueBindCount;
-			&opaqueBindInfo,					//const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
-			0u,							//deUint32 imageBindCount;
-			DE_NULL,						//const VkSparseImageMemoryBindInfo* pImageBinds;
-			1u,							//deUint32 signalSemaphoreCount;
-			&imageMemoryBindSemaphore.get()				//const VkSemaphore* pSignalSemaphores;
-		};
-
-		// Submit sparse bind commands for execution
-		VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
-	}
+			const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+			{
+				VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR,	//VkStructureType sType;
+				DE_NULL,						//const void* pNext;
+				firstDeviceID,						//deUint32 resourceDeviceIndex;
+				secondDeviceID,						//deUint32 memoryDeviceIndex;
+			};
+
+			const VkBindSparseInfo bindSparseInfo =
+			{
+				VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,			//VkStructureType sType;
+				m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL,	//const void* pNext;
+				0u,							//deUint32 waitSemaphoreCount;
+				DE_NULL,						//const VkSemaphore* pWaitSemaphores;
+				0u,							//deUint32 bufferBindCount;
+				DE_NULL,						//const VkSparseBufferMemoryBindInfo* pBufferBinds;
+				1u,							//deUint32 imageOpaqueBindCount;
+				&opaqueBindInfo,					//const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+				0u,							//deUint32 imageBindCount;
+				DE_NULL,						//const VkSparseImageMemoryBindInfo* pImageBinds;
+				1u,							//deUint32 signalSemaphoreCount;
+				&imageMemoryBindSemaphore.get()				//const VkSemaphore* pSignalSemaphores;
+			};
+
+			// Submit sparse bind commands for execution
+			VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
+		}
-	// Create command buffer for compute and transfer operations
-	const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
-	const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+		// Create command buffer for compute and transfer operations
+		const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+		const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
-	std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
+		std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
-	{
-		deUint32 bufferOffset = 0;
-		for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; mipmapNdx++)
		{
-			bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, static_cast<VkDeviceSize>(bufferOffset));
-			bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+			deUint32 bufferOffset = 0;
+			for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; mipmapNdx++)
+			{
+				bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, static_cast<VkDeviceSize>(bufferOffset));
+				bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+			}
		}
-	}
-	// Start recording commands
-	beginCommandBuffer(deviceInterface, *commandBuffer);
+		// Start recording commands
+		beginCommandBuffer(deviceInterface, *commandBuffer);
-	const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
-	const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
-	const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
-	const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
+		const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+		const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+		const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
+		const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
-	std::vector<deUint8> referenceData(imageSizeInBytes);
+		std::vector<deUint8> referenceData(imageSizeInBytes);
-	for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
-	{
-		referenceData[valueNdx] = static_cast<deUint8>((valueNdx % imageSparseMemRequirements.alignment) + 1u);
-	}
+		for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
+		{
+			referenceData[valueNdx] = static_cast<deUint8>((valueNdx % imageSparseMemRequirements.alignment) + 1u);
+		}
-	deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
+		deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
-	flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
+		flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
-	{
-		const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
-		(
-			VK_ACCESS_HOST_WRITE_BIT,
-			VK_ACCESS_TRANSFER_READ_BIT,
-			*inputBuffer,
-			0u,
-			imageSizeInBytes
-		);
-
-		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
-	}
+		{
+			const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
+			(
+				VK_ACCESS_HOST_WRITE_BIT,
+				VK_ACCESS_TRANSFER_READ_BIT,
+				*inputBuffer,
+				0u,
+				imageSizeInBytes
+			);
+
+			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
+		}
-	{
-		const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
-		(
-			0u,
-			VK_ACCESS_TRANSFER_WRITE_BIT,
-			VK_IMAGE_LAYOUT_UNDEFINED,
-			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, - sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, - *imageSparse, - makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers) - ); - - deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier); - } + { + const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier + ( + 0u, + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, + sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, + *imageSparse, + makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers) + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier); + } - deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast(bufferImageCopy.size()), &bufferImageCopy[0]); + deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast(bufferImageCopy.size()), &bufferImageCopy[0]); - { - const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier - ( - VK_ACCESS_TRANSFER_WRITE_BIT, - VK_ACCESS_TRANSFER_READ_BIT, - VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - *imageSparse, - makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers) - ); - - deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier); - } + { + const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier + ( + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_TRANSFER_READ_BIT, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + *imageSparse, + makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers) + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier); + } - const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT); - const Unique outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo)); - const de::UniquePtr outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible)); + const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT); + const Unique outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo)); + const de::UniquePtr outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible)); - 
deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
+		deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);

-	{
-		const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
-		(
-			VK_ACCESS_TRANSFER_WRITE_BIT,
-			VK_ACCESS_HOST_READ_BIT,
-			*outputBuffer,
-			0u,
-			imageSizeInBytes
-		);
-
-		deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
-	}
+		{
+			const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
+			(
+				VK_ACCESS_TRANSFER_WRITE_BIT,
+				VK_ACCESS_HOST_READ_BIT,
+				*outputBuffer,
+				0u,
+				imageSizeInBytes
+			);
+
+			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
+		}

-	// End recording commands
-	endCommandBuffer(deviceInterface, *commandBuffer);
+		// End recording commands
+		endCommandBuffer(deviceInterface, *commandBuffer);

-	const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
+		const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };

-	// Submit commands for execution and wait for completion
-	submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits);
+		// Submit commands for execution and wait for completion
+		submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits,
+			0, DE_NULL, m_useDeviceGroups, firstDeviceID);

-	// Retrieve data from buffer to host memory
-	invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
+		// Retrieve data from buffer to host memory
+		invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);

-	const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+		const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());

-	// Wait for sparse queue to become idle
-	deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+		// Wait for sparse queue to become idle
+		deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

-	for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
-	{
-		const deUint32 mipLevelSizeInBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx);
-		const deUint32 bufferOffset			= static_cast<deUint32>(bufferImageCopy[mipmapNdx].bufferOffset);
+		for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
+		{
+			const deUint32 mipLevelSizeInBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx);
+			const deUint32 bufferOffset			= static_cast<deUint32>(bufferImageCopy[mipmapNdx].bufferOffset);

-		if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
-			return tcu::TestStatus::fail("Failed");
+			if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
+				return tcu::TestStatus::fail("Failed");
+		}
 	}

 	return tcu::TestStatus::pass("Passed");
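Note on the new trailing arguments to submitCommandsAndWait (0, DE_NULL, m_useDeviceGroups, firstDeviceID): the helper presumably chains a VkDeviceGroupSubmitInfoKHR into its VkSubmitInfo so the recorded commands execute only on the selected physical device of the group. A minimal sketch of that chaining, under that assumption (deviceMask and useDeviceGroups are illustrative locals, not code from this change):

	// Sketch: run one command buffer on a single physical device of the group.
	// Bit N of the mask selects the device with index N.
	const deUint32 deviceMask = 1u << firstDeviceID;

	VkDeviceGroupSubmitInfoKHR deviceGroupSubmitInfo;
	deviceGroupSubmitInfo.sType							= VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR;
	deviceGroupSubmitInfo.pNext							= DE_NULL;
	deviceGroupSubmitInfo.waitSemaphoreCount			= 0u;		// the real helper must mirror the submit's wait-semaphore count
	deviceGroupSubmitInfo.pWaitSemaphoreDeviceIndices	= DE_NULL;
	deviceGroupSubmitInfo.commandBufferCount			= 1u;
	deviceGroupSubmitInfo.pCommandBufferDeviceMasks		= &deviceMask;
	deviceGroupSubmitInfo.signalSemaphoreCount			= 0u;
	deviceGroupSubmitInfo.pSignalSemaphoreDeviceIndices	= DE_NULL;

	VkSubmitInfo submitInfo;	// filled in as for the single-device path, except:
	submitInfo.pNext = useDeviceGroups ? &deviceGroupSubmitInfo : DE_NULL;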
@@ -355,15 +382,13 @@ tcu::TestStatus ImageSparseBindingInstance::iterate (void)

 TestInstance* ImageSparseBindingCase::createInstance (Context& context) const
 {
-	return new ImageSparseBindingInstance(context, m_imageType, m_imageSize, m_format);
+	return new ImageSparseBindingInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
 }

 } // anonymous ns

-tcu::TestCaseGroup* createImageSparseBindingTests(tcu::TestContext& testCtx)
+tcu::TestCaseGroup* createImageSparseBindingTestsCommon(tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
 {
-	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_binding", "Buffer Sparse Binding"));
-
 	static const deUint32 sizeCountPerImageType = 3u;

 	struct ImageParameters
@@ -393,6 +418,7 @@ tcu::TestCaseGroup* createImageSparseBindingTests(tcu::TestContext& testCtx)
 		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8)
 	};

+
 	for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray); ++imageTypeNdx)
 	{
 		const ImageType imageType = imageParametersArray[imageTypeNdx].imageType;
@@ -406,11 +432,10 @@ tcu::TestCaseGroup* createImageSparseBindingTests(tcu::TestContext& testCtx)
 		for (deInt32 imageSizeNdx = 0; imageSizeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray[imageTypeNdx].imageSizes); ++imageSizeNdx)
 		{
 			const tcu::UVec3 imageSize = imageParametersArray[imageTypeNdx].imageSizes[imageSizeNdx];
-
-			std::ostringstream	stream;
+			std::ostringstream	stream;
 			stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();

-			formatGroup->addChild(new ImageSparseBindingCase(testCtx, stream.str(), "", imageType, imageSize, format));
+			formatGroup->addChild(new ImageSparseBindingCase(testCtx, stream.str(), "", imageType, imageSize, format, useDeviceGroup));
 		}
 		imageTypeGroup->addChild(formatGroup.release());
 	}
@@ -420,5 +445,17 @@ tcu::TestCaseGroup* createImageSparseBindingTests(tcu::TestContext& testCtx)
 	return testGroup.release();
 }

+tcu::TestCaseGroup* createImageSparseBindingTests(tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_binding", "Image Sparse Binding"));
+	return createImageSparseBindingTestsCommon(testCtx, testGroup);
+}
+
+tcu::TestCaseGroup* createDeviceGroupImageSparseBindingTests(tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_image_sparse_binding", "Device Group Image Sparse Binding"));
+	return createImageSparseBindingTestsCommon(testCtx, testGroup, true);
+}
+
 } // sparse
 } // vkt
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseBinding.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseBinding.hpp
index 327818a..d2f5a9f 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseBinding.hpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseBinding.hpp
@@ -32,6 +32,7 @@ namespace sparse
 {
 tcu::TestCaseGroup* createImageSparseBindingTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createDeviceGroupImageSparseBindingTests(tcu::TestContext& testCtx);
 } // sparse
 } // vkt
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.cpp
index 630a06a..ad74980 100644
---
a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.cpp @@ -111,12 +111,14 @@ public: const ImageType imageType, const tcu::UVec3& imageSize, const tcu::TextureFormat& format, - const glu::GLSLVersion glslVersion); + const glu::GLSLVersion glslVersion, + const bool useDeviceGroups); void initPrograms (SourceCollections& sourceCollections) const; TestInstance* createInstance (Context& context) const; private: + const bool m_useDeviceGroups; const ImageType m_imageType; const tcu::UVec3 m_imageSize; const tcu::TextureFormat m_format; @@ -129,8 +131,10 @@ ImageSparseResidencyCase::ImageSparseResidencyCase (tcu::TestContext& testCtx, const ImageType imageType, const tcu::UVec3& imageSize, const tcu::TextureFormat& format, - const glu::GLSLVersion glslVersion) + const glu::GLSLVersion glslVersion, + const bool useDeviceGroups) : TestCase (testCtx, name, description) + , m_useDeviceGroups (useDeviceGroups) , m_imageType (imageType) , m_imageSize (imageSize) , m_format (format) @@ -172,11 +176,14 @@ public: ImageSparseResidencyInstance(Context& context, const ImageType imageType, const tcu::UVec3& imageSize, - const tcu::TextureFormat& format); + const tcu::TextureFormat& format, + const bool useDeviceGroups); + tcu::TestStatus iterate (void); private: + const bool m_useDeviceGroups; const ImageType m_imageType; const tcu::UVec3 m_imageSize; const tcu::TextureFormat m_format; @@ -185,8 +192,10 @@ private: ImageSparseResidencyInstance::ImageSparseResidencyInstance (Context& context, const ImageType imageType, const tcu::UVec3& imageSize, - const tcu::TextureFormat& format) - : SparseResourcesBaseInstance (context) + const tcu::TextureFormat& format, + const bool useDeviceGroups) + : SparseResourcesBaseInstance (context, useDeviceGroups) + , m_useDeviceGroups (useDeviceGroups) , m_imageType (imageType) , m_imageSize (imageSize) , m_format (format) @@ -196,46 +205,6 @@ ImageSparseResidencyInstance::ImageSparseResidencyInstance (Context& context tcu::TestStatus ImageSparseResidencyInstance::iterate (void) { const InstanceInterface& instance = m_context.getInstanceInterface(); - const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice(); - const VkPhysicalDeviceProperties physicalDeviceProperties = getPhysicalDeviceProperties(instance, physicalDevice); - VkImageCreateInfo imageCreateInfo; - VkSparseImageMemoryRequirements aspectRequirements; - VkExtent3D imageGranularity; - std::vector deviceMemUniquePtrVec; - - // Check if image size does not exceed device limits - if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize)) - TCU_THROW(NotSupportedError, "Image size not supported for device"); - - // Check if device supports sparse operations for image type - if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType)) - TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported"); - - imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; - imageCreateInfo.pNext = DE_NULL; - imageCreateInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT; - imageCreateInfo.imageType = mapImageType(m_imageType); - imageCreateInfo.format = mapTextureFormat(m_format); - imageCreateInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize)); - imageCreateInfo.mipLevels = 1u; - imageCreateInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize); - imageCreateInfo.samples = 
VK_SAMPLE_COUNT_1_BIT;
-	imageCreateInfo.tiling					= VK_IMAGE_TILING_OPTIMAL;
-	imageCreateInfo.initialLayout			= VK_IMAGE_LAYOUT_UNDEFINED;
-	imageCreateInfo.usage					= VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
-											  VK_IMAGE_USAGE_STORAGE_BIT;
-	imageCreateInfo.sharingMode				= VK_SHARING_MODE_EXCLUSIVE;
-	imageCreateInfo.queueFamilyIndexCount	= 0u;
-	imageCreateInfo.pQueueFamilyIndices		= DE_NULL;
-
-	if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
-	{
-		imageCreateInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
-	}
-
-	// Check if device supports sparse operations for image format
-	if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageCreateInfo))
-		TCU_THROW(NotSupportedError, "The image format does not support sparse operations");

 	{
 		// Create logical device supporting both sparse and compute queues
@@ -246,96 +215,173 @@ tcu::TestStatus ImageSparseResidencyInstance::iterate (void)
 		createDeviceSupportingQueues(queueRequirements);
 	}
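createDeviceSupportingQueues is the same helper the single-device tests use; when the instance was constructed with useDeviceGroups set, it is presumably also where the physical-device group is enumerated and one logical device is created across all of its members, which is what m_numPhysicalDevices and getPhysicalDevice(deviceID) later expose to the per-device loop. A sketch of that creation path, under that assumption (instanceDriver, instanceHandle and numDeviceGroups are illustrative names, not code from this change):

	// Sketch: take the first advertised physical-device group and create one
	// logical device spanning all of its members (VK_KHR_device_group_creation).
	deUint32 numDeviceGroups = 1u;
	VkPhysicalDeviceGroupPropertiesKHR groupProps;
	groupProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR;
	groupProps.pNext = DE_NULL;
	VK_CHECK(instanceDriver.enumeratePhysicalDeviceGroupsKHR(instanceHandle, &numDeviceGroups, &groupProps));

	VkDeviceGroupDeviceCreateInfoKHR deviceGroupInfo;
	deviceGroupInfo.sType				= VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR;
	deviceGroupInfo.pNext				= DE_NULL;
	deviceGroupInfo.physicalDeviceCount	= groupProps.physicalDeviceCount;	// exposed to the tests as m_numPhysicalDevices
	deviceGroupInfo.pPhysicalDevices	= groupProps.physicalDevices;

	VkDeviceCreateInfo deviceCreateInfo;	// queue create infos built from queueRequirements as usual, plus:
	deviceCreateInfo.pNext = &deviceGroupInfo;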
+	VkImageCreateInfo					imageCreateInfo;
+	VkSparseImageMemoryRequirements		aspectRequirements;
+	VkExtent3D							imageGranularity;
+	std::vector<de::SharedPtr<Move<VkDeviceMemory> > >	deviceMemUniquePtrVec;
+
 	const DeviceInterface&	deviceInterface	= getDeviceInterface();
 	const Queue&			sparseQueue		= getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
 	const Queue&			computeQueue	= getQueue(VK_QUEUE_COMPUTE_BIT, 0);

-	// Create sparse image
-	const Unique<VkImage> sparseImage(createImage(deviceInterface, getDevice(), &imageCreateInfo));
+	// Go through all physical devices
+	for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
+	{
+		const deUint32	firstDeviceID	= physDevID;
+		const deUint32	secondDeviceID	= (firstDeviceID + 1) % m_numPhysicalDevices;
+
+		const VkPhysicalDevice				physicalDevice				= getPhysicalDevice(firstDeviceID);
+		const VkPhysicalDeviceProperties	physicalDeviceProperties	= getPhysicalDeviceProperties(instance, physicalDevice);
+
+		// Check if image size does not exceed device limits
+		if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize))
+			TCU_THROW(NotSupportedError, "Image size not supported for device");
+
+		// Check if device supports sparse operations for image type
+		if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType))
+			TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported");
+
+		imageCreateInfo.sType					= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+		imageCreateInfo.pNext					= DE_NULL;
+		imageCreateInfo.flags					= VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
+		imageCreateInfo.imageType				= mapImageType(m_imageType);
+		imageCreateInfo.format					= mapTextureFormat(m_format);
+		imageCreateInfo.extent					= makeExtent3D(getLayerSize(m_imageType, m_imageSize));
+		imageCreateInfo.mipLevels				= 1u;
+		imageCreateInfo.arrayLayers				= getNumLayers(m_imageType, m_imageSize);
+		imageCreateInfo.samples					= VK_SAMPLE_COUNT_1_BIT;
+		imageCreateInfo.tiling					= VK_IMAGE_TILING_OPTIMAL;
+		imageCreateInfo.initialLayout			= VK_IMAGE_LAYOUT_UNDEFINED;
+		imageCreateInfo.usage					= VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+												  VK_IMAGE_USAGE_STORAGE_BIT;
+		imageCreateInfo.sharingMode				= VK_SHARING_MODE_EXCLUSIVE;
+		imageCreateInfo.queueFamilyIndexCount	= 0u;
+		imageCreateInfo.pQueueFamilyIndices		= DE_NULL;
+
+		if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
+		{
+			imageCreateInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+		}
+
+		// Check if device supports sparse operations for image format
+		if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageCreateInfo))
+			TCU_THROW(NotSupportedError, "The image format does not support sparse operations");

-	// Create sparse image memory bind semaphore
-	const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+		// Create sparse image
+		const Unique<VkImage> sparseImage(createImage(deviceInterface, getDevice(), &imageCreateInfo));

-	{
-		// Get image general memory requirements
-		const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *sparseImage);
+		// Create sparse image memory bind semaphore
+		const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+
+		{
+			// Get image general memory requirements
+			const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *sparseImage);

-		if (imageMemoryRequirements.size > physicalDeviceProperties.limits.sparseAddressSpaceSize)
-			TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
+			if (imageMemoryRequirements.size > physicalDeviceProperties.limits.sparseAddressSpaceSize)
+				TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");

-		DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
+			DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);

-		// Get sparse image sparse memory requirements
-		const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *sparseImage);
+			// Get sparse image sparse memory requirements
+			const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *sparseImage);

-		DE_ASSERT(sparseMemoryRequirements.size() != 0);
+			DE_ASSERT(sparseMemoryRequirements.size() != 0);

-		const deUint32 colorAspectIndex	= getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
-		const deUint32 metadataAspectIndex	= getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
+			const deUint32 colorAspectIndex		= getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+			const deUint32 metadataAspectIndex	= getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);

-		if (colorAspectIndex == NO_MATCH_FOUND)
-			TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
+			if (colorAspectIndex == NO_MATCH_FOUND)
+				TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");

-		aspectRequirements	= sparseMemoryRequirements[colorAspectIndex];
-		imageGranularity	= aspectRequirements.formatProperties.imageGranularity;
+			aspectRequirements	= sparseMemoryRequirements[colorAspectIndex];
+			imageGranularity	= aspectRequirements.formatProperties.imageGranularity;

-		const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;
+			const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;

-		DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
+			DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);

-		std::vector<VkSparseImageMemoryBind>	imageResidencyMemoryBinds;
-		std::vector<VkSparseMemoryBind>			imageMipTailMemoryBinds;
+			std::vector<VkSparseImageMemoryBind>	imageResidencyMemoryBinds;
+			std::vector<VkSparseMemoryBind>			imageMipTailMemoryBinds;

-		const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice,
imageMemoryRequirements, MemoryRequirement::Any); + const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any); - if (memoryType == NO_MATCH_FOUND) - return tcu::TestStatus::fail("No matching memory type found"); + if (memoryType == NO_MATCH_FOUND) + return tcu::TestStatus::fail("No matching memory type found"); - // Bind device memory for each aspect - for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx) - { - for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx) + // Bind device memory for each aspect + for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx) { - const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx }; - const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, mipLevelNdx); - const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity); - const tcu::UVec3 lastBlockExtent = tcu::UVec3(mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width, - mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height, - mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth); - for (deUint32 z = 0; z < numSparseBinds.z(); ++z) - for (deUint32 y = 0; y < numSparseBinds.y(); ++y) - for (deUint32 x = 0; x < numSparseBinds.x(); ++x) + for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx) { - const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z(); - - if (linearIndex % 2u == 1u) + const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx }; + const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, mipLevelNdx); + const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity); + const tcu::UVec3 lastBlockExtent = tcu::UVec3(mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width, + mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height, + mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth); + for (deUint32 z = 0; z < numSparseBinds.z(); ++z) + for (deUint32 y = 0; y < numSparseBinds.y(); ++y) + for (deUint32 x = 0; x < numSparseBinds.x(); ++x) { - continue; + const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z(); + + if (linearIndex % 2u == 1u) + { + continue; + } + + VkOffset3D offset; + offset.x = x*imageGranularity.width; + offset.y = y*imageGranularity.height; + offset.z = z*imageGranularity.depth; + + VkExtent3D extent; + extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width; + extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height; + extent.depth = (z == numSparseBinds.z() - 1) ? 
lastBlockExtent.z() : imageGranularity.depth; + + const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(), + imageMemoryRequirements.alignment, memoryType, subresource, offset, extent); + + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); + + imageResidencyMemoryBinds.push_back(imageMemoryBind); } + } - VkOffset3D offset; - offset.x = x*imageGranularity.width; - offset.y = y*imageGranularity.height; - offset.z = z*imageGranularity.depth; + if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels) + { + const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), + aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride); - VkExtent3D extent; - extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width; - extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height; - extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth; + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); + + imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind); + } - const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(), - imageMemoryRequirements.alignment, memoryType, subresource, offset, extent); + // Metadata + if (metadataAspectIndex != NO_MATCH_FOUND) + { + const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex]; + + if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)) + { + const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), + metadataAspectRequirements.imageMipTailSize, memoryType, + metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride, + VK_SPARSE_MEMORY_BIND_METADATA_BIT); - deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); - imageResidencyMemoryBinds.push_back(imageMemoryBind); + imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind); + } } } - if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels) + if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels) { const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), - aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride); + aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset); deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); @@ -347,11 +393,10 @@ tcu::TestStatus 
ImageSparseResidencyInstance::iterate (void)
 		{
 			const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];

-			if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+			if ((metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
 			{
 				const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
-					metadataAspectRequirements.imageMipTailSize, memoryType,
-					metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride,
+					metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset,
 					VK_SPARSE_MEMORY_BIND_METADATA_BIT);

 				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
@@ -359,278 +404,261 @@ tcu::TestStatus ImageSparseResidencyInstance::iterate (void)
 				imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
 			}
 		}
-	}
-
-	if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels)
-	{
-		const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
-			aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
-		deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+			const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+			{
+				VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR,	//VkStructureType	sType;
+				DE_NULL,												//const void*		pNext;
+				firstDeviceID,											//deUint32			resourceDeviceIndex;
+				secondDeviceID,											//deUint32			memoryDeviceIndex;
+			};
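VkDeviceGroupBindSparseInfoKHR is the core of the device-group variant: resourceDeviceIndex selects the physical device that uses the sparse image, while memoryDeviceIndex selects the device whose memory instance backs the bind, so with secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices every binding goes through peer memory whenever the group has more than one member. Whether a peer can back such an access is queryable per heap; a sketch of that query (not part of this change; heapIndex is an illustrative variable):

	// Sketch: ask what kinds of access the resource device (firstDeviceID) is
	// allowed to memory that physically resides on secondDeviceID (VK_KHR_device_group).
	VkPeerMemoryFeatureFlags peerMemoryFeatures = 0u;
	deviceInterface.getDeviceGroupPeerMemoryFeaturesKHR(getDevice(), heapIndex,
		firstDeviceID /*localDeviceIndex*/, secondDeviceID /*remoteDeviceIndex*/, &peerMemoryFeatures);
	// VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR and ..._GENERIC_DST_BIT_KHR in the
	// result indicate that generic reads and writes may cross the device boundary.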
-		imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
-	}
+			VkBindSparseInfo bindSparseInfo =
+			{
+				VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,						//VkStructureType							sType;
+				m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL,	//const void*								pNext;
+				0u,														//deUint32									waitSemaphoreCount;
+				DE_NULL,												//const VkSemaphore*						pWaitSemaphores;
+				0u,														//deUint32									bufferBindCount;
+				DE_NULL,												//const VkSparseBufferMemoryBindInfo*		pBufferBinds;
+				0u,														//deUint32									imageOpaqueBindCount;
+				DE_NULL,												//const VkSparseImageOpaqueMemoryBindInfo*	pImageOpaqueBinds;
+				0u,														//deUint32									imageBindCount;
+				DE_NULL,												//const VkSparseImageMemoryBindInfo*		pImageBinds;
+				1u,														//deUint32									signalSemaphoreCount;
+				&imageMemoryBindSemaphore.get()							//const VkSemaphore*						pSignalSemaphores;
+			};
+
+			VkSparseImageMemoryBindInfo			imageResidencyBindInfo;
+			VkSparseImageOpaqueMemoryBindInfo	imageMipTailBindInfo;
+
+			if (imageResidencyMemoryBinds.size() > 0)
+			{
+				imageResidencyBindInfo.image		= *sparseImage;
+				imageResidencyBindInfo.bindCount	= static_cast<deUint32>(imageResidencyMemoryBinds.size());
+				imageResidencyBindInfo.pBinds		= &imageResidencyMemoryBinds[0];
-	// Metadata
-	if (metadataAspectIndex != NO_MATCH_FOUND)
-	{
-		const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
+				bindSparseInfo.imageBindCount	= 1u;
+				bindSparseInfo.pImageBinds		= &imageResidencyBindInfo;
+			}
-		if ((metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+			if (imageMipTailMemoryBinds.size() > 0)
 			{
-			const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
-				metadataAspectRequirements.imageMipTailSize, memoryType,
-				metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride,
-				VK_SPARSE_MEMORY_BIND_METADATA_BIT);
-
-			deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+				imageMipTailBindInfo.image		= *sparseImage;
+				imageMipTailBindInfo.bindCount	= static_cast<deUint32>(imageMipTailMemoryBinds.size());
+				imageMipTailBindInfo.pBinds =
&imageMipTailMemoryBinds[0];
-
-		bindSparseInfo.imageOpaqueBindCount	= 1u;
-		bindSparseInfo.pImageOpaqueBinds	= &imageMipTailBindInfo;
-	}
+
+				bindSparseInfo.imageOpaqueBindCount	= 1u;
+				bindSparseInfo.pImageOpaqueBinds	= &imageMipTailBindInfo;
+			}

-	// Submit sparse bind commands for execution
-	VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
-	}
+			// Submit sparse bind commands for execution
+			VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
 		}

-	// Create command buffer for compute and transfer operations
-	const Unique<VkCommandPool>		commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
-	const Unique<VkCommandBuffer>	commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+		// Create command buffer for compute and transfer operations
+		const Unique<VkCommandPool>		commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+		const Unique<VkCommandBuffer>	commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

-	// Start recording commands
-	beginCommandBuffer(deviceInterface, *commandBuffer);
+		// Start recording commands
+		beginCommandBuffer(deviceInterface, *commandBuffer);

-	// Create descriptor set layout
-	const Unique<VkDescriptorSetLayout> descriptorSetLayout(
-		DescriptorSetLayoutBuilder()
-		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
-		.build(deviceInterface, getDevice()));
+		// Create descriptor set layout
+		const Unique<VkDescriptorSetLayout> descriptorSetLayout(
+			DescriptorSetLayoutBuilder()
+			.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
+			.build(deviceInterface, getDevice()));

-	// Create and bind compute pipeline
-	const Unique<VkShaderModule>	shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
-	const Unique<VkPipelineLayout>	pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
-	const Unique<VkPipeline>		computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+		// Create and bind compute pipeline
+		const Unique<VkShaderModule>	shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
+		const Unique<VkPipelineLayout>	pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
+		const Unique<VkPipeline>		computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));

-	deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
+		deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);

-	// Create and bind descriptor set
-	const Unique<VkDescriptorPool> descriptorPool(
-		DescriptorPoolBuilder()
-		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u)
-		.build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+		// Create and bind descriptor set
+		const Unique<VkDescriptorPool> descriptorPool(
+			DescriptorPoolBuilder()
+			.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u)
+			.build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

+		const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
+
+		const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
+		const Unique<VkImageView>		imageView			(makeImageView(deviceInterface, getDevice(), *sparseImage, mapImageViewType(m_imageType), mapTextureFormat(m_format), subresourceRange));
+		const VkDescriptorImageInfo		sparseImageInfo		= makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL);

-	const Unique<VkDescriptorSet>
descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout)); + DescriptorSetUpdateBuilder() + .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo) + .update(deviceInterface, getDevice()); - const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize)); - const Unique imageView(makeImageView(deviceInterface, getDevice(), *sparseImage, mapImageViewType(m_imageType), mapTextureFormat(m_format), subresourceRange)); - const VkDescriptorImageInfo sparseImageInfo = makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL); + deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL); - DescriptorSetUpdateBuilder() - .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo) - .update(deviceInterface, getDevice()); + { + const VkImageMemoryBarrier sparseImageLayoutChangeBarrier = makeImageMemoryBarrier + ( + 0u, + VK_ACCESS_SHADER_WRITE_BIT, + VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_GENERAL, + sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, + sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, + *sparseImage, + subresourceRange + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageLayoutChangeBarrier); + } - deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL); + const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize); - { - const VkImageMemoryBarrier sparseImageLayoutChangeBarrier = makeImageMemoryBarrier - ( - 0u, - VK_ACCESS_SHADER_WRITE_BIT, - VK_IMAGE_LAYOUT_UNDEFINED, - VK_IMAGE_LAYOUT_GENERAL, - sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, - sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, - *sparseImage, - subresourceRange - ); - - deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageLayoutChangeBarrier); - } + { + const tcu::UVec3 workGroupSize = computeWorkGroupSize(gridSize); - const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize); + const deUint32 xWorkGroupCount = gridSize.x() / workGroupSize.x() + (gridSize.x() % workGroupSize.x() ? 1u : 0u); + const deUint32 yWorkGroupCount = gridSize.y() / workGroupSize.y() + (gridSize.y() % workGroupSize.y() ? 1u : 0u); + const deUint32 zWorkGroupCount = gridSize.z() / workGroupSize.z() + (gridSize.z() % workGroupSize.z() ? 1u : 0u); - { - const tcu::UVec3 workGroupSize = computeWorkGroupSize(gridSize); + const tcu::UVec3 maxComputeWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u); - const deUint32 xWorkGroupCount = gridSize.x() / workGroupSize.x() + (gridSize.x() % workGroupSize.x() ? 1u : 0u); - const deUint32 yWorkGroupCount = gridSize.y() / workGroupSize.y() + (gridSize.y() % workGroupSize.y() ? 
1u : 0u); - const deUint32 zWorkGroupCount = gridSize.z() / workGroupSize.z() + (gridSize.z() % workGroupSize.z() ? 1u : 0u); + if (maxComputeWorkGroupCount.x() < xWorkGroupCount || + maxComputeWorkGroupCount.y() < yWorkGroupCount || + maxComputeWorkGroupCount.z() < zWorkGroupCount) + { + TCU_THROW(NotSupportedError, "Image size is not supported"); + } - const tcu::UVec3 maxComputeWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u); + deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount); + } - if (maxComputeWorkGroupCount.x() < xWorkGroupCount || - maxComputeWorkGroupCount.y() < yWorkGroupCount || - maxComputeWorkGroupCount.z() < zWorkGroupCount) { - TCU_THROW(NotSupportedError, "Image size is not supported"); + const VkImageMemoryBarrier sparseImageTrasferBarrier = makeImageMemoryBarrier + ( + VK_ACCESS_SHADER_WRITE_BIT, + VK_ACCESS_TRANSFER_READ_BIT, + VK_IMAGE_LAYOUT_GENERAL, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + *sparseImage, + subresourceRange + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageTrasferBarrier); } - deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount); - } + const deUint32 imageSizeInBytes = getNumPixels(m_imageType, m_imageSize) * tcu::getPixelSize(m_format); + const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT); + const Unique outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo)); + const de::UniquePtr outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible)); - { - const VkImageMemoryBarrier sparseImageTrasferBarrier = makeImageMemoryBarrier - ( - VK_ACCESS_SHADER_WRITE_BIT, - VK_ACCESS_TRANSFER_READ_BIT, - VK_IMAGE_LAYOUT_GENERAL, - VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - *sparseImage, - subresourceRange - ); - - deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageTrasferBarrier); - } - - const deUint32 imageSizeInBytes = getNumPixels(m_imageType, m_imageSize) * tcu::getPixelSize(m_format); - const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT); - const Unique outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo)); - const de::UniquePtr outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible)); - - { - const VkBufferImageCopy bufferImageCopy = makeBufferImageCopy(imageCreateInfo.extent, imageCreateInfo.arrayLayers); - - deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *sparseImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, 1u, &bufferImageCopy); - } + { + const VkBufferImageCopy bufferImageCopy = makeBufferImageCopy(imageCreateInfo.extent, imageCreateInfo.arrayLayers); - { - const VkBufferMemoryBarrier outputBufferHostReadBarrier = makeBufferMemoryBarrier - ( - VK_ACCESS_TRANSFER_WRITE_BIT, - VK_ACCESS_HOST_READ_BIT, - *outputBuffer, - 0u, - imageSizeInBytes - ); - - deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferHostReadBarrier, 0u, DE_NULL); - } + deviceInterface.cmdCopyImageToBuffer(*commandBuffer, 
*sparseImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, 1u, &bufferImageCopy); + } - // End recording commands - endCommandBuffer(deviceInterface, *commandBuffer); + { + const VkBufferMemoryBarrier outputBufferHostReadBarrier = makeBufferMemoryBarrier + ( + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_HOST_READ_BIT, + *outputBuffer, + 0u, + imageSizeInBytes + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferHostReadBarrier, 0u, DE_NULL); + } - // The stage at which execution is going to wait for finish of sparse binding operations - const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT }; + // End recording commands + endCommandBuffer(deviceInterface, *commandBuffer); - // Submit commands for execution and wait for completion - submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits); + // The stage at which execution is going to wait for finish of sparse binding operations + const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT }; - // Retrieve data from buffer to host memory - invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes); + // Submit commands for execution and wait for completion + submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits, + 0, DE_NULL, m_useDeviceGroups, firstDeviceID); - const deUint8* outputData = static_cast(outputBufferAlloc->getHostPtr()); - const tcu::ConstPixelBufferAccess pixelBuffer = tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData); + // Retrieve data from buffer to host memory + invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes); - // Wait for sparse queue to become idle - deviceInterface.queueWaitIdle(sparseQueue.queueHandle); + const deUint8* outputData = static_cast(outputBufferAlloc->getHostPtr()); + const tcu::ConstPixelBufferAccess pixelBuffer = tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData); - // Validate results - if( aspectRequirements.imageMipTailFirstLod > 0u ) - { - const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, 0u); - const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity); - const tcu::UVec3 lastBlockExtent = tcu::UVec3( mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width, - mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height, - mipExtent.depth % imageGranularity.depth ? 
mipExtent.depth % imageGranularity.depth : imageGranularity.depth); + // Wait for sparse queue to become idle + //vsk fails: + deviceInterface.queueWaitIdle(sparseQueue.queueHandle); - for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx) + // Validate results + if( aspectRequirements.imageMipTailFirstLod > 0u ) { - for (deUint32 z = 0; z < numSparseBinds.z(); ++z) - for (deUint32 y = 0; y < numSparseBinds.y(); ++y) - for (deUint32 x = 0; x < numSparseBinds.x(); ++x) + const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, 0u); + const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity); + const tcu::UVec3 lastBlockExtent = tcu::UVec3( mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width, + mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height, + mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth); + + for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx) { - VkExtent3D offset; - offset.width = x*imageGranularity.width; - offset.height = y*imageGranularity.height; - offset.depth = z*imageGranularity.depth + layerNdx*numSparseBinds.z()*imageGranularity.depth; + for (deUint32 z = 0; z < numSparseBinds.z(); ++z) + for (deUint32 y = 0; y < numSparseBinds.y(); ++y) + for (deUint32 x = 0; x < numSparseBinds.x(); ++x) + { + VkExtent3D offset; + offset.width = x*imageGranularity.width; + offset.height = y*imageGranularity.height; + offset.depth = z*imageGranularity.depth + layerNdx*numSparseBinds.z()*imageGranularity.depth; - VkExtent3D extent; - extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width; - extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height; - extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth; + VkExtent3D extent; + extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width; + extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height; + extent.depth = (z == numSparseBinds.z() - 1) ? 
lastBlockExtent.z() : imageGranularity.depth; - const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z(); + const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z(); - if (linearIndex % 2u == 0u) - { - for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ) - for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY) - for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX) + if (linearIndex % 2u == 0u) { - const tcu::UVec4 referenceValue = tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u); - const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ); - - if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0) - return tcu::TestStatus::fail("Failed"); + for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ) + for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY) + for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX) + { + const tcu::UVec4 referenceValue = tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u); + const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ); + + if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0) + return tcu::TestStatus::fail("Failed"); + } } - } - else if (physicalDeviceProperties.sparseProperties.residencyNonResidentStrict) - { - for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ) - for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY) - for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX) + else if (physicalDeviceProperties.sparseProperties.residencyNonResidentStrict) { - const tcu::UVec4 referenceValue = tcu::UVec4(0u, 0u, 0u, 0u); - const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ); - - if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0) - return tcu::TestStatus::fail("Failed"); + for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ) + for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY) + for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX) + { + const tcu::UVec4 referenceValue = tcu::UVec4(0u, 0u, 0u, 0u); + const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ); + + if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0) + return tcu::TestStatus::fail("Failed"); + } } } } } - } - else - { - const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, 0u); - - for (deUint32 offsetZ = 0u; offsetZ < mipExtent.depth * imageCreateInfo.arrayLayers; ++offsetZ) - for (deUint32 offsetY = 0u; offsetY < mipExtent.height; ++offsetY) - for (deUint32 offsetX = 0u; offsetX < mipExtent.width; ++offsetX) + else { - const tcu::UVec4 referenceValue = tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u); - const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ); + const VkExtent3D 
mipExtent = mipLevelExtents(imageCreateInfo.extent, 0u);
+
+				for (deUint32 offsetZ = 0u; offsetZ < mipExtent.depth * imageCreateInfo.arrayLayers; ++offsetZ)
+				for (deUint32 offsetY = 0u; offsetY < mipExtent.height; ++offsetY)
+				for (deUint32 offsetX = 0u; offsetX < mipExtent.width; ++offsetX)
+				{
+					const tcu::UVec4 referenceValue	= tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u);
+					const tcu::UVec4 outputValue	= pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);

-				if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
-					return tcu::TestStatus::fail("Failed");
+					if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
+						return tcu::TestStatus::fail("Failed");
+				}
 			}
 		}
@@ -639,15 +667,13 @@ tcu::TestStatus ImageSparseResidencyInstance::iterate (void)

 TestInstance* ImageSparseResidencyCase::createInstance (Context& context) const
 {
-	return new ImageSparseResidencyInstance(context, m_imageType, m_imageSize, m_format);
+	return new ImageSparseResidencyInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
 }

 } // anonymous ns

-tcu::TestCaseGroup* createImageSparseResidencyTests (tcu::TestContext& testCtx)
+tcu::TestCaseGroup* createImageSparseResidencyTestsCommon (tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
 {
-	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_residency", "Buffer Sparse Residency"));
-
 	static const deUint32 sizeCountPerImageType = 3u;

 	struct ImageParameters
@@ -695,7 +721,7 @@ tcu::TestCaseGroup* createImageSparseResidencyTests (tcu::TestContext& testCtx)
 			std::ostringstream	stream;
 			stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();

-			formatGroup->addChild(new ImageSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440));
+			formatGroup->addChild(new ImageSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440, useDeviceGroup));
 		}
 		imageTypeGroup->addChild(formatGroup.release());
 	}
@@ -705,5 +731,17 @@ tcu::TestCaseGroup* createImageSparseResidencyTests (tcu::TestContext& testCtx)
 	return testGroup.release();
 }

+tcu::TestCaseGroup* createImageSparseResidencyTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_residency", "Image Sparse Residency"));
+	return createImageSparseResidencyTestsCommon(testCtx, testGroup);
+}
+
+tcu::TestCaseGroup* createDeviceGroupImageSparseResidencyTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_image_sparse_residency", "Device Group Image Sparse Residency"));
+	return createImageSparseResidencyTestsCommon(testCtx, testGroup, true);
+}
+
 } // sparse
 } // vkt
diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.hpp
index d357d9a..0b44221 100644
--- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.hpp
+++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.hpp
@@ -32,6 +32,7 @@ namespace sparse
 {
 tcu::TestCaseGroup* createImageSparseResidencyTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createDeviceGroupImageSparseResidencyTests(tcu::TestContext& testCtx);
 } // sparse
 } // vkt
diff --git
a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesMipmapSparseResidency.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesMipmapSparseResidency.cpp index 24e128b..c86d385 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesMipmapSparseResidency.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesMipmapSparseResidency.cpp @@ -71,11 +71,14 @@ public: const std::string& description, const ImageType imageType, const tcu::UVec3& imageSize, - const tcu::TextureFormat& format); + const tcu::TextureFormat& format, + const bool useDeviceGroups); + TestInstance* createInstance (Context& context) const; private: + const bool m_useDeviceGroups; const ImageType m_imageType; const tcu::UVec3 m_imageSize; const tcu::TextureFormat m_format; @@ -86,8 +89,10 @@ MipmapSparseResidencyCase::MipmapSparseResidencyCase (tcu::TestContext& testCt const std::string& description, const ImageType imageType, const tcu::UVec3& imageSize, - const tcu::TextureFormat& format) + const tcu::TextureFormat& format, + const bool useDeviceGroups) : TestCase (testCtx, name, description) + , m_useDeviceGroups (useDeviceGroups) , m_imageType (imageType) , m_imageSize (imageSize) , m_format (format) @@ -100,12 +105,14 @@ public: MipmapSparseResidencyInstance (Context& context, const ImageType imageType, const tcu::UVec3& imageSize, - const tcu::TextureFormat& format); + const tcu::TextureFormat& format, + const bool useDeviceGroups); + tcu::TestStatus iterate (void); private: - + const bool m_useDeviceGroups; const ImageType m_imageType; const tcu::UVec3 m_imageSize; const tcu::TextureFormat m_format; @@ -114,8 +121,10 @@ private: MipmapSparseResidencyInstance::MipmapSparseResidencyInstance (Context& context, const ImageType imageType, const tcu::UVec3& imageSize, - const tcu::TextureFormat& format) - : SparseResourcesBaseInstance (context) + const tcu::TextureFormat& format, + const bool useDeviceGroups) + : SparseResourcesBaseInstance (context, useDeviceGroups) + , m_useDeviceGroups (useDeviceGroups) , m_imageType (imageType) , m_imageSize (imageSize) , m_format (format) @@ -125,7 +134,16 @@ MipmapSparseResidencyInstance::MipmapSparseResidencyInstance (Context& conte tcu::TestStatus MipmapSparseResidencyInstance::iterate (void) { const InstanceInterface& instance = m_context.getInstanceInterface(); - const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice(); + { + // Create logical device supporting both sparse and compute operations + QueueRequirementsVec queueRequirements; + queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u)); + queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u)); + + createDeviceSupportingQueues(queueRequirements); + } + + const VkPhysicalDevice physicalDevice = getPhysicalDevice(); VkImageCreateInfo imageSparseInfo; std::vector deviceMemUniquePtrVec; @@ -137,120 +155,146 @@ tcu::TestStatus MipmapSparseResidencyInstance::iterate (void) if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType)) TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported"); - imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; - imageSparseInfo.pNext = DE_NULL; - imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT; - imageSparseInfo.imageType = mapImageType(m_imageType); - imageSparseInfo.format = mapTextureFormat(m_format); - imageSparseInfo.extent = 
makeExtent3D(getLayerSize(m_imageType, m_imageSize)); - imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize); - imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT; - imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL; - imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; - imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | - VK_IMAGE_USAGE_TRANSFER_SRC_BIT; - imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; - imageSparseInfo.queueFamilyIndexCount = 0u; - imageSparseInfo.pQueueFamilyIndices = DE_NULL; - - if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY) - { - imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; - } + const DeviceInterface& deviceInterface = getDeviceInterface(); + const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0); + const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0); + // Go through all physical devices + for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++) { - VkImageFormatProperties imageFormatProperties; - instance.getPhysicalDeviceImageFormatProperties(physicalDevice, - imageSparseInfo.format, - imageSparseInfo.imageType, - imageSparseInfo.tiling, - imageSparseInfo.usage, - imageSparseInfo.flags, - &imageFormatProperties); - - imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent); - } + const deUint32 firstDeviceID = physDevID; + const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices; + + imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + imageSparseInfo.pNext = DE_NULL; + imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT; + imageSparseInfo.imageType = mapImageType(m_imageType); + imageSparseInfo.format = mapTextureFormat(m_format); + imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize)); + imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize); + imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT; + imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL; + imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | + VK_IMAGE_USAGE_TRANSFER_SRC_BIT; + imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + imageSparseInfo.queueFamilyIndexCount = 0u; + imageSparseInfo.pQueueFamilyIndices = DE_NULL; + + if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY) + { + imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; + } - // Check if device supports sparse operations for image format - if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo)) - TCU_THROW(NotSupportedError, "The image format does not support sparse operations"); + { + VkImageFormatProperties imageFormatProperties; + instance.getPhysicalDeviceImageFormatProperties(physicalDevice, + imageSparseInfo.format, + imageSparseInfo.imageType, + imageSparseInfo.tiling, + imageSparseInfo.usage, + imageSparseInfo.flags, + &imageFormatProperties); + + imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent); + } - { - // Create logical device supporting both sparse and compute operations - QueueRequirementsVec queueRequirements; - queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u)); - queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u)); + // Check if device supports sparse operations for image format + if (!checkSparseSupportForImageFormat(instance, 
physicalDevice, imageSparseInfo)) + TCU_THROW(NotSupportedError, "The image format does not support sparse operations"); - createDeviceSupportingQueues(queueRequirements); - } + // Create sparse image + const Unique imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo)); - const DeviceInterface& deviceInterface = getDeviceInterface(); - const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0); - const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0); + // Create sparse image memory bind semaphore + const Unique imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice())); - // Create sparse image - const Unique imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo)); + { + // Get sparse image general memory requirements + const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse); - // Create sparse image memory bind semaphore - const Unique imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice())); + // Check if required image memory size does not exceed device limits + if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize) + TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits"); - { - // Get sparse image general memory requirements - const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse); + DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0); - // Check if required image memory size does not exceed device limits - if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize) - TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits"); + // Get sparse image sparse memory requirements + const std::vector sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse); - DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0); + DE_ASSERT(sparseMemoryRequirements.size() != 0); - // Get sparse image sparse memory requirements - const std::vector sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse); + const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT); + const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT); - DE_ASSERT(sparseMemoryRequirements.size() != 0); + if (colorAspectIndex == NO_MATCH_FOUND) + TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT"); - const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT); - const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT); + const VkSparseImageMemoryRequirements aspectRequirements = sparseMemoryRequirements[colorAspectIndex]; + const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask; + const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity; - if (colorAspectIndex == NO_MATCH_FOUND) - TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only 
VK_IMAGE_ASPECT_COLOR_BIT"); + DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0); - const VkSparseImageMemoryRequirements aspectRequirements = sparseMemoryRequirements[colorAspectIndex]; - const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask; - const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity; + std::vector imageResidencyMemoryBinds; + std::vector imageMipTailMemoryBinds; - DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0); + const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any); - std::vector imageResidencyMemoryBinds; - std::vector imageMipTailMemoryBinds; + if (memoryType == NO_MATCH_FOUND) + return tcu::TestStatus::fail("No matching memory type found"); - const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any); + // Bind memory for each layer + for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx) + { + for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx) + { + const VkExtent3D mipExtent = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx); + const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity); + const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z(); + const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx }; - if (memoryType == NO_MATCH_FOUND) - return tcu::TestStatus::fail("No matching memory type found"); + const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(), + imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent); - // Bind memory for each layer - for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx) - { - for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx) - { - const VkExtent3D mipExtent = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx); - const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity); - const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z(); - const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx }; + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); - const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(), - imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent); + imageResidencyMemoryBinds.push_back(imageMemoryBind); + } - deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); + if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels) + { + const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), + aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride); + + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), 
Deleter(deviceInterface, getDevice(), DE_NULL)))); - imageResidencyMemoryBinds.push_back(imageMemoryBind); + imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind); + } + + // Metadata + if (metadataAspectIndex != NO_MATCH_FOUND) + { + const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex]; + + if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)) + { + const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), + metadataAspectRequirements.imageMipTailSize, memoryType, + metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride, + VK_SPARSE_MEMORY_BIND_METADATA_BIT); + + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); + + imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind); + } + } } - if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels) + if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels) { const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), - aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride); + aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset); deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); @@ -262,11 +306,10 @@ tcu::TestStatus MipmapSparseResidencyInstance::iterate (void) { const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex]; - if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)) + if (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) { const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), - metadataAspectRequirements.imageMipTailSize, memoryType, - metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride, + metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset, VK_SPARSE_MEMORY_BIND_METADATA_BIT); deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); @@ -274,217 +317,196 @@ tcu::TestStatus MipmapSparseResidencyInstance::iterate (void) imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind); } } - } - - if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels) - { - const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), - aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset); - - deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); - - imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind); - } - // Metadata - if (metadataAspectIndex != NO_MATCH_FOUND) - 
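// Note: when VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT is not set, each array layer owns its
// own mip tail region at imageMipTailOffset + layerNdx * imageMipTailStride, which is why the
// mip tail (and the metadata aspect handled below) is bound per layer; metadata binds must
// additionally set VK_SPARSE_MEMORY_BIND_METADATA_BIT.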
{ - const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex]; - - if (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) + const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo = { - const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), - metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset, - VK_SPARSE_MEMORY_BIND_METADATA_BIT); + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType; + DE_NULL, //const void* pNext; + firstDeviceID, //deUint32 resourceDeviceIndex; + secondDeviceID, //deUint32 memoryDeviceIndex; + }; - deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, getDevice(), DE_NULL)))); + VkBindSparseInfo bindSparseInfo = + { + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType; + m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext; + 0u, //deUint32 waitSemaphoreCount; + DE_NULL, //const VkSemaphore* pWaitSemaphores; + 0u, //deUint32 bufferBindCount; + DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds; + 0u, //deUint32 imageOpaqueBindCount; + DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + 0u, //deUint32 imageBindCount; + DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds; + 1u, //deUint32 signalSemaphoreCount; + &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores; + }; + + VkSparseImageMemoryBindInfo imageResidencyBindInfo; + VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo; + + if (imageResidencyMemoryBinds.size() > 0) + { + imageResidencyBindInfo.image = *imageSparse; + imageResidencyBindInfo.bindCount = static_cast(imageResidencyMemoryBinds.size()); + imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0]; - imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind); + bindSparseInfo.imageBindCount = 1u; + bindSparseInfo.pImageBinds = &imageResidencyBindInfo; } - } - VkBindSparseInfo bindSparseInfo = - { - VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType; - DE_NULL, //const void* pNext; - 0u, //deUint32 waitSemaphoreCount; - DE_NULL, //const VkSemaphore* pWaitSemaphores; - 0u, //deUint32 bufferBindCount; - DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds; - 0u, //deUint32 imageOpaqueBindCount; - DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; - 0u, //deUint32 imageBindCount; - DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds; - 1u, //deUint32 signalSemaphoreCount; - &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores; - }; - - VkSparseImageMemoryBindInfo imageResidencyBindInfo; - VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo; - - if (imageResidencyMemoryBinds.size() > 0) - { - imageResidencyBindInfo.image = *imageSparse; - imageResidencyBindInfo.bindCount = static_cast(imageResidencyMemoryBinds.size()); - imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0]; - - bindSparseInfo.imageBindCount = 1u; - bindSparseInfo.pImageBinds = &imageResidencyBindInfo; - } + if (imageMipTailMemoryBinds.size() > 0) + { + imageMipTailBindInfo.image = *imageSparse; + imageMipTailBindInfo.bindCount = static_cast(imageMipTailMemoryBinds.size()); + imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0]; - if (imageMipTailMemoryBinds.size() > 0) - { - imageMipTailBindInfo.image = *imageSparse; - 
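// Note: chaining VkDeviceGroupBindSparseInfoKHR (defined above) into VkBindSparseInfo makes the
// bind cross-device: resourceDeviceIndex selects the physical device that owns the sparse image,
// while memoryDeviceIndex selects the device whose memory instance backs it, so iterations with
// firstDeviceID != secondDeviceID exercise peer memory within the device group.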
imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size()); - imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0]; + bindSparseInfo.imageOpaqueBindCount = 1u; + bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo; + } - bindSparseInfo.imageOpaqueBindCount = 1u; - bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo; + // Submit sparse bind commands for execution + VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL)); } - // Submit sparse bind commands for execution - VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL)); - } - - // Create command buffer for compute and transfer operations - const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex)); - const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY)); + // Create command buffer for compute and transfer operations + const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex)); + const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY)); - std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels); + std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels); - { - deUint32 bufferOffset = 0; - for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; mipmapNdx++) { - bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, static_cast<VkDeviceSize>(bufferOffset)); - bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY); + deUint32 bufferOffset = 0; + for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; mipmapNdx++) + { + bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, static_cast<VkDeviceSize>(bufferOffset)); + bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY); + } } - } - // Start recording commands - beginCommandBuffer(deviceInterface, *commandBuffer); + // Start recording commands + beginCommandBuffer(deviceInterface, *commandBuffer); - const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY); - const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT); - const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo)); - const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible)); + const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY); + const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT); + const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo)); + const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible)); - 
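// Note: the reference pattern written below repeats with the image's memory alignment period,
// giving every bound block a deterministic, non-zero byte pattern for the readback comparison.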
std::vector referenceData(imageSizeInBytes); + std::vector referenceData(imageSizeInBytes); - const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse); + const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse); - for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx) - { - referenceData[valueNdx] = static_cast((valueNdx % imageMemoryRequirements.alignment) + 1u); - } + for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx) + { + referenceData[valueNdx] = static_cast((valueNdx % imageMemoryRequirements.alignment) + 1u); + } - deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes); + deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes); - flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes); + flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes); - { - const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier - ( - VK_ACCESS_HOST_WRITE_BIT, - VK_ACCESS_TRANSFER_READ_BIT, - *inputBuffer, - 0u, - imageSizeInBytes - ); - - deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL); - } + { + const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier + ( + VK_ACCESS_HOST_WRITE_BIT, + VK_ACCESS_TRANSFER_READ_BIT, + *inputBuffer, + 0u, + imageSizeInBytes + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL); + } - { - const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier - ( - 0u, - VK_ACCESS_TRANSFER_WRITE_BIT, - VK_IMAGE_LAYOUT_UNDEFINED, - VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, - sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, - *imageSparse, - makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers) - ); - - deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier); - } + { + const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier + ( + 0u, + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, + sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? 
computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, + *imageSparse, + makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers) + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier); + } - deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast(bufferImageCopy.size()), &bufferImageCopy[0]); + deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast(bufferImageCopy.size()), &bufferImageCopy[0]); - { - const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier - ( - VK_ACCESS_TRANSFER_WRITE_BIT, - VK_ACCESS_TRANSFER_READ_BIT, - VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - *imageSparse, - makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers) - ); - - deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier); - } + { + const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier + ( + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_TRANSFER_READ_BIT, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + *imageSparse, + makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers) + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier); + } - const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT); - const Unique outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo)); - const de::UniquePtr outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible)); + const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT); + const Unique outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo)); + const de::UniquePtr outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible)); - deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast(bufferImageCopy.size()), &bufferImageCopy[0]); + deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast(bufferImageCopy.size()), &bufferImageCopy[0]); - { - const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier - ( - VK_ACCESS_TRANSFER_WRITE_BIT, - VK_ACCESS_HOST_READ_BIT, - *outputBuffer, - 0u, - imageSizeInBytes - ); - - deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL); - } + { + const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier + ( + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_HOST_READ_BIT, + *outputBuffer, + 0u, + imageSizeInBytes + ); + + 
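// Note: this transfer-to-transfer barrier transitions the image from TRANSFER_DST_OPTIMAL to
// TRANSFER_SRC_OPTIMAL between the upload and the readback copy; no queue family transfer is
// needed here because both copies are recorded on the same compute-queue command buffer.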
deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL); + } - // End recording commands - endCommandBuffer(deviceInterface, *commandBuffer); + // End recording commands + endCommandBuffer(deviceInterface, *commandBuffer); - const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT }; + const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT }; - // Submit commands for execution and wait for completion - submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits); + // Submit commands for execution and wait for completion + submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits, + 0, DE_NULL, m_useDeviceGroups, firstDeviceID); - // Retrieve data from buffer to host memory - invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes); + // Retrieve data from buffer to host memory + invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes); - const deUint8* outputData = static_cast(outputBufferAlloc->getHostPtr()); + const deUint8* outputData = static_cast(outputBufferAlloc->getHostPtr()); - // Wait for sparse queue to become idle - deviceInterface.queueWaitIdle(sparseQueue.queueHandle); + // Wait for sparse queue to become idle + deviceInterface.queueWaitIdle(sparseQueue.queueHandle); - for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx) - { - const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx); - const deUint32 bufferOffset = static_cast(bufferImageCopy[mipmapNdx].bufferOffset); + for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx) + { + const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx); + const deUint32 bufferOffset = static_cast(bufferImageCopy[mipmapNdx].bufferOffset); - if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0) - return tcu::TestStatus::fail("Failed"); + if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0) + return tcu::TestStatus::fail("Failed"); + } } - return tcu::TestStatus::pass("Passed"); } TestInstance* MipmapSparseResidencyCase::createInstance (Context& context) const { - return new MipmapSparseResidencyInstance(context, m_imageType, m_imageSize, m_format); + return new MipmapSparseResidencyInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups); } } // anonymous ns -tcu::TestCaseGroup* createMipmapSparseResidencyTests (tcu::TestContext& testCtx) +tcu::TestCaseGroup* createMipmapSparseResidencyTestsCommon (tcu::TestContext& testCtx, de::MovePtr testGroup, const bool useDeviceGroup = false) { - de::MovePtr testGroup(new tcu::TestCaseGroup(testCtx, "mipmap_sparse_residency", "Mipmap Sparse Residency")); - static const deUint32 sizeCountPerImageType = 3u; struct ImageParameters @@ -529,7 +551,7 @@ tcu::TestCaseGroup* createMipmapSparseResidencyTests (tcu::TestContext& testCtx) std::ostringstream stream; stream << imageSize.x() << "_" << imageSize.y() << "_" << 
imageSize.z(); - formatGroup->addChild(new MipmapSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format)); + formatGroup->addChild(new MipmapSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format, useDeviceGroup)); } imageTypeGroup->addChild(formatGroup.release()); } @@ -539,5 +561,17 @@ tcu::TestCaseGroup* createMipmapSparseResidencyTests (tcu::TestContext& testCtx) return testGroup.release(); } +tcu::TestCaseGroup* createMipmapSparseResidencyTests (tcu::TestContext& testCtx) +{ + de::MovePtr testGroup(new tcu::TestCaseGroup(testCtx, "mipmap_sparse_residency", "Mipmap Sparse Residency")); + return createMipmapSparseResidencyTestsCommon(testCtx, testGroup); +} + +tcu::TestCaseGroup* createDeviceGroupMipmapSparseResidencyTests (tcu::TestContext& testCtx) +{ + de::MovePtr testGroup(new tcu::TestCaseGroup(testCtx, "device_group_mipmap_sparse_residency", "Mipmap Sparse Residency")); + return createMipmapSparseResidencyTestsCommon(testCtx, testGroup, true); +} + } // sparse } // vkt diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesMipmapSparseResidency.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesMipmapSparseResidency.hpp index d760821..1f61464 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesMipmapSparseResidency.hpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesMipmapSparseResidency.hpp @@ -32,6 +32,7 @@ namespace sparse { tcu::TestCaseGroup* createMipmapSparseResidencyTests(tcu::TestContext& testCtx); +tcu::TestCaseGroup* createDeviceGroupMipmapSparseResidencyTests(tcu::TestContext& testCtx); } // sparse } // vkt diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp index ad9e7c7..35d649c 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp @@ -40,13 +40,17 @@ tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx) { de::MovePtr sparseTests (new tcu::TestCaseGroup(testCtx, "sparse_resources", "Sparse Resources Tests")); - sparseTests->addChild(createSparseBufferTests (testCtx)); - sparseTests->addChild(createImageSparseBindingTests (testCtx)); - sparseTests->addChild(createImageSparseResidencyTests (testCtx)); - sparseTests->addChild(createMipmapSparseResidencyTests (testCtx)); - sparseTests->addChild(createImageSparseMemoryAliasingTests (testCtx)); - sparseTests->addChild(createSparseResourcesShaderIntrinsicsTests(testCtx)); - sparseTests->addChild(createQueueBindSparseTests (testCtx)); + sparseTests->addChild(createSparseBufferTests (testCtx)); + sparseTests->addChild(createImageSparseBindingTests (testCtx)); + sparseTests->addChild(createDeviceGroupImageSparseBindingTests (testCtx)); + sparseTests->addChild(createImageSparseResidencyTests (testCtx)); + sparseTests->addChild(createDeviceGroupImageSparseResidencyTests (testCtx)); + sparseTests->addChild(createMipmapSparseResidencyTests (testCtx)); + sparseTests->addChild(createDeviceGroupMipmapSparseResidencyTests (testCtx)); + sparseTests->addChild(createImageSparseMemoryAliasingTests (testCtx)); + sparseTests->addChild(createDeviceGroupImageSparseMemoryAliasingTests (testCtx)); + sparseTests->addChild(createSparseResourcesShaderIntrinsicsTests (testCtx)); + sparseTests->addChild(createQueueBindSparseTests (testCtx)); return 
sparseTests.release(); } diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp index fd38c5e..6b11e60 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp @@ -23,6 +23,7 @@ #include "vktSparseResourcesTestsUtil.hpp" #include "vkQueryUtil.hpp" +#include "vkDeviceUtil.hpp" #include "vkTypeUtil.hpp" #include "tcuTextureUtil.hpp" @@ -35,6 +36,20 @@ namespace vkt namespace sparse { +vk::Move<vk::VkInstance> createInstanceWithExtensions(const vk::PlatformInterface& vkp, const std::vector<std::string> enableExtensions) +{ + std::vector<std::string> enableExtensionPtrs (enableExtensions.size()); + const std::vector<VkExtensionProperties> availableExtensions = enumerateInstanceExtensionProperties(vkp, DE_NULL); + for (size_t extensionID = 0; extensionID < enableExtensions.size(); extensionID++) + { + if (!isExtensionSupported(availableExtensions, RequiredExtension(enableExtensions[extensionID]))) + TCU_THROW(NotSupportedError, (enableExtensions[extensionID] + " is not supported").c_str()); + enableExtensionPtrs[extensionID] = enableExtensions[extensionID]; + } + + return createDefaultInstance(vkp, std::vector<std::string>() /* layers */, enableExtensionPtrs); +} + tcu::UVec3 getShaderGridSize (const ImageType imageType, const tcu::UVec3& imageSize, const deUint32 mipLevel) { const deUint32 mipLevelX = std::max(imageSize.x() >> mipLevel, 1u); @@ -375,15 +390,15 @@ Move<VkFramebuffer> makeFramebuffer (const DeviceInterface& vk, { const VkFramebufferCreateInfo framebufferInfo = { - VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType; - DE_NULL, // const void* pNext; - (VkFramebufferCreateFlags)0, // VkFramebufferCreateFlags flags; - renderPass, // VkRenderPass renderPass; - attachmentCount, // uint32_t attachmentCount; - pAttachments, // const VkImageView* pAttachments; - width, // uint32_t width; - height, // uint32_t height; - layers, // uint32_t layers; + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkFramebufferCreateFlags)0, // VkFramebufferCreateFlags flags; + renderPass, // VkRenderPass renderPass; + attachmentCount, // uint32_t attachmentCount; + pAttachments, // const VkImageView* pAttachments; + width, // uint32_t width; + height, // uint32_t height; + layers, // uint32_t layers; }; return createFramebuffer(vk, device, &framebufferInfo); @@ -535,7 +550,9 @@ void submitCommandsAndWait (const DeviceInterface& vk, const VkSemaphore* pWaitSemaphores, const VkPipelineStageFlags* pWaitDstStageMask, const deUint32 signalSemaphoreCount, - const VkSemaphore* pSignalSemaphores) + const VkSemaphore* pSignalSemaphores, + const bool useDeviceGroups, + const deUint32 physicalDeviceID) { const VkFenceCreateInfo fenceParams = { @@ -545,17 +562,30 @@ void submitCommandsAndWait (const DeviceInterface& vk, }; const Unique<VkFence> fence(createFence(vk, device, &fenceParams)); + const deUint32 deviceMask = 1 << physicalDeviceID; + std::vector<deUint32> deviceIndices (waitSemaphoreCount, physicalDeviceID); + VkDeviceGroupSubmitInfoKHR deviceGroupSubmitInfo = + { + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR, //VkStructureType sType + DE_NULL, // const void* pNext + waitSemaphoreCount, // uint32_t waitSemaphoreCount + deviceIndices.size() ? 
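// Note: in VkDeviceGroupSubmitInfoKHR, pWaitSemaphoreDeviceIndices carries one device index per
// wait semaphore, and pCommandBufferDeviceMasks restricts each command buffer to the devices
// whose bits are set in its mask (here 1 << physicalDeviceID).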
&deviceIndices[0] : DE_NULL, // const uint32_t* pWaitSemaphoreDeviceIndices + 1u, // uint32_t commandBufferCount + &deviceMask, // const uint32_t* pCommandBufferDeviceMasks + 0u, // uint32_t signalSemaphoreCount + DE_NULL, // const uint32_t* pSignalSemaphoreDeviceIndices + }; const VkSubmitInfo submitInfo = { - VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType; - DE_NULL, // const void* pNext; - waitSemaphoreCount, // deUint32 waitSemaphoreCount; - pWaitSemaphores, // const VkSemaphore* pWaitSemaphores; - pWaitDstStageMask, // const VkPipelineStageFlags* pWaitDstStageMask; - 1u, // deUint32 commandBufferCount; - &commandBuffer, // const VkCommandBuffer* pCommandBuffers; - signalSemaphoreCount, // deUint32 signalSemaphoreCount; - pSignalSemaphores, // const VkSemaphore* pSignalSemaphores; + VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType; + useDeviceGroups ? &deviceGroupSubmitInfo : DE_NULL, // const void* pNext; + waitSemaphoreCount, // deUint32 waitSemaphoreCount; + pWaitSemaphores, // const VkSemaphore* pWaitSemaphores; + pWaitDstStageMask, // const VkPipelineStageFlags* pWaitDstStageMask; + 1u, // deUint32 commandBufferCount; + &commandBuffer, // const VkCommandBuffer* pCommandBuffers; + signalSemaphoreCount, // deUint32 signalSemaphoreCount; + pSignalSemaphores, // const VkSemaphore* pSignalSemaphores; }; VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence)); diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp index 7049aed..8f875c5 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp @@ -88,6 +88,10 @@ std::string getShaderImageCoordinates (const ImageType imageType, const std::string& xy, const std::string& xyz); +//!< Create instance with specific extensions +vk::Move<vk::VkInstance> createInstanceWithExtensions (const vk::PlatformInterface& vkp, + const std::vector<std::string> enableExtensions); + //!< Size used for addressing image in a compute shader tcu::UVec3 getShaderGridSize (const ImageType imageType, const tcu::UVec3& imageSize, @@ -263,7 +267,9 @@ void submitCommandsAndWait (const vk::DeviceInterface& vk, const vk::VkSemaphore* pWaitSemaphores = DE_NULL, const vk::VkPipelineStageFlags* pWaitDstStageMask = DE_NULL, const deUint32 signalSemaphoreCount = 0, - const vk::VkSemaphore* pSignalSemaphores = DE_NULL); + const vk::VkSemaphore* pSignalSemaphores = DE_NULL, + const bool useDeviceGroups = false, + const deUint32 physicalDeviceID = 0); void requireFeatures (const vk::InstanceInterface& vki, const vk::VkPhysicalDevice physicalDevice, diff --git a/external/vulkancts/mustpass/1.0.3/vk-default.txt b/external/vulkancts/mustpass/1.0.3/vk-default.txt index f4f4e66..6294aca 100755 --- a/external/vulkancts/mustpass/1.0.3/vk-default.txt +++ b/external/vulkancts/mustpass/1.0.3/vk-default.txt @@ -237303,35 +237303,70 @@ dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_16 dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_17 dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_20 dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_24 +dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_10 +dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_12 
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_16 +dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_17 +dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_20 +dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_24 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_10 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_12 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_16 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_17 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_20 dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_24 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_10 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_12 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_16 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_17 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_20 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_24 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_10 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_12 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_16 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_17 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_20 dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_24 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_10 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_12 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_16 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_17 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_20 +dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_24 dEQP-VK.sparse_resources.buffer.ubo.sparse_binding dEQP-VK.sparse_resources.buffer.ubo.sparse_binding_aliased dEQP-VK.sparse_resources.buffer.ubo.sparse_residency dEQP-VK.sparse_resources.buffer.ubo.sparse_residency_aliased dEQP-VK.sparse_resources.buffer.ubo.sparse_residency_non_resident_strict +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_binding +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_binding_aliased +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency_aliased +dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency_non_resident_strict dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_binding dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_binding_aliased dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_residency dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_residency_aliased +dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_binding +dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_binding_aliased +dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_residency +dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_residency_aliased 
dEQP-VK.sparse_resources.buffer.index_buffer.sparse_binding dEQP-VK.sparse_resources.buffer.index_buffer.sparse_binding_aliased dEQP-VK.sparse_resources.buffer.index_buffer.sparse_residency dEQP-VK.sparse_resources.buffer.index_buffer.sparse_residency_aliased +dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_binding +dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_binding_aliased +dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_residency +dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_residency_aliased dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_binding dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_binding_aliased dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_residency dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_residency_aliased +dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_binding +dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_binding_aliased +dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_residency +dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_residency_aliased dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.512_1_1 dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.1024_1_1 dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.11_1_1 @@ -237458,6 +237493,132 @@ dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba16ui.137_137_3 dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.256_256_6 dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.128_128_8 dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.137_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.512_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.1024_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.11_1_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.1024_1_8 
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.512_1_64 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.1024_1_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.11_1_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.512_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.1024_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.11_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.11_137_3 
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.1024_128_8 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.11_137_3 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.256_256_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.137_137_1 +dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.256_256_6 
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.137_137_3
dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.512_256_1
dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.1024_128_1
dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.11_137_1
@@ -237593,6 +237754,141 @@ dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba16ui.11_137_3
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.512_256_16
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.1024_128_8
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.11_137_3
dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.512_256_1
dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.1024_128_1
dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.11_137_1
@@ -237683,6 +237979,96 @@ dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba16ui.11_137_3
dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.256_256_16
dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.1024_128_8
dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.11_137_3
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.512_256_1
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.128_128_1
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.503_137_1
@@ -237803,6 +238189,126 @@ dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.256_256_16
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.128_128_8
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.503_137_3
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.11_37_3
dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.512_256_1
dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.128_128_1
dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.503_137_1