dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_17
dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_20
dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_24
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_10
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_12
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_16
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_17
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_20
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_24
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_10
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_12
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_16
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_17
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_20
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_24
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_10
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_12
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_16
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_17
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_20
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_24
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_10
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_12
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_16
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_17
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_20
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_24
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_10
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_12
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_16
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_17
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_20
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_24
dEQP-VK.sparse_resources.buffer.ubo.sparse_binding
dEQP-VK.sparse_resources.buffer.ubo.sparse_binding_aliased
dEQP-VK.sparse_resources.buffer.ubo.sparse_residency
dEQP-VK.sparse_resources.buffer.ubo.sparse_residency_aliased
dEQP-VK.sparse_resources.buffer.ubo.sparse_residency_non_resident_strict
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_binding
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_binding_aliased
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency_aliased
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency_non_resident_strict
dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_binding
dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_binding_aliased
dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_residency
dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_residency_aliased
+dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_binding
+dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_binding_aliased
+dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_residency
+dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_residency_aliased
dEQP-VK.sparse_resources.buffer.index_buffer.sparse_binding
dEQP-VK.sparse_resources.buffer.index_buffer.sparse_binding_aliased
dEQP-VK.sparse_resources.buffer.index_buffer.sparse_residency
dEQP-VK.sparse_resources.buffer.index_buffer.sparse_residency_aliased
+dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_binding
+dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_binding_aliased
+dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_residency
+dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_residency_aliased
dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_binding
dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_binding_aliased
dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_residency
dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_residency_aliased
+dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_binding
+dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_binding_aliased
+dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_residency
+dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_residency_aliased
dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.512_1_1
dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.1024_1_1
dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.11_1_1
dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.256_256_6
dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.128_128_8
dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.137_137_3
dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.512_256_1
dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.1024_128_1
dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.11_137_1
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.512_256_16
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.1024_128_8
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.11_137_3
dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.512_256_1
dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.1024_128_1
dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.11_137_1
dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.256_256_16
dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.1024_128_8
dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.11_137_3
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.512_256_1
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.128_128_1
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.503_137_1
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.128_128_8
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.503_137_3
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.11_37_3
dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.512_256_1
dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.128_128_1
dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.503_137_1
*//*--------------------------------------------------------------------*/
#include "vktSparseResourcesBase.hpp"
+#include "vktSparseResourcesTestsUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkTypeUtil.hpp"
deUint32 queueCount;
};
-static const deUint32 NO_MATCH_FOUND = ~0u;
-
deUint32 findMatchingQueueFamilyIndex (const std::vector<vk::VkQueueFamilyProperties>& queueFamilyProperties,
const VkQueueFlags queueFlags,
const deUint32 startIndex)
typedef std::map<deUint32, QueueFamilyQueuesCount> SelectedQueuesMap;
typedef std::map<deUint32, std::vector<float> > QueuePrioritiesMap;
- const InstanceInterface& instance = m_context.getInstanceInterface();
- const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
+ std::vector<VkPhysicalDeviceGroupPropertiesKHR> devGroupProperties;
+ std::vector<const char*> deviceExtensions;
+ VkDeviceGroupDeviceCreateInfoKHR deviceGroupInfo =
+ {
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR, //stype
+ DE_NULL, //pNext
+ 0, //physicalDeviceCount
+ DE_NULL //physicalDevices
+ };
+ m_physicalDevices.push_back(m_context.getPhysicalDevice());
+
+	// If requested, create an instance with device groups
+ if (m_useDeviceGroups)
+ {
+ const std::vector<std::string> requiredExtensions(1, "VK_KHR_device_group_creation");
+ m_deviceGroupInstance = createInstanceWithExtensions(m_context.getPlatformInterface(), requiredExtensions);
+ devGroupProperties = enumeratePhysicalDeviceGroupsKHR(m_context.getInstanceInterface(), m_deviceGroupInstance.get());
+ m_numPhysicalDevices = devGroupProperties[m_deviceGroupIdx].physicalDeviceCount;
+
+ m_physicalDevices.clear();
+ for (size_t physDeviceID = 0; physDeviceID < m_numPhysicalDevices; physDeviceID++)
+ {
+ m_physicalDevices.push_back(devGroupProperties[m_deviceGroupIdx].physicalDevices[physDeviceID]);
+ }
+ if (m_numPhysicalDevices < 2)
+ TCU_THROW(NotSupportedError, "Sparse binding device group tests not supported with 1 physical device");
+
+ deviceGroupInfo.physicalDeviceCount = devGroupProperties[m_deviceGroupIdx].physicalDeviceCount;
+ deviceGroupInfo.pPhysicalDevices = devGroupProperties[m_deviceGroupIdx].physicalDevices;
+
+ deviceExtensions.push_back("VK_KHR_device_group");
+ }
+ InstanceDriver instance(m_context.getPlatformInterface(), m_useDeviceGroups ? m_deviceGroupInstance.get() : m_context.getInstance());
+ const VkPhysicalDevice physicalDevice = getPhysicalDevice();
deUint32 queueFamilyPropertiesCount = 0u;
instance.getPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueFamilyPropertiesCount, DE_NULL);
const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instance, physicalDevice);
const VkDeviceCreateInfo deviceInfo =
{
- VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- (VkDeviceCreateFlags)0, // VkDeviceCreateFlags flags;
- static_cast<deUint32>(queueInfos.size()), // uint32_t queueCreateInfoCount;
- &queueInfos[0], // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
- 0u, // uint32_t enabledLayerCount;
- DE_NULL, // const char* const* ppEnabledLayerNames;
- 0u, // uint32_t enabledExtensionCount;
- DE_NULL, // const char* const* ppEnabledExtensionNames;
- &deviceFeatures, // const VkPhysicalDeviceFeatures* pEnabledFeatures;
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // VkStructureType sType;
+ m_useDeviceGroups ? &deviceGroupInfo : DE_NULL, // const void* pNext;
+ (VkDeviceCreateFlags)0, // VkDeviceCreateFlags flags;
+ static_cast<deUint32>(queueInfos.size()) , // uint32_t queueCreateInfoCount;
+ &queueInfos[0], // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
+ 0u, // uint32_t enabledLayerCount;
+ DE_NULL, // const char* const* ppEnabledLayerNames;
+ deUint32(deviceExtensions.size()), // uint32_t enabledExtensionCount;
+ deviceExtensions.size() ? &deviceExtensions[0] : DE_NULL, // const char* const* ppEnabledExtensionNames;
+ &deviceFeatures, // const VkPhysicalDeviceFeatures* pEnabledFeatures;
};
m_logicalDevice = createDevice(instance, physicalDevice, &deviceInfo);
#include "vkRef.hpp"
#include "vkPlatform.hpp"
#include "deUniquePtr.hpp"
+#include "tcuCommandLine.hpp"
#include <map>
#include <vector>
class SparseResourcesBaseInstance : public TestInstance
{
public:
- SparseResourcesBaseInstance (Context &context) : TestInstance(context) {}
+ SparseResourcesBaseInstance (Context &context, bool useDeviceGroups = false)
+ : TestInstance (context)
+ , m_numPhysicalDevices (1)
+ , m_useDeviceGroups (useDeviceGroups)
+ {
+ const tcu::CommandLine& cmdLine = context.getTestContext().getCommandLine();
+ m_deviceGroupIdx = cmdLine.getVKDeviceGroupId() - 1;
+ }
+ bool usingDeviceGroups() { return m_useDeviceGroups; }
protected:
typedef std::vector<QueueRequirements> QueueRequirementsVec;
+ deUint32 m_numPhysicalDevices;
+
void createDeviceSupportingQueues (const QueueRequirementsVec& queueRequirements);
const Queue& getQueue (const vk::VkQueueFlags queueFlags, const deUint32 queueIndex) const;
- const vk::DeviceInterface& getDeviceInterface (void) const { return *m_deviceDriver; }
- vk::VkDevice getDevice (void) const { return *m_logicalDevice; }
- vk::Allocator& getAllocator (void) { return *m_allocator; }
+ const vk::DeviceInterface& getDeviceInterface (void) const { return *m_deviceDriver; }
+ vk::VkDevice getDevice (void) const { return *m_logicalDevice; }
+ vk::Allocator& getAllocator (void) { return *m_allocator; }
+ vk::VkPhysicalDevice getPhysicalDevice (deUint32 i = 0) { return m_physicalDevices[i];}
private:
+ bool m_useDeviceGroups;
+ deUint32 m_deviceGroupIdx;
+ vk::Move<vk::VkInstance> m_deviceGroupInstance;
+ std::vector<vk::VkPhysicalDevice> m_physicalDevices;
std::map<vk::VkQueueFlags, std::vector<Queue> > m_queues;
de::MovePtr<vk::DeviceDriver> m_deviceDriver;
vk::Move<vk::VkDevice> m_logicalDevice;
const std::string& name,
const std::string& description,
const deUint32 bufferSize,
- const glu::GLSLVersion glslVersion);
+ const glu::GLSLVersion glslVersion,
+ const bool useDeviceGroups);
void initPrograms (SourceCollections& sourceCollections) const;
TestInstance* createInstance (Context& context) const;
private:
const deUint32 m_bufferSizeInBytes;
const glu::GLSLVersion m_glslVersion;
+ const bool m_useDeviceGroups;
};
BufferSparseMemoryAliasingCase::BufferSparseMemoryAliasingCase (tcu::TestContext& testCtx,
const std::string& name,
const std::string& description,
const deUint32 bufferSize,
- const glu::GLSLVersion glslVersion)
+ const glu::GLSLVersion glslVersion,
+ const bool useDeviceGroups)
: TestCase (testCtx, name, description)
, m_bufferSizeInBytes (bufferSize)
, m_glslVersion (glslVersion)
+ , m_useDeviceGroups (useDeviceGroups)
{
}
{
public:
BufferSparseMemoryAliasingInstance (Context& context,
- const deUint32 bufferSize);
+ const deUint32 bufferSize,
+ const bool useDeviceGroups);
tcu::TestStatus iterate (void);
private:
const deUint32 m_bufferSizeInBytes;
+ const deUint32 m_useDeviceGroups;
+
};
-BufferSparseMemoryAliasingInstance::BufferSparseMemoryAliasingInstance (Context& context,
- const deUint32 bufferSize)
- : SparseResourcesBaseInstance (context)
+BufferSparseMemoryAliasingInstance::BufferSparseMemoryAliasingInstance (Context& context,
+ const deUint32 bufferSize,
+ const bool useDeviceGroups)
+ : SparseResourcesBaseInstance (context, useDeviceGroups)
, m_bufferSizeInBytes (bufferSize)
+ , m_useDeviceGroups (useDeviceGroups)
{
}
tcu::TestStatus BufferSparseMemoryAliasingInstance::iterate (void)
{
const InstanceInterface& instance = m_context.getInstanceInterface();
- const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
-
- if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
- TCU_THROW(NotSupportedError, "Sparse binding not supported");
-
- if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyAliased)
- TCU_THROW(NotSupportedError, "Sparse memory aliasing not supported");
-
{
// Create logical device supporting both sparse and compute operations
QueueRequirementsVec queueRequirements;
createDeviceSupportingQueues(queueRequirements);
}
+ const vk::VkPhysicalDevice& physicalDevice = getPhysicalDevice();
+
+ if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
+ TCU_THROW(NotSupportedError, "Sparse binding not supported");
+
+ if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyAliased)
+ TCU_THROW(NotSupportedError, "Sparse memory aliasing not supported");
const DeviceInterface& deviceInterface = getDeviceInterface();
const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
- VkBufferCreateInfo bufferCreateInfo =
- {
- VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
- VK_BUFFER_CREATE_SPARSE_ALIASED_BIT, // VkBufferCreateFlags flags;
- m_bufferSizeInBytes, // VkDeviceSize size;
- VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
- VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
- VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
- 0u, // deUint32 queueFamilyIndexCount;
- DE_NULL // const deUint32* pQueueFamilyIndices;
- };
-
- const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
-
- if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
+ // Go through all physical devices
+ for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
{
- bufferCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT;
- bufferCreateInfo.queueFamilyIndexCount = 2u;
- bufferCreateInfo.pQueueFamilyIndices = queueFamilyIndices;
- }
+ const deUint32 firstDeviceID = physDevID;
+ const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
- // Create sparse buffers
- const Unique<VkBuffer> sparseBufferWrite(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
- const Unique<VkBuffer> sparseBufferRead (createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
+ VkBufferCreateInfo bufferCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
+ VK_BUFFER_CREATE_SPARSE_ALIASED_BIT, // VkBufferCreateFlags flags;
+ m_bufferSizeInBytes, // VkDeviceSize size;
+ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+ VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 0u, // deUint32 queueFamilyIndexCount;
+ DE_NULL // const deUint32* pQueueFamilyIndices;
+ };
- // Create sparse buffers memory bind semaphore
- const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+ const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
- const VkMemoryRequirements bufferMemRequirements = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBufferWrite);
+ if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
+ {
+ bufferCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT;
+ bufferCreateInfo.queueFamilyIndexCount = 2u;
+ bufferCreateInfo.pQueueFamilyIndices = queueFamilyIndices;
+ }
- if (bufferMemRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
- TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
+ // Create sparse buffers
+ const Unique<VkBuffer> sparseBufferWrite(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
+ const Unique<VkBuffer> sparseBufferRead(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
- DE_ASSERT((bufferMemRequirements.size % bufferMemRequirements.alignment) == 0);
+ // Create sparse buffers memory bind semaphore
+ const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
- const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirements, MemoryRequirement::Any);
+ const VkMemoryRequirements bufferMemRequirements = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBufferWrite);
- if (memoryType == NO_MATCH_FOUND)
- return tcu::TestStatus::fail("No matching memory type found");
+ if (bufferMemRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
+ TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
- const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), bufferMemRequirements.size, memoryType, 0u);
+ DE_ASSERT((bufferMemRequirements.size % bufferMemRequirements.alignment) == 0);
- Move<VkDeviceMemory> deviceMemoryPtr(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL));
+ const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirements, MemoryRequirement::Any);
- {
- const VkSparseBufferMemoryBindInfo sparseBufferMemoryBindInfo[2] =
- {
- makeSparseBufferMemoryBindInfo
- (*sparseBufferWrite, //VkBuffer buffer;
- 1u, //deUint32 bindCount;
- &sparseMemoryBind //const VkSparseMemoryBind* Binds;
- ),
-
- makeSparseBufferMemoryBindInfo
- (*sparseBufferRead, //VkBuffer buffer;
- 1u, //deUint32 bindCount;
- &sparseMemoryBind //const VkSparseMemoryBind* Binds;
- )
- };
-
- const VkBindSparseInfo bindSparseInfo =
- {
- VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
- DE_NULL, //const void* pNext;
- 0u, //deUint32 waitSemaphoreCount;
- DE_NULL, //const VkSemaphore* pWaitSemaphores;
- 2u, //deUint32 bufferBindCount;
- sparseBufferMemoryBindInfo, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
- 0u, //deUint32 imageOpaqueBindCount;
- DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
- 0u, //deUint32 imageBindCount;
- DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
- 1u, //deUint32 signalSemaphoreCount;
- &bufferMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
- };
+ if (memoryType == NO_MATCH_FOUND)
+ return tcu::TestStatus::fail("No matching memory type found");
- // Submit sparse bind commands for execution
- VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
- }
+ const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), bufferMemRequirements.size, memoryType, 0u);
- // Create output buffer
- const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
- const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
- const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+ Move<VkDeviceMemory> deviceMemoryPtr(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL));
- // Create command buffer for compute and data transfer oparations
- const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
- const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+ {
+ const VkSparseBufferMemoryBindInfo sparseBufferMemoryBindInfo[2] =
+ {
+ makeSparseBufferMemoryBindInfo
+ (*sparseBufferWrite, //VkBuffer buffer;
+ 1u, //deUint32 bindCount;
+ &sparseMemoryBind //const VkSparseMemoryBind* Binds;
+ ),
+
+ makeSparseBufferMemoryBindInfo
+ (*sparseBufferRead, //VkBuffer buffer;
+ 1u, //deUint32 bindCount;
+ &sparseMemoryBind //const VkSparseMemoryBind* Binds;
+ )
+ };
+
+ const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType;
+ DE_NULL, //const void* pNext;
+ firstDeviceID, //deUint32 resourceDeviceIndex;
+ secondDeviceID, //deUint32 memoryDeviceIndex;
+ };
+
+ const VkBindSparseInfo bindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
+ m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext;
+ 0u, //deUint32 waitSemaphoreCount;
+ DE_NULL, //const VkSemaphore* pWaitSemaphores;
+ 2u, //deUint32 bufferBindCount;
+ sparseBufferMemoryBindInfo, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ 0u, //deUint32 imageOpaqueBindCount;
+ DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ 0u, //deUint32 imageBindCount;
+ DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
+ 1u, //deUint32 signalSemaphoreCount;
+ &bufferMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
+ };
+
+ // Submit sparse bind commands for execution
+ VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
+ }
+
+ // Create output buffer
+ const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+ const Unique<VkBuffer> outputBuffer(createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+ const de::UniquePtr<Allocation> outputBufferAlloc(bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+
+	// Create command buffer for compute and data transfer operations
+ const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+ const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+
+ // Start recording commands
+ beginCommandBuffer(deviceInterface, *commandBuffer);
+
+ // Create descriptor set
+ const Unique<VkDescriptorSetLayout> descriptorSetLayout(
+ DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+ .build(deviceInterface, getDevice()));
+
+ // Create compute pipeline
+ const Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
+ const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
+ const Unique<VkPipeline> computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+
+ deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
+
+	// Create descriptor pool
+ const Unique<VkDescriptorPool> descriptorPool(
+ DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
+ .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+
+ const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
- // Start recording commands
- beginCommandBuffer(deviceInterface, *commandBuffer);
+ {
+ const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(*sparseBufferWrite, 0u, m_bufferSizeInBytes);
- // Create descriptor set
- const Unique<VkDescriptorSetLayout> descriptorSetLayout(
- DescriptorSetLayoutBuilder()
- .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
- .build(deviceInterface, getDevice()));
+ DescriptorSetUpdateBuilder()
+ .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &sparseBufferInfo)
+ .update(deviceInterface, getDevice());
+ }
- // Create compute pipeline
- const Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
- const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
- const Unique<VkPipeline> computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+ deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
- deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
+ {
+ deUint32 numInvocationsLeft = m_bufferSizeInBytes / SIZE_OF_UINT_IN_SHADER;
+ const tcu::UVec3 workGroupSize = computeWorkGroupSize(numInvocationsLeft);
+ const tcu::UVec3 maxComputeWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u);
- // Create descriptor set
- const Unique<VkDescriptorPool> descriptorPool(
- DescriptorPoolBuilder()
- .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u)
- .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+ numInvocationsLeft -= workGroupSize.x()*workGroupSize.y()*workGroupSize.z();
- const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
+ const deUint32 xWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.x());
+ numInvocationsLeft = numInvocationsLeft / xWorkGroupCount + ((numInvocationsLeft % xWorkGroupCount) ? 1u : 0u);
+ const deUint32 yWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.y());
+ numInvocationsLeft = numInvocationsLeft / yWorkGroupCount + ((numInvocationsLeft % yWorkGroupCount) ? 1u : 0u);
+ const deUint32 zWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.z());
+ numInvocationsLeft = numInvocationsLeft / zWorkGroupCount + ((numInvocationsLeft % zWorkGroupCount) ? 1u : 0u);
- {
- const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(*sparseBufferWrite, 0u, m_bufferSizeInBytes);
+ if (numInvocationsLeft != 1u)
+ TCU_THROW(NotSupportedError, "Buffer size is not supported");
- DescriptorSetUpdateBuilder()
- .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &sparseBufferInfo)
- .update(deviceInterface, getDevice());
- }
-
- deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
+ deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
+ }
- {
- deUint32 numInvocationsLeft = m_bufferSizeInBytes / SIZE_OF_UINT_IN_SHADER;
- const tcu::UVec3 workGroupSize = computeWorkGroupSize(numInvocationsLeft);
- const tcu::UVec3 maxComputeWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u);
-
- numInvocationsLeft -= workGroupSize.x()*workGroupSize.y()*workGroupSize.z();
+ {
+ const VkBufferMemoryBarrier sparseBufferWriteBarrier
+ = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ *sparseBufferWrite,
+ 0ull,
+ m_bufferSizeInBytes);
- const deUint32 xWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.x());
- numInvocationsLeft = numInvocationsLeft / xWorkGroupCount + ((numInvocationsLeft % xWorkGroupCount) ? 1u : 0u);
- const deUint32 yWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.y());
- numInvocationsLeft = numInvocationsLeft / yWorkGroupCount + ((numInvocationsLeft % yWorkGroupCount) ? 1u : 0u);
- const deUint32 zWorkGroupCount = std::min(numInvocationsLeft, maxComputeWorkGroupCount.z());
- numInvocationsLeft = numInvocationsLeft / zWorkGroupCount + ((numInvocationsLeft % zWorkGroupCount) ? 1u : 0u);
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferWriteBarrier, 0u, DE_NULL);
+ }
- if (numInvocationsLeft != 1u)
- TCU_THROW(NotSupportedError, "Buffer size is not supported");
+ {
+ const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSizeInBytes);
- deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
- }
+ deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBufferRead, *outputBuffer, 1u, &bufferCopy);
+ }
- {
- const VkBufferMemoryBarrier sparseBufferWriteBarrier
- = makeBufferMemoryBarrier( VK_ACCESS_SHADER_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- *sparseBufferWrite,
- 0ull,
- m_bufferSizeInBytes);
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferWriteBarrier, 0u, DE_NULL);
- }
-
- {
- const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSizeInBytes);
+ {
+ const VkBufferMemoryBarrier outputBufferHostBarrier
+ = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ *outputBuffer,
+ 0ull,
+ m_bufferSizeInBytes);
- deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBufferRead, *outputBuffer, 1u, &bufferCopy);
- }
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferHostBarrier, 0u, DE_NULL);
+ }
- {
- const VkBufferMemoryBarrier outputBufferHostBarrier
- = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_HOST_READ_BIT,
- *outputBuffer,
- 0ull,
- m_bufferSizeInBytes);
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferHostBarrier, 0u, DE_NULL);
- }
+ // End recording commands
+ endCommandBuffer(deviceInterface, *commandBuffer);
- // End recording commands
- endCommandBuffer(deviceInterface, *commandBuffer);
+ // The stage at which execution is going to wait for finish of sparse binding operations
+ const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
- // The stage at which execution is going to wait for finish of sparse binding operations
- const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
+ // Submit commands for execution and wait for completion
+ // In case of device groups, submit on the physical device with the resource
+ submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(),
+ waitStageBits, 0, DE_NULL, m_useDeviceGroups, firstDeviceID);
- // Submit commands for execution and wait for completion
- submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(), waitStageBits);
+ // Retrieve data from output buffer to host memory
+ invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSizeInBytes);
- // Retrieve data from output buffer to host memory
- invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSizeInBytes);
+ const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
- const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+ // Wait for sparse queue to become idle
+ deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
- // Wait for sparse queue to become idle
- deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+ // Prepare reference data
+ std::vector<deUint8> referenceData;
+ referenceData.resize(m_bufferSizeInBytes);
- // Prepare reference data
- std::vector<deUint8> referenceData;
- referenceData.resize(m_bufferSizeInBytes);
+ std::vector<deUint32> referenceDataBlock;
+ referenceDataBlock.resize(MODULO_DIVISOR);
- std::vector<deUint32> referenceDataBlock;
- referenceDataBlock.resize(MODULO_DIVISOR);
+ for (deUint32 valueNdx = 0; valueNdx < MODULO_DIVISOR; ++valueNdx)
+ {
+ referenceDataBlock[valueNdx] = valueNdx % MODULO_DIVISOR;
+ }
- for (deUint32 valueNdx = 0; valueNdx < MODULO_DIVISOR; ++valueNdx)
- {
- referenceDataBlock[valueNdx] = valueNdx % MODULO_DIVISOR;
- }
+ const deUint32 fullBlockSizeInBytes = MODULO_DIVISOR * SIZE_OF_UINT_IN_SHADER;
+ const deUint32 lastBlockSizeInBytes = m_bufferSizeInBytes % fullBlockSizeInBytes;
+ const deUint32 numberOfBlocks = m_bufferSizeInBytes / fullBlockSizeInBytes + (lastBlockSizeInBytes ? 1u : 0u);
- const deUint32 fullBlockSizeInBytes = MODULO_DIVISOR * SIZE_OF_UINT_IN_SHADER;
- const deUint32 lastBlockSizeInBytes = m_bufferSizeInBytes % fullBlockSizeInBytes;
- const deUint32 numberOfBlocks = m_bufferSizeInBytes / fullBlockSizeInBytes + (lastBlockSizeInBytes ? 1u : 0u);
+ for (deUint32 blockNdx = 0; blockNdx < numberOfBlocks; ++blockNdx)
+ {
+ const deUint32 offset = blockNdx * fullBlockSizeInBytes;
+ deMemcpy(&referenceData[0] + offset, &referenceDataBlock[0], ((offset + fullBlockSizeInBytes) <= m_bufferSizeInBytes) ? fullBlockSizeInBytes : lastBlockSizeInBytes);
+ }
- for (deUint32 blockNdx = 0; blockNdx < numberOfBlocks; ++blockNdx)
- {
- const deUint32 offset = blockNdx * fullBlockSizeInBytes;
- deMemcpy(&referenceData[0] + offset, &referenceDataBlock[0], ((offset + fullBlockSizeInBytes) <= m_bufferSizeInBytes) ? fullBlockSizeInBytes : lastBlockSizeInBytes);
+ // Compare reference data with output data
+ if (deMemCmp(&referenceData[0], outputData, m_bufferSizeInBytes) != 0)
+ return tcu::TestStatus::fail("Failed");
}
-
- // Compare reference data with output data
- if (deMemCmp(&referenceData[0], outputData, m_bufferSizeInBytes) != 0)
- return tcu::TestStatus::fail("Failed");
- else
- return tcu::TestStatus::pass("Passed");
+ return tcu::TestStatus::pass("Passed");
}
TestInstance* BufferSparseMemoryAliasingCase::createInstance (Context& context) const
{
- return new BufferSparseMemoryAliasingInstance(context, m_bufferSizeInBytes);
+ return new BufferSparseMemoryAliasingInstance(context, m_bufferSizeInBytes, m_useDeviceGroups);
}
} // anonymous ns
-void addBufferSparseMemoryAliasingTests(tcu::TestCaseGroup* group)
+void addBufferSparseMemoryAliasingTests(tcu::TestCaseGroup* group, const bool useDeviceGroups)
{
- group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, glu::GLSL_VERSION_440));
+ group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseMemoryAliasingCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, glu::GLSL_VERSION_440, useDeviceGroups));
}
} // sparse
namespace sparse
{
-void addBufferSparseMemoryAliasingTests(tcu::TestCaseGroup* group);
+void addBufferSparseMemoryAliasingTests(tcu::TestCaseGroup* group, const bool useDeviceGroups);
} // sparse
} // vkt
BufferSparseBindingCase (tcu::TestContext& testCtx,
const std::string& name,
const std::string& description,
- const deUint32 bufferSize);
+ const deUint32 bufferSize,
+ const bool useDeviceGroups);
TestInstance* createInstance (Context& context) const;
private:
const deUint32 m_bufferSize;
+ const bool m_useDeviceGroups;
};
BufferSparseBindingCase::BufferSparseBindingCase (tcu::TestContext& testCtx,
const std::string& name,
const std::string& description,
- const deUint32 bufferSize)
+ const deUint32 bufferSize,
+ const bool useDeviceGroups)
: TestCase (testCtx, name, description)
, m_bufferSize (bufferSize)
+ , m_useDeviceGroups (useDeviceGroups)
{
}
{
public:
BufferSparseBindingInstance (Context& context,
- const deUint32 bufferSize);
+ const deUint32 bufferSize,
+ const bool useDeviceGroups);
tcu::TestStatus iterate (void);
private:
const deUint32 m_bufferSize;
+ const deUint32 m_useDeviceGroups;
};
BufferSparseBindingInstance::BufferSparseBindingInstance (Context& context,
- const deUint32 bufferSize)
+ const deUint32 bufferSize,
+ const bool useDeviceGroups)
- : SparseResourcesBaseInstance (context)
+ : SparseResourcesBaseInstance (context, useDeviceGroups)
, m_bufferSize (bufferSize)
+ , m_useDeviceGroups (useDeviceGroups)
{
}
tcu::TestStatus BufferSparseBindingInstance::iterate (void)
{
const InstanceInterface& instance = m_context.getInstanceInterface();
- const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
-
- if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
- TCU_THROW(NotSupportedError, "Sparse binding not supported");
-
{
// Create logical device supporting both sparse and compute operations
QueueRequirementsVec queueRequirements;
createDeviceSupportingQueues(queueRequirements);
}
+ const vk::VkPhysicalDevice& physicalDevice = getPhysicalDevice();
+
+ if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
+ TCU_THROW(NotSupportedError, "Sparse binding not supported");
const DeviceInterface& deviceInterface = getDeviceInterface();
const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
- VkBufferCreateInfo bufferCreateInfo;
+ // Go through all physical devices
+ for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
+ {
+ const deUint32 firstDeviceID = physDevID;
+ const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
- bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; // VkStructureType sType;
- bufferCreateInfo.pNext = DE_NULL; // const void* pNext;
- bufferCreateInfo.flags = VK_BUFFER_CREATE_SPARSE_BINDING_BIT; // VkBufferCreateFlags flags;
- bufferCreateInfo.size = m_bufferSize; // VkDeviceSize size;
- bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
- VK_BUFFER_USAGE_TRANSFER_DST_BIT; // VkBufferUsageFlags usage;
- bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; // VkSharingMode sharingMode;
- bufferCreateInfo.queueFamilyIndexCount = 0u; // deUint32 queueFamilyIndexCount;
- bufferCreateInfo.pQueueFamilyIndices = DE_NULL; // const deUint32* pQueueFamilyIndices;
+ VkBufferCreateInfo bufferCreateInfo;
- const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
+ bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; // VkStructureType sType;
+ bufferCreateInfo.pNext = DE_NULL; // const void* pNext;
+ bufferCreateInfo.flags = VK_BUFFER_CREATE_SPARSE_BINDING_BIT; // VkBufferCreateFlags flags;
+ bufferCreateInfo.size = m_bufferSize; // VkDeviceSize size;
+ bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
+ VK_BUFFER_USAGE_TRANSFER_DST_BIT; // VkBufferUsageFlags usage;
+ bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; // VkSharingMode sharingMode;
+ bufferCreateInfo.queueFamilyIndexCount = 0u; // deUint32 queueFamilyIndexCount;
+ bufferCreateInfo.pQueueFamilyIndices = DE_NULL; // const deUint32* pQueueFamilyIndices;
- if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
- {
- bufferCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT; // VkSharingMode sharingMode;
- bufferCreateInfo.queueFamilyIndexCount = 2u; // deUint32 queueFamilyIndexCount;
- bufferCreateInfo.pQueueFamilyIndices = queueFamilyIndices; // const deUint32* pQueueFamilyIndices;
- }
+ const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
- // Create sparse buffer
- const Unique<VkBuffer> sparseBuffer(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
+ if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
+ {
+ bufferCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT; // VkSharingMode sharingMode;
+ bufferCreateInfo.queueFamilyIndexCount = 2u; // deUint32 queueFamilyIndexCount;
+ bufferCreateInfo.pQueueFamilyIndices = queueFamilyIndices; // const deUint32* pQueueFamilyIndices;
+ }
- // Create sparse buffer memory bind semaphore
- const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+ // Create sparse buffer
+ const Unique<VkBuffer> sparseBuffer(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
- const VkMemoryRequirements bufferMemRequirement = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBuffer);
+ // Create sparse buffer memory bind semaphore
+ const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
- if (bufferMemRequirement.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
- TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
+ const VkMemoryRequirements bufferMemRequirement = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBuffer);
- DE_ASSERT((bufferMemRequirement.size % bufferMemRequirement.alignment) == 0);
+ if (bufferMemRequirement.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
+ TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
- Move<VkDeviceMemory> sparseMemoryAllocation;
+ DE_ASSERT((bufferMemRequirement.size % bufferMemRequirement.alignment) == 0);
- {
- std::vector<VkSparseMemoryBind> sparseMemoryBinds;
- const deUint32 numSparseBinds = static_cast<deUint32>(bufferMemRequirement.size / bufferMemRequirement.alignment);
- const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirement, MemoryRequirement::Any);
-
- if (memoryType == NO_MATCH_FOUND)
- return tcu::TestStatus::fail("No matching memory type found");
+ Move<VkDeviceMemory> sparseMemoryAllocation;
{
- const VkMemoryAllocateInfo allocateInfo =
+ std::vector<VkSparseMemoryBind> sparseMemoryBinds;
+ const deUint32 numSparseBinds = static_cast<deUint32>(bufferMemRequirement.size / bufferMemRequirement.alignment);
+ const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirement, MemoryRequirement::Any);
+
+ if (memoryType == NO_MATCH_FOUND)
+ return tcu::TestStatus::fail("No matching memory type found");
+
{
- VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- bufferMemRequirement.size, // VkDeviceSize allocationSize;
- memoryType, // uint32_t memoryTypeIndex;
+ const VkMemoryAllocateInfo allocateInfo =
+ {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ bufferMemRequirement.size, // VkDeviceSize allocationSize;
+ memoryType, // uint32_t memoryTypeIndex;
+ };
+
+ sparseMemoryAllocation = allocateMemory(deviceInterface, getDevice(), &allocateInfo);
+ }
+
+ for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseBinds; ++sparseBindNdx)
+ {
+ const VkSparseMemoryBind sparseMemoryBind =
+ {
+ bufferMemRequirement.alignment * sparseBindNdx, // VkDeviceSize resourceOffset;
+ bufferMemRequirement.alignment, // VkDeviceSize size;
+ *sparseMemoryAllocation, // VkDeviceMemory memory;
+ bufferMemRequirement.alignment * sparseBindNdx, // VkDeviceSize memoryOffset;
+ (VkSparseMemoryBindFlags)0, // VkSparseMemoryBindFlags flags;
+ };
+ sparseMemoryBinds.push_back(sparseMemoryBind);
+ }
+
+ const VkSparseBufferMemoryBindInfo sparseBufferBindInfo = makeSparseBufferMemoryBindInfo(*sparseBuffer, numSparseBinds, &sparseMemoryBinds[0]);
+
+ const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType;
+ DE_NULL, //const void* pNext;
+ firstDeviceID, //deUint32 resourceDeviceIndex;
+ secondDeviceID, //deUint32 memoryDeviceIndex;
};
- sparseMemoryAllocation = allocateMemory(deviceInterface, getDevice(), &allocateInfo);
- }
-
- for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseBinds; ++sparseBindNdx)
- {
- const VkSparseMemoryBind sparseMemoryBind =
+ const VkBindSparseInfo bindSparseInfo =
{
- bufferMemRequirement.alignment * sparseBindNdx, // VkDeviceSize resourceOffset;
- bufferMemRequirement.alignment, // VkDeviceSize size;
- *sparseMemoryAllocation, // VkDeviceMemory memory;
- bufferMemRequirement.alignment * sparseBindNdx, // VkDeviceSize memoryOffset;
- (VkSparseMemoryBindFlags)0, // VkSparseMemoryBindFlags flags;
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
+ m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext;
+ 0u, //deUint32 waitSemaphoreCount;
+ DE_NULL, //const VkSemaphore* pWaitSemaphores;
+ 1u, //deUint32 bufferBindCount;
+ &sparseBufferBindInfo, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ 0u, //deUint32 imageOpaqueBindCount;
+ DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ 0u, //deUint32 imageBindCount;
+ DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
+ 1u, //deUint32 signalSemaphoreCount;
+ &bufferMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
};
- sparseMemoryBinds.push_back(sparseMemoryBind);
+ // Submit sparse bind commands for execution
+ VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
}
- const VkSparseBufferMemoryBindInfo sparseBufferBindInfo = makeSparseBufferMemoryBindInfo(*sparseBuffer, numSparseBinds, &sparseMemoryBinds[0]);
-
- const VkBindSparseInfo bindSparseInfo =
- {
- VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
- DE_NULL, //const void* pNext;
- 0u, //deUint32 waitSemaphoreCount;
- DE_NULL, //const VkSemaphore* pWaitSemaphores;
- 1u, //deUint32 bufferBindCount;
- &sparseBufferBindInfo, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
- 0u, //deUint32 imageOpaqueBindCount;
- DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
- 0u, //deUint32 imageBindCount;
- DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
- 1u, //deUint32 signalSemaphoreCount;
- &bufferMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
- };
-
- // Submit sparse bind commands for execution
- VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
- }
-
- // Create command buffer for transfer oparations
- const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
- const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+	// Create command buffer for transfer operations
+ const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+ const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
- // Start recording transfer commands
- beginCommandBuffer(deviceInterface, *commandBuffer);
+ // Start recording transfer commands
+ beginCommandBuffer(deviceInterface, *commandBuffer);
- const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
- const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
- const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
+ const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+ const Unique<VkBuffer> inputBuffer(createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
+ const de::UniquePtr<Allocation> inputBufferAlloc(bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
- std::vector<deUint8> referenceData;
- referenceData.resize(m_bufferSize);
+ std::vector<deUint8> referenceData;
+ referenceData.resize(m_bufferSize);
- for (deUint32 valueNdx = 0; valueNdx < m_bufferSize; ++valueNdx)
- {
- referenceData[valueNdx] = static_cast<deUint8>((valueNdx % bufferMemRequirement.alignment) + 1u);
- }
+ for (deUint32 valueNdx = 0; valueNdx < m_bufferSize; ++valueNdx)
+ {
+ referenceData[valueNdx] = static_cast<deUint8>((valueNdx % bufferMemRequirement.alignment) + 1u);
+ }
- deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], m_bufferSize);
+ deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], m_bufferSize);
- flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), m_bufferSize);
+ flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), m_bufferSize);
- {
- const VkBufferMemoryBarrier inputBufferBarrier
- = makeBufferMemoryBarrier( VK_ACCESS_HOST_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- *inputBuffer,
- 0u,
- m_bufferSize);
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
- }
+ {
+ const VkBufferMemoryBarrier inputBufferBarrier
+ = makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ *inputBuffer,
+ 0u,
+ m_bufferSize);
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
+ }
- {
- const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
+ {
+ const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
- deviceInterface.cmdCopyBuffer(*commandBuffer, *inputBuffer, *sparseBuffer, 1u, &bufferCopy);
- }
+ deviceInterface.cmdCopyBuffer(*commandBuffer, *inputBuffer, *sparseBuffer, 1u, &bufferCopy);
+ }
- {
- const VkBufferMemoryBarrier sparseBufferBarrier
- = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- *sparseBuffer,
- 0u,
- m_bufferSize);
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferBarrier, 0u, DE_NULL);
- }
+ {
+ const VkBufferMemoryBarrier sparseBufferBarrier
+ = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ *sparseBuffer,
+ 0u,
+ m_bufferSize);
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferBarrier, 0u, DE_NULL);
+ }
- const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
- const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
- const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+ const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+ const Unique<VkBuffer> outputBuffer(createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+ const de::UniquePtr<Allocation> outputBufferAlloc(bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
- {
- const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
+ {
+ const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
- deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBuffer, *outputBuffer, 1u, &bufferCopy);
- }
+ deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBuffer, *outputBuffer, 1u, &bufferCopy);
+ }
- {
- const VkBufferMemoryBarrier outputBufferBarrier
- = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_HOST_READ_BIT,
- *outputBuffer,
- 0u,
- m_bufferSize);
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
- }
+ {
+ const VkBufferMemoryBarrier outputBufferBarrier
+ = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ *outputBuffer,
+ 0u,
+ m_bufferSize);
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
+ }
- // End recording transfer commands
- endCommandBuffer(deviceInterface, *commandBuffer);
+ // End recording transfer commands
+ endCommandBuffer(deviceInterface, *commandBuffer);
- const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
+ const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
- // Submit transfer commands for execution and wait for completion
- submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(), waitStageBits);
+ // Submit transfer commands for execution and wait for completion
+ // In case of device groups, submit on the physical device with the resource
+ submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(),
+ waitStageBits, 0, DE_NULL, m_useDeviceGroups, firstDeviceID);
- // Retrieve data from output buffer to host memory
- invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSize);
+ // Retrieve data from output buffer to host memory
+ invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSize);
- const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+ const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
- // Wait for sparse queue to become idle
- deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+ // Wait for sparse queue to become idle
+ deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
- // Compare output data with reference data
- if (deMemCmp(&referenceData[0], outputData, m_bufferSize) != 0)
- return tcu::TestStatus::fail("Failed");
- else
- return tcu::TestStatus::pass("Passed");
+ // Compare output data with reference data
+ if (deMemCmp(&referenceData[0], outputData, m_bufferSize) != 0)
+ return tcu::TestStatus::fail("Failed");
+ }
+ return tcu::TestStatus::pass("Passed");
}
TestInstance* BufferSparseBindingCase::createInstance (Context& context) const
{
- return new BufferSparseBindingInstance(context, m_bufferSize);
+ return new BufferSparseBindingInstance(context, m_bufferSize, m_useDeviceGroups);
}
} // anonymous ns
-void addBufferSparseBindingTests (tcu::TestCaseGroup* group)
+void addBufferSparseBindingTests (tcu::TestCaseGroup* group, const bool useDeviceGroups)
{
- group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10));
- group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12));
- group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16));
- group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17));
- group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20));
- group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24));
+ group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, useDeviceGroups));
+ group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, useDeviceGroups));
+ group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, useDeviceGroups));
+ group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, useDeviceGroups));
+ group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, useDeviceGroups));
+ group->addChild(new BufferSparseBindingCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, useDeviceGroups));
}
} // sparse
namespace sparse
{
-void addBufferSparseBindingTests (tcu::TestCaseGroup* group);
+void addBufferSparseBindingTests (tcu::TestCaseGroup* group, const bool useDeviceGroups);
} // sparse
} // vkt
const std::string& name,
const std::string& description,
const deUint32 bufferSize,
- const glu::GLSLVersion glslVersion);
+ const glu::GLSLVersion glslVersion,
+ const bool useDeviceGroups);
+
void initPrograms (SourceCollections& sourceCollections) const;
TestInstance* createInstance (Context& context) const;
private:
const deUint32 m_bufferSize;
const glu::GLSLVersion m_glslVersion;
+ const bool m_useDeviceGroups;
+
};
BufferSparseResidencyCase::BufferSparseResidencyCase (tcu::TestContext& testCtx,
const std::string& name,
const std::string& description,
const deUint32 bufferSize,
- const glu::GLSLVersion glslVersion)
+ const glu::GLSLVersion glslVersion,
+ const bool useDeviceGroups)
+
: TestCase (testCtx, name, description)
, m_bufferSize (bufferSize)
, m_glslVersion (glslVersion)
+ , m_useDeviceGroups (useDeviceGroups)
{
}
{
public:
BufferSparseResidencyInstance (Context& context,
- const deUint32 bufferSize);
+ const deUint32 bufferSize,
+ const bool useDeviceGroups);
tcu::TestStatus iterate (void);
private:
const deUint32 m_bufferSize;
+ const bool m_useDeviceGroups;
};
BufferSparseResidencyInstance::BufferSparseResidencyInstance (Context& context,
- const deUint32 bufferSize)
+ const deUint32 bufferSize,
+ const bool useDeviceGroups)
- : SparseResourcesBaseInstance (context)
+ : SparseResourcesBaseInstance (context, useDeviceGroups)
, m_bufferSize (bufferSize)
+ , m_useDeviceGroups (useDeviceGroups)
{
}
tcu::TestStatus BufferSparseResidencyInstance::iterate (void)
{
const InstanceInterface& instance = m_context.getInstanceInterface();
- const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
- const VkPhysicalDeviceProperties physicalDeviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
-
- if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyBuffer)
- TCU_THROW(NotSupportedError, "Sparse partially resident buffers not supported");
-
{
// Create logical device supporting both sparse and compute operations
QueueRequirementsVec queueRequirements;
createDeviceSupportingQueues(queueRequirements);
}
+ const VkPhysicalDevice physicalDevice = getPhysicalDevice();
+ const VkPhysicalDeviceProperties physicalDeviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
+
+ if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyBuffer)
+ TCU_THROW(NotSupportedError, "Sparse partially resident buffers not supported");
const DeviceInterface& deviceInterface = getDeviceInterface();
const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
- VkBufferCreateInfo bufferCreateInfo =
+ // Go through all physical devices
+ for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
{
- VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
- VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT, // VkBufferCreateFlags flags;
- m_bufferSize, // VkDeviceSize size;
- VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
- VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
- VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
- 0u, // deUint32 queueFamilyIndexCount;
- DE_NULL // const deUint32* pQueueFamilyIndices;
- };
-
- const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
-
- if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
- {
- bufferCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT;
- bufferCreateInfo.queueFamilyIndexCount = 2u;
- bufferCreateInfo.pQueueFamilyIndices = queueFamilyIndices;
- }
-
- // Create sparse buffer
- const Unique<VkBuffer> sparseBuffer(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
+ const deUint32 firstDeviceID = physDevID;
+ const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
- // Create sparse buffer memory bind semaphore
- const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
-
- const VkMemoryRequirements bufferMemRequirements = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBuffer);
-
- if (bufferMemRequirements.size > physicalDeviceProperties.limits.sparseAddressSpaceSize)
- TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
+ VkBufferCreateInfo bufferCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
+ VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT, // VkBufferCreateFlags flags;
+ m_bufferSize, // VkDeviceSize size;
+ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+ VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 0u, // deUint32 queueFamilyIndexCount;
+ DE_NULL // const deUint32* pQueueFamilyIndices;
+ };
- DE_ASSERT((bufferMemRequirements.size % bufferMemRequirements.alignment) == 0);
+ const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };
- const deUint32 numSparseSlots = static_cast<deUint32>(bufferMemRequirements.size / bufferMemRequirements.alignment);
- std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
+ if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
+ {
+ bufferCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT;
+ bufferCreateInfo.queueFamilyIndexCount = 2u;
+ bufferCreateInfo.pQueueFamilyIndices = queueFamilyIndices;
+ }
- {
- std::vector<VkSparseMemoryBind> sparseMemoryBinds;
- const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirements, MemoryRequirement::Any);
+ // Create sparse buffer
+ const Unique<VkBuffer> sparseBuffer(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));
- if (memoryType == NO_MATCH_FOUND)
- return tcu::TestStatus::fail("No matching memory type found");
+ // Create sparse buffer memory bind semaphore
+ const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
- for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; sparseBindNdx += 2)
- {
- const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), bufferMemRequirements.alignment, memoryType, bufferMemRequirements.alignment * sparseBindNdx);
+ const VkMemoryRequirements bufferMemRequirements = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBuffer);
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ if (bufferMemRequirements.size > physicalDeviceProperties.limits.sparseAddressSpaceSize)
+ TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");
- sparseMemoryBinds.push_back(sparseMemoryBind);
- }
+ DE_ASSERT((bufferMemRequirements.size % bufferMemRequirements.alignment) == 0);
- const VkSparseBufferMemoryBindInfo sparseBufferBindInfo = makeSparseBufferMemoryBindInfo(*sparseBuffer, static_cast<deUint32>(sparseMemoryBinds.size()), &sparseMemoryBinds[0]);
+ const deUint32 numSparseSlots = static_cast<deUint32>(bufferMemRequirements.size / bufferMemRequirements.alignment);
+ std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
- const VkBindSparseInfo bindSparseInfo =
{
- VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
- DE_NULL, //const void* pNext;
- 0u, //deUint32 waitSemaphoreCount;
- DE_NULL, //const VkSemaphore* pWaitSemaphores;
- 1u, //deUint32 bufferBindCount;
- &sparseBufferBindInfo, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
- 0u, //deUint32 imageOpaqueBindCount;
- DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
- 0u, //deUint32 imageBindCount;
- DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
- 1u, //deUint32 signalSemaphoreCount;
- &bufferMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
- };
-
- VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
- }
+ std::vector<VkSparseMemoryBind> sparseMemoryBinds;
+ const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, bufferMemRequirements, MemoryRequirement::Any);
+
+ if (memoryType == NO_MATCH_FOUND)
+ return tcu::TestStatus::fail("No matching memory type found");
+
+ for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; sparseBindNdx += 2)
+ {
+ const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), bufferMemRequirements.alignment, memoryType, bufferMemRequirements.alignment * sparseBindNdx);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ sparseMemoryBinds.push_back(sparseMemoryBind);
+ }
+
+ const VkSparseBufferMemoryBindInfo sparseBufferBindInfo = makeSparseBufferMemoryBindInfo(*sparseBuffer, static_cast<deUint32>(sparseMemoryBinds.size()), &sparseMemoryBinds[0]);
+
+ const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType;
+ DE_NULL, //const void* pNext;
+ firstDeviceID, //deUint32 resourceDeviceIndex;
+ secondDeviceID, //deUint32 memoryDeviceIndex;
+ };
+ const VkBindSparseInfo bindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
+ m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext;
+ 0u, //deUint32 waitSemaphoreCount;
+ DE_NULL, //const VkSemaphore* pWaitSemaphores;
+ 1u, //deUint32 bufferBindCount;
+ &sparseBufferBindInfo, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ 0u, //deUint32 imageOpaqueBindCount;
+ DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ 0u, //deUint32 imageBindCount;
+ DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
+ 1u, //deUint32 signalSemaphoreCount;
+ &bufferMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
+ };
+
+ VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
+ }
- // Create input buffer
- const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
- const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
- const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
+ // Create input buffer
+ const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
+ const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
+ const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
- std::vector<deUint8> referenceData;
- referenceData.resize(m_bufferSize);
+ std::vector<deUint8> referenceData;
+ referenceData.resize(m_bufferSize);
- for (deUint32 valueNdx = 0; valueNdx < m_bufferSize; ++valueNdx)
- {
- referenceData[valueNdx] = static_cast<deUint8>((valueNdx % bufferMemRequirements.alignment) + 1u);
- }
+ for (deUint32 valueNdx = 0; valueNdx < m_bufferSize; ++valueNdx)
+ {
+ referenceData[valueNdx] = static_cast<deUint8>((valueNdx % bufferMemRequirements.alignment) + 1u);
+ }
- deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], m_bufferSize);
+ deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], m_bufferSize);
- flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), m_bufferSize);
+ flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), m_bufferSize);
- // Create output buffer
- const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
- const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
- const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+ // Create output buffer
+ const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+ const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+ const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
- // Create command buffer for compute and data transfer oparations
- const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
- const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+ // Create command buffer for compute and data transfer operations
+ const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+ const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
- // Start recording compute and transfer commands
- beginCommandBuffer(deviceInterface, *commandBuffer);
+ // Start recording compute and transfer commands
+ beginCommandBuffer(deviceInterface, *commandBuffer);
- // Create descriptor set
- const Unique<VkDescriptorSetLayout> descriptorSetLayout(
- DescriptorSetLayoutBuilder()
- .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
- .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
- .build(deviceInterface, getDevice()));
+ // Create descriptor set
+ const Unique<VkDescriptorSetLayout> descriptorSetLayout(
+ DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+ .build(deviceInterface, getDevice()));
- // Create compute pipeline
- const Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
- const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
- const Unique<VkPipeline> computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+ // Create compute pipeline
+ const Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
+ const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
+ const Unique<VkPipeline> computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
- deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
+ deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
- const Unique<VkDescriptorPool> descriptorPool(
- DescriptorPoolBuilder()
- .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2u)
- .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+ const Unique<VkDescriptorPool> descriptorPool(
+ DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2u)
+ .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
- const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
+ const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
- {
- const VkDescriptorBufferInfo inputBufferInfo = makeDescriptorBufferInfo(*inputBuffer, 0ull, m_bufferSize);
- const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(*sparseBuffer, 0ull, m_bufferSize);
-
- DescriptorSetUpdateBuilder()
- .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inputBufferInfo)
- .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &sparseBufferInfo)
- .update(deviceInterface, getDevice());
- }
+ {
+ const VkDescriptorBufferInfo inputBufferInfo = makeDescriptorBufferInfo(*inputBuffer, 0ull, m_bufferSize);
+ const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(*sparseBuffer, 0ull, m_bufferSize);
- deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
+ DescriptorSetUpdateBuilder()
+ .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inputBufferInfo)
+ .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &sparseBufferInfo)
+ .update(deviceInterface, getDevice());
+ }
- {
- const VkBufferMemoryBarrier inputBufferBarrier
- = makeBufferMemoryBarrier( VK_ACCESS_HOST_WRITE_BIT,
- VK_ACCESS_SHADER_READ_BIT,
- *inputBuffer,
- 0ull,
- m_bufferSize);
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
- }
+ deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
- deviceInterface.cmdDispatch(*commandBuffer, 1u, 1u, 1u);
+ {
+ const VkBufferMemoryBarrier inputBufferBarrier
+ = makeBufferMemoryBarrier( VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_SHADER_READ_BIT,
+ *inputBuffer,
+ 0ull,
+ m_bufferSize);
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
+ }
- {
- const VkBufferMemoryBarrier sparseBufferBarrier
- = makeBufferMemoryBarrier( VK_ACCESS_SHADER_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- *sparseBuffer,
- 0ull,
- m_bufferSize);
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferBarrier, 0u, DE_NULL);
- }
+ deviceInterface.cmdDispatch(*commandBuffer, 1u, 1u, 1u);
- {
- const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
+ {
+ const VkBufferMemoryBarrier sparseBufferBarrier
+ = makeBufferMemoryBarrier( VK_ACCESS_SHADER_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ *sparseBuffer,
+ 0ull,
+ m_bufferSize);
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferBarrier, 0u, DE_NULL);
+ }
- deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBuffer, *outputBuffer, 1u, &bufferCopy);
- }
+ {
+ const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);
- {
- const VkBufferMemoryBarrier outputBufferBarrier
- = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_HOST_READ_BIT,
- *outputBuffer,
- 0ull,
- m_bufferSize);
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
- }
+ deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBuffer, *outputBuffer, 1u, &bufferCopy);
+ }
- // End recording compute and transfer commands
- endCommandBuffer(deviceInterface, *commandBuffer);
+ {
+ const VkBufferMemoryBarrier outputBufferBarrier
+ = makeBufferMemoryBarrier( VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ *outputBuffer,
+ 0ull,
+ m_bufferSize);
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
+ }
- const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
+ // End recording compute and transfer commands
+ endCommandBuffer(deviceInterface, *commandBuffer);
- // Submit transfer commands for execution and wait for completion
- submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(), waitStageBits);
+ const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
- // Retrieve data from output buffer to host memory
- invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSize);
+ // Submit transfer commands for execution and wait for completion
+ submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(),
+ waitStageBits, 0, DE_NULL, m_useDeviceGroups, firstDeviceID);
- const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+ // Retrieve data from output buffer to host memory
+ invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), m_bufferSize);
- // Wait for sparse queue to become idle
- deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+ const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
- // Compare output data with reference data
- for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; ++sparseBindNdx)
- {
- const deUint32 alignment = static_cast<deUint32>(bufferMemRequirements.alignment);
- const deUint32 offset = alignment * sparseBindNdx;
- const deUint32 size = sparseBindNdx == (numSparseSlots - 1) ? m_bufferSize % alignment : alignment;
+ // Wait for sparse queue to become idle
+ deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
- if (sparseBindNdx % 2u == 0u)
+ // Compare output data with reference data
+ for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; ++sparseBindNdx)
{
- if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0)
- return tcu::TestStatus::fail("Failed");
- }
- else if (physicalDeviceProperties.sparseProperties.residencyNonResidentStrict)
- {
- deMemset(&referenceData[offset], 0u, size);
-
- if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0)
- return tcu::TestStatus::fail("Failed");
+ const deUint32 alignment = static_cast<deUint32>(bufferMemRequirements.alignment);
+ const deUint32 offset = alignment * sparseBindNdx;
+ const deUint32 size = sparseBindNdx == (numSparseSlots - 1) ? m_bufferSize % alignment : alignment;
+
+ if (sparseBindNdx % 2u == 0u)
+ {
+ if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0)
+ return tcu::TestStatus::fail("Failed");
+ }
+ else if (physicalDeviceProperties.sparseProperties.residencyNonResidentStrict)
+ {
+ deMemset(&referenceData[offset], 0u, size);
+
+ if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0)
+ return tcu::TestStatus::fail("Failed");
+ }
}
}
TestInstance* BufferSparseResidencyCase::createInstance (Context& context) const
{
- return new BufferSparseResidencyInstance(context, m_bufferSize);
+ return new BufferSparseResidencyInstance(context, m_bufferSize, m_useDeviceGroups);
}
} // anonymous ns
-void addBufferSparseResidencyTests(tcu::TestCaseGroup* group)
+void addBufferSparseResidencyTests(tcu::TestCaseGroup* group, const bool useDeviceGroups)
{
- group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, glu::GLSL_VERSION_440));
- group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, glu::GLSL_VERSION_440));
+ group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, glu::GLSL_VERSION_440, useDeviceGroups));
+ group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, glu::GLSL_VERSION_440, useDeviceGroups));
}
} // sparse
namespace sparse
{
-void addBufferSparseResidencyTests(tcu::TestCaseGroup* group);
+void addBufferSparseResidencyTests(tcu::TestCaseGroup* group, const bool useDeviceGroups);
} // sparse
} // vkt
enum
{
- RENDER_SIZE = 128, //!< framebuffer size in pixels
- GRID_SIZE = RENDER_SIZE / 8, //!< number of grid tiles in a row
+ RENDER_SIZE = 128, //!< framebuffer size in pixels
+ GRID_SIZE = RENDER_SIZE / 8, //!< number of grid tiles in a row
};
enum TestFlagBits
TEST_FLAG_ALIASED = 1u << 0, //!< sparseResidencyAliased
TEST_FLAG_RESIDENCY = 1u << 1, //!< sparseResidencyBuffer
TEST_FLAG_NON_RESIDENT_STRICT = 1u << 2, //!< residencyNonResidentStrict
+ TEST_FLAG_ENABLE_DEVICE_GROUPS = 1u << 3, //!< device groups are enabled
};
typedef deUint32 TestFlags;
void draw (const DeviceInterface& vk,
const VkDevice device,
const VkQueue queue,
- const Delegate& drawDelegate) const
+ const Delegate& drawDelegate,
+ const bool useDeviceGroups,
+ const deUint32 deviceID) const
{
beginCommandBuffer(vk, *m_cmdBuffer);
}
VK_CHECK(vk.endCommandBuffer(*m_cmdBuffer));
- submitCommandsAndWait(vk, device, queue, *m_cmdBuffer);
+ submitCommandsAndWait(vk, device, queue, *m_cmdBuffer, 0U, DE_NULL, DE_NULL, 0U, DE_NULL, useDeviceGroups, deviceID);
}
private:
Renderer& operator= (const Renderer&);
};
-void bindSparseBuffer (const DeviceInterface& vk, const VkDevice device, const VkQueue sparseQueue, const VkBuffer buffer, const SparseAllocation& sparseAllocation)
+void bindSparseBuffer (const DeviceInterface& vk, const VkDevice device, const VkQueue sparseQueue, const VkBuffer buffer, const SparseAllocation& sparseAllocation,
+ const bool useDeviceGroups, deUint32 resourceDevId, deUint32 memoryDeviceId)
{
const VkSparseBufferMemoryBindInfo sparseBufferMemoryBindInfo =
{
&sparseAllocation.memoryBinds[0], // const VkSparseMemoryBind* pBinds;
};
+ const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType;
+ DE_NULL, //const void* pNext;
+ resourceDevId, //deUint32 resourceDeviceIndex;
+ memoryDeviceId, //deUint32 memoryDeviceIndex;
+ };
+
const VkBindSparseInfo bindInfo =
{
- VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- 0u, // uint32_t waitSemaphoreCount;
- DE_NULL, // const VkSemaphore* pWaitSemaphores;
- 1u, // uint32_t bufferBindCount;
- &sparseBufferMemoryBindInfo, // const VkSparseBufferMemoryBindInfo* pBufferBinds;
- 0u, // uint32_t imageOpaqueBindCount;
- DE_NULL, // const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
- 0u, // uint32_t imageBindCount;
- DE_NULL, // const VkSparseImageMemoryBindInfo* pImageBinds;
- 0u, // uint32_t signalSemaphoreCount;
- DE_NULL, // const VkSemaphore* pSignalSemaphores;
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, // VkStructureType sType;
+ useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, // const void* pNext;
+ 0u, // uint32_t waitSemaphoreCount;
+ DE_NULL, // const VkSemaphore* pWaitSemaphores;
+ 1u, // uint32_t bufferBindCount;
+ &sparseBufferMemoryBindInfo, // const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ 0u, // uint32_t imageOpaqueBindCount;
+ DE_NULL, // const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ 0u, // uint32_t imageBindCount;
+ DE_NULL, // const VkSparseImageMemoryBindInfo* pImageBinds;
+ 0u, // uint32_t signalSemaphoreCount;
+ DE_NULL, // const VkSemaphore* pSignalSemaphores;
};
const Unique<VkFence> fence(createFence(vk, device));
{
public:
SparseBufferTestInstance (Context& context, const TestFlags flags)
- : SparseResourcesBaseInstance (context)
+ : SparseResourcesBaseInstance (context, (flags & TEST_FLAG_ENABLE_DEVICE_GROUPS) != 0)
, m_aliased ((flags & TEST_FLAG_ALIASED) != 0)
, m_residency ((flags & TEST_FLAG_RESIDENCY) != 0)
, m_nonResidentStrict ((flags & TEST_FLAG_NON_RESIDENT_STRICT) != 0)
, m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
, m_colorBufferSize (m_renderSize.x() * m_renderSize.y() * tcu::getPixelSize(mapVkFormat(m_colorFormat)))
{
- const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
+ {
+ QueueRequirementsVec requirements;
+ requirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
+ requirements.push_back(QueueRequirements(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 1u));
+
+ createDeviceSupportingQueues(requirements);
+ }
+ const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(m_context.getInstanceInterface(), getPhysicalDevice());
if (!features.sparseBinding)
TCU_THROW(NotSupportedError, "Missing feature: sparseBinding");
if (m_nonResidentStrict && !m_context.getDeviceProperties().sparseProperties.residencyNonResidentStrict)
TCU_THROW(NotSupportedError, "Missing sparse property: residencyNonResidentStrict");
- {
- QueueRequirementsVec requirements;
- requirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
- requirements.push_back(QueueRequirements(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 1u));
-
- createDeviceSupportingQueues(requirements);
- }
-
const DeviceInterface& vk = getDeviceInterface();
m_sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0u);
m_universalQueue = getQueue(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 0u);
void draw (const VkPrimitiveTopology topology,
const VkDescriptorSetLayout descriptorSetLayout = DE_NULL,
- Renderer::SpecializationMap specMap = Renderer::SpecializationMap())
+ Renderer::SpecializationMap specMap = Renderer::SpecializationMap(),
+ bool useDeviceGroups = false,
+ deUint32 deviceID = 0)
{
const UniquePtr<Renderer> renderer(new Renderer(
getDeviceInterface(), getDevice(), getAllocator(), m_universalQueue.queueFamilyIndex, descriptorSetLayout,
m_context.getBinaryCollection(), "vert", "frag", *m_colorBuffer, m_renderSize, m_colorFormat, Vec4(1.0f, 0.0f, 0.0f, 1.0f), topology, specMap));
- renderer->draw(getDeviceInterface(), getDevice(), m_universalQueue.queueHandle, *this);
+ renderer->draw(getDeviceInterface(), getDevice(), m_universalQueue.queueHandle, *this, useDeviceGroups, deviceID);
}
- tcu::TestStatus verifyDrawResult (void) const
+ bool isResultImageCorrect (void) const
{
invalidateMappedMemoryRange(getDeviceInterface(), getDevice(), m_colorBufferAlloc->getMemory(), 0ull, m_colorBufferSize);
m_context.getTestContext().getLog()
<< tcu::LogImageSet("Result", "Result") << tcu::LogImage("color0", "", resultImage) << tcu::TestLog::EndImageSet;
- if (imageHasErrorPixels(resultImage))
- return tcu::TestStatus::fail("Some buffer values were incorrect");
- else
- return tcu::TestStatus::pass("Pass");
+ return !imageHasErrorPixels(resultImage);
}
const bool m_aliased;
MovePtr<SparseAllocation> sparseAllocation;
Move<VkBuffer> sparseBuffer;
Move<VkBuffer> sparseBufferAliased;
+ bool setupDescriptors = true;
- // Set up the sparse buffer
+ // Go through all physical devices
+ for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
{
- VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
- const VkDeviceSize minChunkSize = 512u; // make sure the smallest allocation is at least this big
- deUint32 numMaxChunks = 0u;
+ const deUint32 firstDeviceID = physDevID;
+ const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
- // Check how many chunks we can allocate given the alignment and size requirements of UBOs
+ // Set up the sparse buffer
{
- const UniquePtr<SparseAllocation> minAllocation(SparseAllocationBuilder()
- .addMemoryBind()
- .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize));
-
- numMaxChunks = deMaxu32(static_cast<deUint32>(m_context.getDeviceProperties().limits.maxUniformBufferRange / minAllocation->resourceSize), 1u);
- }
+ VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
+ const VkDeviceSize minChunkSize = 512u; // make sure the smallest allocation is at least this big
+ deUint32 numMaxChunks = 0u;
- if (numMaxChunks < 4)
- {
- sparseAllocation = SparseAllocationBuilder()
- .addMemoryBind()
- .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
- }
- else
- {
- // Try to use a non-trivial memory allocation scheme to make it different from a non-sparse binding
- SparseAllocationBuilder builder;
- builder.addMemoryBind();
+ // Check how many chunks we can allocate given the alignment and size requirements of UBOs
+ {
+ const UniquePtr<SparseAllocation> minAllocation(SparseAllocationBuilder()
+ .addMemoryBind()
+ .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize));
- if (m_residency)
- builder.addResourceHole();
+ numMaxChunks = deMaxu32(static_cast<deUint32>(m_context.getDeviceProperties().limits.maxUniformBufferRange / minAllocation->resourceSize), 1u);
+ }
- builder
- .addMemoryAllocation()
- .addMemoryHole()
- .addMemoryBind();
+ if (numMaxChunks < 4)
+ {
+ sparseAllocation = SparseAllocationBuilder()
+ .addMemoryBind()
+ .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
+ }
+ else
+ {
+ // Try to use a non-trivial memory allocation scheme to make it different from a non-sparse binding
+ SparseAllocationBuilder builder;
+ builder.addMemoryBind();
- if (m_aliased)
- builder.addAliasedMemoryBind(0u, 0u);
+ if (m_residency)
+ builder.addResourceHole();
- sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
- DE_ASSERT(sparseAllocation->resourceSize <= m_context.getDeviceProperties().limits.maxUniformBufferRange);
- }
+ builder
+ .addMemoryAllocation()
+ .addMemoryHole()
+ .addMemoryBind();
- // Create the buffer
- referenceBufferCreateInfo.size = sparseAllocation->resourceSize;
- sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
- bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBuffer, *sparseAllocation);
+ if (m_aliased)
+ builder.addAliasedMemoryBind(0u, 0u);
- if (m_aliased)
- {
- sparseBufferAliased = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
- bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBufferAliased, *sparseAllocation);
- }
- }
+ sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
+ DE_ASSERT(sparseAllocation->resourceSize <= m_context.getDeviceProperties().limits.maxUniformBufferRange);
+ }
- // Set uniform data
- {
- const bool hasAliasedChunk = (m_aliased && sparseAllocation->memoryBinds.size() > 1u);
- const VkDeviceSize chunkSize = sparseAllocation->resourceSize / sparseAllocation->numResourceChunks;
- const VkDeviceSize stagingBufferSize = sparseAllocation->resourceSize - (hasAliasedChunk ? chunkSize : 0);
- const deUint32 numBufferEntries = static_cast<deUint32>(stagingBufferSize / sizeof(IVec4));
+ // Create the buffer
+ referenceBufferCreateInfo.size = sparseAllocation->resourceSize;
+ sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
+ bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBuffer, *sparseAllocation, usingDeviceGroups(), firstDeviceID, secondDeviceID);
- const Unique<VkBuffer> stagingBuffer (makeBuffer(vk, getDevice(), makeBufferCreateInfo(stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT)));
- const UniquePtr<Allocation> stagingBufferAlloc (bindBuffer(vk, getDevice(), getAllocator(), *stagingBuffer, MemoryRequirement::HostVisible));
+ if (m_aliased)
+ {
+ sparseBufferAliased = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
+ bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBufferAliased, *sparseAllocation, usingDeviceGroups(), firstDeviceID, secondDeviceID);
+ }
+ }
+ // Set uniform data
{
- // If aliased chunk is used, the staging buffer is smaller than the sparse buffer and we don't overwrite the last chunk
- IVec4* const pData = static_cast<IVec4*>(stagingBufferAlloc->getHostPtr());
- for (deUint32 i = 0; i < numBufferEntries; ++i)
- pData[i] = IVec4(3*i ^ 127, 0, 0, 0);
+ const bool hasAliasedChunk = (m_aliased && sparseAllocation->memoryBinds.size() > 1u);
+ const VkDeviceSize chunkSize = sparseAllocation->resourceSize / sparseAllocation->numResourceChunks;
+ const VkDeviceSize stagingBufferSize = sparseAllocation->resourceSize - (hasAliasedChunk ? chunkSize : 0);
+ const deUint32 numBufferEntries = static_cast<deUint32>(stagingBufferSize / sizeof(IVec4));
- flushMappedMemoryRange(vk, getDevice(), stagingBufferAlloc->getMemory(), stagingBufferAlloc->getOffset(), stagingBufferSize);
+ const Unique<VkBuffer> stagingBuffer (makeBuffer(vk, getDevice(), makeBufferCreateInfo(stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT)));
+ const UniquePtr<Allocation> stagingBufferAlloc (bindBuffer(vk, getDevice(), getAllocator(), *stagingBuffer, MemoryRequirement::HostVisible));
- const VkBufferCopy copyRegion =
{
- 0ull, // VkDeviceSize srcOffset;
- 0ull, // VkDeviceSize dstOffset;
- stagingBufferSize, // VkDeviceSize size;
- };
-
- const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex));
- const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
-
- beginCommandBuffer (vk, *cmdBuffer);
-	beginCommandBuffer	(vk, *cmdBuffer);
-	vk.cmdCopyBuffer	(*cmdBuffer, *stagingBuffer, *sparseBuffer, 1u, &copyRegion);
- endCommandBuffer (vk, *cmdBuffer);
-
- submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer);
- // Once the fence is signaled, the write is also available to the aliasing buffer.
+ // If aliased chunk is used, the staging buffer is smaller than the sparse buffer and we don't overwrite the last chunk
+ IVec4* const pData = static_cast<IVec4*>(stagingBufferAlloc->getHostPtr());
+ for (deUint32 i = 0; i < numBufferEntries; ++i)
+ pData[i] = IVec4(3*i ^ 127, 0, 0, 0);
+
+ flushMappedMemoryRange(vk, getDevice(), stagingBufferAlloc->getMemory(), stagingBufferAlloc->getOffset(), stagingBufferSize);
+
+ const VkBufferCopy copyRegion =
+ {
+ 0ull, // VkDeviceSize srcOffset;
+ 0ull, // VkDeviceSize dstOffset;
+ stagingBufferSize, // VkDeviceSize size;
+ };
+
+ const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex));
+ const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+
+ beginCommandBuffer (vk, *cmdBuffer);
+	vk.cmdCopyBuffer	(*cmdBuffer, *stagingBuffer, *sparseBuffer, 1u, &copyRegion);
+ endCommandBuffer (vk, *cmdBuffer);
+
+ submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer, 0u, DE_NULL, DE_NULL, 0, DE_NULL, usingDeviceGroups(), firstDeviceID);
+ // Once the fence is signaled, the write is also available to the aliasing buffer.
+ }
}
- }
- // Make sure that we don't try to access a larger range than is allowed. This only applies to a single chunk case.
- const deUint32 maxBufferRange = deMinu32(static_cast<deUint32>(sparseAllocation->resourceSize), m_context.getDeviceProperties().limits.maxUniformBufferRange);
+ // Make sure that we don't try to access a larger range than is allowed. This only applies to a single chunk case.
+ const deUint32 maxBufferRange = deMinu32(static_cast<deUint32>(sparseAllocation->resourceSize), m_context.getDeviceProperties().limits.maxUniformBufferRange);
- // Descriptor sets
- {
- m_descriptorSetLayout = DescriptorSetLayoutBuilder()
- .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_FRAGMENT_BIT)
- .build(vk, getDevice());
+ // Descriptor sets
+ {
+ // Setup only once
+ if (setupDescriptors)
+ {
+ m_descriptorSetLayout = DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_FRAGMENT_BIT)
+ .build(vk, getDevice());
- m_descriptorPool = DescriptorPoolBuilder()
- .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
- .build(vk, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+ m_descriptorPool = DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
+ .build(vk, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
- m_descriptorSet = makeDescriptorSet(vk, getDevice(), *m_descriptorPool, *m_descriptorSetLayout);
+ m_descriptorSet = makeDescriptorSet(vk, getDevice(), *m_descriptorPool, *m_descriptorSetLayout);
+ setupDescriptors = false;
+ }
- const VkBuffer buffer = (m_aliased ? *sparseBufferAliased : *sparseBuffer);
- const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(buffer, 0ull, maxBufferRange);
+ const VkBuffer buffer = (m_aliased ? *sparseBufferAliased : *sparseBuffer);
+ const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(buffer, 0ull, maxBufferRange);
- DescriptorSetUpdateBuilder()
- .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &sparseBufferInfo)
- .update(vk, getDevice());
- }
+ DescriptorSetUpdateBuilder()
+ .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &sparseBufferInfo)
+ .update(vk, getDevice());
+ }
- // Vertex data
- {
- const Vec4 vertexData[] =
+ // Vertex data
{
- Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
- Vec4(-1.0f, 1.0f, 0.0f, 1.0f),
- Vec4( 1.0f, -1.0f, 0.0f, 1.0f),
- Vec4( 1.0f, 1.0f, 0.0f, 1.0f),
- };
+ const Vec4 vertexData[] =
+ {
+ Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
+ Vec4(-1.0f, 1.0f, 0.0f, 1.0f),
+ Vec4( 1.0f, -1.0f, 0.0f, 1.0f),
+ Vec4( 1.0f, 1.0f, 0.0f, 1.0f),
+ };
- const VkDeviceSize vertexBufferSize = sizeof(vertexData);
+ const VkDeviceSize vertexBufferSize = sizeof(vertexData);
- m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
- m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
+ m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
+ m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
- deMemcpy(m_vertexBufferAlloc->getHostPtr(), &vertexData[0], vertexBufferSize);
- flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
- }
+ deMemcpy(m_vertexBufferAlloc->getHostPtr(), &vertexData[0], vertexBufferSize);
+ flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
+ }
- // Draw
- {
- std::vector<deInt32> specializationData;
+ // Draw
{
- const deUint32 numBufferEntries = maxBufferRange / static_cast<deUint32>(sizeof(IVec4));
- const deUint32 numEntriesPerChunk = numBufferEntries / sparseAllocation->numResourceChunks;
+ std::vector<deInt32> specializationData;
+ {
+ const deUint32 numBufferEntries = maxBufferRange / static_cast<deUint32>(sizeof(IVec4));
+ const deUint32 numEntriesPerChunk = numBufferEntries / sparseAllocation->numResourceChunks;
- specializationData.push_back(numBufferEntries);
- specializationData.push_back(numEntriesPerChunk);
- }
+ specializationData.push_back(numBufferEntries);
+ specializationData.push_back(numEntriesPerChunk);
+ }
- const VkSpecializationMapEntry specMapEntries[] =
- {
+ const VkSpecializationMapEntry specMapEntries[] =
{
- 1u, // uint32_t constantID;
- 0u, // uint32_t offset;
- sizeof(deInt32), // size_t size;
- },
+ {
+ 1u, // uint32_t constantID;
+ 0u, // uint32_t offset;
+ sizeof(deInt32), // size_t size;
+ },
+ {
+ 2u, // uint32_t constantID;
+ sizeof(deInt32), // uint32_t offset;
+ sizeof(deInt32), // size_t size;
+ },
+ };
+
+ const VkSpecializationInfo specInfo =
{
- 2u, // uint32_t constantID;
- sizeof(deInt32), // uint32_t offset;
- sizeof(deInt32), // size_t size;
- },
- };
+ DE_LENGTH_OF_ARRAY(specMapEntries), // uint32_t mapEntryCount;
+ specMapEntries, // const VkSpecializationMapEntry* pMapEntries;
+ sizeInBytes(specializationData), // size_t dataSize;
+ getDataOrNullptr(specializationData), // const void* pData;
+ };
- const VkSpecializationInfo specInfo =
- {
- DE_LENGTH_OF_ARRAY(specMapEntries), // uint32_t mapEntryCount;
- specMapEntries, // const VkSpecializationMapEntry* pMapEntries;
- sizeInBytes(specializationData), // size_t dataSize;
- getDataOrNullptr(specializationData), // const void* pData;
- };
+ Renderer::SpecializationMap specMap;
+ specMap[VK_SHADER_STAGE_FRAGMENT_BIT] = &specInfo;
- Renderer::SpecializationMap specMap;
- specMap[VK_SHADER_STAGE_FRAGMENT_BIT] = &specInfo;
+ draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, *m_descriptorSetLayout, specMap, usingDeviceGroups(), firstDeviceID);
+ }
- draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, *m_descriptorSetLayout, specMap);
+ if(!isResultImageCorrect())
+ return tcu::TestStatus::fail("Some buffer values were incorrect");
}
-
- return verifyDrawResult();
+ return tcu::TestStatus::pass("Pass");
}
private:
referenceBufferCreateInfo.size = m_sparseAllocation->resourceSize;
m_sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
- // Bind the memory
- bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *m_sparseBuffer, *m_sparseAllocation);
m_perDrawBufferOffset = m_sparseAllocation->resourceSize / m_sparseAllocation->numResourceChunks;
m_stagingBufferSize = 2 * m_perDrawBufferOffset;
m_stagingBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(m_stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT));
m_stagingBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_stagingBuffer, MemoryRequirement::HostVisible);
+
+
}
tcu::TestStatus iterate (void)
{
- initializeBuffers();
-
const DeviceInterface& vk = getDeviceInterface();
- // Upload to the sparse buffer
+ for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
{
- flushMappedMemoryRange(vk, getDevice(), m_stagingBufferAlloc->getMemory(), m_stagingBufferAlloc->getOffset(), m_stagingBufferSize);
+ const deUint32 firstDeviceID = physDevID;
+ const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
- VkDeviceSize firstChunkOffset = 0ull;
- VkDeviceSize secondChunkOffset = m_perDrawBufferOffset;
+ // Bind the memory
+ bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *m_sparseBuffer, *m_sparseAllocation, usingDeviceGroups(), firstDeviceID, secondDeviceID);
- if (m_residency)
- secondChunkOffset += m_perDrawBufferOffset;
+ initializeBuffers();
- if (m_aliased)
- firstChunkOffset = secondChunkOffset + m_perDrawBufferOffset;
-
- const VkBufferCopy copyRegions[] =
+ // Upload to the sparse buffer
{
+ flushMappedMemoryRange(vk, getDevice(), m_stagingBufferAlloc->getMemory(), m_stagingBufferAlloc->getOffset(), m_stagingBufferSize);
+
+ VkDeviceSize firstChunkOffset = 0ull;
+ VkDeviceSize secondChunkOffset = m_perDrawBufferOffset;
+
+ if (m_residency)
+ secondChunkOffset += m_perDrawBufferOffset;
+
+ if (m_aliased)
+ firstChunkOffset = secondChunkOffset + m_perDrawBufferOffset;
+
+ const VkBufferCopy copyRegions[] =
{
- 0ull, // VkDeviceSize srcOffset;
- firstChunkOffset, // VkDeviceSize dstOffset;
- m_perDrawBufferOffset, // VkDeviceSize size;
- },
- {
- m_perDrawBufferOffset, // VkDeviceSize srcOffset;
- secondChunkOffset, // VkDeviceSize dstOffset;
- m_perDrawBufferOffset, // VkDeviceSize size;
- },
- };
+ {
+ 0ull, // VkDeviceSize srcOffset;
+ firstChunkOffset, // VkDeviceSize dstOffset;
+ m_perDrawBufferOffset, // VkDeviceSize size;
+ },
+ {
+ m_perDrawBufferOffset, // VkDeviceSize srcOffset;
+ secondChunkOffset, // VkDeviceSize dstOffset;
+ m_perDrawBufferOffset, // VkDeviceSize size;
+ },
+ };
+
+ const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex));
+ const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
- const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex));
- const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+ beginCommandBuffer (vk, *cmdBuffer);
+ vk.cmdCopyBuffer (*cmdBuffer, *m_stagingBuffer, *m_sparseBuffer, DE_LENGTH_OF_ARRAY(copyRegions), copyRegions);
+ endCommandBuffer (vk, *cmdBuffer);
- beginCommandBuffer (vk, *cmdBuffer);
- vk.cmdCopyBuffer (*cmdBuffer, *m_stagingBuffer, *m_sparseBuffer, DE_LENGTH_OF_ARRAY(copyRegions), copyRegions);
- endCommandBuffer (vk, *cmdBuffer);
+ submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer, 0u, DE_NULL, DE_NULL, 0, DE_NULL, usingDeviceGroups(), firstDeviceID);
+ }
- submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer);
- }
- draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
+ Renderer::SpecializationMap specMap;
+ draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, DE_NULL, specMap, usingDeviceGroups(), firstDeviceID);
- return verifyDrawResult();
+ if(!isResultImageCorrect())
+ return tcu::TestStatus::fail("Some buffer values were incorrect");
+ }
+ return tcu::TestStatus::pass("Pass");
}
protected:
TestFlags flags;
} groups[] =
{
- { "sparse_binding", 0u },
- { "sparse_binding_aliased", TEST_FLAG_ALIASED, },
- { "sparse_residency", TEST_FLAG_RESIDENCY, },
- { "sparse_residency_aliased", TEST_FLAG_RESIDENCY | TEST_FLAG_ALIASED, },
- { "sparse_residency_non_resident_strict", TEST_FLAG_RESIDENCY | TEST_FLAG_NON_RESIDENT_STRICT, },
+ { "sparse_binding", 0u, },
+ { "sparse_binding_aliased", TEST_FLAG_ALIASED, },
+ { "sparse_residency", TEST_FLAG_RESIDENCY, },
+ { "sparse_residency_aliased", TEST_FLAG_RESIDENCY | TEST_FLAG_ALIASED, },
+ { "sparse_residency_non_resident_strict", TEST_FLAG_RESIDENCY | TEST_FLAG_NON_RESIDENT_STRICT,},
};
const int numGroupsIncludingNonResidentStrict = DE_LENGTH_OF_ARRAY(groups);
const int numGroupsDefaultList = numGroupsIncludingNonResidentStrict - 1;
+ std::string devGroupPrefix = "device_group_";
// Transfer
{
MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "transfer", ""));
{
MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding", ""));
- addBufferSparseBindingTests(subGroup.get());
+ addBufferSparseBindingTests(subGroup.get(), false);
group->addChild(subGroup.release());
+
+ MovePtr<tcu::TestCaseGroup> subGroupDeviceGroups(new tcu::TestCaseGroup(parentGroup->getTestContext(), "device_group_sparse_binding", ""));
+ addBufferSparseBindingTests(subGroupDeviceGroups.get(), true);
+ group->addChild(subGroupDeviceGroups.release());
}
parentGroup->addChild(group.release());
}
MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ssbo", ""));
{
MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding_aliased", ""));
- addBufferSparseMemoryAliasingTests(subGroup.get());
+ addBufferSparseMemoryAliasingTests(subGroup.get(), false);
group->addChild(subGroup.release());
+
+ MovePtr<tcu::TestCaseGroup> subGroupDeviceGroups(new tcu::TestCaseGroup(parentGroup->getTestContext(), "device_group_sparse_binding_aliased", ""));
+ addBufferSparseMemoryAliasingTests(subGroupDeviceGroups.get(), true);
+ group->addChild(subGroupDeviceGroups.release());
}
{
MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_residency", ""));
- addBufferSparseResidencyTests(subGroup.get());
+ addBufferSparseResidencyTests(subGroup.get(), false);
group->addChild(subGroup.release());
+
+ MovePtr<tcu::TestCaseGroup> subGroupDeviceGroups(new tcu::TestCaseGroup(parentGroup->getTestContext(), "device_group_sparse_residency", ""));
+ addBufferSparseResidencyTests(subGroupDeviceGroups.get(), true);
+ group->addChild(subGroupDeviceGroups.release());
}
parentGroup->addChild(group.release());
}
MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ubo", ""));
for (int groupNdx = 0u; groupNdx < numGroupsIncludingNonResidentStrict; ++groupNdx)
+ {
group->addChild(createTestInstanceWithPrograms<UBOTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawWithUBO, groups[groupNdx].flags));
-
+ }
+ for (int groupNdx = 0u; groupNdx < numGroupsIncludingNonResidentStrict; ++groupNdx)
+ {
+ group->addChild(createTestInstanceWithPrograms<UBOTestInstance>(group->getTestContext(), (devGroupPrefix + groups[groupNdx].name).c_str(), "", initProgramsDrawWithUBO, groups[groupNdx].flags | TEST_FLAG_ENABLE_DEVICE_GROUPS));
+ }
parentGroup->addChild(group.release());
}
MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "vertex_buffer", ""));
for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
+ {
group->addChild(createTestInstanceWithPrograms<VertexBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
+ }
+ for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
+ {
+ group->addChild(createTestInstanceWithPrograms<VertexBufferTestInstance>(group->getTestContext(), (devGroupPrefix + groups[groupNdx].name).c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags | TEST_FLAG_ENABLE_DEVICE_GROUPS));
+ }
parentGroup->addChild(group.release());
}
MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "index_buffer", ""));
for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
+ {
group->addChild(createTestInstanceWithPrograms<IndexBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
+ }
+ for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
+ {
+ group->addChild(createTestInstanceWithPrograms<IndexBufferTestInstance>(group->getTestContext(), (devGroupPrefix + groups[groupNdx].name).c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags | TEST_FLAG_ENABLE_DEVICE_GROUPS));
+ }
parentGroup->addChild(group.release());
}
MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "indirect_buffer", ""));
for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
+ {
group->addChild(createTestInstanceWithPrograms<IndirectBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
+ }
+ for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
+ {
+ group->addChild(createTestInstanceWithPrograms<IndirectBufferTestInstance>(group->getTestContext(), (devGroupPrefix + groups[groupNdx].name).c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags | TEST_FLAG_ENABLE_DEVICE_GROUPS));
+ }
parentGroup->addChild(group.release());
}
const ImageType imageType,
const tcu::UVec3& imageSize,
const tcu::TextureFormat& format,
- const glu::GLSLVersion glslVersion);
+ const glu::GLSLVersion glslVersion,
+ const bool useDeviceGroups);
void initPrograms (SourceCollections& sourceCollections) const;
TestInstance* createInstance (Context& context) const;
private:
+ const bool m_useDeviceGroups;
const ImageType m_imageType;
const tcu::UVec3 m_imageSize;
const tcu::TextureFormat m_format;
const ImageType imageType,
const tcu::UVec3& imageSize,
const tcu::TextureFormat& format,
- const glu::GLSLVersion glslVersion)
+ const glu::GLSLVersion glslVersion,
+ const bool useDeviceGroups)
: TestCase (testCtx, name, description)
+ , m_useDeviceGroups (useDeviceGroups)
, m_imageType (imageType)
, m_imageSize (imageSize)
, m_format (format)
ImageSparseMemoryAliasingInstance (Context& context,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format);
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups);
tcu::TestStatus iterate (void);
private:
+ const bool m_useDeviceGroups;
const ImageType m_imageType;
const tcu::UVec3 m_imageSize;
const tcu::TextureFormat m_format;
ImageSparseMemoryAliasingInstance::ImageSparseMemoryAliasingInstance (Context& context,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format)
- : SparseResourcesBaseInstance (context)
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups)
+ : SparseResourcesBaseInstance (context, useDeviceGroups)
+ , m_useDeviceGroups (useDeviceGroups)
, m_imageType (imageType)
, m_imageSize (imageSize)
, m_format (format)
tcu::TestStatus ImageSparseMemoryAliasingInstance::iterate (void)
{
const InstanceInterface& instance = m_context.getInstanceInterface();
- const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
+
+ {
+ // Create logical device supporting both sparse and compute queues
+ QueueRequirementsVec queueRequirements;
+ queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
+ queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
+
+ createDeviceSupportingQueues(queueRequirements);
+ }
+
+ const VkPhysicalDevice physicalDevice = getPhysicalDevice();
const tcu::UVec3 maxWorkGroupSize = tcu::UVec3(128u, 128u, 64u);
const tcu::UVec3 maxWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u);
const deUint32 maxWorkGroupInvocations = 128u;
VkSparseImageMemoryRequirements aspectRequirements;
std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
+	// TODO: these support checks should be performed after m_imageType is created,
+	// and the queue lookups should be hoisted outside the per-physical-device loop.
+	// The same cleanup applies to all of the image test files.
+
// Check if image size does not exceed device limits
if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize))
TCU_THROW(NotSupportedError, "Image size not supported for device");
if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType))
TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported");
- imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- imageSparseInfo.pNext = DE_NULL;
- imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
- VK_IMAGE_CREATE_SPARSE_ALIASED_BIT |
- VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
- imageSparseInfo.imageType = mapImageType(m_imageType);
- imageSparseInfo.format = mapTextureFormat(m_format);
- imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
- imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
- imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;
- imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
- imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT |
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
- VK_IMAGE_USAGE_STORAGE_BIT;
- imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- imageSparseInfo.queueFamilyIndexCount = 0u;
- imageSparseInfo.pQueueFamilyIndices = DE_NULL;
-
- if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
- imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+ const DeviceInterface& deviceInterface = getDeviceInterface();
+ const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
+ const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
+ // Go through all physical devices
+ for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
{
- // Assign maximum allowed mipmap levels to image
- VkImageFormatProperties imageFormatProperties;
- instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
- imageSparseInfo.format,
- imageSparseInfo.imageType,
- imageSparseInfo.tiling,
- imageSparseInfo.usage,
- imageSparseInfo.flags,
- &imageFormatProperties);
-
- imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
- }
+ const deUint32 firstDeviceID = physDevID;
+ const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
+
+ imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ imageSparseInfo.pNext = DE_NULL;
+ imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
+ VK_IMAGE_CREATE_SPARSE_ALIASED_BIT |
+ VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
+ imageSparseInfo.imageType = mapImageType(m_imageType);
+ imageSparseInfo.format = mapTextureFormat(m_format);
+ imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
+ imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
+ imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+ imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_STORAGE_BIT;
+ imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ imageSparseInfo.queueFamilyIndexCount = 0u;
+ imageSparseInfo.pQueueFamilyIndices = DE_NULL;
+
+ if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
+ imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
- // Check if device supports sparse operations for image format
- if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
- TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
+ {
+ // Assign maximum allowed mipmap levels to image
+ VkImageFormatProperties imageFormatProperties;
+ instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
+ imageSparseInfo.format,
+ imageSparseInfo.imageType,
+ imageSparseInfo.tiling,
+ imageSparseInfo.usage,
+ imageSparseInfo.flags,
+ &imageFormatProperties);
+
+ imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
+ }
- {
- // Create logical device supporting both sparse and compute queues
- QueueRequirementsVec queueRequirements;
- queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
- queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
+ // Check if device supports sparse operations for image format
+ if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
+ TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
- createDeviceSupportingQueues(queueRequirements);
- }
+ // Create sparse image
+ const Unique<VkImage> imageRead(createImage(deviceInterface, getDevice(), &imageSparseInfo));
+ const Unique<VkImage> imageWrite(createImage(deviceInterface, getDevice(), &imageSparseInfo));
- const DeviceInterface& deviceInterface = getDeviceInterface();
- const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
- const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
+ // Create semaphores to synchronize sparse binding operations with other operations on the sparse images
+ const Unique<VkSemaphore> memoryBindSemaphoreTransfer(createSemaphore(deviceInterface, getDevice()));
+ const Unique<VkSemaphore> memoryBindSemaphoreCompute(createSemaphore(deviceInterface, getDevice()));
- // Create sparse image
- const Unique<VkImage> imageRead(createImage(deviceInterface, getDevice(), &imageSparseInfo));
- const Unique<VkImage> imageWrite(createImage(deviceInterface, getDevice(), &imageSparseInfo));
+ const VkSemaphore imageMemoryBindSemaphores[] = { memoryBindSemaphoreTransfer.get(), memoryBindSemaphoreCompute.get() };
- // Create semaphores to synchronize sparse binding operations with other operations on the sparse images
- const Unique<VkSemaphore> memoryBindSemaphoreTransfer(createSemaphore(deviceInterface, getDevice()));
- const Unique<VkSemaphore> memoryBindSemaphoreCompute(createSemaphore(deviceInterface, getDevice()));
+ {
+ std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
+ std::vector<VkSparseMemoryBind> imageReadMipTailBinds;
+ std::vector<VkSparseMemoryBind> imageWriteMipTailBinds;
- const VkSemaphore imageMemoryBindSemaphores[] = { memoryBindSemaphoreTransfer.get(), memoryBindSemaphoreCompute.get() };
+ // Get sparse image general memory requirements
+ const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageRead);
- {
- std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
- std::vector<VkSparseMemoryBind> imageReadMipTailBinds;
- std::vector<VkSparseMemoryBind> imageWriteMipTailBinds;
+ // Check if required image memory size does not exceed device limits
+ if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
+ TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
- // Get sparse image general memory requirements
- const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageRead);
+ DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
- // Check if required image memory size does not exceed device limits
- if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
- TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
+ // Get sparse image sparse memory requirements
+ const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageRead);
- DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
+ DE_ASSERT(sparseMemoryRequirements.size() != 0);
- // Get sparse image sparse memory requirements
- const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageRead);
+ const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
- DE_ASSERT(sparseMemoryRequirements.size() != 0);
+ if (colorAspectIndex == NO_MATCH_FOUND)
+ TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
- const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+ aspectRequirements = sparseMemoryRequirements[colorAspectIndex];
- if (colorAspectIndex == NO_MATCH_FOUND)
- TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
+ const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;
+ const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
- aspectRequirements = sparseMemoryRequirements[colorAspectIndex];
+ DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
- const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;
- const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
+ const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);
- DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
+ if (memoryType == NO_MATCH_FOUND)
+ return tcu::TestStatus::fail("No matching memory type found");
- const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);
+ // Bind memory for each layer
+ for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
+ {
+ for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
+ {
+ const VkExtent3D mipExtent = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx);
+ const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity);
+ const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
+ const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx };
- if (memoryType == NO_MATCH_FOUND)
- return tcu::TestStatus::fail("No matching memory type found");
+ const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
+ imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);
- // Bind memory for each layer
- for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
- {
- for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
- {
- const VkExtent3D mipExtent = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx);
- const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity);
- const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
- const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx };
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageResidencyMemoryBinds.push_back(imageMemoryBind);
+ }
+
+ if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
+ {
+ const VkSparseMemoryBind imageReadMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageReadMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
- const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
- imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);
+ imageReadMipTailBinds.push_back(imageReadMipTailMemoryBind);
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ const VkSparseMemoryBind imageWriteMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
- imageResidencyMemoryBinds.push_back(imageMemoryBind);
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageWriteMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageWriteMipTailBinds.push_back(imageWriteMipTailMemoryBind);
+ }
}
- if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
+ if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
{
const VkSparseMemoryBind imageReadMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
+ aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageReadMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
imageReadMipTailBinds.push_back(imageReadMipTailMemoryBind);
const VkSparseMemoryBind imageWriteMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
+ aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageWriteMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
imageWriteMipTailBinds.push_back(imageWriteMipTailMemoryBind);
}
- }
-
- if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
- {
- const VkSparseMemoryBind imageReadMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageReadMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType;
+ DE_NULL, //const void* pNext;
+ firstDeviceID, //deUint32 resourceDeviceIndex;
+ secondDeviceID, //deUint32 memoryDeviceIndex;
+ };
- imageReadMipTailBinds.push_back(imageReadMipTailMemoryBind);
+ VkBindSparseInfo bindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
+ m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext;
+ 0u, //deUint32 waitSemaphoreCount;
+ DE_NULL, //const VkSemaphore* pWaitSemaphores;
+ 0u, //deUint32 bufferBindCount;
+ DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ 0u, //deUint32 imageOpaqueBindCount;
+ DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ 0u, //deUint32 imageBindCount;
+ DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
+ 2u, //deUint32 signalSemaphoreCount;
+ imageMemoryBindSemaphores //const VkSemaphore* pSignalSemaphores;
+ };
+
+ VkSparseImageMemoryBindInfo imageResidencyBindInfo[2];
+ VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo[2];
+
+ if (imageResidencyMemoryBinds.size() > 0)
+ {
+ imageResidencyBindInfo[0].image = *imageRead;
+ imageResidencyBindInfo[0].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
+ imageResidencyBindInfo[0].pBinds = &imageResidencyMemoryBinds[0];
- const VkSparseMemoryBind imageWriteMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
+ imageResidencyBindInfo[1].image = *imageWrite;
+ imageResidencyBindInfo[1].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
+ imageResidencyBindInfo[1].pBinds = &imageResidencyMemoryBinds[0];
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageWriteMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ bindSparseInfo.imageBindCount = 2u;
+ bindSparseInfo.pImageBinds = imageResidencyBindInfo;
+ }
- imageWriteMipTailBinds.push_back(imageWriteMipTailMemoryBind);
- }
+ if (imageReadMipTailBinds.size() > 0)
+ {
+ imageMipTailBindInfo[0].image = *imageRead;
+ imageMipTailBindInfo[0].bindCount = static_cast<deUint32>(imageReadMipTailBinds.size());
+ imageMipTailBindInfo[0].pBinds = &imageReadMipTailBinds[0];
- VkBindSparseInfo bindSparseInfo =
- {
- VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
- DE_NULL, //const void* pNext;
- 0u, //deUint32 waitSemaphoreCount;
- DE_NULL, //const VkSemaphore* pWaitSemaphores;
- 0u, //deUint32 bufferBindCount;
- DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
- 0u, //deUint32 imageOpaqueBindCount;
- DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
- 0u, //deUint32 imageBindCount;
- DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
- 2u, //deUint32 signalSemaphoreCount;
- imageMemoryBindSemaphores //const VkSemaphore* pSignalSemaphores;
- };
-
- VkSparseImageMemoryBindInfo imageResidencyBindInfo[2];
- VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo[2];
-
- if (imageResidencyMemoryBinds.size() > 0)
- {
- imageResidencyBindInfo[0].image = *imageRead;
- imageResidencyBindInfo[0].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
- imageResidencyBindInfo[0].pBinds = &imageResidencyMemoryBinds[0];
+ imageMipTailBindInfo[1].image = *imageWrite;
+ imageMipTailBindInfo[1].bindCount = static_cast<deUint32>(imageWriteMipTailBinds.size());
+ imageMipTailBindInfo[1].pBinds = &imageWriteMipTailBinds[0];
- imageResidencyBindInfo[1].image = *imageWrite;
- imageResidencyBindInfo[1].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
- imageResidencyBindInfo[1].pBinds = &imageResidencyMemoryBinds[0];
+ bindSparseInfo.imageOpaqueBindCount = 2u;
+ bindSparseInfo.pImageOpaqueBinds = imageMipTailBindInfo;
+ }
- bindSparseInfo.imageBindCount = 2u;
- bindSparseInfo.pImageBinds = imageResidencyBindInfo;
+ // Submit sparse bind commands for execution
+ VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
}
- if (imageReadMipTailBinds.size() > 0)
- {
- imageMipTailBindInfo[0].image = *imageRead;
- imageMipTailBindInfo[0].bindCount = static_cast<deUint32>(imageReadMipTailBinds.size());
- imageMipTailBindInfo[0].pBinds = &imageReadMipTailBinds[0];
+	// Create command buffer for compute and transfer operations
+ const Unique<VkCommandPool> commandPool (makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+ const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
- imageMipTailBindInfo[1].image = *imageWrite;
- imageMipTailBindInfo[1].bindCount = static_cast<deUint32>(imageWriteMipTailBinds.size());
- imageMipTailBindInfo[1].pBinds = &imageWriteMipTailBinds[0];
+ std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
- bindSparseInfo.imageOpaqueBindCount = 2u;
- bindSparseInfo.pImageOpaqueBinds = imageMipTailBindInfo;
+ {
+ deUint32 bufferOffset = 0u;
+ for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
+ {
+ bufferImageCopy[mipLevelNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipLevelNdx), imageSparseInfo.arrayLayers, mipLevelNdx, bufferOffset);
+ bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+ }
}
- // Submit sparse bind commands for execution
- VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
- }
+ // Start recording commands
+ beginCommandBuffer(deviceInterface, *commandBuffer);
- // Create command buffer for compute and transfer oparations
- const Unique<VkCommandPool> commandPool (makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
- const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+ const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+ const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+ const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
+ const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
- std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
+ std::vector<deUint8> referenceData(imageSizeInBytes);
- {
- deUint32 bufferOffset = 0u;
for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
{
- bufferImageCopy[mipLevelNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipLevelNdx), imageSparseInfo.arrayLayers, mipLevelNdx, bufferOffset);
- bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
- }
- }
-
- // Start recording commands
- beginCommandBuffer(deviceInterface, *commandBuffer);
+ const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
+ const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
- const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
- const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
- const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
- const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
+ deMemset(&referenceData[bufferOffset], mipLevelNdx + 1u, mipLevelSizeInBytes);
+ }
- std::vector<deUint8> referenceData(imageSizeInBytes);
+ deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
- for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
- {
- const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
- const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
+ flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
- deMemset(&referenceData[bufferOffset], mipLevelNdx + 1u, mipLevelSizeInBytes);
- }
-
- deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
+ {
+ const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
+ (
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ *inputBuffer,
+ 0u,
+ imageSizeInBytes
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
+ }
- flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
+ {
+ const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
+ (
+ 0u,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+ sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+ *imageRead,
+ makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
+ }
- {
- const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
- (
- VK_ACCESS_HOST_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- *inputBuffer,
- 0u,
- imageSizeInBytes
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
- }
+ deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
- {
- const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
- (
- 0u,
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_IMAGE_LAYOUT_UNDEFINED,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
- sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
- *imageRead,
- makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
- }
+ {
+ const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
+ (
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ *imageRead,
+ makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
+ }
- deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
+ {
+ const VkImageMemoryBarrier imageSparseShaderStorageBarrier = makeImageMemoryBarrier
+ (
+ 0u,
+ VK_ACCESS_SHADER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_GENERAL,
+ *imageWrite,
+ makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseShaderStorageBarrier);
+ }
- {
- const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
- (
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
- *imageRead,
- makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
- }
+ // Create descriptor set layout
+ const Unique<VkDescriptorSetLayout> descriptorSetLayout(
+ DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
+ .build(deviceInterface, getDevice()));
- {
- const VkImageMemoryBarrier imageSparseShaderStorageBarrier = makeImageMemoryBarrier
- (
- 0u,
- VK_ACCESS_SHADER_WRITE_BIT,
- VK_IMAGE_LAYOUT_UNDEFINED,
- VK_IMAGE_LAYOUT_GENERAL,
- *imageWrite,
- makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseShaderStorageBarrier);
- }
+ Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
- // Create descriptor set layout
- const Unique<VkDescriptorSetLayout> descriptorSetLayout(
- DescriptorSetLayoutBuilder()
- .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
- .build(deviceInterface, getDevice()));
+ Unique<VkDescriptorPool> descriptorPool(
+ DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, imageSparseInfo.mipLevels)
+ .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, imageSparseInfo.mipLevels));
- Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
+ typedef de::SharedPtr< Unique<VkImageView> > SharedVkImageView;
+ std::vector<SharedVkImageView> imageViews;
+ imageViews.resize(imageSparseInfo.mipLevels);
- Unique<VkDescriptorPool> descriptorPool(
- DescriptorPoolBuilder()
- .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, imageSparseInfo.mipLevels)
- .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, imageSparseInfo.mipLevels));
+ typedef de::SharedPtr< Unique<VkDescriptorSet> > SharedVkDescriptorSet;
+ std::vector<SharedVkDescriptorSet> descriptorSets;
+ descriptorSets.resize(imageSparseInfo.mipLevels);
- typedef de::SharedPtr< Unique<VkImageView> > SharedVkImageView;
- std::vector<SharedVkImageView> imageViews;
- imageViews.resize(imageSparseInfo.mipLevels);
+ typedef de::SharedPtr< Unique<VkPipeline> > SharedVkPipeline;
+ std::vector<SharedVkPipeline> computePipelines;
+ computePipelines.resize(imageSparseInfo.mipLevels);
- typedef de::SharedPtr< Unique<VkDescriptorSet> > SharedVkDescriptorSet;
- std::vector<SharedVkDescriptorSet> descriptorSets;
- descriptorSets.resize(imageSparseInfo.mipLevels);
+ for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
+ {
+ std::ostringstream name;
+ name << "comp" << mipLevelNdx;
- typedef de::SharedPtr< Unique<VkPipeline> > SharedVkPipeline;
- std::vector<SharedVkPipeline> computePipelines;
- computePipelines.resize(imageSparseInfo.mipLevels);
+ // Create and bind compute pipeline
+ Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get(name.str()), DE_NULL));
- for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
- {
- std::ostringstream name;
- name << "comp" << mipLevelNdx;
+ computePipelines[mipLevelNdx] = makeVkSharedPtr(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+ VkPipeline computePipeline = **computePipelines[mipLevelNdx];
- // Create and bind compute pipeline
- Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get(name.str()), DE_NULL));
+ deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline);
- computePipelines[mipLevelNdx] = makeVkSharedPtr(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
- VkPipeline computePipeline = **computePipelines[mipLevelNdx];
+ // Create and bind descriptor set
+ descriptorSets[mipLevelNdx] = makeVkSharedPtr(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
+ VkDescriptorSet descriptorSet = **descriptorSets[mipLevelNdx];
- deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline);
+ // Select which mipmap level to bind
+ const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevelNdx, 1u, 0u, imageSparseInfo.arrayLayers);
- // Create and bind descriptor set
- descriptorSets[mipLevelNdx] = makeVkSharedPtr(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
- VkDescriptorSet descriptorSet = **descriptorSets[mipLevelNdx];
+ imageViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, getDevice(), *imageWrite, mapImageViewType(m_imageType), imageSparseInfo.format, subresourceRange));
+ VkImageView imageView = **imageViews[mipLevelNdx];
- // Select which mipmap level to bind
- const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevelNdx, 1u, 0u, imageSparseInfo.arrayLayers);
+ const VkDescriptorImageInfo sparseImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
- imageViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, getDevice(), *imageWrite, mapImageViewType(m_imageType), imageSparseInfo.format, subresourceRange));
- VkImageView imageView = **imageViews[mipLevelNdx];
+ DescriptorSetUpdateBuilder()
+ .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo)
+ .update(deviceInterface, getDevice());
- const VkDescriptorImageInfo sparseImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
+ deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
- DescriptorSetUpdateBuilder()
- .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo)
- .update(deviceInterface, getDevice());
+ const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
+ const deUint32 xWorkGroupSize = std::min(std::min(gridSize.x(), maxWorkGroupSize.x()), maxWorkGroupInvocations);
+ const deUint32 yWorkGroupSize = std::min(std::min(gridSize.y(), maxWorkGroupSize.y()), maxWorkGroupInvocations / xWorkGroupSize);
+ const deUint32 zWorkGroupSize = std::min(std::min(gridSize.z(), maxWorkGroupSize.z()), maxWorkGroupInvocations / (xWorkGroupSize * yWorkGroupSize));
- deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
+ const deUint32 xWorkGroupCount = gridSize.x() / xWorkGroupSize + (gridSize.x() % xWorkGroupSize ? 1u : 0u);
+ const deUint32 yWorkGroupCount = gridSize.y() / yWorkGroupSize + (gridSize.y() % yWorkGroupSize ? 1u : 0u);
+ const deUint32 zWorkGroupCount = gridSize.z() / zWorkGroupSize + (gridSize.z() % zWorkGroupSize ? 1u : 0u);
- const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
- const deUint32 xWorkGroupSize = std::min(std::min(gridSize.x(), maxWorkGroupSize.x()), maxWorkGroupInvocations);
- const deUint32 yWorkGroupSize = std::min(std::min(gridSize.y(), maxWorkGroupSize.y()), maxWorkGroupInvocations / xWorkGroupSize);
- const deUint32 zWorkGroupSize = std::min(std::min(gridSize.z(), maxWorkGroupSize.z()), maxWorkGroupInvocations / (xWorkGroupSize * yWorkGroupSize));
+ if (maxWorkGroupCount.x() < xWorkGroupCount ||
+ maxWorkGroupCount.y() < yWorkGroupCount ||
+ maxWorkGroupCount.z() < zWorkGroupCount)
+ TCU_THROW(NotSupportedError, "Image size is not supported");
- const deUint32 xWorkGroupCount = gridSize.x() / xWorkGroupSize + (gridSize.x() % xWorkGroupSize ? 1u : 0u);
- const deUint32 yWorkGroupCount = gridSize.y() / yWorkGroupSize + (gridSize.y() % yWorkGroupSize ? 1u : 0u);
- const deUint32 zWorkGroupCount = gridSize.z() / zWorkGroupSize + (gridSize.z() % zWorkGroupSize ? 1u : 0u);
+ deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
+ }
- if (maxWorkGroupCount.x() < xWorkGroupCount ||
- maxWorkGroupCount.y() < yWorkGroupCount ||
- maxWorkGroupCount.z() < zWorkGroupCount)
- TCU_THROW(NotSupportedError, "Image size is not supported");
+ {
+ const VkMemoryBarrier memoryBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
- deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
- }
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 1u, &memoryBarrier, 0u, DE_NULL, 0u, DE_NULL);
+ }
- {
- const VkMemoryBarrier memoryBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
+ const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+ const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+ const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 1u, &memoryBarrier, 0u, DE_NULL, 0u, DE_NULL);
- }
+ deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
- const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
- const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
- const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+ {
+ const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
+ (
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ *outputBuffer,
+ 0u,
+ imageSizeInBytes
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
+ }
- deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
+ // End recording commands
+ endCommandBuffer(deviceInterface, *commandBuffer);
- {
- const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
- (
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_HOST_READ_BIT,
- *outputBuffer,
- 0u,
- imageSizeInBytes
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
- }
+ const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
- // End recording commands
- endCommandBuffer(deviceInterface, *commandBuffer);
+ // Submit commands for execution and wait for completion
+ submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 2u, imageMemoryBindSemaphores, stageBits,
+ 0, DE_NULL, m_useDeviceGroups, firstDeviceID);
- const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
+ // Retrieve data from buffer to host memory
+ invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
- // Submit commands for execution and wait for completion
- submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 2u, imageMemoryBindSemaphores, stageBits);
+ const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
- // Retrieve data from buffer to host memory
- invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
+ // Wait for sparse queue to become idle
+ deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
- const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+ for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
+ {
+ const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
+ const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
+ const tcu::ConstPixelBufferAccess pixelBuffer = tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData + bufferOffset);
- // Wait for sparse queue to become idle
- deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+ for (deUint32 offsetZ = 0u; offsetZ < gridSize.z(); ++offsetZ)
+ for (deUint32 offsetY = 0u; offsetY < gridSize.y(); ++offsetY)
+ for (deUint32 offsetX = 0u; offsetX < gridSize.x(); ++offsetX)
+ {
+ const deUint32 index = offsetX + (offsetY + offsetZ * gridSize.y()) * gridSize.x();
+ const tcu::UVec4 referenceValue = tcu::UVec4(index % MODULO_DIVISOR, index % MODULO_DIVISOR, index % MODULO_DIVISOR, 1u);
+ const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
- for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
- {
- const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
- const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
- const tcu::ConstPixelBufferAccess pixelBuffer = tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData + bufferOffset);
+ if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
+ return tcu::TestStatus::fail("Failed");
+ }
+ }
- for (deUint32 offsetZ = 0u; offsetZ < gridSize.z(); ++offsetZ)
- for (deUint32 offsetY = 0u; offsetY < gridSize.y(); ++offsetY)
- for (deUint32 offsetX = 0u; offsetX < gridSize.x(); ++offsetX)
+ for (deUint32 mipLevelNdx = aspectRequirements.imageMipTailFirstLod; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
{
- const deUint32 index = offsetX + (offsetY + offsetZ * gridSize.y()) * gridSize.x();
- const tcu::UVec4 referenceValue = tcu::UVec4(index % MODULO_DIVISOR, index % MODULO_DIVISOR, index % MODULO_DIVISOR, 1u);
- const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
+ const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
+ const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
- if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
+ if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
return tcu::TestStatus::fail("Failed");
}
}
- for (deUint32 mipLevelNdx = aspectRequirements.imageMipTailFirstLod; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
- {
- const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
- const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
-
- if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
- return tcu::TestStatus::fail("Failed");
- }
-
return tcu::TestStatus::pass("Passed");
}
TestInstance* ImageSparseMemoryAliasingCase::createInstance (Context& context) const
{
- return new ImageSparseMemoryAliasingInstance(context, m_imageType, m_imageSize, m_format);
+ return new ImageSparseMemoryAliasingInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
}
} // anonymous ns
-tcu::TestCaseGroup* createImageSparseMemoryAliasingTests (tcu::TestContext& testCtx)
+tcu::TestCaseGroup* createImageSparseMemoryAliasingTestsCommon(tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
{
- de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_memory_aliasing", "Sparse Image Memory Aliasing"));
-
static const deUint32 sizeCountPerImageType = 4u;
struct ImageParameters
std::ostringstream stream;
stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();
- formatGroup->addChild(new ImageSparseMemoryAliasingCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440));
+ formatGroup->addChild(new ImageSparseMemoryAliasingCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440, useDeviceGroup));
}
imageTypeGroup->addChild(formatGroup.release());
}
return testGroup.release();
}
+tcu::TestCaseGroup* createImageSparseMemoryAliasingTests(tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_memory_aliasing", "Sparse Image Memory Aliasing"));
+ return createImageSparseMemoryAliasingTestsCommon(testCtx, testGroup);
+}
+
+tcu::TestCaseGroup* createDeviceGroupImageSparseMemoryAliasingTests(tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_image_sparse_memory_aliasing", "Sparse Image Memory Aliasing"));
+ return createImageSparseMemoryAliasingTestsCommon(testCtx, testGroup, true);
+}
+
} // sparse
} // vkt
{
tcu::TestCaseGroup* createImageSparseMemoryAliasingTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createDeviceGroupImageSparseMemoryAliasingTests(tcu::TestContext& testCtx);
} // sparse
} // vkt
const std::string& description,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format);
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups = false);
TestInstance* createInstance (Context& context) const;
private:
+ const bool m_useDeviceGroups;
const ImageType m_imageType;
const tcu::UVec3 m_imageSize;
const tcu::TextureFormat m_format;
const std::string& description,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format)
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups)
+
: TestCase (testCtx, name, description)
+ , m_useDeviceGroups (useDeviceGroups)
, m_imageType (imageType)
, m_imageSize (imageSize)
, m_format (format)
ImageSparseBindingInstance (Context& context,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format);
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups);
tcu::TestStatus iterate (void);
private:
+ const bool m_useDeviceGroups;
const ImageType m_imageType;
const tcu::UVec3 m_imageSize;
const tcu::TextureFormat m_format;
ImageSparseBindingInstance::ImageSparseBindingInstance (Context& context,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format)
- : SparseResourcesBaseInstance (context)
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups)
+
+ : SparseResourcesBaseInstance (context, useDeviceGroups)
+ , m_useDeviceGroups (useDeviceGroups)
, m_imageType (imageType)
, m_imageSize (imageSize)
, m_format (format)
tcu::TestStatus ImageSparseBindingInstance::iterate (void)
{
const InstanceInterface& instance = m_context.getInstanceInterface();
- const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
+
+ {
+ // Create logical device supporting both sparse and compute queues
+ QueueRequirementsVec queueRequirements;
+ queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
+ queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
+
+ createDeviceSupportingQueues(queueRequirements);
+ }
+
+ const VkPhysicalDevice physicalDevice = getPhysicalDevice();
VkImageCreateInfo imageSparseInfo;
std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseBinding)
TCU_THROW(NotSupportedError, "Device does not support sparse binding");
- {
- // Create logical device supporting both sparse and compute queues
- QueueRequirementsVec queueRequirements;
- queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
- queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
-
- createDeviceSupportingQueues(queueRequirements);
- }
-
const DeviceInterface& deviceInterface = getDeviceInterface();
const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
- imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; //VkStructureType sType;
- imageSparseInfo.pNext = DE_NULL; //const void* pNext;
- imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT; //VkImageCreateFlags flags;
- imageSparseInfo.imageType = mapImageType(m_imageType); //VkImageType imageType;
- imageSparseInfo.format = mapTextureFormat(m_format); //VkFormat format;
- imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize)); //VkExtent3D extent;
- imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize); //deUint32 arrayLayers;
- imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT; //VkSampleCountFlagBits samples;
- imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL; //VkImageTiling tiling;
- imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //VkImageLayout initialLayout;
- imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
- VK_IMAGE_USAGE_TRANSFER_DST_BIT; //VkImageUsageFlags usage;
- imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; //VkSharingMode sharingMode;
- imageSparseInfo.queueFamilyIndexCount = 0u; //deUint32 queueFamilyIndexCount;
- imageSparseInfo.pQueueFamilyIndices = DE_NULL; //const deUint32* pQueueFamilyIndices;
-
- if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
+ // Go through all physical devices
+ for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
{
- imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
- }
+ const deUint32 firstDeviceID = physDevID;
+ const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
+
+ imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; //VkStructureType sType;
+ imageSparseInfo.pNext = DE_NULL; //const void* pNext;
+ imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT; //VkImageCreateFlags flags;
+ imageSparseInfo.imageType = mapImageType(m_imageType); //VkImageType imageType;
+ imageSparseInfo.format = mapTextureFormat(m_format); //VkFormat format;
+ imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize)); //VkExtent3D extent;
+ imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize); //deUint32 arrayLayers;
+ imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT; //VkSampleCountFlagBits samples;
+ imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL; //VkImageTiling tiling;
+ imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //VkImageLayout initialLayout;
+ imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT; //VkImageUsageFlags usage;
+ imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; //VkSharingMode sharingMode;
+ imageSparseInfo.queueFamilyIndexCount = 0u; //deUint32 queueFamilyIndexCount;
+ imageSparseInfo.pQueueFamilyIndices = DE_NULL; //const deUint32* pQueueFamilyIndices;
+
+ if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
+ {
+ imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+ }
- {
- VkImageFormatProperties imageFormatProperties;
- instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
- imageSparseInfo.format,
- imageSparseInfo.imageType,
- imageSparseInfo.tiling,
- imageSparseInfo.usage,
- imageSparseInfo.flags,
- &imageFormatProperties);
-
- imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
- }
+ {
+ VkImageFormatProperties imageFormatProperties;
+ instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
+ imageSparseInfo.format,
+ imageSparseInfo.imageType,
+ imageSparseInfo.tiling,
+ imageSparseInfo.usage,
+ imageSparseInfo.flags,
+ &imageFormatProperties);
+
+ imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
+ }
- // Create sparse image
- const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));
+ // Create sparse image
+ const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));
- // Create sparse image memory bind semaphore
- const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+ // Create sparse image memory bind semaphore
+ const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
- // Get sparse image general memory requirements
- const VkMemoryRequirements imageSparseMemRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
+ // Get sparse image general memory requirements
+ const VkMemoryRequirements imageSparseMemRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
- // Check if required image memory size does not exceed device limits
- if (imageSparseMemRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
- TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
+ // Check if required image memory size does not exceed device limits
+ if (imageSparseMemRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
+ TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
- DE_ASSERT((imageSparseMemRequirements.size % imageSparseMemRequirements.alignment) == 0);
+ DE_ASSERT((imageSparseMemRequirements.size % imageSparseMemRequirements.alignment) == 0);
- {
- std::vector<VkSparseMemoryBind> sparseMemoryBinds;
- const deUint32 numSparseBinds = static_cast<deUint32>(imageSparseMemRequirements.size / imageSparseMemRequirements.alignment);
- const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageSparseMemRequirements, MemoryRequirement::Any);
+ {
+ std::vector<VkSparseMemoryBind> sparseMemoryBinds;
+ const deUint32 numSparseBinds = static_cast<deUint32>(imageSparseMemRequirements.size / imageSparseMemRequirements.alignment);
+ const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageSparseMemRequirements, MemoryRequirement::Any);
- if (memoryType == NO_MATCH_FOUND)
- return tcu::TestStatus::fail("No matching memory type found");
+ if (memoryType == NO_MATCH_FOUND)
+ return tcu::TestStatus::fail("No matching memory type found");
- for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseBinds; ++sparseBindNdx)
- {
- const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- imageSparseMemRequirements.alignment, memoryType, imageSparseMemRequirements.alignment * sparseBindNdx);
+ for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseBinds; ++sparseBindNdx)
+ {
+ const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ imageSparseMemRequirements.alignment, memoryType, imageSparseMemRequirements.alignment * sparseBindNdx);
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
- sparseMemoryBinds.push_back(sparseMemoryBind);
- }
+ sparseMemoryBinds.push_back(sparseMemoryBind);
+ }
- const VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo = makeSparseImageOpaqueMemoryBindInfo(*imageSparse, numSparseBinds, &sparseMemoryBinds[0]);
+ const VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo = makeSparseImageOpaqueMemoryBindInfo(*imageSparse, numSparseBinds, &sparseMemoryBinds[0]);
- const VkBindSparseInfo bindSparseInfo =
- {
- VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
- DE_NULL, //const void* pNext;
- 0u, //deUint32 waitSemaphoreCount;
- DE_NULL, //const VkSemaphore* pWaitSemaphores;
- 0u, //deUint32 bufferBindCount;
- DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
- 1u, //deUint32 imageOpaqueBindCount;
- &opaqueBindInfo, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
- 0u, //deUint32 imageBindCount;
- DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
- 1u, //deUint32 signalSemaphoreCount;
- &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
- };
-
- // Submit sparse bind commands for execution
- VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
- }
+ const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType;
+ DE_NULL, //const void* pNext;
+ firstDeviceID, //deUint32 resourceDeviceIndex;
+ secondDeviceID, //deUint32 memoryDeviceIndex;
+ };
+
+ const VkBindSparseInfo bindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
+ m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext;
+ 0u, //deUint32 waitSemaphoreCount;
+ DE_NULL, //const VkSemaphore* pWaitSemaphores;
+ 0u, //deUint32 bufferBindCount;
+ DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ 1u, //deUint32 imageOpaqueBindCount;
+ &opaqueBindInfo, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ 0u, //deUint32 imageBindCount;
+ DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
+ 1u, //deUint32 signalSemaphoreCount;
+ &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
+ };
+
+ // Submit sparse bind commands for execution
+ VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
+ }
- // Create command buffer for compute and transfer oparations
- const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
- const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+	// Create command buffer for compute and transfer operations
+ const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+ const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
- std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
+ std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
- {
- deUint32 bufferOffset = 0;
- for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; mipmapNdx++)
{
- bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, static_cast<VkDeviceSize>(bufferOffset));
- bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+ deUint32 bufferOffset = 0;
+ for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; mipmapNdx++)
+ {
+ bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, static_cast<VkDeviceSize>(bufferOffset));
+ bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+ }
}
- }
- // Start recording commands
- beginCommandBuffer(deviceInterface, *commandBuffer);
+ // Start recording commands
+ beginCommandBuffer(deviceInterface, *commandBuffer);
- const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
- const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
- const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
- const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
+ const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+ const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+ const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
+ const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
- std::vector<deUint8> referenceData(imageSizeInBytes);
+ std::vector<deUint8> referenceData(imageSizeInBytes);
- for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
- {
- referenceData[valueNdx] = static_cast<deUint8>((valueNdx % imageSparseMemRequirements.alignment) + 1u);
- }
+ for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
+ {
+ referenceData[valueNdx] = static_cast<deUint8>((valueNdx % imageSparseMemRequirements.alignment) + 1u);
+ }
- deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
+ deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
- flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
+ flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
- {
- const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
- (
- VK_ACCESS_HOST_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- *inputBuffer,
- 0u,
- imageSizeInBytes
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
- }
+ {
+ const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
+ (
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ *inputBuffer,
+ 0u,
+ imageSizeInBytes
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
+ }
- {
- const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
- (
- 0u,
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_IMAGE_LAYOUT_UNDEFINED,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
- sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
- *imageSparse,
- makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
- }
+ {
+ const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
+ (
+ 0u,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+ sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+ *imageSparse,
+ makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
+ }
- deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
+ deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
- {
- const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
- (
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
- *imageSparse,
- makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
- }
+ {
+ const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
+ (
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ *imageSparse,
+ makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
+ }
- const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
- const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
- const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+ const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+ const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+ const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
- deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
+ deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
- {
- const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
- (
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_HOST_READ_BIT,
- *outputBuffer,
- 0u,
- imageSizeInBytes
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
- }
+ {
+ const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
+ (
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ *outputBuffer,
+ 0u,
+ imageSizeInBytes
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
+ }
- // End recording commands
- endCommandBuffer(deviceInterface, *commandBuffer);
+ // End recording commands
+ endCommandBuffer(deviceInterface, *commandBuffer);
- const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
+ const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
- // Submit commands for execution and wait for completion
- submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits);
+ // Submit commands for execution and wait for completion
+ submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits,
+ 0, DE_NULL, m_useDeviceGroups, firstDeviceID);
- // Retrieve data from buffer to host memory
- invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
+ // Retrieve data from buffer to host memory
+ invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
- const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+ const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
- // Wait for sparse queue to become idle
- deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+ // Wait for sparse queue to become idle
+ deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
- for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
- {
- const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx);
- const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipmapNdx].bufferOffset);
+ for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
+ {
+ const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx);
+ const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipmapNdx].bufferOffset);
- if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
- return tcu::TestStatus::fail("Failed");
+ if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
+ return tcu::TestStatus::fail("Failed");
+ }
}
return tcu::TestStatus::pass("Passed");
TestInstance* ImageSparseBindingCase::createInstance (Context& context) const
{
- return new ImageSparseBindingInstance(context, m_imageType, m_imageSize, m_format);
+ return new ImageSparseBindingInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
}
} // anonymous ns
-tcu::TestCaseGroup* createImageSparseBindingTests(tcu::TestContext& testCtx)
+tcu::TestCaseGroup* createImageSparseBindingTestsCommon(tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
{
- de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_binding", "Buffer Sparse Binding"));
-
static const deUint32 sizeCountPerImageType = 3u;
struct ImageParameters
tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8)
};
+
for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray); ++imageTypeNdx)
{
const ImageType imageType = imageParametersArray[imageTypeNdx].imageType;
for (deInt32 imageSizeNdx = 0; imageSizeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray[imageTypeNdx].imageSizes); ++imageSizeNdx)
{
const tcu::UVec3 imageSize = imageParametersArray[imageTypeNdx].imageSizes[imageSizeNdx];
-
- std::ostringstream stream;
+ std::ostringstream stream;
stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();
- formatGroup->addChild(new ImageSparseBindingCase(testCtx, stream.str(), "", imageType, imageSize, format));
+ formatGroup->addChild(new ImageSparseBindingCase(testCtx, stream.str(), "", imageType, imageSize, format, useDeviceGroup));
}
imageTypeGroup->addChild(formatGroup.release());
}
return testGroup.release();
}
+tcu::TestCaseGroup* createImageSparseBindingTests(tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_binding", "Image Sparse Binding"));
+ return createImageSparseBindingTestsCommon(testCtx, testGroup);
+}
+
+tcu::TestCaseGroup* createDeviceGroupImageSparseBindingTests(tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_image_sparse_binding", "Device Group Image Sparse Binding"));
+ return createImageSparseBindingTestsCommon(testCtx, testGroup, true);
+}
+
} // sparse
} // vkt
{
tcu::TestCaseGroup* createImageSparseBindingTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createDeviceGroupImageSparseBindingTests(tcu::TestContext& testCtx);
} // sparse
} // vkt
const ImageType imageType,
const tcu::UVec3& imageSize,
const tcu::TextureFormat& format,
- const glu::GLSLVersion glslVersion);
+ const glu::GLSLVersion glslVersion,
+ const bool useDeviceGroups);
void initPrograms (SourceCollections& sourceCollections) const;
TestInstance* createInstance (Context& context) const;
private:
+ const bool m_useDeviceGroups;
const ImageType m_imageType;
const tcu::UVec3 m_imageSize;
const tcu::TextureFormat m_format;
const ImageType imageType,
const tcu::UVec3& imageSize,
const tcu::TextureFormat& format,
- const glu::GLSLVersion glslVersion)
+ const glu::GLSLVersion glslVersion,
+ const bool useDeviceGroups)
: TestCase (testCtx, name, description)
+ , m_useDeviceGroups (useDeviceGroups)
, m_imageType (imageType)
, m_imageSize (imageSize)
, m_format (format)
ImageSparseResidencyInstance(Context& context,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format);
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups);
+
tcu::TestStatus iterate (void);
private:
+ const bool m_useDeviceGroups;
const ImageType m_imageType;
const tcu::UVec3 m_imageSize;
const tcu::TextureFormat m_format;
ImageSparseResidencyInstance::ImageSparseResidencyInstance (Context& context,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format)
- : SparseResourcesBaseInstance (context)
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups)
+ : SparseResourcesBaseInstance (context, useDeviceGroups)
+ , m_useDeviceGroups (useDeviceGroups)
, m_imageType (imageType)
, m_imageSize (imageSize)
, m_format (format)
tcu::TestStatus ImageSparseResidencyInstance::iterate (void)
{
const InstanceInterface& instance = m_context.getInstanceInterface();
- const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
- const VkPhysicalDeviceProperties physicalDeviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
- VkImageCreateInfo imageCreateInfo;
- VkSparseImageMemoryRequirements aspectRequirements;
- VkExtent3D imageGranularity;
- std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
-
- // Check if image size does not exceed device limits
- if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize))
- TCU_THROW(NotSupportedError, "Image size not supported for device");
-
- // Check if device supports sparse operations for image type
- if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType))
- TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported");
-
- imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- imageCreateInfo.pNext = DE_NULL;
- imageCreateInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
- imageCreateInfo.imageType = mapImageType(m_imageType);
- imageCreateInfo.format = mapTextureFormat(m_format);
- imageCreateInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
- imageCreateInfo.mipLevels = 1u;
- imageCreateInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
- imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
- imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
- imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
- VK_IMAGE_USAGE_STORAGE_BIT;
- imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- imageCreateInfo.queueFamilyIndexCount = 0u;
- imageCreateInfo.pQueueFamilyIndices = DE_NULL;
-
- if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
- {
- imageCreateInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
- }
-
- // Check if device supports sparse operations for image format
- if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageCreateInfo))
- TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
{
// Create logical device supporting both sparse and compute queues
createDeviceSupportingQueues(queueRequirements);
}
+ VkImageCreateInfo imageCreateInfo;
+ VkSparseImageMemoryRequirements aspectRequirements;
+ VkExtent3D imageGranularity;
+ std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
+
const DeviceInterface& deviceInterface = getDeviceInterface();
const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
- // Create sparse image
- const Unique<VkImage> sparseImage(createImage(deviceInterface, getDevice(), &imageCreateInfo));
+ // Go through all physical devices
+ for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
+ {
+ const deUint32 firstDeviceID = physDevID;
+ const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
+
+ const VkPhysicalDevice physicalDevice = getPhysicalDevice(firstDeviceID);
+ const VkPhysicalDeviceProperties physicalDeviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
+
+ // Check if image size does not exceed device limits
+ if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize))
+ TCU_THROW(NotSupportedError, "Image size not supported for device");
+
+ // Check if device supports sparse operations for image type
+ if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType))
+ TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported");
+
+ imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ imageCreateInfo.pNext = DE_NULL;
+ imageCreateInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
+ imageCreateInfo.imageType = mapImageType(m_imageType);
+ imageCreateInfo.format = mapTextureFormat(m_format);
+ imageCreateInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
+ imageCreateInfo.mipLevels = 1u;
+ imageCreateInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
+ imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+ imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_STORAGE_BIT;
+ imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ imageCreateInfo.queueFamilyIndexCount = 0u;
+ imageCreateInfo.pQueueFamilyIndices = DE_NULL;
+
+ if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
+ {
+ imageCreateInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+ }
+
+ // Check if device supports sparse operations for image format
+ if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageCreateInfo))
+ TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
- // Create sparse image memory bind semaphore
- const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+ // Create sparse image
+ const Unique<VkImage> sparseImage(createImage(deviceInterface, getDevice(), &imageCreateInfo));
- {
- // Get image general memory requirements
- const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *sparseImage);
+ // Create sparse image memory bind semaphore
+ const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+
+ {
+ // Get image general memory requirements
+ const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *sparseImage);
- if (imageMemoryRequirements.size > physicalDeviceProperties.limits.sparseAddressSpaceSize)
- TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
+ if (imageMemoryRequirements.size > physicalDeviceProperties.limits.sparseAddressSpaceSize)
+ TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
- DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
+ DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
- // Get sparse image sparse memory requirements
- const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *sparseImage);
+ // Get sparse image sparse memory requirements
+ const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *sparseImage);
- DE_ASSERT(sparseMemoryRequirements.size() != 0);
+ DE_ASSERT(sparseMemoryRequirements.size() != 0);
- const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
- const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
+ const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+ const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
- if (colorAspectIndex == NO_MATCH_FOUND)
- TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
+ if (colorAspectIndex == NO_MATCH_FOUND)
+ TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
- aspectRequirements = sparseMemoryRequirements[colorAspectIndex];
- imageGranularity = aspectRequirements.formatProperties.imageGranularity;
+ aspectRequirements = sparseMemoryRequirements[colorAspectIndex];
+ imageGranularity = aspectRequirements.formatProperties.imageGranularity;
- const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;
+ const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;
- DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
+ DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
- std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
- std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds;
+ std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
+ std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds;
- const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);
+ const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);
- if (memoryType == NO_MATCH_FOUND)
- return tcu::TestStatus::fail("No matching memory type found");
+ if (memoryType == NO_MATCH_FOUND)
+ return tcu::TestStatus::fail("No matching memory type found");
- // Bind device memory for each aspect
- for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx)
- {
- for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
+ // Bind device memory for each aspect
+ for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx)
{
- const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx };
- const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, mipLevelNdx);
- const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity);
- const tcu::UVec3 lastBlockExtent = tcu::UVec3(mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width,
- mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height,
- mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth);
- for (deUint32 z = 0; z < numSparseBinds.z(); ++z)
- for (deUint32 y = 0; y < numSparseBinds.y(); ++y)
- for (deUint32 x = 0; x < numSparseBinds.x(); ++x)
+ for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
{
- const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z();
-
- if (linearIndex % 2u == 1u)
+ const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx };
+ const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, mipLevelNdx);
+ const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity);
+ const tcu::UVec3 lastBlockExtent = tcu::UVec3(mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width,
+ mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height,
+ mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth);
+ for (deUint32 z = 0; z < numSparseBinds.z(); ++z)
+ for (deUint32 y = 0; y < numSparseBinds.y(); ++y)
+ for (deUint32 x = 0; x < numSparseBinds.x(); ++x)
{
- continue;
+ const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z();
+
+ if (linearIndex % 2u == 1u)
+ {
+ continue;
+ }
+
+ VkOffset3D offset;
+ offset.x = x*imageGranularity.width;
+ offset.y = y*imageGranularity.height;
+ offset.z = z*imageGranularity.depth;
+
+ VkExtent3D extent;
+ extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width;
+ extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height;
+ extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth;
+
+ const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
+ imageMemoryRequirements.alignment, memoryType, subresource, offset, extent);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageResidencyMemoryBinds.push_back(imageMemoryBind);
}
+ }
- VkOffset3D offset;
- offset.x = x*imageGranularity.width;
- offset.y = y*imageGranularity.height;
- offset.z = z*imageGranularity.depth;
+ if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels)
+ {
+ const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
- VkExtent3D extent;
- extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width;
- extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height;
- extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth;
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ }
- const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
- imageMemoryRequirements.alignment, memoryType, subresource, offset, extent);
+ // Metadata
+ if (metadataAspectIndex != NO_MATCH_FOUND)
+ {
+ const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
+
+ if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+ {
+ const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ metadataAspectRequirements.imageMipTailSize, memoryType,
+ metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride,
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT);
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
- imageResidencyMemoryBinds.push_back(imageMemoryBind);
+ imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ }
}
}
- if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels)
+ if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels)
{
const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
+ aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
{
const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
- if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+ if ((metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
{
const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- metadataAspectRequirements.imageMipTailSize, memoryType,
- metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride,
+ metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset,
VK_SPARSE_MEMORY_BIND_METADATA_BIT);
deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
}
}
- }
-
- if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels)
- {
- const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType;
+ DE_NULL, //const void* pNext;
+ firstDeviceID, //deUint32 resourceDeviceIndex;
+ secondDeviceID, //deUint32 memoryDeviceIndex;
+ };
- imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
- }
+ VkBindSparseInfo bindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
+ m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext;
+ 0u, //deUint32 waitSemaphoreCount;
+ DE_NULL, //const VkSemaphore* pWaitSemaphores;
+ 0u, //deUint32 bufferBindCount;
+ DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ 0u, //deUint32 imageOpaqueBindCount;
+ DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ 0u, //deUint32 imageBindCount;
+ DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
+ 1u, //deUint32 signalSemaphoreCount;
+ &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
+ };
+
+ VkSparseImageMemoryBindInfo imageResidencyBindInfo;
+ VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;
+
+ if (imageResidencyMemoryBinds.size() > 0)
+ {
+ imageResidencyBindInfo.image = *sparseImage;
+ imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
+ imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0];
- // Metadata
- if (metadataAspectIndex != NO_MATCH_FOUND)
- {
- const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
+ bindSparseInfo.imageBindCount = 1u;
+ bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
+ }
- if ((metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+ if (imageMipTailMemoryBinds.size() > 0)
{
- const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset,
- VK_SPARSE_MEMORY_BIND_METADATA_BIT);
-
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ imageMipTailBindInfo.image = *sparseImage;
+ imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size());
+ imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0];
- imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ bindSparseInfo.imageOpaqueBindCount = 1u;
+ bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
}
- }
- VkBindSparseInfo bindSparseInfo =
- {
- VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
- DE_NULL, //const void* pNext;
- 0u, //deUint32 waitSemaphoreCount;
- DE_NULL, //const VkSemaphore* pWaitSemaphores;
- 0u, //deUint32 bufferBindCount;
- DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
- 0u, //deUint32 imageOpaqueBindCount;
- DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
- 0u, //deUint32 imageBindCount;
- DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
- 1u, //deUint32 signalSemaphoreCount;
- &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
- };
-
- VkSparseImageMemoryBindInfo imageResidencyBindInfo;
- VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;
-
- if (imageResidencyMemoryBinds.size() > 0)
- {
- imageResidencyBindInfo.image = *sparseImage;
- imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
- imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0];
-
- bindSparseInfo.imageBindCount = 1u;
- bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
+ // Submit sparse bind commands for execution
+ VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
}
- if (imageMipTailMemoryBinds.size() > 0)
- {
- imageMipTailBindInfo.image = *sparseImage;
- imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size());
- imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0];
-
- bindSparseInfo.imageOpaqueBindCount = 1u;
- bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
- }
+ // Create command buffer for compute and transfer operations
+ const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+ const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
- // Submit sparse bind commands for execution
- VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
- }
+ // Start recording commands
+ beginCommandBuffer(deviceInterface, *commandBuffer);
- // Create command buffer for compute and transfer oparations
- const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
- const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+ // Create descriptor set layout
+ const Unique<VkDescriptorSetLayout> descriptorSetLayout(
+ DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
+ .build(deviceInterface, getDevice()));
- // Start recording commands
- beginCommandBuffer(deviceInterface, *commandBuffer);
+ // Create and bind compute pipeline
+ const Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
+ const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
+ const Unique<VkPipeline> computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
- // Create descriptor set layout
- const Unique<VkDescriptorSetLayout> descriptorSetLayout(
- DescriptorSetLayoutBuilder()
- .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
- .build(deviceInterface, getDevice()));
+ deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
- // Create and bind compute pipeline
- const Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
- const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
- const Unique<VkPipeline> computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
+ // Create and bind descriptor set
+ const Unique<VkDescriptorPool> descriptorPool(
+ DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u)
+ .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
- deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
+ const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
- // Create and bind descriptor set
- const Unique<VkDescriptorPool> descriptorPool(
- DescriptorPoolBuilder()
- .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u)
- .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+ const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
+ const Unique<VkImageView> imageView(makeImageView(deviceInterface, getDevice(), *sparseImage, mapImageViewType(m_imageType), mapTextureFormat(m_format), subresourceRange));
+ const VkDescriptorImageInfo sparseImageInfo = makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL);
- const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
+ DescriptorSetUpdateBuilder()
+ .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo)
+ .update(deviceInterface, getDevice());
- const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
- const Unique<VkImageView> imageView(makeImageView(deviceInterface, getDevice(), *sparseImage, mapImageViewType(m_imageType), mapTextureFormat(m_format), subresourceRange));
- const VkDescriptorImageInfo sparseImageInfo = makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL);
+ deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
- DescriptorSetUpdateBuilder()
- .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo)
- .update(deviceInterface, getDevice());
+ {
+ const VkImageMemoryBarrier sparseImageLayoutChangeBarrier = makeImageMemoryBarrier
+ (
+ 0u,
+ VK_ACCESS_SHADER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_GENERAL,
+ sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+ sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+ *sparseImage,
+ subresourceRange
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageLayoutChangeBarrier);
+ }
- deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
+ const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize);
- {
- const VkImageMemoryBarrier sparseImageLayoutChangeBarrier = makeImageMemoryBarrier
- (
- 0u,
- VK_ACCESS_SHADER_WRITE_BIT,
- VK_IMAGE_LAYOUT_UNDEFINED,
- VK_IMAGE_LAYOUT_GENERAL,
- sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
- sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
- *sparseImage,
- subresourceRange
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageLayoutChangeBarrier);
- }
+ {
+ const tcu::UVec3 workGroupSize = computeWorkGroupSize(gridSize);
- const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize);
+ const deUint32 xWorkGroupCount = gridSize.x() / workGroupSize.x() + (gridSize.x() % workGroupSize.x() ? 1u : 0u);
+ const deUint32 yWorkGroupCount = gridSize.y() / workGroupSize.y() + (gridSize.y() % workGroupSize.y() ? 1u : 0u);
+ const deUint32 zWorkGroupCount = gridSize.z() / workGroupSize.z() + (gridSize.z() % workGroupSize.z() ? 1u : 0u);
- {
- const tcu::UVec3 workGroupSize = computeWorkGroupSize(gridSize);
+ const tcu::UVec3 maxComputeWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u);
- const deUint32 xWorkGroupCount = gridSize.x() / workGroupSize.x() + (gridSize.x() % workGroupSize.x() ? 1u : 0u);
- const deUint32 yWorkGroupCount = gridSize.y() / workGroupSize.y() + (gridSize.y() % workGroupSize.y() ? 1u : 0u);
- const deUint32 zWorkGroupCount = gridSize.z() / workGroupSize.z() + (gridSize.z() % workGroupSize.z() ? 1u : 0u);
+ if (maxComputeWorkGroupCount.x() < xWorkGroupCount ||
+ maxComputeWorkGroupCount.y() < yWorkGroupCount ||
+ maxComputeWorkGroupCount.z() < zWorkGroupCount)
+ {
+ TCU_THROW(NotSupportedError, "Image size is not supported");
+ }
- const tcu::UVec3 maxComputeWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u);
+ deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
+ }
- if (maxComputeWorkGroupCount.x() < xWorkGroupCount ||
- maxComputeWorkGroupCount.y() < yWorkGroupCount ||
- maxComputeWorkGroupCount.z() < zWorkGroupCount)
{
- TCU_THROW(NotSupportedError, "Image size is not supported");
+ const VkImageMemoryBarrier sparseImageTrasferBarrier = makeImageMemoryBarrier
+ (
+ VK_ACCESS_SHADER_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_GENERAL,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ *sparseImage,
+ subresourceRange
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageTrasferBarrier);
}
- deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
- }
+ const deUint32 imageSizeInBytes = getNumPixels(m_imageType, m_imageSize) * tcu::getPixelSize(m_format);
+ const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+ const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+ const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
- {
- const VkImageMemoryBarrier sparseImageTrasferBarrier = makeImageMemoryBarrier
- (
- VK_ACCESS_SHADER_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- VK_IMAGE_LAYOUT_GENERAL,
- VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
- *sparseImage,
- subresourceRange
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageTrasferBarrier);
- }
-
- const deUint32 imageSizeInBytes = getNumPixels(m_imageType, m_imageSize) * tcu::getPixelSize(m_format);
- const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
- const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
- const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
-
- {
- const VkBufferImageCopy bufferImageCopy = makeBufferImageCopy(imageCreateInfo.extent, imageCreateInfo.arrayLayers);
-
- deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *sparseImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, 1u, &bufferImageCopy);
- }
+ {
+ const VkBufferImageCopy bufferImageCopy = makeBufferImageCopy(imageCreateInfo.extent, imageCreateInfo.arrayLayers);
- {
- const VkBufferMemoryBarrier outputBufferHostReadBarrier = makeBufferMemoryBarrier
- (
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_HOST_READ_BIT,
- *outputBuffer,
- 0u,
- imageSizeInBytes
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferHostReadBarrier, 0u, DE_NULL);
- }
+ deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *sparseImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, 1u, &bufferImageCopy);
+ }
- // End recording commands
- endCommandBuffer(deviceInterface, *commandBuffer);
+ {
+ const VkBufferMemoryBarrier outputBufferHostReadBarrier = makeBufferMemoryBarrier
+ (
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ *outputBuffer,
+ 0u,
+ imageSizeInBytes
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferHostReadBarrier, 0u, DE_NULL);
+ }
- // The stage at which execution is going to wait for finish of sparse binding operations
- const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
+ // End recording commands
+ endCommandBuffer(deviceInterface, *commandBuffer);
- // Submit commands for execution and wait for completion
- submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits);
+ // The stage at which execution is going to wait for finish of sparse binding operations
+ const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };
- // Retrieve data from buffer to host memory
- invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
+ // Submit commands for execution and wait for completion
+ submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits,
+ 0, DE_NULL, m_useDeviceGroups, firstDeviceID);
- const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
- const tcu::ConstPixelBufferAccess pixelBuffer = tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData);
+ // Retrieve data from buffer to host memory
+ invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
- // Wait for sparse queue to become idle
- deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+ const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+ const tcu::ConstPixelBufferAccess pixelBuffer = tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData);
- // Validate results
- if( aspectRequirements.imageMipTailFirstLod > 0u )
- {
- const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, 0u);
- const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity);
- const tcu::UVec3 lastBlockExtent = tcu::UVec3( mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width,
- mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height,
- mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth);
+ // Wait for sparse queue to become idle
+ // TODO(vsk): this queueWaitIdle reportedly fails -- investigate before relying on it
+ deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
- for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx)
+ // Validate results
+ if( aspectRequirements.imageMipTailFirstLod > 0u )
{
- for (deUint32 z = 0; z < numSparseBinds.z(); ++z)
- for (deUint32 y = 0; y < numSparseBinds.y(); ++y)
- for (deUint32 x = 0; x < numSparseBinds.x(); ++x)
+ const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, 0u);
+ const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity);
+ const tcu::UVec3 lastBlockExtent = tcu::UVec3( mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width,
+ mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height,
+ mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth);
+
+ for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx)
{
- VkExtent3D offset;
- offset.width = x*imageGranularity.width;
- offset.height = y*imageGranularity.height;
- offset.depth = z*imageGranularity.depth + layerNdx*numSparseBinds.z()*imageGranularity.depth;
+ for (deUint32 z = 0; z < numSparseBinds.z(); ++z)
+ for (deUint32 y = 0; y < numSparseBinds.y(); ++y)
+ for (deUint32 x = 0; x < numSparseBinds.x(); ++x)
+ {
+ VkExtent3D offset;
+ offset.width = x*imageGranularity.width;
+ offset.height = y*imageGranularity.height;
+ offset.depth = z*imageGranularity.depth + layerNdx*numSparseBinds.z()*imageGranularity.depth;
- VkExtent3D extent;
- extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width;
- extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height;
- extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth;
+ VkExtent3D extent;
+ extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width;
+ extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height;
+ extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth;
- const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z();
+ const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z();
- if (linearIndex % 2u == 0u)
- {
- for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ)
- for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY)
- for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX)
+ if (linearIndex % 2u == 0u)
{
- const tcu::UVec4 referenceValue = tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u);
- const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
-
- if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
- return tcu::TestStatus::fail("Failed");
+ for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ)
+ for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY)
+ for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX)
+ {
+ const tcu::UVec4 referenceValue = tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u);
+ const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
+
+ if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
+ return tcu::TestStatus::fail("Failed");
+ }
}
- }
- else if (physicalDeviceProperties.sparseProperties.residencyNonResidentStrict)
- {
- for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ)
- for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY)
- for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX)
+ else if (physicalDeviceProperties.sparseProperties.residencyNonResidentStrict)
{
- const tcu::UVec4 referenceValue = tcu::UVec4(0u, 0u, 0u, 0u);
- const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
-
- if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
- return tcu::TestStatus::fail("Failed");
+ for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ)
+ for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY)
+ for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX)
+ {
+ const tcu::UVec4 referenceValue = tcu::UVec4(0u, 0u, 0u, 0u);
+ const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
+
+ if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
+ return tcu::TestStatus::fail("Failed");
+ }
}
}
}
}
- }
- else
- {
- const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, 0u);
-
- for (deUint32 offsetZ = 0u; offsetZ < mipExtent.depth * imageCreateInfo.arrayLayers; ++offsetZ)
- for (deUint32 offsetY = 0u; offsetY < mipExtent.height; ++offsetY)
- for (deUint32 offsetX = 0u; offsetX < mipExtent.width; ++offsetX)
+ else
{
- const tcu::UVec4 referenceValue = tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u);
- const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
+ const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, 0u);
+
+ for (deUint32 offsetZ = 0u; offsetZ < mipExtent.depth * imageCreateInfo.arrayLayers; ++offsetZ)
+ for (deUint32 offsetY = 0u; offsetY < mipExtent.height; ++offsetY)
+ for (deUint32 offsetX = 0u; offsetX < mipExtent.width; ++offsetX)
+ {
+ const tcu::UVec4 referenceValue = tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u);
+ const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);
- if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
- return tcu::TestStatus::fail("Failed");
+ if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
+ return tcu::TestStatus::fail("Failed");
+ }
}
}
TestInstance* ImageSparseResidencyCase::createInstance (Context& context) const
{
- return new ImageSparseResidencyInstance(context, m_imageType, m_imageSize, m_format);
+ return new ImageSparseResidencyInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
}
} // anonymous ns
-tcu::TestCaseGroup* createImageSparseResidencyTests (tcu::TestContext& testCtx)
+tcu::TestCaseGroup* createImageSparseResidencyTestsCommon (tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
{
- de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_residency", "Buffer Sparse Residency"));
-
static const deUint32 sizeCountPerImageType = 3u;
struct ImageParameters
std::ostringstream stream;
stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();
- formatGroup->addChild(new ImageSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440));
+ formatGroup->addChild(new ImageSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440, useDeviceGroup));
}
imageTypeGroup->addChild(formatGroup.release());
}
return testGroup.release();
}
+tcu::TestCaseGroup* createImageSparseResidencyTests (tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_residency", "Buffer Sparse Residency"));
+ return createImageSparseResidencyTestsCommon(testCtx, testGroup);
+}
+
+tcu::TestCaseGroup* createDeviceGroupImageSparseResidencyTests (tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_image_sparse_residency", "Buffer Sparse Residency"));
+ return createImageSparseResidencyTestsCommon(testCtx, testGroup, true);
+}
+
} // sparse
} // vkt
{
tcu::TestCaseGroup* createImageSparseResidencyTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createDeviceGroupImageSparseResidencyTests(tcu::TestContext& testCtx);
} // sparse
} // vkt
const std::string& description,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format);
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups);
+
TestInstance* createInstance (Context& context) const;
private:
+ const bool m_useDeviceGroups;
const ImageType m_imageType;
const tcu::UVec3 m_imageSize;
const tcu::TextureFormat m_format;
const std::string& description,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format)
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups)
: TestCase (testCtx, name, description)
+ , m_useDeviceGroups (useDeviceGroups)
, m_imageType (imageType)
, m_imageSize (imageSize)
, m_format (format)
MipmapSparseResidencyInstance (Context& context,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format);
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups);
+
tcu::TestStatus iterate (void);
private:
-
+ const bool m_useDeviceGroups;
const ImageType m_imageType;
const tcu::UVec3 m_imageSize;
const tcu::TextureFormat m_format;
MipmapSparseResidencyInstance::MipmapSparseResidencyInstance (Context& context,
const ImageType imageType,
const tcu::UVec3& imageSize,
- const tcu::TextureFormat& format)
- : SparseResourcesBaseInstance (context)
+ const tcu::TextureFormat& format,
+ const bool useDeviceGroups)
+ : SparseResourcesBaseInstance (context, useDeviceGroups)
+ , m_useDeviceGroups (useDeviceGroups)
, m_imageType (imageType)
, m_imageSize (imageSize)
, m_format (format)
tcu::TestStatus MipmapSparseResidencyInstance::iterate (void)
{
const InstanceInterface& instance = m_context.getInstanceInterface();
- const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
+ {
+ // Create logical device supporting both sparse and compute operations
+ QueueRequirementsVec queueRequirements;
+ queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
+ queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
+
+ createDeviceSupportingQueues(queueRequirements);
+ }
+
+ const VkPhysicalDevice physicalDevice = getPhysicalDevice();
VkImageCreateInfo imageSparseInfo;
std::vector<DeviceMemorySp> deviceMemUniquePtrVec;
if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType))
TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported");
- imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- imageSparseInfo.pNext = DE_NULL;
- imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
- imageSparseInfo.imageType = mapImageType(m_imageType);
- imageSparseInfo.format = mapTextureFormat(m_format);
- imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
- imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
- imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;
- imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
- imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT |
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
- imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- imageSparseInfo.queueFamilyIndexCount = 0u;
- imageSparseInfo.pQueueFamilyIndices = DE_NULL;
-
- if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
- {
- imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
- }
+ const DeviceInterface& deviceInterface = getDeviceInterface();
+ const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
+ const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
+ // Go through all physical devices
+ for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
{
- VkImageFormatProperties imageFormatProperties;
- instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
- imageSparseInfo.format,
- imageSparseInfo.imageType,
- imageSparseInfo.tiling,
- imageSparseInfo.usage,
- imageSparseInfo.flags,
- &imageFormatProperties);
-
- imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
- }
+ const deUint32 firstDeviceID = physDevID;
+ const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;
+
+ imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ imageSparseInfo.pNext = DE_NULL;
+ imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
+ imageSparseInfo.imageType = mapImageType(m_imageType);
+ imageSparseInfo.format = mapTextureFormat(m_format);
+ imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize));
+ imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);
+ imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+ imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ imageSparseInfo.queueFamilyIndexCount = 0u;
+ imageSparseInfo.pQueueFamilyIndices = DE_NULL;
+
+ if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
+ {
+ imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+ }
- // Check if device supports sparse operations for image format
- if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
- TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
+ {
+ VkImageFormatProperties imageFormatProperties;
+ instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
+ imageSparseInfo.format,
+ imageSparseInfo.imageType,
+ imageSparseInfo.tiling,
+ imageSparseInfo.usage,
+ imageSparseInfo.flags,
+ &imageFormatProperties);
+
+ imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
+ }
- {
- // Create logical device supporting both sparse and compute operations
- QueueRequirementsVec queueRequirements;
- queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
- queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));
+ // Check if device supports sparse operations for image format
+ if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
+ TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
- createDeviceSupportingQueues(queueRequirements);
- }
+ // Create sparse image
+ const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));
- const DeviceInterface& deviceInterface = getDeviceInterface();
- const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
- const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
+ // Create sparse image memory bind semaphore
+ const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
- // Create sparse image
- const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));
+ {
+ // Get sparse image general memory requirements
+ const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
- // Create sparse image memory bind semaphore
- const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));
+ // Check if required image memory size does not exceed device limits
+ if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
+ TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
- {
- // Get sparse image general memory requirements
- const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
+ DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
- // Check if required image memory size does not exceed device limits
- if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
- TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");
+ // Get sparse image sparse memory requirements
+ const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
- DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);
+ DE_ASSERT(sparseMemoryRequirements.size() != 0);
- // Get sparse image sparse memory requirements
- const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
+ const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+ const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
- DE_ASSERT(sparseMemoryRequirements.size() != 0);
+ if (colorAspectIndex == NO_MATCH_FOUND)
+ TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
- const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
- const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
+ const VkSparseImageMemoryRequirements aspectRequirements = sparseMemoryRequirements[colorAspectIndex];
+ const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;
+ const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
- if (colorAspectIndex == NO_MATCH_FOUND)
- TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
+ DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
- const VkSparseImageMemoryRequirements aspectRequirements = sparseMemoryRequirements[colorAspectIndex];
- const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask;
- const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
+ std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
+ std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds;
- DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);
+ const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);
- std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
- std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds;
+ if (memoryType == NO_MATCH_FOUND)
+ return tcu::TestStatus::fail("No matching memory type found");
- const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);
+ // Bind memory for each layer
+ for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
+ {
+ for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
+ {
+ const VkExtent3D mipExtent = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx);
+ const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity);
+ const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
+ const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx };
- if (memoryType == NO_MATCH_FOUND)
- return tcu::TestStatus::fail("No matching memory type found");
+ const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
+ imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);
- // Bind memory for each layer
- for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
- {
- for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
- {
- const VkExtent3D mipExtent = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx);
- const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity);
- const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
- const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx };
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
- const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
- imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);
+ imageResidencyMemoryBinds.push_back(imageMemoryBind);
+ }
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
+ {
+ const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
- imageResidencyMemoryBinds.push_back(imageMemoryBind);
+ imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ }
+
+ // Metadata
+ if (metadataAspectIndex != NO_MATCH_FOUND)
+ {
+ const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
+
+ if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+ {
+ const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ metadataAspectRequirements.imageMipTailSize, memoryType,
+ metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride,
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ }
+ }
}
- if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
+ if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
{
const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);
+ aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
{
const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
- if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+ if (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
{
const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- metadataAspectRequirements.imageMipTailSize, memoryType,
- metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride,
+ metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset,
VK_SPARSE_MEMORY_BIND_METADATA_BIT);
deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
}
}
- }
-
- if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
- {
- const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);
-
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
-
- imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
- }
- // Metadata
- if (metadataAspectIndex != NO_MATCH_FOUND)
- {
- const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
-
- if (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
+ const VkDeviceGroupBindSparseInfoKHR devGroupBindSparseInfo =
{
- const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
- metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset,
- VK_SPARSE_MEMORY_BIND_METADATA_BIT);
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, //VkStructureType sType;
+ DE_NULL, //const void* pNext;
+ firstDeviceID, //deUint32 resourceDeviceIndex;
+ secondDeviceID, //deUint32 memoryDeviceIndex;
+ };
- deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+ VkBindSparseInfo bindSparseInfo =
+ {
+ VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
+ m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext;
+ 0u, //deUint32 waitSemaphoreCount;
+ DE_NULL, //const VkSemaphore* pWaitSemaphores;
+ 0u, //deUint32 bufferBindCount;
+ DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
+ 0u, //deUint32 imageOpaqueBindCount;
+ DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+ 0u, //deUint32 imageBindCount;
+ DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
+ 1u, //deUint32 signalSemaphoreCount;
+ &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
+ };
+
+ VkSparseImageMemoryBindInfo imageResidencyBindInfo;
+ VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;
+
+ if (imageResidencyMemoryBinds.size() > 0)
+ {
+ imageResidencyBindInfo.image = *imageSparse;
+ imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
+ imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0];
- imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ bindSparseInfo.imageBindCount = 1u;
+ bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
}
- }
- VkBindSparseInfo bindSparseInfo =
- {
- VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
- DE_NULL, //const void* pNext;
- 0u, //deUint32 waitSemaphoreCount;
- DE_NULL, //const VkSemaphore* pWaitSemaphores;
- 0u, //deUint32 bufferBindCount;
- DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
- 0u, //deUint32 imageOpaqueBindCount;
- DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
- 0u, //deUint32 imageBindCount;
- DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
- 1u, //deUint32 signalSemaphoreCount;
- &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
- };
-
- VkSparseImageMemoryBindInfo imageResidencyBindInfo;
- VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;
-
- if (imageResidencyMemoryBinds.size() > 0)
- {
- imageResidencyBindInfo.image = *imageSparse;
- imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
- imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0];
-
- bindSparseInfo.imageBindCount = 1u;
- bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
- }
+ if (imageMipTailMemoryBinds.size() > 0)
+ {
+ imageMipTailBindInfo.image = *imageSparse;
+ imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size());
+ imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0];
- if (imageMipTailMemoryBinds.size() > 0)
- {
- imageMipTailBindInfo.image = *imageSparse;
- imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size());
- imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0];
+ bindSparseInfo.imageOpaqueBindCount = 1u;
+ bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
+ }
- bindSparseInfo.imageOpaqueBindCount = 1u;
- bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
+ // Submit sparse bind commands for execution
+ VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
}
- // Submit sparse bind commands for execution
- VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
- }
-
- // Create command buffer for compute and transfer oparations
- const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
- const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
+ // Create command buffer for compute and transfer operations
+ const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
+ const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
- std::vector <VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
+ std::vector <VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);
- {
- deUint32 bufferOffset = 0;
- for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; mipmapNdx++)
{
- bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, static_cast<VkDeviceSize>(bufferOffset));
- bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+ deUint32 bufferOffset = 0;
+ for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; mipmapNdx++)
+ {
+ bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, static_cast<VkDeviceSize>(bufferOffset));
+ bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+ }
}
- }
- // Start recording commands
- beginCommandBuffer(deviceInterface, *commandBuffer);
+ // Start recording commands
+ beginCommandBuffer(deviceInterface, *commandBuffer);
- const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
- const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
- const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
- const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
+ const deUint32 imageSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
+ const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+ const Unique<VkBuffer> inputBuffer (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
+ const de::UniquePtr<Allocation> inputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));
- std::vector<deUint8> referenceData(imageSizeInBytes);
+ std::vector<deUint8> referenceData(imageSizeInBytes);
- const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
+ const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);
- for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
- {
- referenceData[valueNdx] = static_cast<deUint8>((valueNdx % imageMemoryRequirements.alignment) + 1u);
- }
+ for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
+ {
+ referenceData[valueNdx] = static_cast<deUint8>((valueNdx % imageMemoryRequirements.alignment) + 1u);
+ }
- deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
+ deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);
- flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
+ flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);
- {
- const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
- (
- VK_ACCESS_HOST_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- *inputBuffer,
- 0u,
- imageSizeInBytes
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
- }
+ {
+ const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
+ (
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ *inputBuffer,
+ 0u,
+ imageSizeInBytes
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
+ }
- {
- const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
- (
- 0u,
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_IMAGE_LAYOUT_UNDEFINED,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
- sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
- *imageSparse,
- makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
- }
+ {
+ const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
+ (
+ 0u,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+ sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
+ *imageSparse,
+ makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
+ }
- deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
+ deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
- {
- const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
- (
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_TRANSFER_READ_BIT,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
- *imageSparse,
- makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
- }
+ {
+ const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
+ (
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ *imageSparse,
+ makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
+ }
- const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
- const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
- const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
+ const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+ const Unique<VkBuffer> outputBuffer (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
+ const de::UniquePtr<Allocation> outputBufferAlloc (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));
- deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
+ deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);
- {
- const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
- (
- VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_ACCESS_HOST_READ_BIT,
- *outputBuffer,
- 0u,
- imageSizeInBytes
- );
-
- deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
- }
+ {
+ const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
+ (
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ *outputBuffer,
+ 0u,
+ imageSizeInBytes
+ );
+
+ deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
+ }
- // End recording commands
- endCommandBuffer(deviceInterface, *commandBuffer);
+ // End recording commands
+ endCommandBuffer(deviceInterface, *commandBuffer);
- const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
+ const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };
- // Submit commands for execution and wait for completion
- submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits);
+ // Submit commands for execution and wait for completion
+ submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits,
+ 0, DE_NULL, m_useDeviceGroups, firstDeviceID);
- // Retrieve data from buffer to host memory
- invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
+ // Retrieve data from buffer to host memory
+ invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);
- const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
+ const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());
- // Wait for sparse queue to become idle
- deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
+ // Wait for sparse queue to become idle
+ deviceInterface.queueWaitIdle(sparseQueue.queueHandle);
- for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
- {
- const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx);
- const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipmapNdx].bufferOffset);
+ for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
+ {
+ const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx);
+ const deUint32 bufferOffset = static_cast<deUint32>(bufferImageCopy[mipmapNdx].bufferOffset);
- if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
- return tcu::TestStatus::fail("Failed");
+ if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
+ return tcu::TestStatus::fail("Failed");
+ }
}
-
return tcu::TestStatus::pass("Passed");
}
TestInstance* MipmapSparseResidencyCase::createInstance (Context& context) const
{
- return new MipmapSparseResidencyInstance(context, m_imageType, m_imageSize, m_format);
+ return new MipmapSparseResidencyInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
}
} // anonymous ns
-tcu::TestCaseGroup* createMipmapSparseResidencyTests (tcu::TestContext& testCtx)
+tcu::TestCaseGroup* createMipmapSparseResidencyTestsCommon (tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
{
- de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "mipmap_sparse_residency", "Mipmap Sparse Residency"));
-
static const deUint32 sizeCountPerImageType = 3u;
struct ImageParameters
std::ostringstream stream;
stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();
- formatGroup->addChild(new MipmapSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format));
+ formatGroup->addChild(new MipmapSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format, useDeviceGroup));
}
imageTypeGroup->addChild(formatGroup.release());
}
return testGroup.release();
}
+tcu::TestCaseGroup* createMipmapSparseResidencyTests (tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "mipmap_sparse_residency", "Mipmap Sparse Residency"));
+ return createMipmapSparseResidencyTestsCommon(testCtx, testGroup);
+}
+
+tcu::TestCaseGroup* createDeviceGroupMipmapSparseResidencyTests (tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_mipmap_sparse_residency", "Mipmap Sparse Residency"));
+ return createMipmapSparseResidencyTestsCommon(testCtx, testGroup, true);
+}
+
} // sparse
} // vkt
{
tcu::TestCaseGroup* createMipmapSparseResidencyTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createDeviceGroupMipmapSparseResidencyTests(tcu::TestContext& testCtx);
} // sparse
} // vkt
{
de::MovePtr<tcu::TestCaseGroup> sparseTests (new tcu::TestCaseGroup(testCtx, "sparse_resources", "Sparse Resources Tests"));
- sparseTests->addChild(createSparseBufferTests (testCtx));
- sparseTests->addChild(createImageSparseBindingTests (testCtx));
- sparseTests->addChild(createImageSparseResidencyTests (testCtx));
- sparseTests->addChild(createMipmapSparseResidencyTests (testCtx));
- sparseTests->addChild(createImageSparseMemoryAliasingTests (testCtx));
- sparseTests->addChild(createSparseResourcesShaderIntrinsicsTests(testCtx));
- sparseTests->addChild(createQueueBindSparseTests (testCtx));
+ sparseTests->addChild(createSparseBufferTests (testCtx));
+ sparseTests->addChild(createImageSparseBindingTests (testCtx));
+ sparseTests->addChild(createDeviceGroupImageSparseBindingTests (testCtx));
+ sparseTests->addChild(createImageSparseResidencyTests (testCtx));
+ sparseTests->addChild(createDeviceGroupImageSparseResidencyTests (testCtx));
+ sparseTests->addChild(createMipmapSparseResidencyTests (testCtx));
+ sparseTests->addChild(createDeviceGroupMipmapSparseResidencyTests (testCtx));
+ sparseTests->addChild(createImageSparseMemoryAliasingTests (testCtx));
+ sparseTests->addChild(createDeviceGroupImageSparseMemoryAliasingTests (testCtx));
+ sparseTests->addChild(createSparseResourcesShaderIntrinsicsTests (testCtx));
+ sparseTests->addChild(createQueueBindSparseTests (testCtx));
return sparseTests.release();
}
#include "vktSparseResourcesTestsUtil.hpp"
#include "vkQueryUtil.hpp"
+#include "vkDeviceUtil.hpp"
#include "vkTypeUtil.hpp"
#include "tcuTextureUtil.hpp"
namespace sparse
{
+vk::Move<VkInstance> createInstanceWithExtensions(const vk::PlatformInterface& vkp, const std::vector<std::string> enableExtensions)
+{
+ std::vector<std::string> enableExtensionPtrs (enableExtensions.size());
+ const std::vector<VkExtensionProperties> availableExtensions = enumerateInstanceExtensionProperties(vkp, DE_NULL);
+ for (size_t extensionID = 0; extensionID < enableExtensions.size(); extensionID++)
+ {
+ if (!isExtensionSupported(availableExtensions, RequiredExtension(enableExtensions[extensionID])))
+ TCU_THROW(NotSupportedError, (enableExtensions[extensionID] + " is not supported").c_str());
+ enableExtensionPtrs[extensionID] = enableExtensions[extensionID];
+ }
+
+ return createDefaultInstance(vkp, std::vector<std::string>() /* layers */, enableExtensionPtrs);
+}
+
tcu::UVec3 getShaderGridSize (const ImageType imageType, const tcu::UVec3& imageSize, const deUint32 mipLevel)
{
const deUint32 mipLevelX = std::max(imageSize.x() >> mipLevel, 1u);
{
const VkFramebufferCreateInfo framebufferInfo =
{
- VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- (VkFramebufferCreateFlags)0, // VkFramebufferCreateFlags flags;
- renderPass, // VkRenderPass renderPass;
- attachmentCount, // uint32_t attachmentCount;
- pAttachments, // const VkImageView* pAttachments;
- width, // uint32_t width;
- height, // uint32_t height;
- layers, // uint32_t layers;
+ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ (VkFramebufferCreateFlags)0, // VkFramebufferCreateFlags flags;
+ renderPass, // VkRenderPass renderPass;
+ attachmentCount, // uint32_t attachmentCount;
+ pAttachments, // const VkImageView* pAttachments;
+ width, // uint32_t width;
+ height, // uint32_t height;
+ layers, // uint32_t layers;
};
return createFramebuffer(vk, device, &framebufferInfo);
const VkSemaphore* pWaitSemaphores,
const VkPipelineStageFlags* pWaitDstStageMask,
const deUint32 signalSemaphoreCount,
- const VkSemaphore* pSignalSemaphores)
+ const VkSemaphore* pSignalSemaphores,
+ const bool useDeviceGroups,
+ const deUint32 physicalDeviceID)
{
const VkFenceCreateInfo fenceParams =
{
};
const Unique<VkFence> fence(createFence(vk, device, &fenceParams));
+ const deUint32 deviceMask = 1 << physicalDeviceID;
+ std::vector<deUint32> deviceIndices (waitSemaphoreCount, physicalDeviceID);
+ VkDeviceGroupSubmitInfoKHR deviceGroupSubmitInfo =
+ {
+ VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR, //VkStructureType sType
+ DE_NULL, // const void* pNext
+ waitSemaphoreCount, // uint32_t waitSemaphoreCount
+ deviceIndices.size() ? &deviceIndices[0] : DE_NULL, // const uint32_t* pWaitSemaphoreDeviceIndices
+ 1u, // uint32_t commandBufferCount
+ &deviceMask, // const uint32_t* pCommandBufferDeviceMasks
+ 0u, // uint32_t signalSemaphoreCount
+ DE_NULL, // const uint32_t* pSignalSemaphoreDeviceIndices
+ };
const VkSubmitInfo submitInfo =
{
- VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- waitSemaphoreCount, // deUint32 waitSemaphoreCount;
- pWaitSemaphores, // const VkSemaphore* pWaitSemaphores;
- pWaitDstStageMask, // const VkPipelineStageFlags* pWaitDstStageMask;
- 1u, // deUint32 commandBufferCount;
- &commandBuffer, // const VkCommandBuffer* pCommandBuffers;
- signalSemaphoreCount, // deUint32 signalSemaphoreCount;
- pSignalSemaphores, // const VkSemaphore* pSignalSemaphores;
+ VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
+ useDeviceGroups ? &deviceGroupSubmitInfo : DE_NULL, // const void* pNext;
+ waitSemaphoreCount, // deUint32 waitSemaphoreCount;
+ pWaitSemaphores, // const VkSemaphore* pWaitSemaphores;
+ pWaitDstStageMask, // const VkPipelineStageFlags* pWaitDstStageMask;
+ 1u, // deUint32 commandBufferCount;
+ &commandBuffer, // const VkCommandBuffer* pCommandBuffers;
+ signalSemaphoreCount, // deUint32 signalSemaphoreCount;
+ pSignalSemaphores, // const VkSemaphore* pSignalSemaphores;
};
VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
const std::string& xy,
const std::string& xyz);
+//!< Create instance with specific extensions
+vk::Move<vk::VkInstance> createInstanceWithExtensions (const vk::PlatformInterface& vkp,
+ const std::vector<std::string> enableExtensions);
+
//!< Size used for addressing image in a compute shader
tcu::UVec3 getShaderGridSize (const ImageType imageType,
const tcu::UVec3& imageSize,
const vk::VkSemaphore* pWaitSemaphores = DE_NULL,
const vk::VkPipelineStageFlags* pWaitDstStageMask = DE_NULL,
const deUint32 signalSemaphoreCount = 0,
- const vk::VkSemaphore* pSignalSemaphores = DE_NULL);
+ const vk::VkSemaphore* pSignalSemaphores = DE_NULL,
+ const bool useDeviceGroups = false,
+ const deUint32 physicalDeviceID = 0);
void requireFeatures (const vk::InstanceInterface& vki,
const vk::VkPhysicalDevice physicalDevice,
dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_17
dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_20
dEQP-VK.sparse_resources.buffer.transfer.sparse_binding.buffer_size_2_24
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_10
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_12
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_16
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_17
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_20
+dEQP-VK.sparse_resources.buffer.transfer.device_group_sparse_binding.buffer_size_2_24
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_10
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_12
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_16
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_17
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_20
dEQP-VK.sparse_resources.buffer.ssbo.sparse_binding_aliased.buffer_size_2_24
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_10
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_12
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_16
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_17
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_20
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_binding_aliased.buffer_size_2_24
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_10
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_12
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_16
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_17
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_20
dEQP-VK.sparse_resources.buffer.ssbo.sparse_residency.buffer_size_2_24
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_10
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_12
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_16
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_17
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_20
+dEQP-VK.sparse_resources.buffer.ssbo.device_group_sparse_residency.buffer_size_2_24
dEQP-VK.sparse_resources.buffer.ubo.sparse_binding
dEQP-VK.sparse_resources.buffer.ubo.sparse_binding_aliased
dEQP-VK.sparse_resources.buffer.ubo.sparse_residency
dEQP-VK.sparse_resources.buffer.ubo.sparse_residency_aliased
dEQP-VK.sparse_resources.buffer.ubo.sparse_residency_non_resident_strict
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_binding
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_binding_aliased
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency_aliased
+dEQP-VK.sparse_resources.buffer.ubo.device_group_sparse_residency_non_resident_strict
dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_binding
dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_binding_aliased
dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_residency
dEQP-VK.sparse_resources.buffer.vertex_buffer.sparse_residency_aliased
+dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_binding
+dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_binding_aliased
+dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_residency
+dEQP-VK.sparse_resources.buffer.vertex_buffer.device_group_sparse_residency_aliased
dEQP-VK.sparse_resources.buffer.index_buffer.sparse_binding
dEQP-VK.sparse_resources.buffer.index_buffer.sparse_binding_aliased
dEQP-VK.sparse_resources.buffer.index_buffer.sparse_residency
dEQP-VK.sparse_resources.buffer.index_buffer.sparse_residency_aliased
+dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_binding
+dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_binding_aliased
+dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_residency
+dEQP-VK.sparse_resources.buffer.index_buffer.device_group_sparse_residency_aliased
dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_binding
dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_binding_aliased
dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_residency
dEQP-VK.sparse_resources.buffer.indirect_buffer.sparse_residency_aliased
+dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_binding
+dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_binding_aliased
+dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_residency
+dEQP-VK.sparse_resources.buffer.indirect_buffer.device_group_sparse_residency_aliased
dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.512_1_1
dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.1024_1_1
dEQP-VK.sparse_resources.image_sparse_binding.1d.r32i.11_1_1
dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.256_256_6
dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.128_128_8
dEQP-VK.sparse_resources.image_sparse_binding.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r32i.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r16i.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.r8i.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba32ui.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba16ui.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.512_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.1024_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d.rgba8ui.11_1_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r32i.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r16i.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.r8i.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba32ui.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba16ui.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.512_1_64
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.1024_1_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.1d_array.rgba8ui.11_1_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r32i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r16i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.r8i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba32ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba16ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d.rgba8ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.2d_array.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.3d.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_binding.cube_array.rgba8ui.137_137_3
dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.512_256_1
dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.1024_128_1
dEQP-VK.sparse_resources.image_sparse_residency.2d.r32i.11_137_1
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.512_256_16
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.1024_128_8
dEQP-VK.sparse_resources.image_sparse_residency.3d.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r32i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r16i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.r8i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg32i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg16i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rg8i.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba32ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba16ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d.rgba8ui.11_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rg8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.2d_array.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rg8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rg8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg32i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg16i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rg8i.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.512_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_residency.3d.rgba8ui.11_137_3
dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.512_256_1
dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.1024_128_1
dEQP-VK.sparse_resources.mipmap_sparse_residency.2d.r32i.11_137_1
dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.256_256_16
dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.1024_128_8
dEQP-VK.sparse_resources.mipmap_sparse_residency.3d.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r32i.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r16i.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.r8i.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba32ui.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba16ui.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.1024_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d.rgba8ui.11_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.2d_array.rgba8ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r32i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r16i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.r8i.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba32ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba16ui.11_137_3
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.256_256_16
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.1024_128_8
+dEQP-VK.sparse_resources.device_group_mipmap_sparse_residency.3d.rgba8ui.11_137_3
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.512_256_1
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.128_128_1
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.2d.r32i.503_137_1
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.128_128_8
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.503_137_3
dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r32i.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r16i.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.r8i.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba32ui.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba16ui.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.512_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.503_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d.rgba8ui.11_37_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r32i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r16i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.r8i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba32ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba16ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.512_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.2d_array.rgba8ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r32i.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r16i.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.r8i.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba32ui.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba16ui.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.256_256_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.128_128_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.137_137_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube.rgba8ui.11_11_1
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r32i.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r16i.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.r8i.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba32ui.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba16ui.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.256_256_6
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.137_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.cube_array.rgba8ui.11_11_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r32i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r16i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.r8i.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba32ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba16ui.11_37_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.256_256_16
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.128_128_8
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.503_137_3
+dEQP-VK.sparse_resources.device_group_image_sparse_memory_aliasing.3d.rgba8ui.11_37_3
dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.512_256_1
dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.128_128_1
dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.503_137_1