dEQP-VK.ray_tracing_pipeline.large_shader_set.cpu_ht_max.256
dEQP-VK.ray_tracing_pipeline.large_shader_set.cpu_ht_max.1024
dEQP-VK.ray_tracing_pipeline.large_shader_set.cpu_ht_max.4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_4096_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_16
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_256
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_4096
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_256_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_4096_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_65536_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_4096_4_4
dEQP-VK.ray_tracing_pipeline.callable_shader.rgen_call
dEQP-VK.ray_tracing_pipeline.callable_shader.rgen_call_call
dEQP-VK.ray_tracing_pipeline.callable_shader.hit_call
dEQP-VK.ray_tracing_pipeline.builtin.worldtoobject3x4ext.ahit_aabs
dEQP-VK.ray_tracing_pipeline.builtin.worldtoobject3x4ext.chit_aabs
dEQP-VK.ray_tracing_pipeline.builtin.worldtoobject3x4ext.sect_aabs
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_4096_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_16
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_256
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_4096
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_256_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_4096_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_65536_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_4096_4_4
dEQP-VK.ray_tracing_pipeline.barycentric_coordinates.ahit
dEQP-VK.ray_tracing_pipeline.barycentric_coordinates.chit
dEQP-VK.ray_tracing_pipeline.barrier.ubo.memory_barrier.from_host_to_rgen
dEQP-VK.ray_tracing_pipeline.inside_aabbs.isec.ray_end_outside.scaling_factor_5.rotation_2
dEQP-VK.ray_tracing_pipeline.inside_aabbs.isec.ray_end_outside.scaling_factor_5.rotation_3
dEQP-VK.ray_tracing_pipeline.inside_aabbs.isec.ray_end_outside.scaling_factor_5.rotation_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_65536_4_4
dEQP-VK.ray_tracing_pipeline.acceleration_structures.ray_cull_mask.cpu_built.ahit.4_bits
dEQP-VK.ray_tracing_pipeline.acceleration_structures.ray_cull_mask.cpu_built.ahit.4_bits_reverse
dEQP-VK.ray_tracing_pipeline.acceleration_structures.ray_cull_mask.cpu_built.ahit.16_bits
dEQP-VK.ray_tracing_pipeline.builtin.worldtoobject3x4ext.ahit_aabs
dEQP-VK.ray_tracing_pipeline.builtin.worldtoobject3x4ext.chit_aabs
dEQP-VK.ray_tracing_pipeline.builtin.worldtoobject3x4ext.sect_aabs
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_65536_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_16
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_256
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_4096
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_256_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_4096_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_65536_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_4096_4_4
dEQP-VK.ray_tracing_pipeline.barycentric_coordinates.ahit
dEQP-VK.ray_tracing_pipeline.barycentric_coordinates.chit
dEQP-VK.ray_tracing_pipeline.barrier.ubo.memory_barrier.from_host_to_rgen
virtual ~BottomLevelAccelerationStructureKHR ();
void setBuildType (const VkAccelerationStructureBuildTypeKHR buildType) override;
+ VkAccelerationStructureBuildTypeKHR getBuildType () const override;
void setCreateFlags (const VkAccelerationStructureCreateFlagsKHR createFlags) override;
void setCreateGeneric (bool createGeneric) override;
void setBuildFlags (const VkBuildAccelerationStructureFlagsKHR buildFlags) override;
de::MovePtr<BufferWithMemory> m_vertexBuffer;
de::MovePtr<BufferWithMemory> m_indexBuffer;
de::MovePtr<BufferWithMemory> m_deviceScratchBuffer;
- std::vector<deUint8> m_hostScratchBuffer;
+ de::UniquePtr<std::vector<deUint8>> m_hostScratchBuffer;
Move<VkAccelerationStructureKHR> m_accelerationStructureKHR;
VkBuffer m_indirectBuffer;
VkDeviceSize m_indirectBufferOffset;
std::vector<VkAccelerationStructureBuildRangeInfoKHR>& accelerationStructureBuildRangeInfoKHR,
std::vector<deUint32>& maxPrimitiveCounts,
VkDeviceSize vertexBufferOffset = 0,
- VkDeviceSize indexBufferOffset = 0);
+ VkDeviceSize indexBufferOffset = 0) const;
- virtual BufferWithMemory* getAccelerationStructureBuffer () { return m_accelerationStructureBuffer.get(); }
- virtual BufferWithMemory* getDeviceScratchBuffer () { return m_deviceScratchBuffer.get(); }
- virtual BufferWithMemory* getVertexBuffer () { return m_vertexBuffer.get(); }
- virtual BufferWithMemory* getIndexBuffer () { return m_indexBuffer.get(); }
+ virtual BufferWithMemory* getAccelerationStructureBuffer () const { return m_accelerationStructureBuffer.get(); }
+ virtual BufferWithMemory* getDeviceScratchBuffer () const { return m_deviceScratchBuffer.get(); }
+ virtual std::vector<deUint8>* getHostScratchBuffer () const { return m_hostScratchBuffer.get(); }
+ virtual BufferWithMemory* getVertexBuffer () const { return m_vertexBuffer.get(); }
+ virtual BufferWithMemory* getIndexBuffer () const { return m_indexBuffer.get(); }
virtual VkDeviceSize getAccelerationStructureBufferOffset () const { return 0; }
virtual VkDeviceSize getDeviceScratchBufferOffset () const { return 0; }
, m_vertexBuffer (DE_NULL)
, m_indexBuffer (DE_NULL)
, m_deviceScratchBuffer (DE_NULL)
+ , m_hostScratchBuffer (new std::vector<deUint8>)
, m_accelerationStructureKHR ()
, m_indirectBuffer (DE_NULL)
, m_indirectBufferOffset (0)
m_buildType = buildType;
}
+VkAccelerationStructureBuildTypeKHR BottomLevelAccelerationStructureKHR::getBuildType () const
+{
+ return m_buildType;
+}
+
void BottomLevelAccelerationStructureKHR::setCreateFlags (const VkAccelerationStructureCreateFlagsKHR createFlags)
{
m_createFlags = createFlags;
}
else
{
- m_hostScratchBuffer.resize(static_cast<size_t>(m_buildScratchSize));
+ m_hostScratchBuffer->resize(static_cast<size_t>(m_buildScratchSize));
}
}
const VkAccelerationStructureGeometryKHR* const* accelerationStructureGeometry = accelerationStructureGeometriesKHRPointers.data();
VkDeviceOrHostAddressKHR scratchData = (m_buildType == VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR)
? makeDeviceOrHostAddressKHR(vk, device, getDeviceScratchBuffer()->get(), getDeviceScratchBufferOffset())
- : makeDeviceOrHostAddressKHR(m_hostScratchBuffer.data());
+ : makeDeviceOrHostAddressKHR(getHostScratchBuffer()->data());
const deUint32 geometryCount = (m_buildWithoutGeometries
? 0u
: static_cast<deUint32>(accelerationStructureGeometriesKHR.size()));
std::vector<VkAccelerationStructureBuildRangeInfoKHR>& accelerationStructureBuildRangeInfoKHR,
std::vector<deUint32>& maxPrimitiveCounts,
VkDeviceSize vertexBufferOffset,
- VkDeviceSize indexBufferOffset)
+ VkDeviceSize indexBufferOffset) const
{
accelerationStructureGeometriesKHR.resize(m_geometriesData.size());
accelerationStructureGeometriesKHRPointers.resize(m_geometriesData.size());
for (size_t geometryNdx = 0; geometryNdx < m_geometriesData.size(); ++geometryNdx)
{
- de::SharedPtr<RaytracedGeometryBase>& geometryData = m_geometriesData[geometryNdx];
+ const de::SharedPtr<RaytracedGeometryBase>& geometryData = m_geometriesData[geometryNdx];
VkDeviceOrHostAddressConstKHR vertexData, indexData;
if (m_buildType == VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR)
{
{
DE_ASSERT(0); // Silent this method
}
-
+ virtual auto computeBuildSize (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkDeviceSize strSize) const
+	// accStrSize, updateScratch, buildScratch, vertexSize, indexSize
+ -> std::tuple<VkDeviceSize, VkDeviceSize, VkDeviceSize, VkDeviceSize, VkDeviceSize>;
protected:
struct Info;
- virtual void preCreateComputeSizesAndOffsets (const DeviceInterface& vk,
- const VkDevice device,
+ virtual void preCreateSetSizesAndOffsets (const Info& info,
const VkDeviceSize accStrSize,
- Info& info);
+ const VkDeviceSize updateScratchSize,
+ const VkDeviceSize buildScratchSize);
virtual void createAccellerationStructure (const DeviceInterface& vk,
const VkDevice device,
VkDeviceAddress deviceAddress);
- virtual BufferWithMemory* getAccelerationStructureBuffer () override;
- virtual BufferWithMemory* getDeviceScratchBuffer () override;
- virtual BufferWithMemory* getVertexBuffer () override;
- virtual BufferWithMemory* getIndexBuffer () override;
+ virtual BufferWithMemory* getAccelerationStructureBuffer () const override;
+ virtual BufferWithMemory* getDeviceScratchBuffer () const override;
+ virtual std::vector<deUint8>* getHostScratchBuffer () const override;
+ virtual BufferWithMemory* getVertexBuffer () const override;
+ virtual BufferWithMemory* getIndexBuffer () const override;
virtual VkDeviceSize getAccelerationStructureBufferOffset () const override { return m_info.accStrOffset; }
- virtual VkDeviceSize getDeviceScratchBufferOffset () const override { return m_info.scratchBuffOffset; }
+ virtual VkDeviceSize getDeviceScratchBufferOffset () const override { return m_info.buildScratchBuffOffset; }
virtual VkDeviceSize getVertexBufferOffset () const override { return m_info.vertBuffOffset; }
virtual VkDeviceSize getIndexBufferOffset () const override { return m_info.indexBuffOffset; }
struct Info
{
deUint32 accStrIndex;
- VkDeviceSize accStrSize;
VkDeviceSize accStrOffset;
deUint32 vertBuffIndex;
- VkDeviceSize vertBuffSize;
VkDeviceSize vertBuffOffset;
deUint32 indexBuffIndex;
- VkDeviceSize indexBuffSize;
VkDeviceSize indexBuffOffset;
- deUint32 scratchBuffIndex;
- VkDeviceSize scratchBuffSize;
- VkDeviceSize scratchBuffOffset;
+ deUint32 buildScratchBuffIndex;
+ VkDeviceSize buildScratchBuffOffset;
} m_info;
};
-template<class X> inline X negz(const X&)
+template<class X> inline X negz (const X&)
{
return (~static_cast<X>(0));
}
-template<class X> inline bool isnegz(const X& x)
+template<class X> inline bool isnegz (const X& x)
{
return x == negz(x);
}
+template<class Y> inline auto make_unsigned(const Y& y) -> typename std::make_unsigned<Y>::type
+{
+ return static_cast<typename std::make_unsigned<Y>::type>(y);
+}
BottomLevelAccelerationStructurePoolMember::BottomLevelAccelerationStructurePoolMember (BottomLevelAccelerationStructurePoolImpl& pool)
: m_pool (pool)
BottomLevelAccelerationStructurePool& m_pool;
std::vector<de::SharedPtr<BufferWithMemory>> m_accellerationStructureBuffers;
- std::vector<de::SharedPtr<BufferWithMemory>> m_deviceScratchBuffers;
+ de::SharedPtr<BufferWithMemory> m_deviceScratchBuffer;
+ de::UniquePtr<std::vector<deUint8>> m_hostScratchBuffer;
std::vector<de::SharedPtr<BufferWithMemory>> m_vertexBuffers;
std::vector<de::SharedPtr<BufferWithMemory>> m_indexBuffers;
};
BottomLevelAccelerationStructurePoolImpl::BottomLevelAccelerationStructurePoolImpl (BottomLevelAccelerationStructurePool& pool)
: m_pool (pool)
, m_accellerationStructureBuffers ()
- , m_deviceScratchBuffers ()
+ , m_deviceScratchBuffer ()
+ , m_hostScratchBuffer (new std::vector<deUint8>)
, m_vertexBuffers ()
, m_indexBuffers ()
{
}
-BufferWithMemory* BottomLevelAccelerationStructurePoolMember::getAccelerationStructureBuffer ()
+BufferWithMemory* BottomLevelAccelerationStructurePoolMember::getAccelerationStructureBuffer () const
{
BufferWithMemory* result = nullptr;
if (m_pool.m_accellerationStructureBuffers.size())
}
return result;
}
-BufferWithMemory* BottomLevelAccelerationStructurePoolMember::getDeviceScratchBuffer ()
+BufferWithMemory* BottomLevelAccelerationStructurePoolMember::getDeviceScratchBuffer () const
{
- return m_pool.m_deviceScratchBuffers.size() && !isnegz(m_info.scratchBuffIndex)
- ? m_pool.m_deviceScratchBuffers[m_info.scratchBuffIndex].get()
- : nullptr;
+ DE_ASSERT(m_info.buildScratchBuffIndex == 0);
+ return m_pool.m_deviceScratchBuffer.get();
}
-BufferWithMemory* BottomLevelAccelerationStructurePoolMember::getVertexBuffer ()
+std::vector<deUint8>* BottomLevelAccelerationStructurePoolMember::getHostScratchBuffer () const
+{
+ return this->m_buildScratchSize ? m_pool.m_hostScratchBuffer.get() : nullptr;
+}
+
+BufferWithMemory* BottomLevelAccelerationStructurePoolMember::getVertexBuffer () const
{
BufferWithMemory* result = nullptr;
if (m_pool.m_vertexBuffers.size())
}
return result;
}
-BufferWithMemory* BottomLevelAccelerationStructurePoolMember::getIndexBuffer ()
+BufferWithMemory* BottomLevelAccelerationStructurePoolMember::getIndexBuffer () const
{
BufferWithMemory* result = nullptr;
if (m_pool.m_indexBuffers.size())
, m_batchGeomCount (0)
, m_infos ()
, m_structs ()
- , m_createOnce ()
+ , m_createOnce (false)
+ , m_tryCachedMemory (true)
+ , m_structsBuffSize (0)
+ , m_updatesScratchSize (0)
+ , m_buildsScratchSize (0)
+ , m_verticesSize (0)
+ , m_indicesSize (0)
, m_impl (new Impl(*this))
{
}
delete m_impl;
}
-void BottomLevelAccelerationStructurePool::batchStructCount (const size_t& value)
+void BottomLevelAccelerationStructurePool::batchStructCount (const deUint32& value)
{
DE_ASSERT(value >= 1); m_batchStructCount = value;
}
return m_structs.back();
}
+void adjustBatchCount (const DeviceInterface& vkd,
+ const VkDevice device,
+ const std::vector<BottomLevelAccelerationStructurePool::BlasPtr>& structs,
+ const std::vector<BottomLevelAccelerationStructurePool::BlasInfo>& infos,
+ const VkDeviceSize maxBufferSize,
+ deUint32 (&result)[4])
+{
+ tcu::Vector<VkDeviceSize, 4> sizes(0);
+ tcu::Vector<VkDeviceSize, 4> sums(0);
+ tcu::Vector<deUint32, 4> tmps(0);
+ tcu::Vector<deUint32, 4> batches(0);
+
+ VkDeviceSize updateScratchSize = 0; static_cast<void>(updateScratchSize); // not used yet, disabled for future implementation
+
+ auto updateIf = [&](deUint32 c)
+ {
+ if (sums[c] + sizes[c] <= maxBufferSize)
+ {
+ sums[c] += sizes[c];
+ tmps[c] += 1;
+
+ batches[c] = std::max(tmps[c], batches[c]);
+ }
+ else
+ {
+ sums[c] = 0;
+ tmps[c] = 0;
+ }
+ };
+
+ const deUint32 maxIter = static_cast<deUint32>(structs.size());
+ for (deUint32 i = 0; i < maxIter; ++i)
+ {
+ auto& str = *dynamic_cast<BottomLevelAccelerationStructurePoolMember*>(structs[i].get());
+ std::tie(sizes[0], updateScratchSize, sizes[1], sizes[2], sizes[3]) = str.computeBuildSize(vkd, device, infos[i].structureSize);
+
+ updateIf(0);
+ updateIf(1);
+ updateIf(2);
+ updateIf(3);
+ }
+
+ result[0] = std::max(batches[0], 1u);
+ result[1] = std::max(batches[1], 1u);
+ result[2] = std::max(batches[2], 1u);
+ result[3] = std::max(batches[3], 1u);
+}
+
size_t BottomLevelAccelerationStructurePool::getAllocationCount () const
{
return m_impl->m_accellerationStructureBuffers.size()
+ m_impl->m_vertexBuffers.size()
+ m_impl->m_indexBuffers.size()
- + m_impl->m_deviceScratchBuffers.size();
+ + 1 /* for scratch buffer */;
+}
+
+size_t BottomLevelAccelerationStructurePool::getAllocationCount (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkDeviceSize maxBufferSize) const
+{
+ DE_ASSERT(m_structs.size() != 0);
+
+ std::map<deUint32, VkDeviceSize> accStrSizes;
+ std::map<deUint32, VkDeviceSize> vertBuffSizes;
+ std::map<deUint32, VkDeviceSize> indexBuffSizes;
+ std::map<deUint32, VkDeviceSize> scratchBuffSizes;
+
+ const deUint32 allStructsCount = structCount();
+
+ deUint32 batchStructCount = m_batchStructCount;
+ deUint32 batchScratchCount = m_batchStructCount;
+ deUint32 batchVertexCount = m_batchGeomCount ? m_batchGeomCount : m_batchStructCount;
+ deUint32 batchIndexCount = batchVertexCount;
+
+ if (!isnegz(maxBufferSize))
+ {
+ deUint32 batches[4];
+ adjustBatchCount(vk, device, m_structs, m_infos, maxBufferSize, batches);
+ batchStructCount = batches[0];
+ batchScratchCount = batches[1];
+ batchVertexCount = batches[2];
+ batchIndexCount = batches[3];
+ }
+
+ deUint32 iStr = 0;
+ deUint32 iScratch = 0;
+ deUint32 iVertex = 0;
+ deUint32 iIndex = 0;
+
+ VkDeviceSize strSize = 0;
+ VkDeviceSize updateScratchSize = 0;
+ VkDeviceSize buildScratchSize = 0;
+ VkDeviceSize vertexSize = 0;
+ VkDeviceSize indexSize = 0;
+
+ for (; iStr < allStructsCount; ++iStr)
+ {
+ auto& str = *dynamic_cast<BottomLevelAccelerationStructurePoolMember*>(m_structs[iStr].get());
+ std::tie(strSize, updateScratchSize, buildScratchSize, vertexSize, indexSize) = str.computeBuildSize(vk, device, m_infos[iStr].structureSize);
+
+ {
+ const VkDeviceSize alignedStrSize = deAlign64(strSize, 256);
+ const deUint32 accStrIndex = (iStr / batchStructCount);
+ accStrSizes[accStrIndex] += alignedStrSize;
+ }
+
+ if (buildScratchSize != 0)
+ {
+			const VkDeviceSize	alignedBuildScratchSize	= deAlign64(buildScratchSize, 256);
+			const deUint32		scratchBuffIndex = (iScratch / batchScratchCount);
+			scratchBuffSizes[scratchBuffIndex] += alignedBuildScratchSize;
+ iScratch += 1;
+ }
+
+ if (vertexSize != 0)
+ {
+ const VkDeviceSize alignedVertBuffSize = deAlign64(vertexSize, 8);
+ const deUint32 vertBuffIndex = (iVertex / batchVertexCount);
+ vertBuffSizes[vertBuffIndex] += alignedVertBuffSize;
+ iVertex += 1;
+ }
+
+ if (indexSize != 0)
+ {
+ const VkDeviceSize alignedIndexBuffSize = deAlign64(indexSize, 8);
+ const deUint32 indexBuffIndex = (iIndex / batchIndexCount);
+ indexBuffSizes[indexBuffIndex] += alignedIndexBuffSize;
+ iIndex += 1;
+ }
+ }
+
+ return accStrSizes.size()
+ + vertBuffSizes.size()
+ + indexBuffSizes.size()
+ + scratchBuffSizes.size();
}
-void BottomLevelAccelerationStructurePool::batchCreate (const DeviceInterface& vk,
- const VkDevice device,
- Allocator& allocator)
+tcu::Vector<VkDeviceSize, 4> BottomLevelAccelerationStructurePool::getAllocationSizes (const DeviceInterface& vk,
+ const VkDevice device) const
+{
+ if (m_structsBuffSize)
+ {
+ return tcu::Vector<VkDeviceSize, 4>(m_structsBuffSize, m_buildsScratchSize, m_verticesSize, m_indicesSize);
+ }
+
+ VkDeviceSize strSize = 0;
+ VkDeviceSize updateScratchSize = 0; static_cast<void>(updateScratchSize); // not used yet, disabled for future implementation
+ VkDeviceSize buildScratchSize = 0;
+ VkDeviceSize vertexSize = 0;
+ VkDeviceSize indexSize = 0;
+ VkDeviceSize sumStrSize = 0;
+ VkDeviceSize sumUpdateScratchSize = 0; static_cast<void>(sumUpdateScratchSize); // not used yet, disabled for future implementation
+ VkDeviceSize sumBuildScratchSize = 0;
+ VkDeviceSize sumVertexSize = 0;
+ VkDeviceSize sumIndexSize = 0;
+ for (size_t i = 0; i < structCount(); ++i)
+ {
+ auto& str = *dynamic_cast<BottomLevelAccelerationStructurePoolMember*>(m_structs[i].get());
+ std::tie(strSize, updateScratchSize, buildScratchSize, vertexSize, indexSize) = str.computeBuildSize(vk, device, m_infos[i].structureSize);
+ sumStrSize += deAlign64(strSize, 256);
+ //sumUpdateScratchSize += deAlign64(updateScratchSize, 256); not used yet, disabled for future implementation
+ sumBuildScratchSize += deAlign64(buildScratchSize, 256);
+ sumVertexSize += deAlign64(vertexSize, 8);
+ sumIndexSize += deAlign64(indexSize, 8);
+ }
+ return tcu::Vector<VkDeviceSize, 4>(sumStrSize, sumBuildScratchSize, sumVertexSize, sumIndexSize);
+}
+
+void BottomLevelAccelerationStructurePool::batchCreate (const DeviceInterface& vkd,
+ const VkDevice device,
+ Allocator& allocator)
+{
+ batchCreateAdjust(vkd, device, allocator, negz<VkDeviceSize>(0));
+}
+
+void BottomLevelAccelerationStructurePool::batchCreateAdjust (const DeviceInterface& vkd,
+ const VkDevice device,
+ Allocator& allocator,
+ const VkDeviceSize maxBufferSize)
{
// Prevent a programmer from calling this method more than once.
if (m_createOnce) DE_ASSERT(0);
auto createAccellerationStructureBuffer = [&](VkDeviceSize bufferSize) -> typename std::add_pointer<BufferWithMemory>::type
{
- typename std::add_pointer<BufferWithMemory>::type result = nullptr;
+ BufferWithMemory* res = nullptr;
const VkBufferCreateInfo bci = makeBufferCreateInfo(bufferSize, VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
- try
+
+ if (m_tryCachedMemory) try
{
- result = new BufferWithMemory(vk, device, allocator, bci,
- MemoryRequirement::Cached | MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress);
+ res = new BufferWithMemory(vkd, device, allocator, bci, MemoryRequirement::Cached | MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress);
}
catch (const tcu::NotSupportedError&)
{
- // retry without Cached flag
- result = new BufferWithMemory(vk, device, allocator, bci, MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress);
+ res = nullptr;
}
- return result;
+
+ return (nullptr != res)
+ ? res
+ : (new BufferWithMemory(vkd, device, allocator, bci, MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress));
};
- auto createScratchBuffer = [&](VkDeviceSize bufferSize) -> typename std::add_pointer<BufferWithMemory>::type
+ auto createDeviceScratchBuffer = [&](VkDeviceSize bufferSize) -> de::SharedPtr<BufferWithMemory>
{
const VkBufferCreateInfo bci = makeBufferCreateInfo(bufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
- return new BufferWithMemory(vk, device, allocator, bci, MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress);
+ BufferWithMemory* p = new BufferWithMemory(vkd, device, allocator, bci, MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress);
+ return de::SharedPtr<BufferWithMemory>(p);
};
- std::vector<de::SharedPtr<BufferWithMemory>> accellerationStructureBuffers;
- std::vector<de::SharedPtr<BufferWithMemory>> deviceScratchBuffers;
- std::vector<de::SharedPtr<BufferWithMemory>> vertexBuffers;
- std::vector<de::SharedPtr<BufferWithMemory>> indexBuffers;
+ std::map<deUint32, VkDeviceSize> accStrSizes;
+ std::map<deUint32, VkDeviceSize> vertBuffSizes;
+ std::map<deUint32, VkDeviceSize> indexBuffSizes;
+
+ const deUint32 allStructsCount = structCount();
+ deUint32 iterKey = 0;
+
+ deUint32 batchStructCount = m_batchStructCount;
+ deUint32 batchVertexCount = m_batchGeomCount ? m_batchGeomCount : m_batchStructCount;
+ deUint32 batchIndexCount = batchVertexCount;
- BottomLevelAccelerationStructurePoolMember::Info info { /* initialized with zeros */ };
- typename std::map<deUint32, VkDeviceSize>::size_type iter = 0;
- std::map<deUint32, VkDeviceSize> accStrSizes;
- std::map<deUint32, VkDeviceSize> vertBuffSizes;
- std::map<deUint32, VkDeviceSize> indexBuffSizes;
- std::map<deUint32, VkDeviceSize> scratchBuffSizes;
+ if (!isnegz(maxBufferSize))
+ {
+ deUint32 batches[4];
+ adjustBatchCount(vkd, device, m_structs, m_infos, maxBufferSize, batches);
+ batchStructCount = batches[0];
+ // batches[1]: batchScratchCount
+ batchVertexCount = batches[2];
+ batchIndexCount = batches[3];
+ }
- deUint32 indexBuffIndexControl = 0;
- deUint32 scratchBuffIndexControl = 0;
+ deUint32 iStr = 0;
+ deUint32 iVertex = 0;
+ deUint32 iIndex = 0;
- const size_t realBatchGeomCount = batchGeomCount() ? batchGeomCount() : batchStructCount();
+ VkDeviceSize strSize = 0;
+ VkDeviceSize updateScratchSize = 0;
+ VkDeviceSize buildScratchSize = 0;
+ VkDeviceSize maxBuildScratchSize = 0;
+ VkDeviceSize vertexSize = 0;
+ VkDeviceSize indexSize = 0;
- for (size_t i = 0; i < structCount(); ++i)
+ VkDeviceSize strOffset = 0;
+ VkDeviceSize vertexOffset = 0;
+ VkDeviceSize indexOffset = 0;
+
+ deUint32 hostStructCount = 0;
+ deUint32 deviceStructCount = 0;
+
+ for (; iStr < allStructsCount; ++iStr)
{
- auto& str = *dynamic_cast<BottomLevelAccelerationStructurePoolMember*>(m_structs[i].get());
- str.preCreateComputeSizesAndOffsets(vk, device, m_infos[i].structureSize, info);
+ BottomLevelAccelerationStructurePoolMember::Info info{};
+ auto& str = *dynamic_cast<BottomLevelAccelerationStructurePoolMember*>(m_structs[iStr].get());
+ std::tie(strSize, updateScratchSize, buildScratchSize, vertexSize, indexSize) = str.computeBuildSize(vkd, device, m_infos[iStr].structureSize);
+
+ ++(str.getBuildType() == VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR ? hostStructCount : deviceStructCount);
{
- const deUint32 accStrIndex = deUint32(i / batchStructCount());
- accStrSizes[accStrIndex] += deAlign64(info.accStrSize, 256);
- if (((i + 1) % batchStructCount()) == 0)
- {
- info.accStrOffset = 0;
- info.accStrIndex = accStrIndex + 1;
- }
- else
+ const VkDeviceSize alignedStrSize = deAlign64(strSize, 256);
+ const deUint32 accStrIndex = (iStr / batchStructCount);
+ if (iStr != 0 && (iStr % batchStructCount) == 0)
{
- info.accStrIndex = accStrIndex;
- info.accStrOffset += deAlign64(info.accStrSize, 256);
+ strOffset = 0;
}
+
+ info.accStrIndex = accStrIndex;
+ info.accStrOffset = strOffset;
+ accStrSizes[accStrIndex] += alignedStrSize;
+ strOffset += alignedStrSize;
+ m_structsBuffSize += alignedStrSize;
}
+ if (buildScratchSize != 0)
{
- const deUint32 vertexBuffIndex = deUint32(i / realBatchGeomCount);
- vertBuffSizes[vertexBuffIndex] += info.vertBuffOffset += deAlign64(info.vertBuffSize, 8);
- if (((i + 1) % realBatchGeomCount) == 0)
- {
- info.vertBuffOffset = 0;
- info.vertBuffIndex = vertexBuffIndex + 1;
- }
- else
- {
- info.vertBuffIndex = vertexBuffIndex;
- info.vertBuffOffset += deAlign64(info.vertBuffSize, 8);
- }
+ maxBuildScratchSize = std::max(maxBuildScratchSize, make_unsigned(deAlign64(buildScratchSize, 256u)));
+
+ info.buildScratchBuffIndex = 0;
+ info.buildScratchBuffOffset = 0;
}
- if (info.indexBuffSize)
+ if (vertexSize != 0)
{
- const deUint32 indexBuffIndex = deUint32(indexBuffIndexControl / realBatchGeomCount);
- indexBuffSizes[indexBuffIndex] += deAlign64(info.indexBuffSize, 8);
- if (((indexBuffIndexControl + 1) % realBatchGeomCount) == 0)
+ const VkDeviceSize alignedVertBuffSize = deAlign64(vertexSize, 8);
+ const deUint32 vertBuffIndex = (iVertex / batchVertexCount);
+ if (iVertex != 0 && (iVertex % batchVertexCount) == 0)
{
- info.indexBuffOffset = 0;
- info.indexBuffIndex = indexBuffIndex + 1;
+ vertexOffset = 0;
}
- else
- {
- info.indexBuffIndex = indexBuffIndex;
- info.indexBuffOffset += deAlign64(info.indexBuffSize, 8);
- }
- indexBuffIndexControl += 1;
+
+ info.vertBuffIndex = vertBuffIndex;
+ info.vertBuffOffset = vertexOffset;
+ vertBuffSizes[vertBuffIndex] += alignedVertBuffSize;
+ vertexOffset += alignedVertBuffSize;
+ m_verticesSize += alignedVertBuffSize;
+ iVertex += 1;
}
- if (info.scratchBuffSize)
+ if (indexSize != 0)
{
- const deUint32 scratchBuffIndex = deUint32(scratchBuffIndexControl / batchStructCount());
- scratchBuffSizes[scratchBuffIndex] += deAlign64(info.scratchBuffSize, 256);
- if (((scratchBuffIndexControl + 1) % batchStructCount()) == 0)
- {
- info.scratchBuffOffset = 0;
- info.scratchBuffIndex = scratchBuffIndex + 1;
- }
- else
+ const VkDeviceSize alignedIndexBuffSize = deAlign64(indexSize, 8);
+ const deUint32 indexBuffIndex = (iIndex / batchIndexCount);
+ if (iIndex != 0 && (iIndex % batchIndexCount) == 0)
{
- info.scratchBuffIndex = scratchBuffIndex;
- info.scratchBuffOffset += deAlign64(info.scratchBuffSize, 256);
+ indexOffset = 0;
}
- scratchBuffIndexControl += 1;
+
+ info.indexBuffIndex = indexBuffIndex;
+ info.indexBuffOffset = indexOffset;
+ indexBuffSizes[indexBuffIndex] += alignedIndexBuffSize;
+ indexOffset += alignedIndexBuffSize;
+ m_indicesSize += alignedIndexBuffSize;
+ iIndex += 1;
}
+
+ str.preCreateSetSizesAndOffsets(info, strSize, updateScratchSize, buildScratchSize);
}
- for (iter = 0; iter < accStrSizes.size(); ++iter)
+ for (iterKey = 0; iterKey < static_cast<deUint32>(accStrSizes.size()); ++iterKey)
{
- m_impl->m_accellerationStructureBuffers.emplace_back(createAccellerationStructureBuffer(accStrSizes[deUint32(iter)]));
+ m_impl->m_accellerationStructureBuffers.emplace_back(createAccellerationStructureBuffer(accStrSizes.at(iterKey)));
}
- for (iter = 0; iter < vertBuffSizes.size(); ++iter)
+ for (iterKey = 0; iterKey < static_cast<deUint32>(vertBuffSizes.size()); ++iterKey)
{
- m_impl->m_vertexBuffers.emplace_back(createVertexBuffer(vk, device, allocator, vertBuffSizes[deUint32(iter)]));
+ m_impl->m_vertexBuffers.emplace_back(createVertexBuffer(vkd, device, allocator, vertBuffSizes.at(iterKey)));
}
- for (iter = 0; iter < indexBuffSizes.size(); ++iter)
+ for (iterKey = 0; iterKey < static_cast<deUint32>(indexBuffSizes.size()); ++iterKey)
{
- m_impl->m_indexBuffers.emplace_back(createIndexBuffer(vk, device, allocator, indexBuffSizes[deUint32(iter)]));
+ m_impl->m_indexBuffers.emplace_back(createIndexBuffer(vkd, device, allocator, indexBuffSizes.at(iterKey)));
}
- for (iter = 0; iter < scratchBuffSizes.size(); ++iter)
+
+ if (maxBuildScratchSize)
{
- m_impl->m_deviceScratchBuffers.emplace_back(createScratchBuffer(scratchBuffSizes[deUint32(iter)]));
+ if (hostStructCount) m_impl->m_hostScratchBuffer->resize(static_cast<size_t>(maxBuildScratchSize));
+ if (deviceStructCount) m_impl->m_deviceScratchBuffer = createDeviceScratchBuffer(maxBuildScratchSize);
+
+ m_buildsScratchSize = maxBuildScratchSize;
}
- for (deUint32 i = 0; i < structCount(); ++i)
+ for (iterKey = 0; iterKey < allStructsCount; ++iterKey)
{
- auto& str = *dynamic_cast<BottomLevelAccelerationStructurePoolMember*>(m_structs[i].get());
- str.createAccellerationStructure(vk, device, m_infos[i].deviceAddress);
+ auto& str = *dynamic_cast<BottomLevelAccelerationStructurePoolMember*>(m_structs[iterKey].get());
+ str.createAccellerationStructure(vkd, device, m_infos[iterKey].deviceAddress);
}
}
}
}
+// Builds every structure in the pool.  Host-built members are built
+// immediately on the CPU; device-built members are collected and recorded
+// (up to 'limit' per batch) into a single command buffer that is submitted
+// and waited on, after which the command pool is reset.
+void BottomLevelAccelerationStructurePool::batchBuild (const DeviceInterface& vk,
+ const VkDevice device,
+ VkCommandPool cmdPool,
+ VkQueue queue)
+{
+ // Maximum number of device builds recorded into one command buffer before submitting.
+ const deUint32 limit = 10000u;
+ const deUint32 count = structCount();
+ std::vector<BlasPtr> buildingOnDevice;
+
+ // Records all pending device builds, submits them, waits for completion,
+ // and releases the command-pool resources for the next batch.
+ auto buildOnDevice = [&]() -> void
+ {
+ Move<VkCommandBuffer> cmd = allocateCommandBuffer(vk, device, cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+
+ beginCommandBuffer(vk, *cmd, 0u);
+ for (const auto& str : buildingOnDevice)
+ str->build(vk, device, *cmd);
+ endCommandBuffer(vk, *cmd);
+
+ submitCommandsAndWait(vk, device, queue, *cmd);
+ vk.resetCommandPool(device, cmdPool, VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT);
+ };
+
+ buildingOnDevice.reserve(limit);
+ for (deUint32 i = 0; i < count; ++i)
+ {
+ auto str = m_structs[i];
+
+ if (str->getBuildType() == VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR)
+ str->build(vk, device, DE_NULL);
+ else
+ buildingOnDevice.emplace_back(str);
-void BottomLevelAccelerationStructurePoolMember::preCreateComputeSizesAndOffsets (const DeviceInterface& vk,
- const VkDevice device,
- const VkDeviceSize accStrSize,
- Info& ioinfo)
+ // Flush when the batch is full or this is the last structure.
+ // NOTE(review): device builds batched into one submission appear to share
+ // the pool's single scratch buffer (batchCreateAdjust assigns every member
+ // buildScratchBuffOffset 0) - confirm builds recorded into the same command
+ // buffer cannot overlap on scratch memory.
+ // NOTE(review): if all members are host-built, the final flush still
+ // submits an empty command buffer; harmless but could be skipped.
+ if ( buildingOnDevice.size() == limit || (count - 1) == i)
+ {
+ buildOnDevice();
+ buildingOnDevice.clear();
+ }
+ }
+}
+
+auto BottomLevelAccelerationStructurePoolMember::computeBuildSize (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkDeviceSize strSize) const
+ // accStrSize,updateScratch,buildScratch, vertexSize, indexSize
+ -> std::tuple<VkDeviceSize, VkDeviceSize,VkDeviceSize,VkDeviceSize,VkDeviceSize>
{
- // AS may be built from geometries using vkCmdBuildAccelerationStructuresKHR / vkBuildAccelerationStructuresKHR
- // or may be copied/compacted/deserialized from other AS ( in this case AS does not need geometries, but it needs to know its size before creation ).
- DE_ASSERT(!m_geometriesData.empty() != !(accStrSize == 0)); // logical xor
+ DE_ASSERT(!m_geometriesData.empty() != !(strSize == 0)); // logical xor
+
+ std::tuple<VkDeviceSize,VkDeviceSize,VkDeviceSize,VkDeviceSize,VkDeviceSize> result(deAlign64(strSize, 256), 0, 0, 0, 0);
- if (accStrSize == 0)
+ if (!m_geometriesData.empty())
{
std::vector<VkAccelerationStructureGeometryKHR> accelerationStructureGeometriesKHR;
std::vector<VkAccelerationStructureGeometryKHR*> accelerationStructureGeometriesKHRPointers;
m_useArrayOfPointers ? accelerationStructureGeometry : DE_NULL, // const VkAccelerationStructureGeometryKHR* const* ppGeometries;
makeDeviceOrHostAddressKHR(DE_NULL) // VkDeviceOrHostAddressKHR scratchData;
};
+
VkAccelerationStructureBuildSizesInfoKHR sizeInfo =
{
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR, // VkStructureType sType;
vk.getAccelerationStructureBuildSizesKHR(device, m_buildType, &accelerationStructureBuildGeometryInfoKHR, maxPrimitiveCounts.data(), &sizeInfo);
- m_structureSize = sizeInfo.accelerationStructureSize;
- m_updateScratchSize = sizeInfo.updateScratchSize;
- m_buildScratchSize = sizeInfo.buildScratchSize;
- }
- else
- {
- m_structureSize = accStrSize;
- m_updateScratchSize = 0u;
- m_buildScratchSize = 0u;
- }
-
- ioinfo.accStrSize = m_structureSize;
- m_info.accStrIndex = ioinfo.accStrIndex;
- m_info.accStrOffset = ioinfo.accStrOffset;
-
- ioinfo.scratchBuffSize = m_buildScratchSize;
- if (m_buildScratchSize > 0u)
- {
- if (m_buildType == VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR)
- {
- m_info.scratchBuffIndex = ioinfo.scratchBuffIndex;
- m_info.scratchBuffOffset = ioinfo.scratchBuffOffset;
- }
- else
- {
- m_hostScratchBuffer.resize(static_cast<size_t>(m_buildScratchSize));
- m_info.scratchBuffIndex = negz(m_info.scratchBuffIndex);
- m_info.scratchBuffOffset = negz(m_info.scratchBuffOffset);
- }
+ std::get<0>(result) = sizeInfo.accelerationStructureSize;
+ std::get<1>(result) = sizeInfo.updateScratchSize;
+ std::get<2>(result) = sizeInfo.buildScratchSize;
+ std::get<3>(result) = getVertexBufferSize(m_geometriesData);
+ std::get<4>(result) = getIndexBufferSize(m_geometriesData);
}
- ioinfo.vertBuffSize = getVertexBufferSize(m_geometriesData);
- m_info.vertBuffIndex = ioinfo.vertBuffIndex;
- m_info.vertBuffOffset = ioinfo.vertBuffOffset;
+ return result;
+}
- ioinfo.indexBuffSize = getIndexBufferSize(m_geometriesData);
- if (ioinfo.indexBuffSize)
- {
- m_info.indexBuffIndex = ioinfo.indexBuffIndex;
- m_info.indexBuffOffset = ioinfo.indexBuffOffset;
- }
- else
- {
- m_info.indexBuffIndex = negz(m_info.indexBuffIndex);
- m_info.indexBuffOffset = negz(m_info.indexBuffOffset);
- }
+// Caches the pool-computed buffer placement (indices/offsets in 'info') and
+// the member's structure/scratch sizes.  Called by the pool's batch-create
+// pass before the acceleration structures are created; presumably these
+// cached values are read by createAccellerationStructure() - verify against
+// its definition.
+void BottomLevelAccelerationStructurePoolMember::preCreateSetSizesAndOffsets (const Info& info,
+ const VkDeviceSize accStrSize,
+ const VkDeviceSize updateScratchSize,
+ const VkDeviceSize buildScratchSize)
+{
+ m_info = info;
+ m_structureSize = accStrSize;
+ m_updateScratchSize = updateScratchSize;
+ m_buildScratchSize = buildScratchSize;
}
void BottomLevelAccelerationStructurePoolMember::createAccellerationStructure (const DeviceInterface& vk,
const VkDevice device,
Allocator& allocator,
std::vector<de::SharedPtr<BottomLevelAccelerationStructure> > bottomLevelInstances,
- std::vector<InstanceData> instanceData)
+ std::vector<InstanceData> instanceData,
+ const bool tryCachedMemory)
{
DE_ASSERT(bottomLevelInstances.size() != 0);
DE_ASSERT(bottomLevelInstances.size() == instanceData.size());
DE_UNREF(instanceData);
+ BufferWithMemory* result = nullptr;
const VkDeviceSize bufferSizeBytes = bottomLevelInstances.size() * sizeof(VkAccelerationStructureInstanceKHR);
const VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(bufferSizeBytes, VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
- try
+ if (tryCachedMemory) try
{
- return new BufferWithMemory(vk, device, allocator, bufferCreateInfo, MemoryRequirement::Cached | MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress);
+ result = new BufferWithMemory(vk, device, allocator, bufferCreateInfo, MemoryRequirement::Cached | MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress);
}
catch (const tcu::NotSupportedError&)
{
- // retry without Cached flag
- return new BufferWithMemory(vk, device, allocator, bufferCreateInfo, MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress);
+ result = nullptr;
}
+ return result
+ ? result
+ : new BufferWithMemory(vk, device, allocator, bufferCreateInfo, MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress);
}
void updateSingleInstance (const DeviceInterface& vk,
const VkDeviceSize indirectBufferOffset,
const deUint32 indirectBufferStride) override;
void setUsePPGeometries (const bool usePPGeometries) override;
+ void setTryCachedMemory (const bool tryCachedMemory) override;
VkBuildAccelerationStructureFlagsKHR getBuildFlags () const override;
+ void getCreationSizes (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkDeviceSize structureSize,
+ CreationSizes& sizes) override;
void create (const DeviceInterface& vk,
const VkDevice device,
Allocator& allocator,
VkDeviceSize m_indirectBufferOffset;
deUint32 m_indirectBufferStride;
bool m_usePPGeometries;
+ bool m_tryCachedMemory;
void prepareInstances (const DeviceInterface& vk,
, m_indirectBufferOffset (0)
, m_indirectBufferStride (0)
, m_usePPGeometries (false)
+ , m_tryCachedMemory (true)
{
}
m_usePPGeometries = usePPGeometries;
}
+// Selects whether buffer allocations for this TLAS should first be attempted
+// with MemoryRequirement::Cached, falling back to plain host-visible memory
+// when the cached allocation is not supported.  Defaults to true (set in the
+// constructor initializer list).
+void TopLevelAccelerationStructureKHR::setTryCachedMemory (const bool tryCachedMemory)
+{
+ m_tryCachedMemory = tryCachedMemory;
+}
+
void TopLevelAccelerationStructureKHR::setIndirectBuildParameters (const VkBuffer indirectBuffer,
const VkDeviceSize indirectBufferOffset,
const deUint32 indirectBufferStride)
return m_buildFlags;
}
+// Sum of all component sizes, in bytes.
+VkDeviceSize TopLevelAccelerationStructure::CreationSizes::sum () const
+{
+ return structure + updateScratch + buildScratch + instancePointers + instancesBuffer;
+}
+
+// Computes, without allocating anything, the memory sizes that creating and
+// building this TLAS would require, and writes them into 'sizes'.
+void TopLevelAccelerationStructureKHR::getCreationSizes (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkDeviceSize structureSize,
+ CreationSizes& sizes)
+{
+ // AS may be built from geometries using vkCmdBuildAccelerationStructuresKHR / vkBuildAccelerationStructuresKHR
+ // or may be copied/compacted/deserialized from another AS (in which case it does not need geometries, but must know its size before creation).
+ DE_ASSERT(!m_bottomLevelInstances.empty() != !(structureSize == 0)); // logical xor
+
+ if (structureSize == 0)
+ {
+ VkAccelerationStructureGeometryKHR accelerationStructureGeometryKHR;
+ const auto accelerationStructureGeometryKHRPtr = &accelerationStructureGeometryKHR;
+ std::vector<deUint32> maxPrimitiveCounts;
+ prepareInstances(vk, device, accelerationStructureGeometryKHR, maxPrimitiveCounts);
+
+ VkAccelerationStructureBuildGeometryInfoKHR accelerationStructureBuildGeometryInfoKHR =
+ {
+ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, // VkAccelerationStructureTypeKHR type;
+ m_buildFlags, // VkBuildAccelerationStructureFlagsKHR flags;
+ VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR, // VkBuildAccelerationStructureModeKHR mode;
+ DE_NULL, // VkAccelerationStructureKHR srcAccelerationStructure;
+ DE_NULL, // VkAccelerationStructureKHR dstAccelerationStructure;
+ 1u, // deUint32 geometryCount;
+ (m_usePPGeometries ? nullptr : &accelerationStructureGeometryKHR), // const VkAccelerationStructureGeometryKHR* pGeometries;
+ (m_usePPGeometries ? &accelerationStructureGeometryKHRPtr : nullptr), // const VkAccelerationStructureGeometryKHR* const* ppGeometries;
+ makeDeviceOrHostAddressKHR(DE_NULL) // VkDeviceOrHostAddressKHR scratchData;
+ };
+
+ VkAccelerationStructureBuildSizesInfoKHR sizeInfo =
+ {
+ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0, // VkDeviceSize accelerationStructureSize;
+ 0, // VkDeviceSize updateScratchSize;
+ 0 // VkDeviceSize buildScratchSize;
+ };
+
+ vk.getAccelerationStructureBuildSizesKHR(device, m_buildType, &accelerationStructureBuildGeometryInfoKHR, maxPrimitiveCounts.data(), &sizeInfo);
+
+ sizes.structure = sizeInfo.accelerationStructureSize;
+ sizes.updateScratch = sizeInfo.updateScratchSize;
+ sizes.buildScratch = sizeInfo.buildScratchSize;
+ }
+ else
+ {
+ // Size known in advance (copy/compact/deserialize path): no scratch needed.
+ sizes.structure = structureSize;
+ sizes.updateScratch = 0u;
+ sizes.buildScratch = 0u;
+ }
+
+ // A separate buffer of addresses is only needed for the array-of-pointers
+ // instance layout; pointer width depends on device vs. host build type.
+ sizes.instancePointers = 0u;
+ if (m_useArrayOfPointers)
+ {
+ const size_t pointerSize = (m_buildType == VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR) ? sizeof(VkDeviceOrHostAddressConstKHR::deviceAddress) : sizeof(VkDeviceOrHostAddressConstKHR::hostAddress);
+ sizes.instancePointers = static_cast<VkDeviceSize>(m_bottomLevelInstances.size() * pointerSize);
+ }
+
+ sizes.instancesBuffer = m_bottomLevelInstances.empty() ? 0u : m_bottomLevelInstances.size() * sizeof(VkAccelerationStructureInstanceKHR);
+}
+
void TopLevelAccelerationStructureKHR::create (const DeviceInterface& vk,
const VkDevice device,
Allocator& allocator,
}
{
+ bool tryCachedStatus = false;
const VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(m_structureSize, VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
- try
+ if (m_tryCachedMemory) try
{
+ tryCachedStatus = true;
m_accelerationStructureBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(vk, device, allocator, bufferCreateInfo, MemoryRequirement::Cached | MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress));
}
catch (const tcu::NotSupportedError&)
{
- // retry without Cached flag
+ tryCachedStatus = false;
+ }
+
+ if (false == tryCachedStatus)
+ {
+ // retry without Cached flag or just create if m_tryCachedMemory is not set
m_accelerationStructureBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(vk, device, allocator, bufferCreateInfo, MemoryRequirement::HostVisible | MemoryRequirement::Coherent | MemoryRequirement::DeviceAddress));
}
}
}
if(!m_bottomLevelInstances.empty())
- m_instanceBuffer = de::MovePtr<BufferWithMemory>(createInstanceBuffer(vk, device, allocator, m_bottomLevelInstances, m_instanceData));
+ m_instanceBuffer = de::MovePtr<BufferWithMemory>(createInstanceBuffer(vk, device, allocator, m_bottomLevelInstances, m_instanceData, m_tryCachedMemory));
}
void TopLevelAccelerationStructureKHR::updateInstanceMatrix (const DeviceInterface& vk, const VkDevice device, size_t instanceIndex, const VkTransformMatrixKHR& matrix)
uint32_t getMaxDescriptorSetAccelerationStructures (void) override { return m_accelerationStructureProperties.maxDescriptorSetAccelerationStructures; }
uint32_t getMaxRayDispatchInvocationCount (void) override { return m_rayTracingPipelineProperties.maxRayDispatchInvocationCount; }
uint32_t getMaxRayHitAttributeSize (void) override { return m_rayTracingPipelineProperties.maxRayHitAttributeSize; }
+ uint32_t getMaxMemoryAllocationCount (void) override { return m_maxMemoryAllocationCount; }
protected:
VkPhysicalDeviceAccelerationStructurePropertiesKHR m_accelerationStructureProperties;
VkPhysicalDeviceRayTracingPipelinePropertiesKHR m_rayTracingPipelineProperties;
+ deUint32 m_maxMemoryAllocationCount;
};
RayTracingPropertiesKHR::~RayTracingPropertiesKHR ()
{
m_accelerationStructureProperties = getPhysicalDeviceExtensionProperties(vki, physicalDevice);
m_rayTracingPipelineProperties = getPhysicalDeviceExtensionProperties(vki, physicalDevice);
+ m_maxMemoryAllocationCount = getPhysicalDeviceProperties(vki, physicalDevice).limits.maxMemoryAllocationCount;
}
de::MovePtr<RayTracingProperties> makeRayTracingProperties (const InstanceInterface& vki,
const VkGeometryFlagsKHR geometryFlags = 0u );
virtual void setBuildType (const VkAccelerationStructureBuildTypeKHR buildType) = DE_NULL;
+ virtual VkAccelerationStructureBuildTypeKHR getBuildType () const = 0;
virtual void setCreateFlags (const VkAccelerationStructureCreateFlagsKHR createFlags) = DE_NULL;
virtual void setCreateGeneric (bool createGeneric) = 0;
virtual void setBuildFlags (const VkBuildAccelerationStructureFlagsKHR buildFlags) = DE_NULL;
BottomLevelAccelerationStructurePool();
virtual ~BottomLevelAccelerationStructurePool();
- BlasPtr at (deUint32 index) const { return m_structs[index]; }
- BlasPtr operator[] (deUint32 index) const { return m_structs[index]; }
- auto structures () const -> const std::vector<BlasPtr>& { return m_structs; }
- size_t structCount () const { return m_structs.size(); }
+ BlasPtr at (deUint32 index) const { return m_structs[index]; }
+ BlasPtr operator[] (deUint32 index) const { return m_structs[index]; }
+ auto structures () const -> const std::vector<BlasPtr>& { return m_structs; }
+ deUint32 structCount () const { return static_cast<deUint32>(m_structs.size()); }
- size_t batchStructCount () const {return m_batchStructCount; }
- void batchStructCount (const size_t& value);
+ // defines how many structures will be packed into a single buffer
+ deUint32 batchStructCount () const {return m_batchStructCount; }
+ void batchStructCount (const deUint32& value);
- size_t batchGeomCount () const {return m_batchGeomCount; }
- void batchGeomCount (const size_t& value) { m_batchGeomCount = value; }
+ // defines how many geometries (vertices and/or indices) will be packed into a single buffer
+ deUint32 batchGeomCount () const {return m_batchGeomCount; }
+ void batchGeomCount (const deUint32& value) { m_batchGeomCount = value; }
- BlasPtr add (VkDeviceSize structureSize = 0,
- VkDeviceAddress deviceAddress = 0);
+ bool tryCachedMemory () const { return m_tryCachedMemory; }
+ void tryCachedMemory (const bool cachedMemory) { m_tryCachedMemory = cachedMemory; }
+
+ BlasPtr add (VkDeviceSize structureSize = 0,
+ VkDeviceAddress deviceAddress = 0);
/**
* @brief Creates all previously added bottom-level structures at once.
* @note All geometries must be known before calling this method.
*/
- void batchCreate (const DeviceInterface& vk,
- const VkDevice device,
- Allocator& allocator);
- void batchBuild (const DeviceInterface& vk,
- const VkDevice device,
- VkCommandBuffer cmdBuffer);
- size_t getAllocationCount () const;
-
+ void batchCreate (const DeviceInterface& vkd,
+ const VkDevice device,
+ Allocator& allocator);
+ void batchCreateAdjust (const DeviceInterface& vkd,
+ const VkDevice device,
+ Allocator& allocator,
+ const VkDeviceSize maxBufferSize);
+ void batchBuild (const DeviceInterface& vk,
+ const VkDevice device,
+ VkCommandBuffer cmdBuffer);
+ void batchBuild (const DeviceInterface& vk,
+ const VkDevice device,
+ VkCommandPool cmdPool,
+ VkQueue queue);
+ size_t getAllocationCount () const;
+ size_t getAllocationCount (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkDeviceSize maxBufferSize) const;
+ auto getAllocationSizes (const DeviceInterface& vk, // (strBuff, scratchBuff, vertBuff, indexBuff)
+ const VkDevice device) const -> tcu::Vector<VkDeviceSize, 4>;
protected:
- size_t m_batchStructCount; // default is 4
- size_t m_batchGeomCount; // default is 0, if zero then batchStructCount is used
+ deUint32 m_batchStructCount; // default is 4
+ deUint32 m_batchGeomCount; // default is 0, if zero then batchStructCount is used
std::vector<BlasInfo> m_infos;
std::vector<BlasPtr> m_structs;
bool m_createOnce;
+ bool m_tryCachedMemory;
+ VkDeviceSize m_structsBuffSize;
+ VkDeviceSize m_updatesScratchSize;
+ VkDeviceSize m_buildsScratchSize;
+ VkDeviceSize m_verticesSize;
+ VkDeviceSize m_indicesSize;
protected:
struct Impl;
class TopLevelAccelerationStructure
{
public:
+ struct CreationSizes
+ {
+ VkDeviceSize structure;
+ VkDeviceSize updateScratch;
+ VkDeviceSize buildScratch;
+ VkDeviceSize instancePointers;
+ VkDeviceSize instancesBuffer;
+ VkDeviceSize sum () const;
+ };
+
static deUint32 getRequiredAllocationCount (void);
TopLevelAccelerationStructure ();
const VkDeviceSize indirectBufferOffset,
const deUint32 indirectBufferStride) = DE_NULL;
virtual void setUsePPGeometries (const bool usePPGeometries) = 0;
+ virtual void setTryCachedMemory (const bool tryCachedMemory) = 0;
virtual VkBuildAccelerationStructureFlagsKHR getBuildFlags () const = DE_NULL;
VkDeviceSize getStructureSize () const;
// methods specific for each acceleration structure
+ virtual void getCreationSizes (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkDeviceSize structureSize,
+ CreationSizes& sizes) = 0;
virtual void create (const DeviceInterface& vk,
const VkDevice device,
Allocator& allocator,
virtual uint32_t getMaxDescriptorSetAccelerationStructures (void) = 0;
virtual uint32_t getMaxRayDispatchInvocationCount (void) = 0;
virtual uint32_t getMaxRayHitAttributeSize (void) = 0;
+ virtual uint32_t getMaxMemoryAllocationCount (void) = 0;
};
de::MovePtr<RayTracingProperties> makeRayTracingProperties (const InstanceInterface& vki,
#include "vkBarrierUtil.hpp"
#include "vkBufferWithMemory.hpp"
#include "vkImageWithMemory.hpp"
+#include "vkImageUtil.hpp"
#include "vkTypeUtil.hpp"
+#include "tcuTextureUtil.hpp"
+
#include "vkRayTracingUtil.hpp"
#include "deClock.h"
+#include <cmath>
#include <limits>
+#include <iostream>
namespace vkt
{
deUint32 instancesGroupCount;
bool deferredOperation;
deUint32 workerThreadsCount;
+ bool deviceBuild;
};
deUint32 getShaderGroupSize (const InstanceInterface& vki,
class RayTracingBuildTestInstance : public TestInstance
{
public:
- RayTracingBuildTestInstance (Context& context, const CaseDef& data);
- ~RayTracingBuildTestInstance (void);
- tcu::TestStatus iterate (void);
+ typedef de::SharedPtr<BottomLevelAccelerationStructure> BlasPtr;
+ typedef de::SharedPtr<TopLevelAccelerationStructure> TlasPtr;
+ typedef BottomLevelAccelerationStructurePool BlasPool;
+
+ RayTracingBuildTestInstance (Context& context, const CaseDef& data);
+ ~RayTracingBuildTestInstance (void);
+ tcu::TestStatus iterate (void);
protected:
- deUint32 iterateNoWorkers (void);
- deUint32 iterateWithWorkers (void);
- void checkSupportInInstance (void) const;
- deUint32 validateBuffer (de::MovePtr<BufferWithMemory> buffer);
- de::MovePtr<BufferWithMemory> runTest (bool useGpuBuild,
- deUint32 workerThreadsCount);
- de::MovePtr<TopLevelAccelerationStructure> initTopAccelerationStructure (VkCommandBuffer cmdBuffer,
- bool useGpuBuild,
- deUint32 workerThreadsCount,
- vector<de::SharedPtr<BottomLevelAccelerationStructure> >& bottomLevelAccelerationStructures);
- vector<de::SharedPtr<BottomLevelAccelerationStructure> > initBottomAccelerationStructures (VkCommandBuffer cmdBuffer,
- bool useGpuBuild,
- deUint32 workerThreadsCount);
- de::MovePtr<BottomLevelAccelerationStructure> initBottomAccelerationStructure (VkCommandBuffer cmdBuffer,
- bool useGpuBuild,
- deUint32 workerThreadsCount,
- tcu::UVec2& startPos,
- bool triangles);
+ bool verifyAllocationCount () const;
+ void checkSupportInInstance (void) const;
+ deUint32 validateBuffer (de::MovePtr<BufferWithMemory> buffer);
+ de::MovePtr<BufferWithMemory> runTest (bool useGpuBuild,
+ deUint32 workerThreadsCount);
+ TlasPtr initTopAccelerationStructure (bool useGpuBuild,
+ deUint32 workerThreadsCount,
+ const BlasPool& pool);
+ void createTopAccelerationStructure (VkCommandBuffer cmdBuffer,
+ TopLevelAccelerationStructure* tlas);
+ void initBottomAccelerationStructures (BlasPool& pool,
+ bool useGpuBuild,
+ deUint32 workerThreadsCount) const;
+ void initBottomAccelerationStructure (BlasPtr blas,
+ bool useGpuBuild,
+ deUint32 workerThreadsCount,
+ tcu::UVec2& startPos,
+ bool triangles) const;
private:
- CaseDef m_data;
+ CaseDef m_data;
+ const VkFormat m_format;
};
RayTracingBuildTestInstance::RayTracingBuildTestInstance (Context& context, const CaseDef& data)
: vkt::TestInstance (context)
, m_data (data)
+ , m_format (VK_FORMAT_R32_UINT)
{
}
{
}
-void RayTracingTestCase::checkSupport(Context& context) const
+void RayTracingTestCase::checkSupport (Context& context) const
{
context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");
if (accelerationStructureFeaturesKHR.accelerationStructure == DE_FALSE)
TCU_THROW(TestError, "VK_KHR_ray_tracing_pipeline requires VkPhysicalDeviceAccelerationStructureFeaturesKHR.accelerationStructure");
- if (accelerationStructureFeaturesKHR.accelerationStructureHostCommands == DE_FALSE)
- TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceAccelerationStructureFeaturesKHR.accelerationStructureHostCommands");
-
- if (m_data.deferredOperation)
+ if (!m_data.deviceBuild)
+ {
context.requireDeviceFunctionality("VK_KHR_deferred_host_operations");
+ if (accelerationStructureFeaturesKHR.accelerationStructureHostCommands == DE_FALSE)
+ TCU_THROW(NotSupportedError, "Requires VkPhysicalDeviceAccelerationStructureFeaturesKHR.accelerationStructureHostCommands");
+ }
}
void RayTracingTestCase::initPrograms (SourceCollections& programCollection) const
return new RayTracingBuildTestInstance(context, m_data);
}
-de::MovePtr<TopLevelAccelerationStructure> RayTracingBuildTestInstance::initTopAccelerationStructure (VkCommandBuffer cmdBuffer,
- bool useGpuBuild,
- deUint32 workerThreadsCount,
- vector<de::SharedPtr<BottomLevelAccelerationStructure> >& bottomLevelAccelerationStructures)
+auto RayTracingBuildTestInstance::initTopAccelerationStructure (bool useGpuBuild,
+ deUint32 workerThreadsCount,
+ const BlasPool& pool) -> TlasPtr
{
- const DeviceInterface& vkd = m_context.getDeviceInterface();
- const VkDevice device = m_context.getDevice();
- Allocator& allocator = m_context.getDefaultAllocator();
de::MovePtr<TopLevelAccelerationStructure> result = makeTopLevelAccelerationStructure();
+ const std::vector<BlasPtr>& blases = pool.structures();
- result->setInstanceCount(bottomLevelAccelerationStructures.size());
+ result->setInstanceCount(blases.size());
result->setBuildType(useGpuBuild ? VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR : VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR);
result->setDeferredOperation(m_data.deferredOperation, workerThreadsCount);
- for (size_t instanceNdx = 0; instanceNdx < bottomLevelAccelerationStructures.size(); ++instanceNdx)
+ for (size_t instanceNdx = 0; instanceNdx < blases.size(); ++instanceNdx)
{
const bool triangles = (m_data.testType == TEST_TYPE_TRIANGLES) || (m_data.testType == TEST_TYPE_MIXED && (instanceNdx & 1) == 0);
deUint32 instanceShaderBindingTableRecordOffset = triangles ? 0 : 1;
- result->addInstance(bottomLevelAccelerationStructures[instanceNdx], vk::identityMatrix3x4, 0, 0xFF, instanceShaderBindingTableRecordOffset);
+ result->addInstance(blases[instanceNdx], vk::identityMatrix3x4, 0, 0xFF, instanceShaderBindingTableRecordOffset);
}
- result->createAndBuild(vkd, device, cmdBuffer, allocator);
-
- return result;
+ return TlasPtr(result.release());
}
-de::MovePtr<BottomLevelAccelerationStructure> RayTracingBuildTestInstance::initBottomAccelerationStructure (VkCommandBuffer cmdBuffer,
- bool useGpuBuild,
- deUint32 workerThreadsCount,
- tcu::UVec2& startPos,
- bool triangles)
+void RayTracingBuildTestInstance::createTopAccelerationStructure (VkCommandBuffer cmdBuffer,
+ TopLevelAccelerationStructure* tlas)
{
- const DeviceInterface& vkd = m_context.getDeviceInterface();
- const VkDevice device = m_context.getDevice();
- Allocator& allocator = m_context.getDefaultAllocator();
- de::MovePtr<BottomLevelAccelerationStructure> result = makeBottomLevelAccelerationStructure();
+ const DeviceInterface& vkd = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ Allocator& allocator = m_context.getDefaultAllocator();
- result->setBuildType(useGpuBuild ? VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR : VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR);
- result->setDeferredOperation(m_data.deferredOperation, workerThreadsCount);
- result->setGeometryCount(m_data.geometriesGroupCount);
+ tlas->createAndBuild(vkd, device, cmdBuffer, allocator);
+}
+
+void RayTracingBuildTestInstance::initBottomAccelerationStructure (BlasPtr blas,
+ bool useGpuBuild,
+ deUint32 workerThreadsCount,
+ tcu::UVec2& startPos,
+ bool triangles) const
+{
+ blas->setBuildType(useGpuBuild ? VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR : VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR);
+ blas->setDeferredOperation(m_data.deferredOperation, workerThreadsCount);
+ blas->setGeometryCount(m_data.geometriesGroupCount);
for (size_t geometryNdx = 0; geometryNdx < m_data.geometriesGroupCount; ++geometryNdx)
{
const float x1 = float(startPos.x() + 1) / float(m_data.width);
const float y1 = float(startPos.y() + 1) / float(m_data.height);
const float z = (n % 7 == 0) ? +1.0f : -1.0f;
- const deUint32 m = (13 * (n + 1)) % (m_data.width * m_data.height);
+ const deUint32 m = (n + 13) % (m_data.width * m_data.height);
if (triangles)
{
const float ym = (y0 + y1) / 2.0f;
geometryData.push_back(tcu::Vec3(x0, y0, z));
- geometryData.push_back(tcu::Vec3(xm, y1, z));
geometryData.push_back(tcu::Vec3(x1, ym, z));
-
- if (m_data.squaresGroupCount == 1)
- {
- geometryData.push_back(tcu::Vec3(x0, y0, z));
- geometryData.push_back(tcu::Vec3(x1, ym, z));
- geometryData.push_back(tcu::Vec3(xm, y1, z));
- }
+ geometryData.push_back(tcu::Vec3(xm, y1, z));
}
else
{
startPos.x() = m % m_data.width;
}
- result->addGeometry(geometryData, triangles);
+ blas->addGeometry(geometryData, triangles);
}
+}
+
+void RayTracingBuildTestInstance::initBottomAccelerationStructures (BlasPool& pool,
+ bool useGpuBuild,
+ deUint32 workerThreadsCount) const
+{
+ tcu::UVec2 startPos {};
+ const DeviceInterface& vkd = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ Allocator& allocator = m_context.getDefaultAllocator();
+ const VkDeviceSize maxBuffSize = 3 * (VkDeviceSize(1) << 30); // 3GB
- result->createAndBuild(vkd, device, cmdBuffer, allocator);
+ for (size_t instanceNdx = 0; instanceNdx < m_data.instancesGroupCount; ++instanceNdx) pool.add();
+
+ const std::vector<BlasPtr>& blases = pool.structures();
+
+ for (size_t instanceNdx = 0; instanceNdx < m_data.instancesGroupCount; ++instanceNdx)
+ {
+ const bool triangles = (m_data.testType == TEST_TYPE_TRIANGLES) || (m_data.testType == TEST_TYPE_MIXED && (instanceNdx & 1) == 0);
+ initBottomAccelerationStructure(blases[instanceNdx], useGpuBuild, workerThreadsCount, startPos, triangles);
+ }
- return result;
+ pool.batchCreateAdjust(vkd, device, allocator, maxBuffSize);
}
-vector<de::SharedPtr<BottomLevelAccelerationStructure> > RayTracingBuildTestInstance::initBottomAccelerationStructures (VkCommandBuffer cmdBuffer,
- bool useGpuBuild,
- deUint32 workerThreadsCount)
+bool RayTracingBuildTestInstance::verifyAllocationCount () const
{
- tcu::UVec2 startPos;
- vector<de::SharedPtr<BottomLevelAccelerationStructure> > result;
+ BlasPool pool {};
+ tcu::UVec2 startPos {};
+ const DeviceInterface& vkd = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ auto& log = m_context.getTestContext().getLog();
+	const size_t					availableAllocCount		= m_context.getDeviceProperties().limits.maxMemoryAllocationCount;
+	const VkDeviceSize				maxBufferSize			= 3 * (VkDeviceSize(1) << 30);	// 3GB
+
+
+	for (size_t instanceNdx = 0; instanceNdx < m_data.instancesGroupCount; ++instanceNdx) pool.add();
+
+	const std::vector<BlasPtr>&		blases	= pool.structures();
 	for (size_t instanceNdx = 0; instanceNdx < m_data.instancesGroupCount; ++instanceNdx)
 	{
 		const bool	triangles	= (m_data.testType == TEST_TYPE_TRIANGLES) || (m_data.testType == TEST_TYPE_MIXED && (instanceNdx & 1) == 0);
-		de::MovePtr<BottomLevelAccelerationStructure>	bottomLevelAccelerationStructure	= initBottomAccelerationStructure(cmdBuffer, useGpuBuild, workerThreadsCount, startPos, triangles);
-
-		result.push_back(de::SharedPtr<BottomLevelAccelerationStructure>(bottomLevelAccelerationStructure.release()));
+		initBottomAccelerationStructure(blases[instanceNdx], true, 0, startPos, triangles);
 	}
-	return result;
+	const size_t	poolAllocationCount		= pool.getAllocationCount(vkd, device, maxBufferSize);
+	const size_t	requiredAllocationCount	= poolAllocationCount + 120;
+
+	log << tcu::TestLog::Message
+		<< "The test consumes " << poolAllocationCount
+		<< " allocations out of " << availableAllocCount << " available"
+		<< tcu::TestLog::EndMessage;
+
+	return (requiredAllocationCount < availableAllocCount);
}
de::MovePtr<BufferWithMemory> RayTracingBuildTestInstance::runTest (bool useGpuBuild, deUint32 workerThreadsCount)
{
- const InstanceInterface& vki = m_context.getInstanceInterface();
- const DeviceInterface& vkd = m_context.getDeviceInterface();
- const VkDevice device = m_context.getDevice();
- const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
- const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
- const VkQueue queue = m_context.getUniversalQueue();
- Allocator& allocator = m_context.getDefaultAllocator();
- const VkFormat format = VK_FORMAT_R32_UINT;
- const deUint32 pixelCount = m_data.width * m_data.height;
- const deUint32 shaderGroupHandleSize = getShaderGroupSize(vki, physicalDevice);
- const deUint32 shaderGroupBaseAlignment = getShaderGroupBaseAlignment(vki, physicalDevice);
-
- const Move<VkDescriptorSetLayout> descriptorSetLayout = DescriptorSetLayoutBuilder()
- .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, ALL_RAY_TRACING_STAGES)
- .addSingleBinding(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, ALL_RAY_TRACING_STAGES)
- .build(vkd, device);
- const Move<VkDescriptorPool> descriptorPool = DescriptorPoolBuilder()
- .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
- .addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR)
- .build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
- const Move<VkDescriptorSet> descriptorSet = makeDescriptorSet(vkd, device, *descriptorPool, *descriptorSetLayout);
- const Move<VkPipelineLayout> pipelineLayout = makePipelineLayout(vkd, device, descriptorSetLayout.get());
- const Move<VkCommandPool> cmdPool = createCommandPool(vkd, device, 0, queueFamilyIndex);
- const Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vkd, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
-
- de::MovePtr<RayTracingPipeline> rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
- Move<VkShaderModule> raygenShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("rgen"), 0);
- de::SharedPtr<Move<VkShaderModule>> hitShader = makeVkSharedPtr(createShaderModule(vkd, device, m_context.getBinaryCollection().get("ahit"), 0));
- Move<VkShaderModule> missShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("miss"), 0);
- Move<VkShaderModule> intersectionShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("sect"), 0);
- rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, raygenShader, 0u);
- rayTracingPipeline->addShader(VK_SHADER_STAGE_ANY_HIT_BIT_KHR, hitShader, 1u);
- rayTracingPipeline->addShader(VK_SHADER_STAGE_ANY_HIT_BIT_KHR, hitShader, 2u);
- rayTracingPipeline->addShader(VK_SHADER_STAGE_INTERSECTION_BIT_KHR, intersectionShader, 2u);
- rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR, missShader, 3u);
- Move<VkPipeline> pipeline = rayTracingPipeline->createPipeline(vkd, device, *pipelineLayout);
- const de::MovePtr<BufferWithMemory> raygenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0u, 1u);
- const de::MovePtr<BufferWithMemory> hitShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1u, 2u);
- const de::MovePtr<BufferWithMemory> missShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 3u, 1u);
- const VkStridedDeviceAddressRegionKHR raygenShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, raygenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
- const VkStridedDeviceAddressRegionKHR hitShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, hitShaderBindingTable->get(), 0), shaderGroupHandleSize, 2u * shaderGroupHandleSize);
- const VkStridedDeviceAddressRegionKHR missShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, missShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
- const VkStridedDeviceAddressRegionKHR callableShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
-
- const VkImageCreateInfo imageCreateInfo = makeImageCreateInfo(m_data.width, m_data.height, format);
- const VkImageSubresourceRange imageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0, 1u);
- const de::MovePtr<ImageWithMemory> image = de::MovePtr<ImageWithMemory>(new ImageWithMemory(vkd, device, allocator, imageCreateInfo, MemoryRequirement::Any));
- const Move<VkImageView> imageView = makeImageView(vkd, device, **image, VK_IMAGE_VIEW_TYPE_2D, format, imageSubresourceRange);
-
- const VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(pixelCount*sizeof(deUint32), VK_BUFFER_USAGE_TRANSFER_DST_BIT);
- const VkImageSubresourceLayers bufferImageSubresourceLayers = makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u);
- const VkBufferImageCopy bufferImageRegion = makeBufferImageCopy(makeExtent3D(m_data.width, m_data.height, 1u), bufferImageSubresourceLayers);
- de::MovePtr<BufferWithMemory> buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(vkd, device, allocator, bufferCreateInfo, MemoryRequirement::HostVisible));
-
- const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL);
-
- const VkImageMemoryBarrier preImageBarrier = makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT,
- VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- **image, imageSubresourceRange);
- const VkImageMemoryBarrier postImageBarrier = makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
- **image, imageSubresourceRange);
- const VkMemoryBarrier postTraceMemoryBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
- const VkMemoryBarrier postCopyMemoryBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
- const VkClearValue clearValue = makeClearValueColorU32(5u, 5u, 5u, 255u);
-
- vector<de::SharedPtr<BottomLevelAccelerationStructure> > bottomLevelAccelerationStructures;
- de::MovePtr<TopLevelAccelerationStructure> topLevelAccelerationStructure;
+ const InstanceInterface& vki = m_context.getInstanceInterface();
+ const DeviceInterface& vkd = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
+ const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
+ const VkQueue queue = m_context.getUniversalQueue();
+ Allocator& allocator = m_context.getDefaultAllocator();
+ const deUint32 pixelCount = m_data.width * m_data.height;
+ const deUint32 shaderGroupHandleSize = getShaderGroupSize(vki, physicalDevice);
+ const deUint32 shaderGroupBaseAlignment = getShaderGroupBaseAlignment(vki, physicalDevice);
+
+ const Move<VkDescriptorSetLayout> descriptorSetLayout = DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, ALL_RAY_TRACING_STAGES)
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, ALL_RAY_TRACING_STAGES)
+ .build(vkd, device);
+ const Move<VkDescriptorPool> descriptorPool = DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
+ .addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR)
+ .build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+ const Move<VkDescriptorSet> descriptorSet = makeDescriptorSet(vkd, device, *descriptorPool, *descriptorSetLayout);
+ const Move<VkPipelineLayout> pipelineLayout = makePipelineLayout(vkd, device, descriptorSetLayout.get());
+ const Move<VkCommandPool> cmdPool = createCommandPool(vkd, device, 0, queueFamilyIndex);
+ const Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vkd, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+
+ de::MovePtr<RayTracingPipeline> rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
+ Move<VkShaderModule> raygenShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("rgen"), 0);
+ Move<VkShaderModule> hitShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("ahit"), 0);
+ Move<VkShaderModule> missShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("miss"), 0);
+ Move<VkShaderModule> intersectionShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("sect"), 0);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, *raygenShader, 0u);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_ANY_HIT_BIT_KHR, *hitShader, 1u);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_ANY_HIT_BIT_KHR, *hitShader, 2u);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_INTERSECTION_BIT_KHR, *intersectionShader, 2u);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR, *missShader, 3u);
+ Move<VkPipeline> pipeline = rayTracingPipeline->createPipeline(vkd, device, *pipelineLayout);
+ const de::MovePtr<BufferWithMemory> raygenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 0u, 1u);
+ const de::MovePtr<BufferWithMemory> hitShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 1u, 2u);
+ const de::MovePtr<BufferWithMemory> missShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, 3u, 1u);
+ const VkStridedDeviceAddressRegionKHR raygenShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, raygenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
+ const VkStridedDeviceAddressRegionKHR hitShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, hitShaderBindingTable->get(), 0), shaderGroupHandleSize, 2u * shaderGroupHandleSize);
+ const VkStridedDeviceAddressRegionKHR missShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, missShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
+ const VkStridedDeviceAddressRegionKHR callableShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(DE_NULL, 0, 0);
+
+ const VkImageCreateInfo imageCreateInfo = makeImageCreateInfo(m_data.width, m_data.height, m_format);
+ const VkImageSubresourceRange imageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0, 1u);
+ const de::MovePtr<ImageWithMemory> image = de::MovePtr<ImageWithMemory>(new ImageWithMemory(vkd, device, allocator, imageCreateInfo, MemoryRequirement::Any));
+ const Move<VkImageView> imageView = makeImageView(vkd, device, **image, VK_IMAGE_VIEW_TYPE_2D, m_format, imageSubresourceRange);
+
+ const VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(pixelCount*sizeof(deUint32), VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+ const VkImageSubresourceLayers bufferImageSubresourceLayers = makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u);
+ const VkBufferImageCopy bufferImageRegion = makeBufferImageCopy(makeExtent3D(m_data.width, m_data.height, 1u), bufferImageSubresourceLayers);
+ de::MovePtr<BufferWithMemory> buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(vkd, device, allocator, bufferCreateInfo, MemoryRequirement::HostVisible));
+
+ const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL);
+
+ const VkImageMemoryBarrier preImageBarrier = makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ **image, imageSubresourceRange);
+ const VkImageMemoryBarrier postImageBarrier = makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR | VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
+ **image, imageSubresourceRange);
+ const VkMemoryBarrier postTraceMemoryBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
+ const VkMemoryBarrier postCopyMemoryBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
+ const VkClearValue clearValue = makeClearValueColorU32(5u, 5u, 5u, 255u);
+
+ TlasPtr topLevelAccelerationStructure;
+ BottomLevelAccelerationStructurePool blasPool;
+
+ initBottomAccelerationStructures(blasPool, useGpuBuild, workerThreadsCount);
+ blasPool.batchBuild(vkd, device, *cmdPool, queue);
beginCommandBuffer(vkd, *cmdBuffer, 0u);
{
vkd.cmdClearColorImage(*cmdBuffer, **image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue.color, 1, &imageSubresourceRange);
cmdPipelineImageMemoryBarrier(vkd, *cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, &postImageBarrier);
- bottomLevelAccelerationStructures = initBottomAccelerationStructures(*cmdBuffer, useGpuBuild, workerThreadsCount);
- topLevelAccelerationStructure = initTopAccelerationStructure(*cmdBuffer, useGpuBuild, workerThreadsCount, bottomLevelAccelerationStructures);
+ topLevelAccelerationStructure = initTopAccelerationStructure(useGpuBuild, workerThreadsCount, blasPool);
+ createTopAccelerationStructure(*cmdBuffer, topLevelAccelerationStructure.get());
- const TopLevelAccelerationStructure* topLevelAccelerationStructurePtr = topLevelAccelerationStructure.get();
VkWriteDescriptorSetAccelerationStructureKHR accelerationStructureWriteDescriptorSet =
{
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, // VkStructureType sType;
DE_NULL, // const void* pNext;
1u, // deUint32 accelerationStructureCount;
- topLevelAccelerationStructurePtr->getPtr(), // const VkAccelerationStructureKHR* pAccelerationStructures;
+ topLevelAccelerationStructure->getPtr(), // const VkAccelerationStructureKHR* pAccelerationStructures;
};
DescriptorSetUpdateBuilder()
{
const InstanceInterface& vki = m_context.getInstanceInterface();
const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
- const vk::VkPhysicalDeviceProperties& properties = m_context.getDeviceProperties();
- const deUint32 requiredAllocations = 8u
- + TopLevelAccelerationStructure::getRequiredAllocationCount()
- + m_data.instancesGroupCount * BottomLevelAccelerationStructure::getRequiredAllocationCount();
de::MovePtr<RayTracingProperties> rayTracingProperties = makeRayTracingProperties(vki, physicalDevice);
if (rayTracingProperties->getMaxPrimitiveCount() < m_data.squaresGroupCount)
if (rayTracingProperties->getMaxInstanceCount() < m_data.instancesGroupCount)
TCU_THROW(NotSupportedError, "Instances required more than supported");
- if (properties.limits.maxMemoryAllocationCount < requiredAllocations)
- TCU_THROW(NotSupportedError, "Test requires more allocations allowed");
+ if (!verifyAllocationCount())
+ TCU_THROW(NotSupportedError, "Memory allocations required more than supported");
}
deUint32 RayTracingBuildTestInstance::validateBuffer (de::MovePtr<BufferWithMemory> buffer)
return failures;
}
-deUint32 RayTracingBuildTestInstance::iterateWithWorkers (void)
-{
- de::MovePtr<BufferWithMemory> singleThreadBufferCPU = runTest(false, 0);
- const deUint32 singleThreadFailures = validateBuffer(singleThreadBufferCPU);
- de::MovePtr<BufferWithMemory> multiThreadBufferCPU = runTest(false, m_data.workerThreadsCount);
- const deUint32 multiThreadFailures = validateBuffer(multiThreadBufferCPU);
- const deUint32 failures = singleThreadFailures + multiThreadFailures;
-
- return failures;
-}
-
-deUint32 RayTracingBuildTestInstance::iterateNoWorkers (void)
-{
- de::MovePtr<BufferWithMemory> bufferGPU = runTest(true, 0);
- de::MovePtr<BufferWithMemory> bufferCPU = runTest(false, 0);
- const deUint32 failuresGPU = validateBuffer(bufferGPU);
- const deUint32 failuresCPU = validateBuffer(bufferCPU);
- const deUint32 failures = failuresGPU + failuresCPU;
-
- return failures;
-}
-
tcu::TestStatus RayTracingBuildTestInstance::iterate (void)
{
checkSupportInInstance();
- const deUint32 failures = m_data.workerThreadsCount == 0
- ? iterateNoWorkers()
- : iterateWithWorkers();
+ const deUint32 failures = validateBuffer(runTest(m_data.deviceBuild, m_data.workerThreadsCount));
- if (failures == 0)
- return tcu::TestStatus::pass("Pass");
- else
- return tcu::TestStatus::fail("failures=" + de::toString(failures));
+ return (failures == 0) ? tcu::TestStatus::pass("Pass") : tcu::TestStatus::fail("failures=" + de::toString(failures));
}
} // anonymous
-tcu::TestCaseGroup* createBuildTests (tcu::TestContext& testCtx)
+static void buildTest (tcu::TestCaseGroup* testParentGroup, deUint32 threadsCount, bool deviceBuild)
{
- de::MovePtr<tcu::TestCaseGroup> buildGroup(new tcu::TestCaseGroup(testCtx, "build", "Ray tracing build tests"));
-
const char* tests[] =
{
"level_primitives",
"level_geometries",
"level_instances"
};
- const deUint32 sizes[] = { 4, 16, 64, 256, 1024 };
- const deUint32 factors[] = { 1, 4 };
- const deUint32 threads[] = { 0, 1, 2, 3, 4, 8, std::numeric_limits<deUint32>::max() };
+ const deUint32 sizes[] = { 4, 16, 64, 256, 1024 };
+ const deUint32 factors[] = { 1, 4 };
+ const bool deferredOperation = threadsCount != 0;
+ tcu::TestContext& testCtx = testParentGroup->getTestContext();
- for (size_t threadNdx = 0; threadNdx <= DE_LENGTH_OF_ARRAY(threads); ++threadNdx)
+ for (size_t testsNdx = 0; testsNdx < DE_LENGTH_OF_ARRAY(tests); ++testsNdx)
{
- const bool defferedOperation = threadNdx != DE_LENGTH_OF_ARRAY(threads);
- const deUint32 threadsCount = threadNdx < DE_LENGTH_OF_ARRAY(threads) ? threads[threadNdx] : 0;
- const string groupName = !defferedOperation ? "gpu_cpu"
- : threadsCount == 0 ? "gpu_cpuht"
- : threadsCount == std::numeric_limits<deUint32>::max() ? "cpuht_max"
- : "cpuht_" + de::toString(threadsCount);
- const string groupDesc = !defferedOperation ? "Compare results of run with acceleration structures build on GPU and CPU"
- : threadsCount > 0 ? "Compare results of run with acceleration structures build on GPU and using host threading"
- : "Run acceleration structures build using host threading";
+ de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, tests[testsNdx], ""));
+
+ for (size_t factorNdx = 0; factorNdx < DE_LENGTH_OF_ARRAY(factors); ++factorNdx)
+ for (size_t sizesNdx = 0; sizesNdx < DE_LENGTH_OF_ARRAY(sizes); ++sizesNdx)
+ {
+ const deUint32 factor = factors[factorNdx];
+ const deUint32 largestGroup = sizes[sizesNdx] * sizes[sizesNdx] / factor / factor;
+ const deUint32 squaresGroupCount = testsNdx == 0 ? largestGroup : factor;
+ const deUint32 geometriesGroupCount = testsNdx == 1 ? largestGroup : factor;
+ const deUint32 instancesGroupCount = testsNdx == 2 ? largestGroup : factor;
+ const CaseDef caseDef =
+ {
+ TEST_TYPE_TRIANGLES, // TestType testType;
+ sizes[sizesNdx], // deUint32 width;
+ sizes[sizesNdx], // deUint32 height;
+ squaresGroupCount, // deUint32 squaresGroupCount;
+ geometriesGroupCount, // deUint32 geometriesGroupCount;
+ instancesGroupCount, // deUint32 instancesGroupCount;
+ deferredOperation, // bool deferredOperation;
+ threadsCount, // deUint32 workerThreadsCount;
+ deviceBuild // bool deviceBuild;
+ };
+ const std::string suffix = de::toString(caseDef.instancesGroupCount) + '_' + de::toString(caseDef.geometriesGroupCount) + '_' + de::toString(caseDef.squaresGroupCount);
+ const std::string testName = "triangles_" + suffix;
+
+ if (squaresGroupCount == 0 || geometriesGroupCount == 0 || instancesGroupCount == 0)
+ continue;
+
+ group->addChild(new RayTracingTestCase(testCtx, testName.c_str(), "", caseDef));
+ }
+
+ for (size_t factorNdx = 0; factorNdx < DE_LENGTH_OF_ARRAY(factors); ++factorNdx)
+ for (size_t sizesNdx = 0; sizesNdx < DE_LENGTH_OF_ARRAY(sizes); ++sizesNdx)
+ {
+ const deUint32 factor = factors[factorNdx];
+ const deUint32 largestGroup = sizes[sizesNdx] * sizes[sizesNdx] / factor / factor;
+ const deUint32 squaresGroupCount = testsNdx == 0 ? largestGroup : factor;
+ const deUint32 geometriesGroupCount = testsNdx == 1 ? largestGroup : factor;
+ const deUint32 instancesGroupCount = testsNdx == 2 ? largestGroup : factor;
+ const CaseDef caseDef =
+ {
+ TEST_TYPE_AABBS, // TestType testType;
+ sizes[sizesNdx], // deUint32 width;
+ sizes[sizesNdx], // deUint32 height;
+ squaresGroupCount, // deUint32 squaresGroupCount;
+ geometriesGroupCount, // deUint32 geometriesGroupCount;
+ instancesGroupCount, // deUint32 instancesGroupCount;
+ deferredOperation, // bool deferredOperation;
+ threadsCount, // deUint32 workerThreadsCount;
+ deviceBuild // bool deviceBuild;
+ };
+ const std::string suffix = de::toString(caseDef.instancesGroupCount) + '_' + de::toString(caseDef.geometriesGroupCount) + '_' + de::toString(caseDef.squaresGroupCount);
+ const std::string testName = "aabbs_" + suffix;
+
+ if (squaresGroupCount == 0 || geometriesGroupCount == 0 || instancesGroupCount == 0)
+ continue;
+
+ group->addChild(new RayTracingTestCase(testCtx, testName.c_str(), "", caseDef));
+ }
+
+ for (size_t factorNdx = 0; factorNdx < DE_LENGTH_OF_ARRAY(factors); ++factorNdx)
+ for (size_t sizesNdx = 0; sizesNdx < DE_LENGTH_OF_ARRAY(sizes); ++sizesNdx)
+ {
+ const deUint32 factor = factors[factorNdx];
+ const deUint32 largestGroup = sizes[sizesNdx] * sizes[sizesNdx] / factor / factor;
+ const deUint32 squaresGroupCount = testsNdx == 0 ? largestGroup : factor;
+ const deUint32 geometriesGroupCount = testsNdx == 1 ? largestGroup : factor;
+ const deUint32 instancesGroupCount = testsNdx == 2 ? largestGroup : factor;
+ const CaseDef caseDef =
+ {
+ TEST_TYPE_MIXED, // TestType testType;
+ sizes[sizesNdx], // deUint32 width;
+ sizes[sizesNdx], // deUint32 height;
+ squaresGroupCount, // deUint32 squaresGroupCount;
+ geometriesGroupCount, // deUint32 geometriesGroupCount;
+ instancesGroupCount, // deUint32 instancesGroupCount;
+ deferredOperation, // bool deferredOperation;
+ threadsCount, // deUint32 workerThreadsCount;
+ deviceBuild // bool deviceBuild;
+ };
+ const std::string suffix = de::toString(caseDef.instancesGroupCount) + '_' + de::toString(caseDef.geometriesGroupCount) + '_' + de::toString(caseDef.squaresGroupCount);
+ const std::string testName = "mixed_" + suffix;
+
+ if (squaresGroupCount < 2 || geometriesGroupCount < 2 || instancesGroupCount < 2)
+ continue;
+
+ group->addChild(new RayTracingTestCase(testCtx, testName.c_str(), "", caseDef));
+ }
+
+ testParentGroup->addChild(group.release());
+ }
+}
- const bool deviceBuild = !defferedOperation || threadsCount == 0;
+tcu::TestCaseGroup* createBuildTests (tcu::TestContext& testCtx)
+{
+ de::MovePtr<tcu::TestCaseGroup> buildGroup(new tcu::TestCaseGroup(testCtx, "build", "Ray tracing build tests"));
- de::MovePtr<tcu::TestCaseGroup> groupGpuCpuHt (new tcu::TestCaseGroup(testCtx, groupName.c_str(), groupDesc.c_str()));
+ const deUint32 threads[] = { 0, 1, 2, 3, 4, 8, std::numeric_limits<deUint32>::max() };
- for (size_t testsNdx = 0; testsNdx < DE_LENGTH_OF_ARRAY(tests); ++testsNdx)
+ for (const auto threadCount : threads)
+ {
+ auto buildTargeGroup = [&](bool deviceBuild) -> void
{
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, tests[testsNdx], ""));
+ DE_ASSERT(!(threadCount != 0 && deviceBuild));
- for (size_t factorNdx = 0; factorNdx < DE_LENGTH_OF_ARRAY(factors); ++factorNdx)
- for (size_t sizesNdx = 0; sizesNdx < DE_LENGTH_OF_ARRAY(sizes); ++sizesNdx)
+ string groupName, groupDesc;
+ if (deviceBuild)
{
- if (deviceBuild && sizes[sizesNdx] > 256)
- continue;
-
- const deUint32 factor = factors[factorNdx];
- const deUint32 largestGroup = sizes[sizesNdx] * sizes[sizesNdx] / factor / factor;
- const deUint32 squaresGroupCount = testsNdx == 0 ? largestGroup : factor;
- const deUint32 geometriesGroupCount = testsNdx == 1 ? largestGroup : factor;
- const deUint32 instancesGroupCount = testsNdx == 2 ? largestGroup : factor;
- const CaseDef caseDef =
- {
- TEST_TYPE_TRIANGLES, // TestType testType;
- sizes[sizesNdx], // deUint32 width;
- sizes[sizesNdx], // deUint32 height;
- squaresGroupCount, // deUint32 squaresGroupCount;
- geometriesGroupCount, // deUint32 geometriesGroupCount;
- instancesGroupCount, // deUint32 instancesGroupCount;
- defferedOperation, // bool deferredOperation;
- threadsCount // deUint32 workerThreadsCount;
- };
- const std::string suffix = de::toString(caseDef.instancesGroupCount) + '_' + de::toString(caseDef.geometriesGroupCount) + '_' + de::toString(caseDef.squaresGroupCount);
- const std::string testName = "triangles_" + suffix;
-
- if (squaresGroupCount == 0 || geometriesGroupCount == 0 || instancesGroupCount == 0)
- continue;
-
- group->addChild(new RayTracingTestCase(testCtx, testName.c_str(), "", caseDef));
+ groupName = "gpu";
+ groupDesc = "Compare results of run with acceleration structures build on GPU";
}
-
- for (size_t factorNdx = 0; factorNdx < DE_LENGTH_OF_ARRAY(factors); ++factorNdx)
- for (size_t sizesNdx = 0; sizesNdx < DE_LENGTH_OF_ARRAY(sizes); ++sizesNdx)
+ else
{
- if (deviceBuild && sizes[sizesNdx] > 256)
- continue;
-
- const deUint32 factor = factors[factorNdx];
- const deUint32 largestGroup = sizes[sizesNdx] * sizes[sizesNdx] / factor / factor;
- const deUint32 squaresGroupCount = testsNdx == 0 ? largestGroup : factor;
- const deUint32 geometriesGroupCount = testsNdx == 1 ? largestGroup : factor;
- const deUint32 instancesGroupCount = testsNdx == 2 ? largestGroup : factor;
- const CaseDef caseDef =
- {
- TEST_TYPE_AABBS, // TestType testType;
- sizes[sizesNdx], // deUint32 width;
- sizes[sizesNdx], // deUint32 height;
- squaresGroupCount, // deUint32 squaresGroupCount;
- geometriesGroupCount, // deUint32 geometriesGroupCount;
- instancesGroupCount, // deUint32 instancesGroupCount;
- defferedOperation, // bool deferredOperation;
- threadsCount // deUint32 workerThreadsCount;
- };
- const std::string suffix = de::toString(caseDef.instancesGroupCount) + '_' + de::toString(caseDef.geometriesGroupCount) + '_' + de::toString(caseDef.squaresGroupCount);
- const std::string testName = "aabbs_" + suffix;
-
- if (squaresGroupCount == 0 || geometriesGroupCount == 0 || instancesGroupCount == 0)
- continue;
-
- group->addChild(new RayTracingTestCase(testCtx, testName.c_str(), "", caseDef));
+ groupName = "cpu";
+ groupDesc = "Compare results of run with acceleration structures build on CPU";
}
- for (size_t factorNdx = 0; factorNdx < DE_LENGTH_OF_ARRAY(factors); ++factorNdx)
- for (size_t sizesNdx = 0; sizesNdx < DE_LENGTH_OF_ARRAY(sizes); ++sizesNdx)
+ if (threadCount != 0)
{
- if (deviceBuild && sizes[sizesNdx] > 256)
- continue;
-
- const deUint32 factor = factors[factorNdx];
- const deUint32 largestGroup = sizes[sizesNdx] * sizes[sizesNdx] / factor / factor;
- const deUint32 squaresGroupCount = testsNdx == 0 ? largestGroup : factor;
- const deUint32 geometriesGroupCount = testsNdx == 1 ? largestGroup : factor;
- const deUint32 instancesGroupCount = testsNdx == 2 ? largestGroup : factor;
- const CaseDef caseDef =
- {
- TEST_TYPE_MIXED, // TestType testType;
- sizes[sizesNdx], // deUint32 width;
- sizes[sizesNdx], // deUint32 height;
- squaresGroupCount, // deUint32 squaresGroupCount;
- geometriesGroupCount, // deUint32 geometriesGroupCount;
- instancesGroupCount, // deUint32 instancesGroupCount;
- defferedOperation, // bool deferredOperation;
- threadsCount // deUint32 workerThreadsCount;
- };
- const std::string suffix = de::toString(caseDef.instancesGroupCount) + '_' + de::toString(caseDef.geometriesGroupCount) + '_' + de::toString(caseDef.squaresGroupCount);
- const std::string testName = "mixed_" + suffix;
-
- if (squaresGroupCount < 2 || geometriesGroupCount < 2 || instancesGroupCount < 2)
- continue;
-
- group->addChild(new RayTracingTestCase(testCtx, testName.c_str(), "", caseDef));
+ groupName += threadCount == std::numeric_limits<deUint32>::max()
+ ? "ht_max" : "ht_" + de::toString(threadCount);
+ groupDesc = "Compare results of run with acceleration structures build on CPU and using host threading";
}
- groupGpuCpuHt->addChild(group.release());
- }
+ de::MovePtr<tcu::TestCaseGroup> groupGpuCpuHt(new tcu::TestCaseGroup(testCtx, groupName.c_str(), groupDesc.c_str()));
+ buildTest(groupGpuCpuHt.get(), threadCount, deviceBuild);
+ buildGroup->addChild(groupGpuCpuHt.release());
+ };
- buildGroup->addChild(groupGpuCpuHt.release());
+ if (threadCount == 0)
+ {
+ buildTargeGroup(true);
+ }
+ buildTargeGroup(false);
}
return buildGroup.release();
const BufferWithMemory& callSbt) const;
void initBottomAccellStructures (VkCommandBuffer cmdBuffer,
BottomLevelAccelerationStructurePool& pool,
- const size_t& batchStructCount) const;
+ const deUint32& batchStructCount) const;
private:
TestParams2 m_params;
const VkExtent3D m_imageExtent;
void TraceRaysIndirect2Instance::initBottomAccellStructures (VkCommandBuffer cmdBuffer,
BottomLevelAccelerationStructurePool& pool,
- const size_t& batchStructCount) const
+ const deUint32& batchStructCount) const
{
const DeviceInterface& vkd = m_context.getDeviceInterface();
const VkDevice device = m_context.getDevice();
dEQP-VK.ray_tracing_pipeline.builtin.worldtoobject3x4ext.ahit_aabs
dEQP-VK.ray_tracing_pipeline.builtin.worldtoobject3x4ext.chit_aabs
dEQP-VK.ray_tracing_pipeline.builtin.worldtoobject3x4ext.sect_aabs
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.triangles_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.aabbs_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_primitives.mixed_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.triangles_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.aabbs_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_geometries.mixed_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.triangles_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.aabbs_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpuht.level_instances.mixed_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.triangles_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.aabbs_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_primitives.mixed_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.triangles_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.aabbs_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_geometries.mixed_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.triangles_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.aabbs_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.gpu.level_instances.mixed_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.triangles_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_1_1_1048576
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.aabbs_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_16
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_256
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_4096
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_primitives.mixed_4_4_65536
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.triangles_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_16_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_256_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_4096_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_65536_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_1_1048576_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_1_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.aabbs_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_16_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_256_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_4096_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_geometries.mixed_4_65536_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.triangles_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_16_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_256_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_4096_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_65536_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_1048576_1_1
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_1_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.aabbs_65536_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_16_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_256_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_4096_4_4
+dEQP-VK.ray_tracing_pipeline.build.cpu.level_instances.mixed_65536_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_16
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_256
dEQP-VK.ray_tracing_pipeline.build.cpuht_1.level_primitives.triangles_1_1_4096
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_16_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_256_4_4
dEQP-VK.ray_tracing_pipeline.build.cpuht_max.level_instances.mixed_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.triangles_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_1_1_65536
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.aabbs_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_16
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_256
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_primitives.mixed_4_4_4096
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.triangles_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_16_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_256_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_4096_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_1_65536_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_1_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.aabbs_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_16_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_256_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_geometries.mixed_4_4096_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.triangles_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_16_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_256_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_4096_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_65536_1_1
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_1_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.aabbs_4096_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_16_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_256_4_4
-dEQP-VK.ray_tracing_pipeline.build.gpu_cpu.level_instances.mixed_4096_4_4
dEQP-VK.ray_tracing_pipeline.barycentric_coordinates.ahit
dEQP-VK.ray_tracing_pipeline.barycentric_coordinates.chit
dEQP-VK.ray_tracing_pipeline.barrier.ubo.memory_barrier.from_host_to_rgen