/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Indirect Compute Dispatch tests
 *//*--------------------------------------------------------------------*/
#include "vktComputeIndirectComputeDispatchTests.hpp"
#include "vktComputeTestsUtil.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"

#include "vkBuilderUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkQueryUtil.hpp"
#include "vkRefUtil.hpp"

#include "tcuRGBA.hpp"
#include "tcuStringTemplate.hpp"
#include "tcuTestLog.hpp"
#include "tcuVector.hpp"
#include "tcuVectorUtil.hpp"

#include "deArrayUtil.hpp"
#include "deSharedPtr.hpp"
#include "deStringUtil.hpp"
#include "deUniquePtr.hpp"

#include "gluShaderUtil.hpp"

#include <map>
#include <sstream>
#include <string>
#include <vector>
65 RESULT_BLOCK_BASE_SIZE = 4 * (int)sizeof(deUint32), // uvec3 + uint
66 RESULT_BLOCK_NUM_PASSED_OFFSET = 3 * (int)sizeof(deUint32),
67 INDIRECT_COMMAND_OFFSET = 3 * (int)sizeof(deUint32),
70 vk::VkDeviceSize getResultBlockAlignedSize (const vk::InstanceInterface& instance_interface,
71 const vk::VkPhysicalDevice physicalDevice,
72 const vk::VkDeviceSize baseSize)
74 // TODO getPhysicalDeviceProperties() was added to vkQueryUtil in 41-image-load-store-tests. Use it once it's merged.
75 vk::VkPhysicalDeviceProperties deviceProperties;
76 instance_interface.getPhysicalDeviceProperties(physicalDevice, &deviceProperties);
77 vk::VkDeviceSize alignment = deviceProperties.limits.minStorageBufferOffsetAlignment;
79 if (alignment == 0 || (baseSize % alignment == 0))
82 return (baseSize / alignment + 1)*alignment;
85 struct DispatchCommand
87 DispatchCommand (const deIntptr offset,
88 const tcu::UVec3& numWorkGroups)
90 , m_numWorkGroups (numWorkGroups) {}
93 tcu::UVec3 m_numWorkGroups;
96 typedef std::vector<DispatchCommand> DispatchCommandsVec;
98 struct DispatchCaseDesc
100 DispatchCaseDesc (const char* name,
101 const char* description,
102 const deUintptr bufferSize,
103 const tcu::UVec3 workGroupSize,
104 const DispatchCommandsVec& dispatchCommands)
106 , m_description (description)
107 , m_bufferSize (bufferSize)
108 , m_workGroupSize (workGroupSize)
109 , m_dispatchCommands (dispatchCommands) {}
112 const char* m_description;
113 const deUintptr m_bufferSize;
114 const tcu::UVec3 m_workGroupSize;
115 const DispatchCommandsVec m_dispatchCommands;
118 class IndirectDispatchInstanceBufferUpload : public vkt::TestInstance
121 IndirectDispatchInstanceBufferUpload (Context& context,
122 const std::string& name,
123 const deUintptr bufferSize,
124 const tcu::UVec3& workGroupSize,
125 const DispatchCommandsVec& dispatchCommands);
127 virtual ~IndirectDispatchInstanceBufferUpload (void) {}
129 virtual tcu::TestStatus iterate (void);
132 virtual void fillIndirectBufferData (const vk::VkCommandBuffer commandBuffer,
133 const Buffer& indirectBuffer);
135 deBool verifyResultBuffer (const Buffer& resultBuffer,
136 const vk::VkDeviceSize resultBlockSize,
137 const vk::VkDeviceSize resultBufferSize) const;
140 const std::string m_name;
142 const vk::DeviceInterface& m_device_interface;
143 const vk::VkDevice m_device;
145 const vk::VkQueue m_queue;
146 const deUint32 m_queueFamilyIndex;
148 const deUintptr m_bufferSize;
149 const tcu::UVec3 m_workGroupSize;
150 const DispatchCommandsVec m_dispatchCommands;
152 vk::Allocator& m_allocator;
155 IndirectDispatchInstanceBufferUpload (const vkt::TestInstance&);
156 IndirectDispatchInstanceBufferUpload& operator= (const vkt::TestInstance&);
159 IndirectDispatchInstanceBufferUpload::IndirectDispatchInstanceBufferUpload (Context& context,
160 const std::string& name,
161 const deUintptr bufferSize,
162 const tcu::UVec3& workGroupSize,
163 const DispatchCommandsVec& dispatchCommands)
164 : vkt::TestInstance (context)
165 , m_context (context)
167 , m_device_interface (context.getDeviceInterface())
168 , m_device (context.getDevice())
169 , m_queue (context.getUniversalQueue())
170 , m_queueFamilyIndex (context.getUniversalQueueFamilyIndex())
171 , m_bufferSize (bufferSize)
172 , m_workGroupSize (workGroupSize)
173 , m_dispatchCommands (dispatchCommands)
174 , m_allocator (context.getDefaultAllocator())
178 void IndirectDispatchInstanceBufferUpload::fillIndirectBufferData (const vk::VkCommandBuffer commandBuffer, const Buffer& indirectBuffer)
180 DE_UNREF(commandBuffer);
182 const vk::Allocation& alloc = indirectBuffer.getAllocation();
183 deUint8* indirectDataPtr = reinterpret_cast<deUint8*>(alloc.getHostPtr());
185 for (DispatchCommandsVec::const_iterator cmdIter = m_dispatchCommands.begin(); cmdIter != m_dispatchCommands.end(); ++cmdIter)
187 DE_ASSERT(cmdIter->m_offset >= 0);
188 DE_ASSERT(cmdIter->m_offset % sizeof(deUint32) == 0);
189 DE_ASSERT(cmdIter->m_offset + INDIRECT_COMMAND_OFFSET <= (deIntptr)m_bufferSize);
191 deUint32* const dstPtr = (deUint32*)&indirectDataPtr[cmdIter->m_offset];
193 dstPtr[0] = cmdIter->m_numWorkGroups[0];
194 dstPtr[1] = cmdIter->m_numWorkGroups[1];
195 dstPtr[2] = cmdIter->m_numWorkGroups[2];
198 vk::flushMappedMemoryRange(m_device_interface, m_device, alloc.getMemory(), alloc.getOffset(), m_bufferSize);
201 tcu::TestStatus IndirectDispatchInstanceBufferUpload::iterate (void)
203 tcu::TestContext& testCtx = m_context.getTestContext();
205 testCtx.getLog() << tcu::TestLog::Message << "GL_DISPATCH_INDIRECT_BUFFER size = " << m_bufferSize << tcu::TestLog::EndMessage;
207 tcu::ScopedLogSection section(testCtx.getLog(), "Commands", "Indirect Dispatch Commands (" + de::toString(m_dispatchCommands.size()) + " in total)");
209 for (deUint32 cmdNdx = 0; cmdNdx < m_dispatchCommands.size(); ++cmdNdx)
212 << tcu::TestLog::Message
213 << cmdNdx << ": " << "offset = " << m_dispatchCommands[cmdNdx].m_offset << ", numWorkGroups = " << m_dispatchCommands[cmdNdx].m_numWorkGroups
214 << tcu::TestLog::EndMessage;
218 // Create result buffer
219 const vk::VkDeviceSize resultBlockSize = getResultBlockAlignedSize(m_context.getInstanceInterface(), m_context.getPhysicalDevice(), RESULT_BLOCK_BASE_SIZE);
220 const vk::VkDeviceSize resultBufferSize = resultBlockSize * (deUint32)m_dispatchCommands.size();
223 m_device_interface, m_device, m_allocator,
224 makeBufferCreateInfo(resultBufferSize, vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
225 vk::MemoryRequirement::HostVisible);
228 const vk::Allocation& alloc = resultBuffer.getAllocation();
229 deUint8* resultDataPtr = reinterpret_cast<deUint8*>(alloc.getHostPtr());
231 for (deUint32 cmdNdx = 0; cmdNdx < m_dispatchCommands.size(); ++cmdNdx)
233 deUint8* const dstPtr = &resultDataPtr[resultBlockSize*cmdNdx];
235 *(deUint32*)(dstPtr + 0 * sizeof(deUint32)) = m_dispatchCommands[cmdNdx].m_numWorkGroups[0];
236 *(deUint32*)(dstPtr + 1 * sizeof(deUint32)) = m_dispatchCommands[cmdNdx].m_numWorkGroups[1];
237 *(deUint32*)(dstPtr + 2 * sizeof(deUint32)) = m_dispatchCommands[cmdNdx].m_numWorkGroups[2];
238 *(deUint32*)(dstPtr + RESULT_BLOCK_NUM_PASSED_OFFSET) = 0;
241 vk::flushMappedMemoryRange(m_device_interface, m_device, alloc.getMemory(), alloc.getOffset(), resultBufferSize);
244 // Create verify compute shader
245 const vk::Unique<vk::VkShaderModule> verifyShader(createShaderModule(
246 m_device_interface, m_device, m_context.getBinaryCollection().get("indirect_dispatch_" + m_name + "_verify"), 0u));
248 // Create descriptorSetLayout
249 vk::DescriptorSetLayoutBuilder layoutBuilder;
250 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
251 vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(layoutBuilder.build(m_device_interface, m_device));
253 // Create compute pipeline
254 const vk::Unique<vk::VkPipelineLayout> pipelineLayout(makePipelineLayout(m_device_interface, m_device, *descriptorSetLayout));
255 const vk::Unique<vk::VkPipeline> computePipeline(makeComputePipeline(m_device_interface, m_device, *pipelineLayout, *verifyShader));
257 // Create descriptor pool
258 const vk::Unique<vk::VkDescriptorPool> descriptorPool(
259 vk::DescriptorPoolBuilder()
260 .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, (deUint32)m_dispatchCommands.size())
261 .build(m_device_interface, m_device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, static_cast<deUint32>(m_dispatchCommands.size())));
263 const vk::VkBufferMemoryBarrier ssboPostBarrier = makeBufferMemoryBarrier(
264 vk::VK_ACCESS_SHADER_WRITE_BIT, vk::VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, resultBufferSize);
266 // Create command buffer
267 const vk::Unique<vk::VkCommandPool> cmdPool(makeCommandPool(m_device_interface, m_device, m_queueFamilyIndex));
268 const vk::Unique<vk::VkCommandBuffer> cmdBuffer(allocateCommandBuffer(m_device_interface, m_device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
270 // Begin recording commands
271 beginCommandBuffer(m_device_interface, *cmdBuffer);
273 // Create indirect buffer
274 Buffer indirectBuffer(
275 m_device_interface, m_device, m_allocator,
276 makeBufferCreateInfo(m_bufferSize, vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
277 vk::MemoryRequirement::HostVisible);
278 fillIndirectBufferData(*cmdBuffer, indirectBuffer);
280 // Bind compute pipeline
281 m_device_interface.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
283 // Allocate descriptor sets
284 typedef de::SharedPtr<vk::Unique<vk::VkDescriptorSet> > SharedVkDescriptorSet;
285 std::vector<SharedVkDescriptorSet> descriptorSets(m_dispatchCommands.size());
287 vk::VkDeviceSize curOffset = 0;
289 // Create descriptor sets
290 for (deUint32 cmdNdx = 0; cmdNdx < m_dispatchCommands.size(); ++cmdNdx)
292 descriptorSets[cmdNdx] = SharedVkDescriptorSet(new vk::Unique<vk::VkDescriptorSet>(
293 makeDescriptorSet(m_device_interface, m_device, *descriptorPool, *descriptorSetLayout)));
295 const vk::VkDescriptorBufferInfo resultDescriptorInfo = makeDescriptorBufferInfo(*resultBuffer, curOffset, resultBlockSize);
297 vk::DescriptorSetUpdateBuilder descriptorSetBuilder;
298 descriptorSetBuilder.writeSingle(**descriptorSets[cmdNdx], vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultDescriptorInfo);
299 descriptorSetBuilder.update(m_device_interface, m_device);
301 // Bind descriptor set
302 m_device_interface.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &(**descriptorSets[cmdNdx]), 0u, DE_NULL);
304 // Dispatch indirect compute command
305 m_device_interface.cmdDispatchIndirect(*cmdBuffer, *indirectBuffer, m_dispatchCommands[cmdNdx].m_offset);
307 curOffset += resultBlockSize;
310 // Insert memory barrier
311 m_device_interface.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0,
312 0, (const vk::VkMemoryBarrier*)DE_NULL,
314 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
316 // End recording commands
317 endCommandBuffer(m_device_interface, *cmdBuffer);
319 // Wait for command buffer execution finish
320 submitCommandsAndWait(m_device_interface, m_device, m_queue, *cmdBuffer);
322 // Check if result buffer contains valid values
323 if (verifyResultBuffer(resultBuffer, resultBlockSize, resultBufferSize))
324 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Pass");
326 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Invalid values in result buffer");
329 deBool IndirectDispatchInstanceBufferUpload::verifyResultBuffer (const Buffer& resultBuffer,
330 const vk::VkDeviceSize resultBlockSize,
331 const vk::VkDeviceSize resultBufferSize) const
334 const vk::Allocation& alloc = resultBuffer.getAllocation();
335 vk::invalidateMappedMemoryRange(m_device_interface, m_device, alloc.getMemory(), alloc.getOffset(), resultBufferSize);
337 const deUint8* const resultDataPtr = reinterpret_cast<deUint8*>(alloc.getHostPtr());
339 for (deUint32 cmdNdx = 0; cmdNdx < m_dispatchCommands.size(); cmdNdx++)
341 const DispatchCommand& cmd = m_dispatchCommands[cmdNdx];
342 const deUint8* const srcPtr = (const deUint8*)resultDataPtr + cmdNdx*resultBlockSize;
343 const deUint32 numPassed = *(const deUint32*)(srcPtr + RESULT_BLOCK_NUM_PASSED_OFFSET);
344 const deUint32 numInvocationsPerGroup = m_workGroupSize[0] * m_workGroupSize[1] * m_workGroupSize[2];
345 const deUint32 numGroups = cmd.m_numWorkGroups[0] * cmd.m_numWorkGroups[1] * cmd.m_numWorkGroups[2];
346 const deUint32 expectedCount = numInvocationsPerGroup * numGroups;
348 if (numPassed != expectedCount)
350 tcu::TestContext& testCtx = m_context.getTestContext();
353 << tcu::TestLog::Message
354 << "ERROR: got invalid result for invocation " << cmdNdx
355 << ": got numPassed = " << numPassed << ", expected " << expectedCount
356 << tcu::TestLog::EndMessage;
365 class IndirectDispatchCaseBufferUpload : public vkt::TestCase
368 IndirectDispatchCaseBufferUpload (tcu::TestContext& testCtx,
369 const DispatchCaseDesc& caseDesc,
370 const glu::GLSLVersion glslVersion);
372 virtual ~IndirectDispatchCaseBufferUpload (void) {}
374 virtual void initPrograms (vk::SourceCollections& programCollection) const;
375 virtual TestInstance* createInstance (Context& context) const;
378 const deUintptr m_bufferSize;
379 const tcu::UVec3 m_workGroupSize;
380 const DispatchCommandsVec m_dispatchCommands;
381 const glu::GLSLVersion m_glslVersion;
384 IndirectDispatchCaseBufferUpload (const vkt::TestCase&);
385 IndirectDispatchCaseBufferUpload& operator= (const vkt::TestCase&);
388 IndirectDispatchCaseBufferUpload::IndirectDispatchCaseBufferUpload (tcu::TestContext& testCtx,
389 const DispatchCaseDesc& caseDesc,
390 const glu::GLSLVersion glslVersion)
391 : vkt::TestCase (testCtx, caseDesc.m_name, caseDesc.m_description)
392 , m_bufferSize (caseDesc.m_bufferSize)
393 , m_workGroupSize (caseDesc.m_workGroupSize)
394 , m_dispatchCommands (caseDesc.m_dispatchCommands)
395 , m_glslVersion (glslVersion)
399 void IndirectDispatchCaseBufferUpload::initPrograms (vk::SourceCollections& programCollection) const
401 const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
403 std::ostringstream verifyBuffer;
406 << versionDecl << "\n"
407 << "layout(local_size_x = ${LOCAL_SIZE_X}, local_size_y = ${LOCAL_SIZE_Y}, local_size_z = ${LOCAL_SIZE_Z}) in;\n"
408 << "layout(set = 0, binding = 0, std430) buffer Result\n"
410 << " uvec3 expectedGroupCount;\n"
411 << " coherent uint numPassed;\n"
413 << "void main (void)\n"
415 << " if (all(equal(result.expectedGroupCount, gl_NumWorkGroups)))\n"
416 << " atomicAdd(result.numPassed, 1u);\n"
419 std::map<std::string, std::string> args;
421 args["LOCAL_SIZE_X"] = de::toString(m_workGroupSize.x());
422 args["LOCAL_SIZE_Y"] = de::toString(m_workGroupSize.y());
423 args["LOCAL_SIZE_Z"] = de::toString(m_workGroupSize.z());
425 std::string verifyProgramString = tcu::StringTemplate(verifyBuffer.str()).specialize(args);
427 programCollection.glslSources.add("indirect_dispatch_" + m_name + "_verify") << glu::ComputeSource(verifyProgramString);
430 TestInstance* IndirectDispatchCaseBufferUpload::createInstance (Context& context) const
432 return new IndirectDispatchInstanceBufferUpload(context, m_name, m_bufferSize, m_workGroupSize, m_dispatchCommands);
435 class IndirectDispatchInstanceBufferGenerate : public IndirectDispatchInstanceBufferUpload
438 IndirectDispatchInstanceBufferGenerate (Context& context,
439 const std::string& name,
440 const deUintptr bufferSize,
441 const tcu::UVec3& workGroupSize,
442 const DispatchCommandsVec& dispatchCommands)
443 : IndirectDispatchInstanceBufferUpload(context, name, bufferSize, workGroupSize, dispatchCommands) {}
445 virtual ~IndirectDispatchInstanceBufferGenerate (void) {}
448 virtual void fillIndirectBufferData (const vk::VkCommandBuffer commandBuffer,
449 const Buffer& indirectBuffer);
451 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
452 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
453 vk::Move<vk::VkPipelineLayout> m_pipelineLayout;
454 vk::Move<vk::VkPipeline> m_computePipeline;
457 IndirectDispatchInstanceBufferGenerate (const vkt::TestInstance&);
458 IndirectDispatchInstanceBufferGenerate& operator= (const vkt::TestInstance&);
461 void IndirectDispatchInstanceBufferGenerate::fillIndirectBufferData (const vk::VkCommandBuffer commandBuffer, const Buffer& indirectBuffer)
463 // Create compute shader that generates data for indirect buffer
464 const vk::Unique<vk::VkShaderModule> genIndirectBufferDataShader(createShaderModule(
465 m_device_interface, m_device, m_context.getBinaryCollection().get("indirect_dispatch_" + m_name + "_generate"), 0u));
467 // Create descriptorSetLayout
468 vk::DescriptorSetLayoutBuilder layoutBuilder;
469 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT);
470 vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(layoutBuilder.build(m_device_interface, m_device));
472 // Create compute pipeline
473 m_pipelineLayout = makePipelineLayout(m_device_interface, m_device, *descriptorSetLayout);
474 m_computePipeline = makeComputePipeline(m_device_interface, m_device, *m_pipelineLayout, *genIndirectBufferDataShader);
476 // Create descriptor pool
477 m_descriptorPool = vk::DescriptorPoolBuilder()
478 .addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
479 .build(m_device_interface, m_device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
481 // Create descriptor set
482 m_descriptorSet = makeDescriptorSet(m_device_interface, m_device, *m_descriptorPool, *descriptorSetLayout);
484 const vk::VkDescriptorBufferInfo indirectDescriptorInfo = makeDescriptorBufferInfo(*indirectBuffer, 0ull, m_bufferSize);
486 vk::DescriptorSetUpdateBuilder descriptorSetBuilder;
487 descriptorSetBuilder.writeSingle(*m_descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &indirectDescriptorInfo);
488 descriptorSetBuilder.update(m_device_interface, m_device);
490 const vk::VkBufferMemoryBarrier bufferBarrier = makeBufferMemoryBarrier(
491 vk::VK_ACCESS_SHADER_WRITE_BIT, vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT, *indirectBuffer, 0ull, m_bufferSize);
493 // Bind compute pipeline
494 m_device_interface.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *m_computePipeline);
496 // Bind descriptor set
497 m_device_interface.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
499 // Dispatch compute command
500 m_device_interface.cmdDispatch(commandBuffer, 1u, 1u, 1u);
502 // Insert memory barrier
503 m_device_interface.cmdPipelineBarrier(commandBuffer, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, (vk::VkDependencyFlags)0,
504 0, (const vk::VkMemoryBarrier*)DE_NULL,
506 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
509 class IndirectDispatchCaseBufferGenerate : public IndirectDispatchCaseBufferUpload
512 IndirectDispatchCaseBufferGenerate (tcu::TestContext& testCtx,
513 const DispatchCaseDesc& caseDesc,
514 const glu::GLSLVersion glslVersion)
515 : IndirectDispatchCaseBufferUpload(testCtx, caseDesc, glslVersion) {}
517 virtual ~IndirectDispatchCaseBufferGenerate (void) {}
519 virtual void initPrograms (vk::SourceCollections& programCollection) const;
520 virtual TestInstance* createInstance (Context& context) const;
523 IndirectDispatchCaseBufferGenerate (const vkt::TestCase&);
524 IndirectDispatchCaseBufferGenerate& operator= (const vkt::TestCase&);
527 void IndirectDispatchCaseBufferGenerate::initPrograms (vk::SourceCollections& programCollection) const
529 IndirectDispatchCaseBufferUpload::initPrograms(programCollection);
531 const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
533 std::ostringstream computeBuffer;
537 << versionDecl << "\n"
538 << "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
539 << "layout(set = 0, binding = 0, std430) buffer Out\n"
541 << " highp uint data[];\n"
543 << "void writeCmd (uint offset, uvec3 numWorkGroups)\n"
545 << " data[offset+0u] = numWorkGroups.x;\n"
546 << " data[offset+1u] = numWorkGroups.y;\n"
547 << " data[offset+2u] = numWorkGroups.z;\n"
549 << "void main (void)\n"
553 for (DispatchCommandsVec::const_iterator cmdIter = m_dispatchCommands.begin(); cmdIter != m_dispatchCommands.end(); ++cmdIter)
555 const deUint32 offs = (deUint32)(cmdIter->m_offset / sizeof(deUint32));
556 DE_ASSERT((size_t)offs * sizeof(deUint32) == (size_t)cmdIter->m_offset);
559 << "\twriteCmd(" << offs << "u, uvec3("
560 << cmdIter->m_numWorkGroups.x() << "u, "
561 << cmdIter->m_numWorkGroups.y() << "u, "
562 << cmdIter->m_numWorkGroups.z() << "u));\n";
566 computeBuffer << "}\n";
568 std::string computeString = computeBuffer.str();
570 programCollection.glslSources.add("indirect_dispatch_" + m_name + "_generate") << glu::ComputeSource(computeString);
573 TestInstance* IndirectDispatchCaseBufferGenerate::createInstance (Context& context) const
575 return new IndirectDispatchInstanceBufferGenerate(context, m_name, m_bufferSize, m_workGroupSize, m_dispatchCommands);
578 DispatchCommandsVec commandsVec (const DispatchCommand& cmd)
580 DispatchCommandsVec vec;
585 DispatchCommandsVec commandsVec (const DispatchCommand& cmd0,
586 const DispatchCommand& cmd1,
587 const DispatchCommand& cmd2,
588 const DispatchCommand& cmd3,
589 const DispatchCommand& cmd4)
591 DispatchCommandsVec vec;
600 DispatchCommandsVec commandsVec (const DispatchCommand& cmd0,
601 const DispatchCommand& cmd1,
602 const DispatchCommand& cmd2,
603 const DispatchCommand& cmd3,
604 const DispatchCommand& cmd4,
605 const DispatchCommand& cmd5,
606 const DispatchCommand& cmd6)
608 DispatchCommandsVec vec;
621 tcu::TestCaseGroup* createIndirectComputeDispatchTests (tcu::TestContext& testCtx)
623 static const DispatchCaseDesc s_dispatchCases[] =
625 DispatchCaseDesc("single_invocation", "Single invocation only from offset 0", INDIRECT_COMMAND_OFFSET, tcu::UVec3(1, 1, 1),
626 commandsVec(DispatchCommand(0, tcu::UVec3(1, 1, 1)))
628 DispatchCaseDesc("multiple_groups", "Multiple groups dispatched from offset 0", INDIRECT_COMMAND_OFFSET, tcu::UVec3(1, 1, 1),
629 commandsVec(DispatchCommand(0, tcu::UVec3(2, 3, 5)))
631 DispatchCaseDesc("multiple_groups_multiple_invocations", "Multiple groups of size 2x3x1 from offset 0", INDIRECT_COMMAND_OFFSET, tcu::UVec3(2, 3, 1),
632 commandsVec(DispatchCommand(0, tcu::UVec3(1, 2, 3)))
634 DispatchCaseDesc("small_offset", "Small offset", 16 + INDIRECT_COMMAND_OFFSET, tcu::UVec3(1, 1, 1),
635 commandsVec(DispatchCommand(16, tcu::UVec3(1, 1, 1)))
637 DispatchCaseDesc("large_offset", "Large offset", (2 << 20), tcu::UVec3(1, 1, 1),
638 commandsVec(DispatchCommand((1 << 20) + 12, tcu::UVec3(1, 1, 1)))
640 DispatchCaseDesc("large_offset_multiple_invocations", "Large offset, multiple invocations", (2 << 20), tcu::UVec3(2, 3, 1),
641 commandsVec(DispatchCommand((1 << 20) + 12, tcu::UVec3(1, 2, 3)))
643 DispatchCaseDesc("empty_command", "Empty command", INDIRECT_COMMAND_OFFSET, tcu::UVec3(1, 1, 1),
644 commandsVec(DispatchCommand(0, tcu::UVec3(0, 0, 0)))
646 DispatchCaseDesc("multi_dispatch", "Dispatch multiple compute commands from single buffer", 1 << 10, tcu::UVec3(3, 1, 2),
647 commandsVec(DispatchCommand(0, tcu::UVec3(1, 1, 1)),
648 DispatchCommand(INDIRECT_COMMAND_OFFSET, tcu::UVec3(2, 1, 1)),
649 DispatchCommand(104, tcu::UVec3(1, 3, 1)),
650 DispatchCommand(40, tcu::UVec3(1, 1, 7)),
651 DispatchCommand(52, tcu::UVec3(1, 1, 4)))
653 DispatchCaseDesc("multi_dispatch_reuse_command", "Dispatch multiple compute commands from single buffer", 1 << 10, tcu::UVec3(3, 1, 2),
654 commandsVec(DispatchCommand(0, tcu::UVec3(1, 1, 1)),
655 DispatchCommand(0, tcu::UVec3(1, 1, 1)),
656 DispatchCommand(0, tcu::UVec3(1, 1, 1)),
657 DispatchCommand(104, tcu::UVec3(1, 3, 1)),
658 DispatchCommand(104, tcu::UVec3(1, 3, 1)),
659 DispatchCommand(52, tcu::UVec3(1, 1, 4)),
660 DispatchCommand(52, tcu::UVec3(1, 1, 4)))
664 de::MovePtr<tcu::TestCaseGroup> indirectComputeDispatchTests(new tcu::TestCaseGroup(testCtx, "indirect_dispatch", "Indirect dispatch tests"));
666 tcu::TestCaseGroup* const groupBufferUpload = new tcu::TestCaseGroup(testCtx, "upload_buffer", "");
667 indirectComputeDispatchTests->addChild(groupBufferUpload);
669 for (deUint32 ndx = 0; ndx < DE_LENGTH_OF_ARRAY(s_dispatchCases); ndx++)
671 groupBufferUpload->addChild(new IndirectDispatchCaseBufferUpload(testCtx, s_dispatchCases[ndx], glu::GLSL_VERSION_310_ES));
674 tcu::TestCaseGroup* const groupBufferGenerate = new tcu::TestCaseGroup(testCtx, "gen_in_compute", "");
675 indirectComputeDispatchTests->addChild(groupBufferGenerate);
677 for (deUint32 ndx = 0; ndx < DE_LENGTH_OF_ARRAY(s_dispatchCases); ndx++)
679 groupBufferGenerate->addChild(new IndirectDispatchCaseBufferGenerate(testCtx, s_dispatchCases[ndx], glu::GLSL_VERSION_310_ES));
682 return indirectComputeDispatchTests.release();