1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 The Android Open Source Project
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Compute Shader Built-in variable tests.
23 *//*--------------------------------------------------------------------*/
25 #include "vktComputeShaderBuiltinVarTests.hpp"
26 #include "vktTestCaseUtil.hpp"
27 #include "vktComputeTestsUtil.hpp"
30 #include "vkPlatform.hpp"
32 #include "vkPrograms.hpp"
33 #include "vkStrUtil.hpp"
34 #include "vkRefUtil.hpp"
35 #include "vkQueryUtil.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkDeviceUtil.hpp"
38 #include "vkTypeUtil.hpp"
39 #include "vkBuilderUtil.hpp"
41 #include "tcuTestLog.hpp"
42 #include "tcuFormatUtil.hpp"
43 #include "tcuVectorUtil.hpp"
45 #include "gluShaderUtil.hpp"
47 #include "deUniquePtr.hpp"
48 #include "deSharedPtr.hpp"
// Forward declarations: each test case creates a matching test instance below.
69 class ComputeBuiltinVarInstance;
70 class ComputeBuiltinVarCase;
// Common prefix for per-subcase shader program names ("compute_0", "compute_1", ...).
72 static const string s_prefixProgramName ="compute_";
74 static inline bool compareNumComponents (const UVec3& a, const UVec3& b,const int numComps)
76 DE_ASSERT(numComps == 1 || numComps == 3);
77 return numComps == 3 ? tcu::allEqual(a, b) : a.x() == b.x();
// Reads numComps consecutive deUint32 values starting at ptr into a UVec3.
// NOTE(review): the accumulator declaration, loop body and return statement
// appear to be truncated in this listing — verify against the full source.
80 static inline UVec3 readResultVec (const deUint32* ptr, const int numComps)
83 for (int ndx = 0; ndx < numComps; ndx++)
// Helper bundling a vector with the number of components (1 or 3) that the
// operator<< overload below should print.
93 LogComps (const UVec3 &v_, int numComps_) : v(v_), numComps(numComps_) {}
96 static inline std::ostream& operator<< (std::ostream& str, const LogComps& c)
98 DE_ASSERT(c.numComps == 1 || c.numComps == 3);
99 return c.numComps == 3 ? str << c.v : str << c.v.x();
105 // Use getters instead of public const members, because SubCase must be assignable
106 // in order to be stored in a vector.
// Local workgroup size (local_size_x/y/z) for this subcase.
108 const UVec3& localSize (void) const { return m_localSize; }
// Number of workgroups dispatched in each dimension.
109 const UVec3& numWorkGroups (void) const { return m_numWorkGroups; }
// Constructs a subcase from a local size and a dispatch size.
112 SubCase (const UVec3& localSize_, const UVec3& numWorkGroups_)
113 : m_localSize (localSize_)
114 , m_numWorkGroups (numWorkGroups_) {}
118 UVec3 m_numWorkGroups;
// Test instance: executes one subcase per iterate() call, dispatching the
// matching compute program and comparing every invocation's output against
// the case's computeReference() value.
122 class ComputeBuiltinVarInstance : public vkt::TestInstance
125 ComputeBuiltinVarInstance (Context& context,
126 const vector<SubCase>& subCases,
127 const glu::DataType varType,
128 const ComputeBuiltinVarCase* builtinVarCase);
// Returns incomplete() until every subcase has been run; see definition below.
130 virtual tcu::TestStatus iterate (void);
133 const VkDevice m_device;
134 const DeviceInterface& m_vki;
135 const VkQueue m_queue;
136 const deUint32 m_queueFamilyIndex;
137 vector<SubCase> m_subCases;
// Non-owning pointer back to the owning case, used for reference computation.
138 const ComputeBuiltinVarCase* m_builtin_var_case;
140 const glu::DataType m_varType;
// Base class for all built-in variable cases: owns the list of subcases,
// generates the shader sources, and defines the per-invocation reference value
// via the pure virtual computeReference().
143 class ComputeBuiltinVarCase : public vkt::TestCase
146 ComputeBuiltinVarCase (tcu::TestContext& context, const string& name, const char* varName, glu::DataType varType, bool readByComponent);
147 ~ComputeBuiltinVarCase (void);
149 TestInstance* createInstance (Context& context) const
// Instance keeps a non-owning pointer to this case for reference computation.
151 return new ComputeBuiltinVarInstance(context, m_subCases, m_varType, this);
154 virtual void initPrograms (SourceCollections& programCollection) const;
// Expected value for a single invocation, given the dispatch parameters.
155 virtual UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const = 0;
158 string genBuiltinVarSource (const string& varName, glu::DataType varType, const UVec3& localSize, bool readByComponent) const;
159 vector<SubCase> m_subCases;
162 deUint32 getProgram (const tcu::UVec3& localSize);
164 const string m_varName;
165 const glu::DataType m_varType;
// When true, vector built-ins are read one component at a time in the shader.
167 bool m_readByComponent;
// Copying disallowed: declared but not defined.
169 ComputeBuiltinVarCase (const ComputeBuiltinVarCase& other);
170 ComputeBuiltinVarCase& operator= (const ComputeBuiltinVarCase& other);
// Appends "_component" to the case name when the built-in is read one
// component at a time; the variable name doubles as the case description.
173 ComputeBuiltinVarCase::ComputeBuiltinVarCase (tcu::TestContext& context, const string& name, const char* varName, glu::DataType varType, bool readByComponent)
174 : TestCase (context, name + (readByComponent ? "_component" : ""), varName)
175 , m_varName (varName)
176 , m_varType (varType)
178 , m_readByComponent (readByComponent)
182 ComputeBuiltinVarCase::~ComputeBuiltinVarCase (void)
// Run cleanup explicitly on destruction; there is nothing else to release.
184 ComputeBuiltinVarCase::deinit();
// Generates one GLSL compute program per subcase, named "compute_<index>",
// each compiled with that subcase's local workgroup size baked in.
187 void ComputeBuiltinVarCase::initPrograms (SourceCollections& programCollection) const
189 for (std::size_t i = 0; i < m_subCases.size(); i++)
191 const SubCase& subCase = m_subCases[i];
192 std::ostringstream name;
193 name << s_prefixProgramName << i;
194 programCollection.glslSources.add(name.str()) << glu::ComputeSource(genBuiltinVarSource(m_varName, m_varType, subCase.localSize(), m_readByComponent).c_str());
// Builds GLSL ES 3.10 compute shader source that reads the built-in variable
// under test and stores it into an SSBO, one element per global invocation.
// NOTE(review): the uniform/buffer block instance-name lines ("} stride;",
// "} sb_out;") and the switch statement header appear truncated here.
198 string ComputeBuiltinVarCase::genBuiltinVarSource (const string& varName, glu::DataType varType, const UVec3& localSize, bool readByComponent) const
200 std::ostringstream src;
202 src << "#version 310 es\n"
203 << "layout (local_size_x = " << localSize.x() << ", local_size_y = " << localSize.y() << ", local_size_z = " << localSize.z() << ") in;\n";
205 // For the gl_WorkGroupSize case, force it to be specialized so that
206 // Glslang can't just bypass the read of the builtin variable.
207 // We will not override these spec constants.
208 src << "layout (local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;\n";
210 src << "layout(set = 0, binding = 0) uniform Stride\n"
212 << " uvec2 u_stride;\n"
214 << "layout(set = 0, binding = 1, std430) buffer Output\n"
216 << " " << glu::getDataTypeName(varType) << " result[];\n"
219 << "void main (void)\n"
// Linearize the 3D global invocation ID using the strides from the UBO.
221 << " highp uint offset = stride.u_stride.x*gl_GlobalInvocationID.z + stride.u_stride.y*gl_GlobalInvocationID.y + gl_GlobalInvocationID.x;\n";
// Component-wise reads exercise a different access pattern; a plain uint has
// a single component, so it always takes the whole-value path below.
223 if (readByComponent && varType != glu::TYPE_UINT) {
// The cases fall through so wider vectors also emit the narrower components
// (a vec4 writes w, z, y and x).
226 case glu::TYPE_UINT_VEC4:
227 src << " sb_out.result[offset].w = " << varName << ".w;\n";
229 case glu::TYPE_UINT_VEC3:
230 src << " sb_out.result[offset].z = " << varName << ".z;\n";
232 case glu::TYPE_UINT_VEC2:
233 src << " sb_out.result[offset].y = " << varName << ".y;\n"
234 << " sb_out.result[offset].x = " << varName << ".x;\n";
// NOTE(review): DE_ASSERT on a string literal is always true and never fires;
// a fatal-failure macro (e.g. DE_FATAL) was probably intended here.
237 DE_ASSERT("Illegal data type");
241 src << " sb_out.result[offset] = " << varName << ";\n";
// Tests gl_NumWorkGroups: every invocation must observe the dispatch size.
248 class NumWorkGroupsCase : public ComputeBuiltinVarCase
251 NumWorkGroupsCase (tcu::TestContext& context, bool readByCompnent)
252 : ComputeBuiltinVarCase(context, "num_work_groups", "gl_NumWorkGroups", glu::TYPE_UINT_VEC3, readByCompnent)
254 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
255 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(52, 1, 1)));
256 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
257 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 78)));
258 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
259 m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
// Reference value is simply the number of workgroups dispatched.
262 UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
// NOTE(review): DE_UNREF on numWorkGroups is redundant — the parameter IS
// used by the return statement below. Harmless, but misleading.
264 DE_UNREF(numWorkGroups);
265 DE_UNREF(workGroupSize);
266 DE_UNREF(workGroupID);
267 DE_UNREF(localInvocationID);
268 return numWorkGroups;
// Tests gl_WorkGroupSize: every invocation must observe the local size the
// shader was compiled (and spec-constant specialized) with.
272 class WorkGroupSizeCase : public ComputeBuiltinVarCase
275 WorkGroupSizeCase (tcu::TestContext& context, bool readByComponent)
276 : ComputeBuiltinVarCase(context, "work_group_size", "gl_WorkGroupSize", glu::TYPE_UINT_VEC3, readByComponent)
278 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
279 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(2, 7, 3)));
280 m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 1, 1)));
281 m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 3, 5)));
282 m_subCases.push_back(SubCase(UVec3(1, 3, 1), UVec3(1, 1, 1)));
283 m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(1, 1, 1)));
284 m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(3, 3, 1)));
285 m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
286 m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
// Reference value is the subcase's local size, identical for all invocations.
289 UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
291 DE_UNREF(numWorkGroups);
292 DE_UNREF(workGroupID);
293 DE_UNREF(localInvocationID);
294 return workGroupSize;
298 //-----------------------------------------------------------------------
// Tests gl_WorkGroupID: each invocation must observe its own workgroup's
// 3D index within the dispatch.
299 class WorkGroupIDCase : public ComputeBuiltinVarCase
302 WorkGroupIDCase (tcu::TestContext& context, bool readbyComponent)
303 : ComputeBuiltinVarCase(context, "work_group_id", "gl_WorkGroupID", glu::TYPE_UINT_VEC3, readbyComponent)
305 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
306 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(52, 1, 1)));
307 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
308 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 78)));
309 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
310 m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
// Reference value is the workgroup ID itself.
313 UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
315 DE_UNREF(numWorkGroups);
316 DE_UNREF(workGroupSize);
317 DE_UNREF(localInvocationID);
// NOTE(review): the "return workGroupID;" statement appears to be truncated
// from this listing — verify against the full source.
// Tests gl_LocalInvocationID: each invocation must observe its own 3D index
// within its workgroup.
322 class LocalInvocationIDCase : public ComputeBuiltinVarCase
325 LocalInvocationIDCase (tcu::TestContext& context, bool readByComponent)
326 : ComputeBuiltinVarCase(context, "local_invocation_id", "gl_LocalInvocationID", glu::TYPE_UINT_VEC3, readByComponent)
328 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
329 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(2, 7, 3)));
330 m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 1, 1)));
331 m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 3, 5)));
332 m_subCases.push_back(SubCase(UVec3(1, 3, 1), UVec3(1, 1, 1)));
333 m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(1, 1, 1)));
334 m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(3, 3, 1)));
335 m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
336 m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
// Reference value is the local invocation ID itself.
339 UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
341 DE_UNREF(numWorkGroups);
342 DE_UNREF(workGroupSize);
343 DE_UNREF(workGroupID);
344 return localInvocationID;
// Tests gl_GlobalInvocationID: workgroup ID scaled by the workgroup size plus
// the local invocation ID.
348 class GlobalInvocationIDCase : public ComputeBuiltinVarCase
351 GlobalInvocationIDCase (tcu::TestContext& context, bool readByComponent)
352 : ComputeBuiltinVarCase(context, "global_invocation_id", "gl_GlobalInvocationID", glu::TYPE_UINT_VEC3, readByComponent)
354 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
355 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(52, 1, 1)));
356 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
357 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 78)));
358 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
359 m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
360 m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
361 m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
// Component-wise: gl_WorkGroupID * gl_WorkGroupSize + gl_LocalInvocationID.
364 UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
366 DE_UNREF(numWorkGroups);
367 return workGroupID * workGroupSize + localInvocationID;
// Tests gl_LocalInvocationIndex: the flattened (scalar) index of an
// invocation within its workgroup. Uses TYPE_UINT since the value is scalar.
371 class LocalInvocationIndexCase : public ComputeBuiltinVarCase
374 LocalInvocationIndexCase (tcu::TestContext& context, bool readByComponent)
375 : ComputeBuiltinVarCase(context, "local_invocation_index", "gl_LocalInvocationIndex", glu::TYPE_UINT, readByComponent)
377 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
378 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
379 m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
380 m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
381 m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
382 m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
// Reference: z*sizeX*sizeY + y*sizeX + x, packed into the .x component
// (the comparison only inspects .x because the case type is scalar).
385 UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
387 DE_UNREF(workGroupID);
388 DE_UNREF(numWorkGroups);
389 return UVec3(localInvocationID.z()*workGroupSize.x()*workGroupSize.y() + localInvocationID.y()*workGroupSize.x() + localInvocationID.x(), 0, 0);
// Caches device/queue handles from the context and copies the subcase list;
// builtinVarCase is stored as a non-owning pointer (the case outlives us).
393 ComputeBuiltinVarInstance::ComputeBuiltinVarInstance (Context& context,
394 const vector<SubCase>& subCases,
395 const glu::DataType varType,
396 const ComputeBuiltinVarCase* builtinVarCase)
397 : vkt::TestInstance (context)
398 , m_device (m_context.getDevice())
399 , m_vki (m_context.getDeviceInterface())
400 , m_queue (context.getUniversalQueue())
401 , m_queueFamilyIndex (context.getUniversalQueueFamilyIndex())
402 , m_subCases (subCases)
403 , m_builtin_var_case (builtinVarCase)
405 , m_varType (varType)
// Runs a single subcase: builds the pipeline for "compute_<m_subCaseNdx>",
// dispatches it, reads back the SSBO and compares every invocation's value
// against the reference. Returns incomplete() until all subcases have run.
// NOTE(review): the switch header for the stride selection, the failure
// counter declaration and the m_subCaseNdx increment appear truncated in
// this listing — verify against the full source.
409 tcu::TestStatus ComputeBuiltinVarInstance::iterate (void)
411 std::ostringstream program_name;
412 program_name << s_prefixProgramName << m_subCaseNdx;
414 const SubCase& subCase = m_subCases[m_subCaseNdx];
415 const tcu::UVec3 globalSize = subCase.localSize()*subCase.numWorkGroups();
// Strides used both by the shader and by the host-side readback to linearize
// the 3D global invocation ID: (sizeX*sizeY, sizeX).
416 const tcu::UVec2 stride (globalSize[0] * globalSize[1], globalSize[0]);
417 const deUint32 sizeOfUniformBuffer = sizeof(stride);
418 const int numScalars = glu::getDataTypeScalarSize(m_varType);
419 const deUint32 numInvocations = subCase.localSize()[0] * subCase.localSize()[1] * subCase.localSize()[2] * subCase.numWorkGroups()[0] * subCase.numWorkGroups()[1] * subCase.numWorkGroups()[2];
// Per-element stride in the result buffer; vec3 shares vec4's std430 stride.
421 deUint32 resultBufferStride = 0;
425 resultBufferStride = sizeof(deUint32);
427 case glu::TYPE_UINT_VEC2:
428 resultBufferStride = sizeof(tcu::UVec2);
430 case glu::TYPE_UINT_VEC3:
431 case glu::TYPE_UINT_VEC4:
432 resultBufferStride = sizeof(tcu::UVec4);
// NOTE(review): DE_ASSERT on a string literal is always true and never fires;
// a fatal-failure macro (e.g. DE_FATAL) was probably intended here.
435 DE_ASSERT("Illegal data type");
438 const deUint32 resultBufferSize = numInvocations * resultBufferStride;
440 // Create the host-visible uniform (stride) and result (SSBO) buffers.
441 Buffer uniformBuffer(m_vki, m_device, m_context.getDefaultAllocator(), makeBufferCreateInfo(sizeOfUniformBuffer, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT), MemoryRequirement::HostVisible);
442 Buffer resultBuffer(m_vki, m_device, m_context.getDefaultAllocator(), makeBufferCreateInfo(resultBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);
// Upload the stride values and flush so the device sees them.
445 const Allocation& alloc = uniformBuffer.getAllocation();
446 memcpy(alloc.getHostPtr(), &stride, sizeOfUniformBuffer);
447 flushMappedMemoryRange(m_vki, m_device, alloc.getMemory(), alloc.getOffset(), sizeOfUniformBuffer);
450 // Create descriptorSetLayout
451 const Unique<VkDescriptorSetLayout> descriptorSetLayout(
452 DescriptorSetLayoutBuilder()
453 .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
454 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
455 .build(m_vki, m_device));
// Build the compute pipeline from the pre-compiled program for this subcase.
457 const Unique<VkShaderModule> shaderModule(createShaderModule(m_vki, m_device, m_context.getBinaryCollection().get(program_name.str()), 0u));
458 const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(m_vki, m_device, *descriptorSetLayout));
459 const Unique<VkPipeline> pipeline(makeComputePipeline(m_vki, m_device, *pipelineLayout, *shaderModule));
461 const Unique<VkDescriptorPool> descriptorPool(
462 DescriptorPoolBuilder()
463 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
464 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
465 .build(m_vki, m_device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
// Barrier making the shader's SSBO writes visible to host reads.
467 const VkBufferMemoryBarrier bufferBarrier = makeBufferMemoryBarrier(
468 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, resultBufferSize);
470 const Unique<VkCommandPool> cmdPool(makeCommandPool(m_vki, m_device, m_queueFamilyIndex));
471 const Unique<VkCommandBuffer> cmdBuffer(allocateCommandBuffer(m_vki, m_device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
473 // Start recording commands
474 beginCommandBuffer(m_vki, *cmdBuffer);
476 m_vki.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
478 // Create descriptor set
479 const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(m_vki, m_device, *descriptorPool, *descriptorSetLayout));
481 const VkDescriptorBufferInfo resultDescriptorInfo = makeDescriptorBufferInfo(*resultBuffer, 0ull, resultBufferSize);
482 const VkDescriptorBufferInfo uniformDescriptorInfo = makeDescriptorBufferInfo(*uniformBuffer, 0ull, sizeOfUniformBuffer);
484 DescriptorSetUpdateBuilder()
485 .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &uniformDescriptorInfo)
486 .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultDescriptorInfo)
487 .update(m_vki, m_device);
489 m_vki.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
491 // Record a direct dispatch for this subcase's workgroup counts.
492 m_vki.cmdDispatch(*cmdBuffer, subCase.numWorkGroups()[0], subCase.numWorkGroups()[1], subCase.numWorkGroups()[2]);
494 m_vki.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
495 0, (const VkMemoryBarrier*)DE_NULL,
497 0, (const VkImageMemoryBarrier*)DE_NULL);
499 // End recording commands
500 endCommandBuffer(m_vki, *cmdBuffer);
502 // Wait for command buffer execution finish
503 submitCommandsAndWait(m_vki, m_device, m_queue, *cmdBuffer);
// Invalidate the mapped range before the host reads the results back.
505 const Allocation& resultAlloc = resultBuffer.getAllocation();
506 invalidateMappedMemoryRange(m_vki, m_device, resultAlloc.getMemory(), resultAlloc.getOffset(), resultBufferSize);
508 const deUint8* ptr = reinterpret_cast<deUint8*>(resultAlloc.getHostPtr());
// Cap the number of per-invocation mismatch messages written to the log.
511 const int maxLogPrints = 10;
513 tcu::TestContext& testCtx = m_context.getTestContext();
// Walk every invocation in the dispatch and compare against the reference.
515 for (deUint32 groupZ = 0; groupZ < subCase.numWorkGroups().z(); groupZ++)
516 for (deUint32 groupY = 0; groupY < subCase.numWorkGroups().y(); groupY++)
517 for (deUint32 groupX = 0; groupX < subCase.numWorkGroups().x(); groupX++)
518 for (deUint32 localZ = 0; localZ < subCase.localSize().z(); localZ++)
519 for (deUint32 localY = 0; localY < subCase.localSize().y(); localY++)
520 for (deUint32 localX = 0; localX < subCase.localSize().x(); localX++)
522 const UVec3 refGroupID(groupX, groupY, groupZ);
523 const UVec3 refLocalID(localX, localY, localZ);
524 const UVec3 refGlobalID = refGroupID * subCase.localSize() + refLocalID;
// Same linearization the shader used: z*sizeX*sizeY + y*sizeX + x.
526 const deUint32 refOffset = stride.x()*refGlobalID.z() + stride.y()*refGlobalID.y() + refGlobalID.x();
528 const UVec3 refValue = m_builtin_var_case->computeReference(subCase.numWorkGroups(), subCase.localSize(), refGroupID, refLocalID);
530 const deUint32* resPtr = (const deUint32*)(ptr + refOffset * resultBufferStride);
531 const UVec3 resValue = readResultVec(resPtr, numScalars);
533 if (!compareNumComponents(refValue, resValue, numScalars))
535 if (numFailed < maxLogPrints)
538 << "ERROR: comparison failed at offset " << refOffset
539 << ": expected " << LogComps(refValue, numScalars)
540 << ", got " << LogComps(resValue, numScalars)
541 << TestLog::EndMessage;
// After the print budget is spent, emit a single ellipsis and go quiet.
542 else if (numFailed == maxLogPrints)
543 testCtx.getLog() << TestLog::Message << "..." << TestLog::EndMessage;
549 testCtx.getLog() << TestLog::Message << (numInvocations - numFailed) << " / " << numInvocations << " values passed" << TestLog::EndMessage;
552 return tcu::TestStatus::fail("Comparison failed");
// Keep iterating until every subcase has run, then report overall pass.
555 return (m_subCaseNdx < (int)m_subCases.size()) ? tcu::TestStatus::incomplete() :tcu::TestStatus::pass("Comparison succeeded");
// Top-level group collecting every compute built-in variable case.
558 class ComputeShaderBuiltinVarTests : public tcu::TestCaseGroup
561 ComputeShaderBuiltinVarTests (tcu::TestContext& context);
// Copying a test group is not meaningful; declared but not defined.
566 ComputeShaderBuiltinVarTests (const ComputeShaderBuiltinVarTests& other);
567 ComputeShaderBuiltinVarTests& operator= (const ComputeShaderBuiltinVarTests& other);
// Registers the group as "builtin_var"; children are added in init().
570 ComputeShaderBuiltinVarTests::ComputeShaderBuiltinVarTests (tcu::TestContext& context)
571 : TestCaseGroup(context, "builtin_var", "Shader builtin var tests")
// Populates the group: vector-valued built-ins get both a whole-read and a
// per-component variant; the scalar index case is added once.
575 void ComputeShaderBuiltinVarTests::init (void)
577 // Builtin variables with vector values should be read whole and by component.
578 for (int i = 0; i < 2; i++)
580 const bool readByComponent = (i != 0);
581 addChild(new NumWorkGroupsCase(this->getTestContext(), readByComponent));
582 addChild(new WorkGroupSizeCase(this->getTestContext(), readByComponent));
583 addChild(new WorkGroupIDCase(this->getTestContext(), readByComponent));
584 addChild(new LocalInvocationIDCase(this->getTestContext(), readByComponent));
585 addChild(new GlobalInvocationIDCase(this->getTestContext(), readByComponent));
587 // Local invocation index is already just a scalar.
588 addChild(new LocalInvocationIndexCase(this->getTestContext(), false));
// Factory entry point; ownership of the returned group passes to the caller.
593 tcu::TestCaseGroup* createComputeShaderBuiltinVarTests (tcu::TestContext& testCtx)
595 return new ComputeShaderBuiltinVarTests(testCtx);