1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and/or associated documentation files (the
10 * "Materials"), to deal in the Materials without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sublicense, and/or sell copies of the Materials, and to
13 * permit persons to whom the Materials are furnished to do so, subject to
14 * the following conditions:
16 * The above copyright notice(s) and this permission notice shall be included
17 * in all copies or substantial portions of the Materials.
19 * The Materials are Confidential Information as defined by the
20 * Khronos Membership Agreement until designated non-confidential by Khronos,
21 * at which point this condition clause shall be removed.
23 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
26 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
27 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
28 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
29 * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
33 * \brief Uniform block case.
34 *//*--------------------------------------------------------------------*/
36 #include "vktUniformBlockCase.hpp"
38 #include "vkPrograms.hpp"
40 #include "gluVarType.hpp"
41 #include "tcuTestLog.hpp"
42 #include "tcuSurface.hpp"
43 #include "deRandom.hpp"
44 #include "deStringUtil.hpp"
46 #include "tcuTextureUtil.hpp"
47 #include "deSharedPtr.hpp"
49 #include "vkMemUtil.hpp"
50 #include "vkQueryUtil.hpp"
51 #include "vkTypeUtil.hpp"
53 #include "vkRefUtil.hpp"
54 #include "vkBuilderUtil.hpp"
66 // VarType implementation.
// VarType: tagged-union description of a uniform's type — basic
// (scalar/vector/matrix), array, or struct. Only the array variant owns heap
// memory (its element type). NOTE(review): this dump is missing interior lines
// (initializer lists, braces, the self-assignment guard); comments describe
// only what is visible — confirm against upstream vktUniformBlockCase.cpp.
// Default constructor (member initializers not visible in this dump).
68 VarType::VarType (void)
// Copy constructor (body not visible in this dump).
74 VarType::VarType (const VarType& other)
// Construct a basic type with layout/precision flags.
81 VarType::VarType (glu::DataType basicType, deUint32 flags)
85 m_data.basicType = basicType;
// Construct an array type; deep-copies the element type onto the heap.
88 VarType::VarType (const VarType& elementType, int arraySize)
92 m_data.array.size = arraySize;
93 m_data.array.elementType = new VarType(elementType);
// Construct a struct type; structPtr is non-owning (structs are owned by
// ShaderInterface — see allocStruct below).
96 VarType::VarType (const StructType* structPtr)
97 : m_type (TYPE_STRUCT)
100 m_data.structPtr = structPtr;
// Destructor: only the array variant owns memory that must be released.
103 VarType::~VarType (void)
105 if (m_type == TYPE_ARRAY)
106 delete m_data.array.elementType;
// Copy assignment: release any owned element type, then deep-copy from other.
109 VarType& VarType::operator= (const VarType& other)
112 return *this; // Self-assignment.
114 if (m_type == TYPE_ARRAY)
115 delete m_data.array.elementType;
117 m_type = other.m_type;
118 m_flags = other.m_flags;
121 if (m_type == TYPE_ARRAY)
// Array variant: deep copy so each VarType owns its own element type.
123 m_data.array.elementType = new VarType(*other.m_data.array.elementType)
124 m_data.array.size = other.m_data.array.size;
// Non-array variants hold no owned pointers; a plain member copy suffices.
127 m_data = other.m_data;
132 // StructType implementation.
// Appends a named member of the given type (with layout/unused-hint flags)
// to this struct's member list.
134 void StructType::addMember (const std::string& name, const VarType& type, deUint32 flags)
136 m_members.push_back(StructMember(name, type, flags));
139 // Uniform implementation.
// Uniform constructor: stores name, type and flags (initializer list not
// visible in this dump — presumably a straight member-wise init; confirm).
141 Uniform::Uniform (const std::string& name, const VarType& type, deUint32 flags)
148 // UniformBlock implementation.
// UniformBlock constructor: records the block name (remaining member
// initializers not visible in this dump).
150 UniformBlock::UniformBlock (const std::string& blockName)
151 : m_blockName (blockName)
// Debug formatter for a computed block layout entry. Prints name, size and
// the list of active uniform indices. NOTE(review): the loop body that prints
// each index and the closing "] }" are missing from this dump.
157 std::ostream& operator<< (std::ostream& stream, const BlockLayoutEntry& entry)
159 stream << entry.name << " { name = " << entry.name
160 << ", size = " << entry.size
161 << ", activeUniformIndices = [";
163 for (std::vector<int>::const_iterator i = entry.activeUniformIndices.begin(); i != entry.activeUniformIndices.end(); i++)
// Comma-separate all entries after the first.
165 if (i != entry.activeUniformIndices.begin())
// Debug formatter for a single uniform's layout entry: type, size, owning
// block index, byte offset, strides and matrix orientation.
174 std::ostream& operator<< (std::ostream& stream, const UniformLayoutEntry& entry)
176 stream << entry.name << " { type = " << glu::getDataTypeName(entry.type)
177 << ", size = " << entry.size
178 << ", blockNdx = " << entry.blockNdx
179 << ", offset = " << entry.offset
180 << ", arrayStride = " << entry.arrayStride
181 << ", matrixStride = " << entry.matrixStride
182 << ", isRowMajor = " << (entry.isRowMajor ? "true" : "false")
// Linear search for a uniform by its full API name. The match/return and the
// not-found return (presumably -1) are missing from this dump — confirm.
187 int UniformLayout::getUniformIndex (const std::string& name) const
189 for (int ndx = 0; ndx < (int)uniforms.size(); ndx++)
191 if (uniforms[ndx].name == name)
// Linear search for a block layout entry by instance name (includes the
// "[N]" suffix for arrayed blocks). Returns not visible in this dump —
// presumably ndx on match, -1 otherwise; confirm.
198 int UniformLayout::getBlockIndex (const std::string& name) const
200 for (int ndx = 0; ndx < (int)blocks.size(); ndx++)
202 if (blocks[ndx].name == name)
209 // ShaderInterface implementation.
// ShaderInterface owns all structs and uniform blocks of a test case via
// shared pointers, so default construction/destruction is sufficient.
211 ShaderInterface::ShaderInterface (void)
215 ShaderInterface::~ShaderInterface (void)
// Allocates a new named struct, retains ownership (StructTypeSP), and returns
// a reference the caller can populate with addMember().
219 StructType& ShaderInterface::allocStruct (const std::string& name)
221 m_structs.push_back(StructTypeSP(new StructType(name)));
222 return *m_structs.back();
// Predicate functor: true when the struct has a type name equal to the one
// captured at construction. (Member declaration lines missing in this dump.)
225 struct StructNameEquals
229 StructNameEquals (const std::string& name_) : name(name_) {}
231 bool operator() (const StructTypeSP type) const
233 return type->hasTypeName() && name == type->getTypeName();
// Collects raw (non-owning) pointers to all structs that have a type name;
// anonymous structs are skipped. Pointers remain owned by this interface.
237 void ShaderInterface::getNamedStructs (std::vector<const StructType*>& structs) const
239 for (std::vector<StructTypeSP>::const_iterator i = m_structs.begin(); i != m_structs.end(); i++)
241 if ((*i)->hasTypeName())
242 structs.push_back((*i).get())
// Allocates a new uniform block, retains ownership (UniformBlockSP), and
// returns a reference for the caller to fill in.
246 UniformBlock& ShaderInterface::allocBlock (const std::string& name)
248 m_uniformBlocks.push_back(UniformBlockSP(new UniformBlock(name)));
249 return *m_uniformBlocks.back();
252 namespace // Utilities
// Stream helper that prints a precision qualifier ("lowp"/"mediump"/"highp")
// for the stored flag bits; prints nothing when no precision bit is set.
255 struct PrecisionFlagsFmt
258 PrecisionFlagsFmt (deUint32 flags_) : flags(flags_) {}
261 std::ostream& operator<< (std::ostream& str, const PrecisionFlagsFmt& fmt)
// At most one precision bit may be set (dePop32 counts set bits).
264 DE_ASSERT(dePop32(fmt.flags & (PRECISION_LOW|PRECISION_MEDIUM|PRECISION_HIGH)) <= 1);
265 str << (fmt.flags & PRECISION_LOW ? "lowp" :
266 fmt.flags & PRECISION_MEDIUM ? "mediump" :
267 fmt.flags & PRECISION_HIGH ? "highp" : "");
// Stream helper that prints GLSL layout qualifier tokens (std140 / row_major /
// column_major) for the stored flag bits, comma-separating multiple tokens.
// NOTE(review): the bitDesc table declaration and the comma-printing line are
// missing from this dump.
271 struct LayoutFlagsFmt
274 LayoutFlagsFmt (deUint32 flags_) : flags(flags_) {}
277 std::ostream& operator<< (std::ostream& str, const LayoutFlagsFmt& fmt)
285 { LAYOUT_STD140, "std140" },
286 { LAYOUT_ROW_MAJOR, "row_major" },
287 { LAYOUT_COLUMN_MAJOR, "column_major" }
290 deUint32 remBits = fmt.flags;
291 for (int descNdx = 0; descNdx < DE_LENGTH_OF_ARRAY(bitDesc); descNdx++)
293 if (remBits & bitDesc[descNdx].bit)
// A previously printed token implies a separator is needed here.
295 if (remBits != fmt.flags)
297 str << bitDesc[descNdx].token;
298 remBits &= ~bitDesc[descNdx].bit;
// Every set bit must have been consumed — unknown layout flags are a bug.
301 DE_ASSERT(remBits == 0);
305 // Layout computation.
// Size in bytes of a basic type: scalar count times 4 (each scalar occupies
// one 32-bit word in the uniform buffer).
307 int getDataTypeByteSize (glu::DataType type)
309 return glu::getDataTypeScalarSize(type)*(int)sizeof(deUint32);
// std140 base alignment for scalar and vector types: scalars 4 bytes, vec2
// 8 bytes, vec3 and vec4 both 16 bytes. NOTE(review): the switch statement,
// some scalar cases and the default case are missing from this dump.
312 int getDataTypeByteAlignment (glu::DataType type)
316 case glu::TYPE_FLOAT:
319 case glu::TYPE_BOOL: return 1*(int)sizeof(deUint32);
321 case glu::TYPE_FLOAT_VEC2:
322 case glu::TYPE_INT_VEC2:
323 case glu::TYPE_UINT_VEC2:
324 case glu::TYPE_BOOL_VEC2: return 2*(int)sizeof(deUint32);
326 case glu::TYPE_FLOAT_VEC3:
327 case glu::TYPE_INT_VEC3:
328 case glu::TYPE_UINT_VEC3:
// vec3 has the same 16-byte base alignment as vec4 (std140 rule 3).
329 case glu::TYPE_BOOL_VEC3: // Fall-through to vec4
331 case glu::TYPE_FLOAT_VEC4:
332 case glu::TYPE_INT_VEC4:
333 case glu::TYPE_UINT_VEC4:
334 case glu::TYPE_BOOL_VEC4: return 4*(int)sizeof(deUint32);
// Queries the device's minUniformBufferOffsetAlignment limit and narrows it
// to deInt32; the assert guarantees the narrowing loses no information.
342 deInt32 getminUniformBufferOffsetAlignment (Context &ctx)
344 VkPhysicalDeviceProperties properties;
345 ctx.getInstanceInterface().getPhysicalDeviceProperties(ctx.getPhysicalDevice(), &properties);
346 VkDeviceSize align = properties.limits.minUniformBufferOffsetAlignment;
347 DE_ASSERT(align == (VkDeviceSize)(deInt32)align);
348 return (deInt32)align;
// std140 array stride for a non-matrix element type: the element size rounded
// up to vec4 alignment (16 bytes), per std140 rule 4.
351 int getDataTypeArrayStride (glu::DataType type)
353 DE_ASSERT(!glu::isDataTypeMatrix(type));
355 const int baseStride = getDataTypeByteSize(type);
356 const int vec4Alignment = (int)sizeof(deUint32)*4;
// Since baseStride never exceeds 16 here, max() is effectively the round-up.
358 DE_ASSERT(baseStride <= vec4Alignment);
359 return de::max(baseStride, vec4Alignment); // Really? See rule 4.
// Rounds a up to the next multiple of b. NOTE(review): the line computing
// d (presumably d = a / b) is missing from this dump — confirm.
362 static inline int deRoundUp32 (int a, int b)
365 return d*b == a ? a : (d+1)*b;
// Computes the std140 base alignment of a type (GL spec "Standard Uniform
// Block Layout" rules): matrices align like an array of their column (or row,
// if row_major) vectors; arrays round the element alignment up to vec4;
// structs use the max member alignment rounded up to vec4.
368 int computeStd140BaseAlignment (const VarType& type)
370 const int vec4Alignment = (int)sizeof(deUint32)*4;
372 if (type.isBasicType())
374 glu::DataType basicType = type.getBasicType();
376 if (glu::isDataTypeMatrix(basicType))
// Row-major matrices are laid out as arrays of row vectors (length =
// column count); column-major as arrays of column vectors.
378 bool isRowMajor = !!(type.getFlags() & LAYOUT_ROW_MAJOR);
379 int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
380 : glu::getDataTypeMatrixNumRows(basicType);
382 return getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
385 return getDataTypeByteAlignment(basicType);
387 else if (type.isArrayType())
389 int elemAlignment = computeStd140BaseAlignment(type.getElementType());
391 // Round up to alignment of vec4
392 return deRoundUp32(elemAlignment, vec4Alignment);
396 DE_ASSERT(type.isStructType());
398 int maxBaseAlignment = 0;
400 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
401 maxBaseAlignment = de::max(maxBaseAlignment, computeStd140BaseAlignment(memberIter->getType()));
403 return deRoundUp32(maxBaseAlignment, vec4Alignment);
// Merges block-level and uniform-level layout flags: within each category
// (packing, matrix order) the newer flags win if present, otherwise the
// previous ones are kept. (The return of mergedFlags is not visible here.)
407 inline deUint32 mergeLayoutFlags (deUint32 prevFlags, deUint32 newFlags)
409 const deUint32 packingMask = LAYOUT_STD140;
410 const deUint32 matrixMask = LAYOUT_ROW_MAJOR|LAYOUT_COLUMN_MAJOR;
412 deUint32 mergedFlags = 0;
414 mergedFlags |= ((newFlags & packingMask) ? newFlags : prevFlags) & packingMask;
415 mergedFlags |= ((newFlags & matrixMask) ? newFlags : prevFlags) & matrixMask;
// Recursively computes std140 offsets/strides for one variable, appending a
// UniformLayoutEntry per basic uniform to 'layout' and advancing curOffset
// (in/out byte cursor within the block). curPrefix is the accumulated GLSL
// name path; layoutFlags carry merged block+uniform layout qualifiers.
420 void computeStd140Layout (UniformLayout& layout, int& curOffset, int curBlockNdx, const std::string& curPrefix, const VarType& type, deUint32 layoutFlags)
422 int baseAlignment = computeStd140BaseAlignment(type);
// Every member starts at its base alignment (std140 rules 1-9).
424 curOffset = deAlign32(curOffset, baseAlignment);
426 if (type.isBasicType())
428 glu::DataType basicType = type.getBasicType();
429 UniformLayoutEntry entry;
431 entry.name = curPrefix;
432 entry.type = basicType;
434 entry.arrayStride = 0;
435 entry.matrixStride = 0;
436 entry.blockNdx = curBlockNdx;
438 if (glu::isDataTypeMatrix(basicType))
440 // Array of vectors as specified in rules 5 & 7.
441 bool isRowMajor = !!(layoutFlags & LAYOUT_ROW_MAJOR);
442 int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
443 : glu::getDataTypeMatrixNumRows(basicType);
444 int numVecs = isRowMajor ? glu::getDataTypeMatrixNumRows(basicType)
445 : glu::getDataTypeMatrixNumColumns(basicType);
446 int stride = getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
448 entry.offset = curOffset;
449 entry.matrixStride = stride;
450 entry.isRowMajor = isRowMajor;
452 curOffset += numVecs*stride;
// Non-matrix basic type: occupies its plain byte size at curOffset.
457 entry.offset = curOffset;
459 curOffset += getDataTypeByteSize(basicType);
462 layout.uniforms.push_back(entry);
464 else if (type.isArrayType())
466 const VarType& elemType = type.getElementType();
468 if (elemType.isBasicType() && !glu::isDataTypeMatrix(elemType.getBasicType()))
470 // Array of scalars or vectors.
471 glu::DataType elemBasicType = elemType.getBasicType();
472 UniformLayoutEntry entry;
473 int stride = getDataTypeArrayStride(elemBasicType);
475 entry.name = curPrefix + "[0]"; // Array uniforms are always postfixed with [0]
476 entry.type = elemBasicType;
477 entry.blockNdx = curBlockNdx;
478 entry.offset = curOffset;
479 entry.size = type.getArraySize();
480 entry.arrayStride = stride;
481 entry.matrixStride = 0;
483 curOffset += stride*type.getArraySize();
485 layout.uniforms.push_back(entry);
487 else if (elemType.isBasicType() && glu::isDataTypeMatrix(elemType.getBasicType()))
489 // Array of matrices.
490 glu::DataType elemBasicType = elemType.getBasicType();
491 bool isRowMajor = !!(layoutFlags & LAYOUT_ROW_MAJOR);
492 int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(elemBasicType)
493 : glu::getDataTypeMatrixNumRows(elemBasicType);
494 int numVecs = isRowMajor ? glu::getDataTypeMatrixNumRows(elemBasicType)
495 : glu::getDataTypeMatrixNumColumns(elemBasicType);
496 int stride = getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
497 UniformLayoutEntry entry;
499 entry.name = curPrefix + "[0]"; // Array uniforms are always postfixed with [0]
500 entry.type = elemBasicType;
501 entry.blockNdx = curBlockNdx;
502 entry.offset = curOffset;
503 entry.size = type.getArraySize();
// Each array element spans all of its matrix vectors.
504 entry.arrayStride = stride*numVecs;
505 entry.matrixStride = stride;
506 entry.isRowMajor = isRowMajor;
508 curOffset += numVecs*type.getArraySize()*stride;
510 layout.uniforms.push_back(entry);
// Arrays of aggregates: recurse per element with an indexed name.
514 DE_ASSERT(elemType.isStructType() || elemType.isArrayType());
516 for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
517 computeStd140Layout(layout, curOffset, curBlockNdx, curPrefix + "[" + de::toString(elemNdx) + "]", type.getElementType(), layoutFlags);
522 DE_ASSERT(type.isStructType());
524 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
525 computeStd140Layout(layout, curOffset, curBlockNdx, curPrefix + "." + memberIter->getName(), memberIter->getType(), layoutFlags);
// Struct size is padded up to its base alignment (std140 rule 9).
527 curOffset = deAlign32(curOffset, baseAlignment);
// Computes the full std140 layout for every uniform block in the interface:
// lays out each block's uniforms, then creates one BlockLayoutEntry per block
// instance (arrayed blocks get one entry per element, named "block[N]").
// NOTE(review): the declaration/reset of curOffset per block is missing from
// this dump — confirm it starts at 0 for each block.
531 void computeStd140Layout (UniformLayout& layout, const ShaderInterface& interface)
533 int numUniformBlocks = interface.getNumUniformBlocks();
535 for (int blockNdx = 0; blockNdx < numUniformBlocks; blockNdx++)
537 const UniformBlock& block = interface.getUniformBlock(blockNdx);
538 bool hasInstanceName = block.hasInstanceName();
// With an instance name, member API names are prefixed "BlockName.".
539 std::string blockPrefix = hasInstanceName ? (block.getBlockName() + ".") : "";
541 int activeBlockNdx = (int)layout.blocks.size();
542 int firstUniformNdx = (int)layout.uniforms.size();
544 for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
546 const Uniform& uniform = *uniformIter;
547 computeStd140Layout(layout, curOffset, activeBlockNdx, blockPrefix + uniform.getName(), uniform.getType(), mergeLayoutFlags(block.getFlags(), uniform.getFlags()));
550 int uniformIndicesEnd = (int)layout.uniforms.size();
551 int blockSize = curOffset;
552 int numInstances = block.isArray() ? block.getArraySize() : 1;
554 // Create block layout entries for each instance.
555 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
557 // Allocate entry for instance.
558 layout.blocks.push_back(BlockLayoutEntry());
559 BlockLayoutEntry& blockEntry = layout.blocks.back();
561 blockEntry.name = block.getBlockName();
562 blockEntry.size = blockSize;
563 blockEntry.bindingNdx = blockNdx;
564 blockEntry.instanceNdx = instanceNdx;
566 // Compute active uniform set for block.
567 for (int uniformNdx = firstUniformNdx; uniformNdx < uniformIndicesEnd; uniformNdx++)
568 blockEntry.activeUniformIndices.push_back(uniformNdx);
// Arrayed block instances are disambiguated by an "[N]" name suffix.
571 blockEntry.name += "[" + de::toString(instanceNdx) + "]";
// Fills the buffer memory for one uniform with random values, honoring the
// entry's offset, array stride and matrix stride. basePtr is the start of
// the block's mapped memory. NOTE(review): the switch statement and closing
// braces are missing from this dump.
578 void generateValue (const UniformLayoutEntry& entry, void* basePtr, de::Random& rnd)
580 glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
581 int scalarSize = glu::getDataTypeScalarSize(entry.type);
582 bool isMatrix = glu::isDataTypeMatrix(entry.type);
// For matrices, iterate per column (or row if row-major) vector.
583 int numVecs = isMatrix ? (entry.isRowMajor ? glu::getDataTypeMatrixNumRows(entry.type) : glu::getDataTypeMatrixNumColumns(entry.type)) : 1;
584 int vecSize = scalarSize / numVecs;
585 bool isArray = entry.size > 1;
586 const int compSize = sizeof(deUint32);
588 DE_ASSERT(scalarSize%numVecs == 0);
590 for (int elemNdx = 0; elemNdx < entry.size; elemNdx++)
592 deUint8* elemPtr = (deUint8*)basePtr + entry.offset + (isArray ? elemNdx*entry.arrayStride : 0);
594 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
596 deUint8* vecPtr = elemPtr + (isMatrix ? vecNdx*entry.matrixStride : 0);
598 for (int compNdx = 0; compNdx < vecSize; compNdx++)
600 deUint8* compPtr = vecPtr + compSize*compNdx;
// Small value ranges keep shader-side float comparisons exact enough.
604 case glu::TYPE_FLOAT: *((float*)compPtr) = (float)rnd.getInt(-9, 9); break;
605 case glu::TYPE_INT: *((int*)compPtr) = rnd.getInt(-9, 9); break;
606 case glu::TYPE_UINT: *((deUint32*)compPtr) = (deUint32)rnd.getInt(0, 9); break;
607 // \note Random bit pattern is used for true values. Spec states that all non-zero values are
608 // interpreted as true but some implementations fail this.
609 case glu::TYPE_BOOL: *((deUint32*)compPtr) = rnd.getBool() ? rnd.getUint32()|1u : 0u; break;
// Seeds a PRNG and writes random values for every active uniform of every
// block. blockPointers maps active block index -> mapped buffer base pointer;
// note find()->second assumes every block index is present in the map.
618 void generateValues (const UniformLayout& layout, const std::map<int, void*>& blockPointers, deUint32 seed)
620 de::Random rnd (seed);
621 int numBlocks = (int)layout.blocks.size();
623 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
625 void* basePtr = blockPointers.find(blockNdx)->second;
626 int numEntries = (int)layout.blocks[blockNdx].activeUniformIndices.size();
628 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
630 const UniformLayoutEntry& entry = layout.uniforms[layout.blocks[blockNdx].activeUniformIndices[entryNdx]];
631 generateValue(entry, basePtr, rnd);
// Returns GLSL source for a per-type comparison helper used by the generated
// shaders. Floats compare with a 0.05 tolerance; integer/bool types compare
// exactly. NOTE(review): the switch statement and default case are missing
// from this dump.
638 const char* getCompareFuncForType (glu::DataType type)
642 case glu::TYPE_FLOAT: return "mediump float compare_float (highp float a, highp float b) { return abs(a - b) < 0.05 ? 1.0 : 0.0; }\n";
643 case glu::TYPE_FLOAT_VEC2: return "mediump float compare_vec2 (highp vec2 a, highp vec2 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y); }\n";
644 case glu::TYPE_FLOAT_VEC3: return "mediump float compare_vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z); }\n";
645 case glu::TYPE_FLOAT_VEC4: return "mediump float compare_vec4 (highp vec4 a, highp vec4 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z)*compare_float(a.w, b.w); }\n";
646 case glu::TYPE_FLOAT_MAT2: return "mediump float compare_mat2 (highp mat2 a, highp mat2 b) { return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1]); }\n";
647 case glu::TYPE_FLOAT_MAT2X3: return "mediump float compare_mat2x3 (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1]); }\n";
648 case glu::TYPE_FLOAT_MAT2X4: return "mediump float compare_mat2x4 (highp mat2x4 a, highp mat2x4 b){ return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1]); }\n";
649 case glu::TYPE_FLOAT_MAT3X2: return "mediump float compare_mat3x2 (highp mat3x2 a, highp mat3x2 b){ return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1])*compare_vec2(a[2], b[2]); }\n";
650 case glu::TYPE_FLOAT_MAT3: return "mediump float compare_mat3 (highp mat3 a, highp mat3 b) { return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1])*compare_vec3(a[2], b[2]); }\n";
651 case glu::TYPE_FLOAT_MAT3X4: return "mediump float compare_mat3x4 (highp mat3x4 a, highp mat3x4 b){ return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1])*compare_vec4(a[2], b[2]); }\n";
652 case glu::TYPE_FLOAT_MAT4X2: return "mediump float compare_mat4x2 (highp mat4x2 a, highp mat4x2 b){ return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1])*compare_vec2(a[2], b[2])*compare_vec2(a[3], b[3]); }\n";
653 case glu::TYPE_FLOAT_MAT4X3: return "mediump float compare_mat4x3 (highp mat4x3 a, highp mat4x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1])*compare_vec3(a[2], b[2])*compare_vec3(a[3], b[3]); }\n";
654 case glu::TYPE_FLOAT_MAT4: return "mediump float compare_mat4 (highp mat4 a, highp mat4 b) { return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1])*compare_vec4(a[2], b[2])*compare_vec4(a[3], b[3]); }\n";
655 case glu::TYPE_INT: return "mediump float compare_int (highp int a, highp int b) { return a == b ? 1.0 : 0.0; }\n";
656 case glu::TYPE_INT_VEC2: return "mediump float compare_ivec2 (highp ivec2 a, highp ivec2 b) { return a == b ? 1.0 : 0.0; }\n";
657 case glu::TYPE_INT_VEC3: return "mediump float compare_ivec3 (highp ivec3 a, highp ivec3 b) { return a == b ? 1.0 : 0.0; }\n";
658 case glu::TYPE_INT_VEC4: return "mediump float compare_ivec4 (highp ivec4 a, highp ivec4 b) { return a == b ? 1.0 : 0.0; }\n";
659 case glu::TYPE_UINT: return "mediump float compare_uint (highp uint a, highp uint b) { return a == b ? 1.0 : 0.0; }\n";
660 case glu::TYPE_UINT_VEC2: return "mediump float compare_uvec2 (highp uvec2 a, highp uvec2 b) { return a == b ? 1.0 : 0.0; }\n";
661 case glu::TYPE_UINT_VEC3: return "mediump float compare_uvec3 (highp uvec3 a, highp uvec3 b) { return a == b ? 1.0 : 0.0; }\n";
662 case glu::TYPE_UINT_VEC4: return "mediump float compare_uvec4 (highp uvec4 a, highp uvec4 b) { return a == b ? 1.0 : 0.0; }\n";
663 case glu::TYPE_BOOL: return "mediump float compare_bool (bool a, bool b) { return a == b ? 1.0 : 0.0; }\n";
664 case glu::TYPE_BOOL_VEC2: return "mediump float compare_bvec2 (bvec2 a, bvec2 b) { return a == b ? 1.0 : 0.0; }\n";
665 case glu::TYPE_BOOL_VEC3: return "mediump float compare_bvec3 (bvec3 a, bvec3 b) { return a == b ? 1.0 : 0.0; }\n";
666 case glu::TYPE_BOOL_VEC4: return "mediump float compare_bvec4 (bvec4 a, bvec4 b) { return a == b ? 1.0 : 0.0; }\n";
// Records which compare_* helper functions a given basic type needs: float
// vectors need compare_float; matrices need compare_float plus the column
// vector comparator; everything else needs only its own comparator.
// NOTE(review): the switch statement and break statements are missing from
// this dump.
673 void getCompareDependencies (std::set<glu::DataType>& compareFuncs, glu::DataType basicType)
677 case glu::TYPE_FLOAT_VEC2:
678 case glu::TYPE_FLOAT_VEC3:
679 case glu::TYPE_FLOAT_VEC4:
680 compareFuncs.insert(glu::TYPE_FLOAT);
681 compareFuncs.insert(basicType);
684 case glu::TYPE_FLOAT_MAT2:
685 case glu::TYPE_FLOAT_MAT2X3:
686 case glu::TYPE_FLOAT_MAT2X4:
687 case glu::TYPE_FLOAT_MAT3X2:
688 case glu::TYPE_FLOAT_MAT3:
689 case glu::TYPE_FLOAT_MAT3X4:
690 case glu::TYPE_FLOAT_MAT4X2:
691 case glu::TYPE_FLOAT_MAT4X3:
692 case glu::TYPE_FLOAT_MAT4:
693 compareFuncs.insert(glu::TYPE_FLOAT);
// Matrix comparison is column-wise, hence the column-vector comparator.
694 compareFuncs.insert(glu::getDataTypeFloatVec(glu::getDataTypeMatrixNumRows(basicType)));
695 compareFuncs.insert(basicType);
699 compareFuncs.insert(basicType);
// Recursively collects the set of basic data types reachable from a VarType
// (descending through structs and array element types).
704 void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const VarType& type)
706 if (type.isStructType())
708 for (StructType::ConstIterator iter = type.getStruct().begin(); iter != type.getStruct().end(); ++iter)
709 collectUniqueBasicTypes(basicTypes, iter->getType());
711 else if (type.isArrayType())
712 collectUniqueBasicTypes(basicTypes, type.getElementType());
715 DE_ASSERT(type.isBasicType());
716 basicTypes.insert(type.getBasicType());
// Overload: collects basic types used by every uniform of a block.
720 void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const UniformBlock& uniformBlock)
722 for (UniformBlock::ConstIterator iter = uniformBlock.begin(); iter != uniformBlock.end(); ++iter)
723 collectUniqueBasicTypes(basicTypes, iter->getType());
// Overload: collects basic types across all blocks of the shader interface.
726 void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const ShaderInterface& interface)
728 for (int ndx = 0; ndx < interface.getNumUniformBlocks(); ++ndx)
729 collectUniqueBasicTypes(basicTypes, interface.getUniformBlock(ndx))
// Emits the GLSL compare_* helper functions required by the interface, in
// deterministic enum order so dependencies (compare_float before compare_vecN)
// are defined before use.
732 void generateCompareFuncs (std::ostream& str, const ShaderInterface& interface)
734 std::set<glu::DataType> types;
735 std::set<glu::DataType> compareFuncs;
737 // Collect unique basic types
738 collectUniqueBasicTypes(types, interface);
740 // Set of compare functions required
741 for (std::set<glu::DataType>::const_iterator iter = types.begin(); iter != types.end(); ++iter)
743 getCompareDependencies(compareFuncs, *iter);
746 for (int type = 0; type < glu::TYPE_LAST; ++type)
748 if (compareFuncs.find(glu::DataType(type)) != compareFuncs.end())
749 str << getCompareFuncForType(glu::DataType(type));
// Stream helper that writes indentation for the given nesting level.
// NOTE(review): the struct header, member declaration and the loop body that
// writes each indent unit (presumably a tab) are missing from this dump.
756 Indent (int level_) : level(level_) {}
759 std::ostream& operator<< (std::ostream& str, const Indent& indent)
761 for (int i = 0; i < indent.level; i++)
766 void generateDeclaration (std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 unusedHints);
767 void generateDeclaration (std::ostringstream& src, const Uniform& uniform, int indentLevel);
768 void generateDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
770 void generateLocalDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
771 void generateFullDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
// Emits a named struct's full GLSL declaration; only named structs may be
// declared at file scope (anonymous ones are declared inline at use sites).
773 void generateDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
775 DE_ASSERT(structType.hasTypeName());
776 generateFullDeclaration(src, structType, indentLevel);
// Emits "struct [Name]\n{ ...members... }" (no trailing semicolon — callers
// add it). NOTE(review): the line emitting the "struct" keyword and the
// per-member ";\n" appear to be among the lines missing from this dump.
780 void generateFullDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
783 if (structType.hasTypeName())
784 src << " " << structType.getTypeName();
785 src << "\n" << Indent(indentLevel) << "{\n";
787 for (StructType::ConstIterator memberIter = structType.begin(); memberIter != structType.end(); memberIter++)
789 src << Indent(indentLevel + 1);
790 generateDeclaration(src, memberIter->getType(), memberIter->getName(), indentLevel + 1, memberIter->getFlags() & UNUSED_BOTH);
793 src << Indent(indentLevel) << "}";
// Emits just the struct's type name (used when the struct was already
// declared at file scope); indentLevel is intentionally unused.
796 void generateLocalDeclaration (std::ostringstream& src, const StructType& structType, int /* indentLevel */)
798 src << structType.getTypeName();
// Emits a GLSL declaration for one variable: optional layout() and precision
// qualifiers, the (possibly nested-array or struct) type, the name, and array
// size suffixes. A trailing unused-hint comment documents which shader stages
// do not read the variable. NOTE(review): several lines (e.g. emitting the
// name for array/struct cases, the closing ";") are missing from this dump.
801 void generateDeclaration (std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 unusedHints)
803 deUint32 flags = type.getFlags();
805 if ((flags & LAYOUT_MASK) != 0)
806 src << "layout(" << LayoutFlagsFmt(flags & LAYOUT_MASK) << ") ";
808 if ((flags & PRECISION_MASK) != 0)
809 src << PrecisionFlagsFmt(flags & PRECISION_MASK) << " ";
811 if (type.isBasicType())
812 src << glu::getDataTypeName(type.getBasicType()) << " " << name;
813 else if (type.isArrayType())
// Collect sizes of all nested array dimensions, innermost element last,
// so "[a][b]" suffixes can be emitted in declaration order.
815 std::vector<int> arraySizes;
816 const VarType* curType = &type;
817 while (curType->isArrayType())
819 arraySizes.push_back(curType->getArraySize());
820 curType = &curType->getElementType();
823 if (curType->isBasicType())
825 if ((curType->getFlags() & PRECISION_MASK) != 0)
826 src << PrecisionFlagsFmt(curType->getFlags() & PRECISION_MASK) << " ";
827 src << glu::getDataTypeName(curType->getBasicType());
831 DE_ASSERT(curType->isStructType());
832 generateLocalDeclaration(src, curType->getStruct(), indentLevel+1);
837 for (std::vector<int>::const_iterator sizeIter = arraySizes.begin(); sizeIter != arraySizes.end(); sizeIter++)
838 src << "[" << *sizeIter << "]";
// Plain struct variable: reference the (named) struct type inline.
842 generateLocalDeclaration(src, type.getStruct(), indentLevel+1);
848 // Print out unused hints.
849 if (unusedHints != 0)
850 src << " // unused in " << (unusedHints == UNUSED_BOTH ? "both shaders" :
851 unusedHints == UNUSED_VERTEX ? "vertex shader" :
852 unusedHints == UNUSED_FRAGMENT ? "fragment shader" : "???");
// Emits the declaration of one uniform inside a block: per-uniform layout()
// qualifier (if any) followed by the variable declaration itself.
857 void generateDeclaration (std::ostringstream& src, const Uniform& uniform, int indentLevel)
859 if ((uniform.getFlags() & LAYOUT_MASK) != 0)
860 src << "layout(" << LayoutFlagsFmt(uniform.getFlags() & LAYOUT_MASK) << ") ";
862 generateDeclaration(src, uniform.getType(), uniform.getName(), indentLevel, uniform.getFlags() & UNUSED_BOTH);
// Emits a whole uniform block declaration: "layout(set = 0, binding = N, ...)
// uniform BlockName { members } [instanceName][arraySize];". The descriptor
// binding index equals the interface block index. NOTE(review): the closing
// ")\n", braces and trailing ";" lines are missing from this dump.
865 void generateDeclaration (std::ostringstream& src, int blockNdx, const UniformBlock& block)
867 src << "layout(set = 0, binding = " << blockNdx;
868 if ((block.getFlags() & LAYOUT_MASK) != 0)
869 src << ", " << LayoutFlagsFmt(block.getFlags() & LAYOUT_MASK);
872 src << "uniform " << block.getBlockName();
875 for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
878 generateDeclaration(src, *uniformIter, 1 /* indent level */);
883 if (block.hasInstanceName())
885 src << " " << block.getInstanceName();
// Arrayed blocks get their size suffix after the instance name.
887 src << "[" << block.getArraySize() << "]";
// A block without an instance name must not be an array (GLSL rule).
890 DE_ASSERT(!block.isArray());
// Emits a GLSL constructor expression (e.g. "vec4(1.0, ...)") reproducing the
// value stored in buffer memory for one uniform/array element, reading it
// back through the same offset/stride arithmetic used when writing.
// NOTE(review): scalar-case handling, the comma separators, the value switch
// and the closing ")" are among the lines missing from this dump.
895 void generateValueSrc (std::ostringstream& src, const UniformLayoutEntry& entry, const void* basePtr, int elementNdx)
897 glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
898 int scalarSize = glu::getDataTypeScalarSize(entry.type);
899 bool isArray = entry.size > 1;
900 const deUint8* elemPtr = (const deUint8*)basePtr + entry.offset + (isArray ? elementNdx * entry.arrayStride : 0);
901 const int compSize = sizeof(deUint32);
904 src << glu::getDataTypeName(entry.type) << "(";
906 if (glu::isDataTypeMatrix(entry.type))
908 int numRows = glu::getDataTypeMatrixNumRows(entry.type);
909 int numCols = glu::getDataTypeMatrixNumColumns(entry.type);
911 DE_ASSERT(scalarType == glu::TYPE_FLOAT);
913 // Constructed in column-wise order.
914 for (int colNdx = 0; colNdx < numCols; colNdx++)
916 for (int rowNdx = 0; rowNdx < numRows; rowNdx++)
// Address of one matrix component depends on the stored orientation.
918 const deUint8* compPtr = elemPtr + (entry.isRowMajor ? (rowNdx * entry.matrixStride + colNdx * compSize)
919 : (colNdx * entry.matrixStride + rowNdx * compSize));
921 if (colNdx > 0 || rowNdx > 0)
924 src << de::floatToString(*((const float*)compPtr), 1);
// Non-matrix path: emit each scalar component in storage order.
930 for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
932 const deUint8* compPtr = elemPtr + scalarNdx * compSize;
939 case glu::TYPE_FLOAT: src << de::floatToString(*((const float*)compPtr), 1); break;
940 case glu::TYPE_INT: src << *((const int*)compPtr); break;
941 case glu::TYPE_UINT: src << *((const deUint32*)compPtr) << "u"; break;
942 case glu::TYPE_BOOL: src << (*((const deUint32*)compPtr) != 0u ? "true" : "false"); break;
// Recursively emits GLSL statements that multiply 'result' by compare_*()
// of each basic value under this variable, comparing the shader-visible
// value (srcName path) against the expected value reconstructed from buffer
// memory (apiName path, looked up in the computed layout). Struct members
// flagged by unusedMask are skipped. NOTE(review): some parameter lines and
// interior statements are missing from this dump; 'resultVar' appears unused
// in the visible lines — confirm against upstream.
953 void generateCompareSrc (std::ostringstream& src,
954 const char* resultVar,
956 const std::string& srcName,
957 const std::string& apiName,
958 const UniformLayout& layout,
962 if (type.isBasicType() || (type.isArrayType() && type.getElementType().isBasicType()))
964 // Basic type or array of basic types.
965 bool isArray = type.isArrayType();
966 glu::DataType elementType = isArray ? type.getElementType().getBasicType() : type.getBasicType();
967 const char* typeName = glu::getDataTypeName(elementType);
968 std::string fullApiName = std::string(apiName) + (isArray ? "[0]" : ""); // Arrays are always postfixed with [0]
969 int uniformNdx = layout.getUniformIndex(fullApiName);
970 const UniformLayoutEntry& entry = layout.uniforms[uniformNdx];
// Arrays compare element-by-element against per-element expected values.
974 for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
976 src << "\tresult *= compare_" << typeName << "(" << srcName << "[" << elemNdx << "], ";
977 generateValueSrc(src, entry, basePtr, elemNdx);
// Non-array basic value: single comparison.
983 src << "\tresult *= compare_" << typeName << "(" << srcName << ", ";
984 generateValueSrc(src, entry, basePtr, 0);
988 else if (type.isArrayType())
// Array of aggregates: recurse per element with indexed name paths.
990 const VarType& elementType = type.getElementType();
992 for (int elementNdx = 0; elementNdx < type.getArraySize(); elementNdx++)
994 std::string op = std::string("[") + de::toString(elementNdx) + "]";
995 std::string elementSrcName = std::string(srcName) + op;
996 std::string elementApiName = std::string(apiName) + op;
997 generateCompareSrc(src, resultVar, elementType, elementSrcName, elementApiName, layout, basePtr, unusedMask);
1002 DE_ASSERT(type.isStructType());
1004 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
1006 if (memberIter->getFlags() & unusedMask)
1007 continue; // Skip member.
1009 std::string op = std::string(".") + memberIter->getName();
1010 std::string memberSrcName = std::string(srcName) + op;
1011 std::string memberApiName = std::string(apiName) + op;
1012 generateCompareSrc(src, resultVar, memberIter->getType(), memberSrcName, memberApiName, layout, basePtr, unusedMask);
// Top-level driver: for each block declared in this shader stage (vertex or
// fragment), and each instance of arrayed blocks, emits comparisons for every
// uniform not flagged unused in this stage. srcName uses the GLSL instance
// path; apiName uses the block-name path used by the layout.
1017 void generateCompareSrc (std::ostringstream& src, const char* resultVar, const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers, bool isVertex)
1019 deUint32 unusedMask = isVertex ? UNUSED_VERTEX : UNUSED_FRAGMENT;
1021 for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
1023 const UniformBlock& block = interface.getUniformBlock(blockNdx);
// Skip blocks not declared in this stage (continue line missing in dump).
1025 if ((block.getFlags() & (isVertex ? DECLARE_VERTEX : DECLARE_FRAGMENT)) == 0)
1028 bool hasInstanceName = block.hasInstanceName();
1029 bool isArray = block.isArray();
1030 int numInstances = isArray ? block.getArraySize() : 1;
1031 std::string apiPrefix = hasInstanceName ? block.getBlockName() + "." : std::string("");
// GLSL forbids arrays of blocks without an instance name.
1033 DE_ASSERT(!isArray || hasInstanceName);
1035 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
1037 std::string instancePostfix = isArray ? std::string("[") + de::toString(instanceNdx) + "]" : std::string("");
1038 std::string blockInstanceName = block.getBlockName() + instancePostfix;
1039 std::string srcPrefix = hasInstanceName ? block.getInstanceName() + instancePostfix + "." : std::string("");
1040 int activeBlockNdx = layout.getBlockIndex(blockInstanceName);
// Assumes every active block index is present in blockPointers.
1041 void* basePtr = blockPointers.find(activeBlockNdx)->second;
1043 for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
1045 const Uniform& uniform = *uniformIter;
1047 if (uniform.getFlags() & unusedMask)
1048 continue; // Don't read from that uniform.
1050 std::string srcName = srcPrefix + uniform.getName();
1051 std::string apiName = apiPrefix + uniform.getName();
1052 generateCompareSrc(src, resultVar, uniform.getType(), srcName, apiName, layout, basePtr, unusedMask);
1058 std::string generateVertexShader (const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers)
1060 std::ostringstream src;
1061 src << "#version 450\n";
1063 src << "layout(location = 0) in highp vec4 a_position;\n";
1064 src << "layout(location = 0) out mediump float v_vtxResult;\n";
1067 std::vector<const StructType*> namedStructs;
1068 interface.getNamedStructs(namedStructs);
1069 for (std::vector<const StructType*>::const_iterator structIter = namedStructs.begin(); structIter != namedStructs.end(); structIter++)
1070 generateDeclaration(src, **structIter, 0);
1072 for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
1074 const UniformBlock& block = interface.getUniformBlock(blockNdx);
1075 if (block.getFlags() & DECLARE_VERTEX)
1076 generateDeclaration(src, blockNdx, block);
1079 // Comparison utilities.
1081 generateCompareFuncs(src, interface);
1084 "void main (void)\n"
1086 " gl_Position = a_position;\n"
1087 " mediump float result = 1.0;\n";
1090 generateCompareSrc(src, "result", interface, layout, blockPointers, true);
1092 src << " v_vtxResult = result;\n"
1098 std::string generateFragmentShader (const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers)
1100 std::ostringstream src;
1101 src << "#version 450\n";
1103 src << "layout(location = 0) in mediump float v_vtxResult;\n";
1104 src << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
1107 std::vector<const StructType*> namedStructs;
1108 interface.getNamedStructs(namedStructs);
1109 for (std::vector<const StructType*>::const_iterator structIter = namedStructs.begin(); structIter != namedStructs.end(); structIter++)
1110 generateDeclaration(src, **structIter, 0);
1112 for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
1114 const UniformBlock& block = interface.getUniformBlock(blockNdx);
1115 if (block.getFlags() & DECLARE_FRAGMENT)
1116 generateDeclaration(src, blockNdx, block);
1119 // Comparison utilities.
1121 generateCompareFuncs(src, interface);
1124 "void main (void)\n"
1126 " mediump float result = 1.0;\n";
1129 generateCompareSrc(src, "result", interface, layout, blockPointers, false);
1131 src << " dEQP_FragColor = vec4(1.0, v_vtxResult, result, 1.0);\n"
1137 Move<VkBuffer> createBuffer (Context& context, VkDeviceSize bufferSize, vk::VkBufferUsageFlags usageFlags)
1139 const VkDevice vkDevice = context.getDevice();
1140 const DeviceInterface& vk = context.getDeviceInterface();
1141 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1143 const VkBufferCreateInfo bufferInfo =
1145 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1146 DE_NULL, // const void* pNext;
1147 0u, // VkBufferCreateFlags flags;
1148 bufferSize, // VkDeviceSize size;
1149 usageFlags, // VkBufferUsageFlags usage;
1150 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1151 1u, // deUint32 queueFamilyIndexCount;
1152 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
1155 return vk::createBuffer(vk, vkDevice, &bufferInfo);
1158 Move<vk::VkImage> createImage2D (Context& context, int width, int height, vk::VkFormat format, vk::VkImageTiling tiling, vk::VkImageUsageFlags usageFlags)
1160 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1161 const vk::VkImageCreateInfo params =
1163 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType
1164 DE_NULL, // const void* pNext
1165 0u, // VkImageCreateFlags flags
1166 vk::VK_IMAGE_TYPE_2D, // VkImageType imageType
1167 format, // VkFormat format
1168 { width, height, 1 }, // VkExtent3D extent
1169 1u, // deUint32 mipLevels
1170 1u, // deUint32 arrayLayers
1171 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples
1172 tiling, // VkImageTiling tiling
1173 usageFlags, // VkImageUsageFlags usage
1174 vk::VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode
1175 1u, // deUint32 queueFamilyIndexCount
1176 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices
1177 vk::VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout
1180 return vk::createImage(context.getDeviceInterface(), context.getDevice(), ¶ms);
1183 de::MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs)
1185 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1186 const vk::VkMemoryRequirements bufReqs = vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
1187 de::MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(bufReqs, memReqs);
1189 vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
1194 de::MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkImage image, vk::MemoryRequirement memReqs)
1196 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1197 const vk::VkMemoryRequirements imgReqs = vk::getImageMemoryRequirements(vkd, context.getDevice(), image);
1198 de::MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(imgReqs, memReqs);
1200 vkd.bindImageMemory(context.getDevice(), image, memory->getMemory(), memory->getOffset());
1205 Move<vk::VkImageView> createAttachmentView (Context& context, vk::VkImage image, vk::VkFormat format)
1207 const vk::VkImageViewCreateInfo params =
1209 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
1213 vk::VK_IMAGE_VIEW_TYPE_2D, // viewType
1215 vk::makeComponentMappingRGBA(), // components
1216 { vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u,1u }, // subresourceRange
1219 return vk::createImageView(context.getDeviceInterface(), context.getDevice(), ¶ms);
1222 Move<vk::VkPipelineLayout> createPipelineLayout (Context& context, vk::VkDescriptorSetLayout descriptorSetLayout)
1224 const vk::VkPipelineLayoutCreateInfo params =
1226 vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
1229 1u, // setLayoutCount
1230 &descriptorSetLayout, // pSetLayouts
1231 0u, // pushConstantRangeCount
1232 DE_NULL, // pPushConstantRanges
1235 return vk::createPipelineLayout(context.getDeviceInterface(), context.getDevice(), ¶ms);
1238 Move<vk::VkCommandPool> createCmdPool (Context& context)
1240 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1241 const vk::VkCommandPoolCreateInfo params =
1243 vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
1245 vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // flags
1246 queueFamilyIndex, // queueFamilyIndex
1249 return vk::createCommandPool(context.getDeviceInterface(), context.getDevice(), ¶ms);
1252 Move<vk::VkCommandBuffer> createCmdBuffer (Context& context, vk::VkCommandPool cmdPool)
1254 const vk::VkCommandBufferAllocateInfo params =
1256 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
1258 cmdPool, // commandPool
1259 vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1263 return vk::allocateCommandBuffer(context.getDeviceInterface(), context.getDevice(), ¶ms);
1267 // UniformBlockCaseInstance
1269 class UniformBlockCaseInstance : public vkt::TestInstance
1272 UniformBlockCaseInstance (Context& context,
1273 UniformBlockCase::BufferMode bufferMode,
1274 const UniformLayout& layout,
1275 const std::map<int, void*>& blockPointers);
1276 virtual ~UniformBlockCaseInstance (void);
1277 virtual tcu::TestStatus iterate (void);
1283 RENDER_HEIGHT = 100,
1286 vk::Move<VkRenderPass> createRenderPass (vk::VkFormat format) const;
1287 vk::Move<VkFramebuffer> createFramebuffer (vk::VkRenderPass renderPass, vk::VkImageView colorImageView) const;
1288 vk::Move<VkDescriptorSetLayout> createDescriptorSetLayout (void) const;
1289 vk::Move<VkDescriptorPool> createDescriptorPool (void) const;
1290 vk::Move<VkPipeline> createPipeline (vk::VkShaderModule vtxShaderModule, vk::VkShaderModule fragShaderModule, vk::VkPipelineLayout pipelineLayout, vk::VkRenderPass renderPass) const;
1292 vk::VkDescriptorBufferInfo addUniformData (deUint32 size, const void* dataPtr);
1294 UniformBlockCase::BufferMode m_bufferMode;
1295 const UniformLayout& m_layout;
1296 const std::map<int, void*>& m_blockPointers;
1298 typedef de::SharedPtr<vk::Unique<vk::VkBuffer> > VkBufferSp;
1299 typedef de::SharedPtr<vk::Allocation> AllocationSp;
1301 std::vector<VkBufferSp> m_uniformBuffers;
1302 std::vector<AllocationSp> m_uniformAllocs;
1305 UniformBlockCaseInstance::UniformBlockCaseInstance (Context& ctx,
1306 UniformBlockCase::BufferMode bufferMode,
1307 const UniformLayout& layout,
1308 const std::map<int, void*>& blockPointers)
1309 : vkt::TestInstance (ctx)
1310 , m_bufferMode (bufferMode)
1312 , m_blockPointers (blockPointers)
1316 UniformBlockCaseInstance::~UniformBlockCaseInstance (void)
1320 tcu::TestStatus UniformBlockCaseInstance::iterate (void)
1322 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
1323 const vk::VkDevice device = m_context.getDevice();
1324 const vk::VkQueue queue = m_context.getUniversalQueue();
1325 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1327 const float positions[] =
1329 -1.0f, -1.0f, 0.0f, 1.0f,
1330 -1.0f, +1.0f, 0.0f, 1.0f,
1331 +1.0f, -1.0f, 0.0f, 1.0f,
1332 +1.0f, +1.0f, 0.0f, 1.0f
1335 const deUint32 indices[] = { 0, 1, 2, 2, 1, 3 };
1337 vk::Unique<VkBuffer> positionsBuffer (createBuffer(m_context, sizeof(positions), vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1338 de::UniquePtr<Allocation> positionsAlloc (allocateAndBindMemory(m_context, *positionsBuffer, MemoryRequirement::HostVisible));
1339 vk::Unique<VkBuffer> indicesBuffer (createBuffer(m_context, sizeof(indices), vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT|vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1340 de::UniquePtr<Allocation> indicesAlloc (allocateAndBindMemory(m_context, *indicesBuffer, MemoryRequirement::HostVisible));
1342 int minUniformBufferOffsetAlignment = getminUniformBufferOffsetAlignment(m_context);
1344 // Upload attrbiutes data
1346 deMemcpy(positionsAlloc->getHostPtr(), positions, sizeof(positions));
1347 flushMappedMemoryRange(vk, device, positionsAlloc->getMemory(), positionsAlloc->getOffset(), sizeof(positions));
1349 deMemcpy(indicesAlloc->getHostPtr(), indices, sizeof(indices));
1350 flushMappedMemoryRange(vk, device, indicesAlloc->getMemory(), indicesAlloc->getOffset(), sizeof(indices));
1353 vk::Unique<VkImage> colorImage (createImage2D(m_context,
1356 vk::VK_FORMAT_R8G8B8A8_UNORM,
1357 vk::VK_IMAGE_TILING_OPTIMAL,
1358 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
1359 de::UniquePtr<Allocation> colorImageAlloc (allocateAndBindMemory(m_context, *colorImage, MemoryRequirement::Any));
1360 vk::Unique<VkImageView> colorImageView (createAttachmentView(m_context, *colorImage, vk::VK_FORMAT_R8G8B8A8_UNORM));
1362 vk::Unique<VkDescriptorSetLayout> descriptorSetLayout (createDescriptorSetLayout());
1363 vk::Unique<VkDescriptorPool> descriptorPool (createDescriptorPool());
1365 const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo =
1367 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
1368 DE_NULL, // const void* pNext;
1369 *descriptorPool, // VkDescriptorPool descriptorPool;
1370 1u, // deUint32 setLayoutCount;
1371 &descriptorSetLayout.get() // const VkDescriptorSetLayout* pSetLayouts;
1374 vk::Unique<VkDescriptorSet> descriptorSet(vk::allocateDescriptorSet(vk, device, &descriptorSetAllocateInfo));
1375 int numBlocks = (int)m_layout.blocks.size();
1376 std::vector<vk::VkDescriptorBufferInfo> descriptors(numBlocks);
1378 // Upload uniform data
1380 vk::DescriptorSetUpdateBuilder descriptorSetUpdateBuilder;
1382 if (m_bufferMode == UniformBlockCase::BUFFERMODE_PER_BLOCK)
1384 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1386 const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1387 const void* srcPtr = m_blockPointers.find(blockNdx)->second;
1389 descriptors[blockNdx] = addUniformData(block.size, srcPtr);
1390 descriptorSetUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::bindingArrayElement(block.bindingNdx, block.instanceNdx),
1391 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptors[blockNdx]);
1396 int currentOffset = 0;
1397 std::map<int, int> offsets;
1398 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1400 if (minUniformBufferOffsetAlignment > 0)
1401 currentOffset = deAlign32(currentOffset, minUniformBufferOffsetAlignment);
1402 offsets[blockNdx] = currentOffset;
1403 currentOffset += m_layout.blocks[blockNdx].size;
1406 deUint32 totalSize = currentOffset;
1408 // Make a copy of the data that satisfies the device's min uniform buffer alignment
1409 std::vector<deUint8> data;
1410 data.resize(totalSize);
1411 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1413 deMemcpy(&data[offsets[blockNdx]], m_blockPointers.find(blockNdx)->second, m_layout.blocks[blockNdx].size);
1416 vk::VkBuffer buffer = addUniformData(totalSize, &data[0]).buffer;
1418 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1420 const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1421 deUint32 size = block.size;
1423 const VkDescriptorBufferInfo descriptor =
1425 buffer, // VkBuffer buffer;
1426 (deUint32)offsets[blockNdx], // VkDeviceSize offset;
1427 size, // VkDeviceSize range;
1430 descriptors[blockNdx] = descriptor;
1431 descriptorSetUpdateBuilder.writeSingle(*descriptorSet,
1432 vk::DescriptorSetUpdateBuilder::Location::bindingArrayElement(block.bindingNdx, block.instanceNdx),
1433 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1434 &descriptors[blockNdx]);
1438 descriptorSetUpdateBuilder.update(vk, device);
1441 vk::Unique<VkRenderPass> renderPass (createRenderPass(vk::VK_FORMAT_R8G8B8A8_UNORM));
1442 vk::Unique<VkFramebuffer> framebuffer (createFramebuffer(*renderPass, *colorImageView));
1443 vk::Unique<VkPipelineLayout> pipelineLayout (createPipelineLayout(m_context, *descriptorSetLayout));
1445 vk::Unique<VkShaderModule> vtxShaderModule (vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
1446 vk::Unique<VkShaderModule> fragShaderModule (vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
1447 vk::Unique<VkPipeline> pipeline (createPipeline(*vtxShaderModule, *fragShaderModule, *pipelineLayout, *renderPass));
1448 vk::Unique<VkCommandPool> cmdPool (createCmdPool(m_context));
1449 vk::Unique<VkCommandBuffer> cmdBuffer (createCmdBuffer(m_context, *cmdPool));
1450 vk::Unique<VkBuffer> readImageBuffer (createBuffer(m_context, (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * 4), vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT));
1451 de::UniquePtr<Allocation> readImageAlloc (allocateAndBindMemory(m_context, *readImageBuffer, vk::MemoryRequirement::HostVisible));
1453 // Record command buffer
1454 const vk::VkCommandBufferBeginInfo beginInfo =
1456 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
1457 DE_NULL, // const void* pNext;
1458 0u, // VkCommandBufferUsageFlags flags;
1459 (vk::VkRenderPass)0, // VkRenderPass renderPass;
1460 0u, // deUint32 subpass;
1461 (vk::VkFramebuffer)0, // VkFramebuffer framebuffer;
1462 false, // VkBool32 occlusionQueryEnable;
1463 0u, // VkQueryControlFlags queryFlags;
1464 0u // VkQueryPipelineStatisticFlags pipelineStatistics;
1466 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &beginInfo));
1468 const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.125f, 0.25f, 0.75f, 1.0f);
1469 const vk::VkRenderPassBeginInfo passBeginInfo =
1471 vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
1472 DE_NULL, // const void* pNext;
1473 *renderPass, // VkRenderPass renderPass;
1474 *framebuffer, // VkFramebuffer framebuffer;
1475 { { 0, 0 }, { RENDER_WIDTH, RENDER_HEIGHT } }, // VkRect2D renderArea;
1476 1u, // deUint32 clearValueCount;
1477 &clearValue, // const VkClearValue* pClearValues;
1480 vk.cmdBeginRenderPass(*cmdBuffer, &passBeginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
1482 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
1483 vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
1485 const vk::VkDeviceSize offsets[] = { 0u };
1486 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &*positionsBuffer, offsets);
1487 vk.cmdBindIndexBuffer(*cmdBuffer, *indicesBuffer, (vk::VkDeviceSize)0, vk::VK_INDEX_TYPE_UINT32);
1489 vk.cmdDrawIndexed(*cmdBuffer, DE_LENGTH_OF_ARRAY(indices), 1u, 0u, 0u, 0u);
1490 vk.cmdEndRenderPass(*cmdBuffer);
1492 // Add render finish barrier
1494 const vk::VkImageMemoryBarrier renderFinishBarrier =
1496 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1497 DE_NULL, // const void* pNext
1498 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VVkAccessFlags srcAccessMask;
1499 vk::VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1500 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1501 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1502 queueFamilyIndex, // deUint32 srcQueueFamilyIndex;
1503 queueFamilyIndex, // deUint32 dstQueueFamilyIndex;
1504 *colorImage, // VkImage image;
1506 vk::VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1507 0u, // deUint32 baseMipLevel;
1508 1u, // deUint32 mipLevels;
1509 0u, // deUint32 baseArraySlice;
1510 1u, // deUint32 arraySize;
1511 } // VkImageSubresourceRange subresourceRange
1513 const void* const barriers[] = { &renderFinishBarrier };
1515 vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(barriers), barriers);
1518 // Add Image->Buffer copy command
1520 const vk::VkBufferImageCopy copyParams =
1522 (vk::VkDeviceSize)0u, // VkDeviceSize bufferOffset;
1523 (deUint32)RENDER_WIDTH, // deUint32 bufferRowLength;
1524 (deUint32)RENDER_HEIGHT, // deUint32 bufferImageHeight;
1526 vk::VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspect aspect;
1527 0u, // deUint32 mipLevel;
1528 0u, // deUint32 arrayLayer;
1529 1u, // deUint32 arraySize;
1530 }, // VkImageSubresourceCopy imageSubresource
1531 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1532 { RENDER_WIDTH, RENDER_HEIGHT, 1u } // VkExtent3D imageExtent;
1535 vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, ©Params);
1538 // Add copy finish barrier
1540 const vk::VkBufferMemoryBarrier copyFinishBarrier =
1542 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1543 DE_NULL, // const void* pNext;
1544 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1545 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1546 queueFamilyIndex, // deUint32 srcQueueFamilyIndex;
1547 queueFamilyIndex, // deUint32 destQueueFamilyIndex;
1548 *readImageBuffer, // VkBuffer buffer;
1549 0u, // VkDeviceSize offset;
1550 (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * 4)// VkDeviceSize size;
1552 const void* const barriers[] = { ©FinishBarrier };
1554 vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, DE_FALSE, (deUint32)DE_LENGTH_OF_ARRAY(barriers), barriers);
1557 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1559 // Submit the command buffer
1561 const vk::VkFenceCreateInfo fenceParams =
1563 vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
1564 DE_NULL, // const void* pNext;
1565 0u, // VkFenceCreateFlags flags;
1567 const Unique<vk::VkFence> fence(vk::createFence(vk, device, &fenceParams));
1569 const VkSubmitInfo submitInfo =
1571 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
1572 DE_NULL, // const void* pNext;
1573 0u, // deUint32 waitSemaphoreCount;
1574 DE_NULL, // const VkSemaphore* pWaitSemaphores;
1575 1u, // deUint32 commandBufferCount;
1576 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
1577 0u, // deUint32 signalSemaphoreCount;
1578 DE_NULL // const VkSemaphore* pSignalSemaphores;
1581 VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
1582 VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), DE_TRUE, ~0ull));
1585 // Read back the results
1586 tcu::Surface surface(RENDER_WIDTH, RENDER_HEIGHT);
1588 const tcu::TextureFormat textureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
1589 const tcu::ConstPixelBufferAccess imgAccess(textureFormat, RENDER_WIDTH, RENDER_HEIGHT, 1, readImageAlloc->getHostPtr());
1590 const vk::VkDeviceSize bufferSize = RENDER_WIDTH * RENDER_HEIGHT * 4;
1591 invalidateMappedMemoryRange(vk, device, readImageAlloc->getMemory(), readImageAlloc->getOffset(), bufferSize);
1593 tcu::copy(surface.getAccess(), imgAccess);
1596 // Check if the result image is all white
1597 tcu::RGBA white(tcu::RGBA::white());
1598 int numFailedPixels = 0;
1600 for (int y = 0; y < surface.getHeight(); y++)
1602 for (int x = 0; x < surface.getWidth(); x++)
1604 if (surface.getPixel(x, y) != white)
1605 numFailedPixels += 1;
1609 if (numFailedPixels > 0)
1611 tcu::TestLog& log = m_context.getTestContext().getLog();
1612 log << tcu::TestLog::Image("Image", "Rendered image", surface);
1613 log << tcu::TestLog::Message << "Image comparison failed, got " << numFailedPixels << " non-white pixels" << tcu::TestLog::EndMessage;
1615 for (size_t blockNdx = 0; blockNdx < m_layout.blocks.size(); blockNdx++)
1617 const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1618 log << tcu::TestLog::Message << "Block index: " << blockNdx << " infos: " << block << tcu::TestLog::EndMessage;
1621 for (size_t uniformNdx = 0; uniformNdx < m_layout.uniforms.size(); uniformNdx++)
1623 log << tcu::TestLog::Message << "Uniform index: " << uniformNdx << " infos: " << m_layout.uniforms[uniformNdx] << tcu::TestLog::EndMessage;
1626 return tcu::TestStatus::fail("Detected non-white pixels");
1629 return tcu::TestStatus::pass("Full white image ok");
1632 vk::VkDescriptorBufferInfo UniformBlockCaseInstance::addUniformData (deUint32 size, const void* dataPtr)
1634 const VkDevice vkDevice = m_context.getDevice();
1635 const DeviceInterface& vk = m_context.getDeviceInterface();
1637 Move<VkBuffer> buffer = createBuffer(m_context, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
1638 de::MovePtr<Allocation> alloc = allocateAndBindMemory(m_context, *buffer, vk::MemoryRequirement::HostVisible);
1640 deMemcpy(alloc->getHostPtr(), dataPtr, size);
1641 flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), size);
1643 const VkDescriptorBufferInfo descriptor =
1645 *buffer, // VkBuffer buffer;
1646 0u, // VkDeviceSize offset;
1647 size, // VkDeviceSize range;
1651 m_uniformBuffers.push_back(VkBufferSp(new vk::Unique<vk::VkBuffer>(buffer)));
1652 m_uniformAllocs.push_back(AllocationSp(alloc.release()));
1657 vk::Move<VkRenderPass> UniformBlockCaseInstance::createRenderPass (vk::VkFormat format) const
1659 const VkDevice vkDevice = m_context.getDevice();
1660 const DeviceInterface& vk = m_context.getDeviceInterface();
1662 const VkAttachmentDescription attachmentDescription =
1664 0u, // VkAttachmentDescriptorFlags flags;
1665 format, // VkFormat format;
1666 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1667 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
1668 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
1669 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
1670 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
1671 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
1672 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
1675 const VkAttachmentReference attachmentReference =
1677 0u, // deUint32 attachment;
1678 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
1682 const VkSubpassDescription subpassDescription =
1684 0u, // VkSubpassDescriptionFlags flags;
1685 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
1686 0u, // deUint32 inputAttachmentCount;
1687 DE_NULL, // const VkAttachmentReference* pInputAttachments;
1688 1u, // deUint32 colorAttachmentCount;
1689 &attachmentReference, // const VkAttachmentReference* pColorAttachments;
1690 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
1691 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
1692 0u, // deUint32 preserveAttachmentCount;
1693 DE_NULL // const VkAttachmentReference* pPreserveAttachments;
1696 const VkRenderPassCreateInfo renderPassParams =
1698 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
1699 DE_NULL, // const void* pNext;
1700 0u, // VkRenderPassCreateFlags flags;
1701 1u, // deUint32 attachmentCount;
1702 &attachmentDescription, // const VkAttachmentDescription* pAttachments;
1703 1u, // deUint32 subpassCount;
1704 &subpassDescription, // const VkSubpassDescription* pSubpasses;
1705 0u, // deUint32 dependencyCount;
1706 DE_NULL // const VkSubpassDependency* pDependencies;
1709 return vk::createRenderPass(vk, vkDevice, &renderPassParams);
1712 vk::Move<VkFramebuffer> UniformBlockCaseInstance::createFramebuffer (vk::VkRenderPass renderPass, vk::VkImageView colorImageView) const
1714 const VkDevice vkDevice = m_context.getDevice();
1715 const DeviceInterface& vk = m_context.getDeviceInterface();
1717 const VkFramebufferCreateInfo framebufferParams =
1719 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
1720 DE_NULL, // const void* pNext;
1721 0u, // VkFramebufferCreateFlags flags;
1722 renderPass, // VkRenderPass renderPass;
1723 1u, // deUint32 attachmentCount;
1724 &colorImageView, // const VkImageView* pAttachments;
1725 RENDER_WIDTH, // deUint32 width;
1726 RENDER_HEIGHT, // deUint32 height;
1727 1u // deUint32 layers;
1730 return vk::createFramebuffer(vk, vkDevice, &framebufferParams);
1733 vk::Move<VkDescriptorSetLayout> UniformBlockCaseInstance::createDescriptorSetLayout (void) const
1735 int numBlocks = (int)m_layout.blocks.size();
1736 int lastBindingNdx = -1;
1737 std::vector<int> lengths;
1739 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1741 const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1743 if (block.bindingNdx == lastBindingNdx)
1749 lengths.push_back(1);
1750 lastBindingNdx = block.bindingNdx;
1754 vk::DescriptorSetLayoutBuilder layoutBuilder;
1755 for (size_t i = 0; i < lengths.size(); i++)
1759 layoutBuilder.addArrayBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, lengths[i], vk::VK_SHADER_STAGE_ALL);
1763 layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_ALL);
1767 return layoutBuilder.build(m_context.getDeviceInterface(), m_context.getDevice());
1770 vk::Move<VkDescriptorPool> UniformBlockCaseInstance::createDescriptorPool (void) const
1772 vk::DescriptorPoolBuilder poolBuilder;
1775 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, (int)m_layout.blocks.size())
1776 .build(m_context.getDeviceInterface(), m_context.getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1779 vk::Move<VkPipeline> UniformBlockCaseInstance::createPipeline (vk::VkShaderModule vtxShaderModule, vk::VkShaderModule fragShaderModule, vk::VkPipelineLayout pipelineLayout, vk::VkRenderPass renderPass) const
1781 const VkDevice vkDevice = m_context.getDevice();
1782 const DeviceInterface& vk = m_context.getDeviceInterface();
1784 const VkVertexInputBindingDescription vertexBinding =
1786 0, // deUint32 binding;
1787 (deUint32)sizeof(float) * 4, // deUint32 strideInBytes;
1788 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
1791 const VkVertexInputAttributeDescription vertexAttribute =
1793 0, // deUint32 location;
1794 0, // deUint32 binding;
1795 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
1796 0u // deUint32 offset;
1799 const VkPipelineShaderStageCreateInfo shaderStages[2] =
1802 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
1803 DE_NULL, // const void* pNext;
1804 0u, // VkPipelineShaderStageCreateFlags flags;
1805 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage;
1806 vtxShaderModule, // VkShaderModule module;
1807 "main", // const char* pName;
1808 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
1811 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
1812 DE_NULL, // const void* pNext;
1813 0u, // VkPipelineShaderStageCreateFlags flags;
1814 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage;
1815 fragShaderModule, // VkShaderModule module;
1816 "main", // const char* pName;
1817 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
1821 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
1823 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
1824 DE_NULL, // const void* pNext;
1825 0u, // VkPipelineVertexInputStateCreateFlags flags;
1826 1u, // deUint32 vertexBindingDescriptionCount;
1827 &vertexBinding, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
1828 1u, // deUint32 vertexAttributeDescriptionCount;
1829 &vertexAttribute, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
1832 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateParams =
1834 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,// VkStructureType sType;
1835 DE_NULL, // const void* pNext;
1836 0u, // VkPipelineInputAssemblyStateCreateFlags flags;
1837 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // VkPrimitiveTopology topology;
1838 false // VkBool32 primitiveRestartEnable;
1841 const VkViewport viewport =
1843 0.0f, // float originX;
1844 0.0f, // float originY;
1845 (float)RENDER_WIDTH, // float width;
1846 (float)RENDER_HEIGHT, // float height;
1847 0.0f, // float minDepth;
1848 1.0f // float maxDepth;
1852 const VkRect2D scissor =
1857 }, // VkOffset2D offset;
1859 RENDER_WIDTH, // deUint32 width;
1860 RENDER_HEIGHT, // deUint32 height;
1861 }, // VkExtent2D extent;
1864 const VkPipelineViewportStateCreateInfo viewportStateParams =
1866 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
1867 DE_NULL, // const void* pNext;
1868 0u, // VkPipelineViewportStateCreateFlags flags;
1869 1u, // deUint32 viewportCount;
1870 &viewport, // const VkViewport* pViewports;
1871 1u, // deUint32 scissorsCount;
1872 &scissor, // const VkRect2D* pScissors;
1875 const VkPipelineRasterizationStateCreateInfo rasterStateParams =
1877 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
1878 DE_NULL, // const void* pNext;
1879 0u, // VkPipelineRasterizationStateCreateFlags flags;
1880 false, // VkBool32 depthClampEnable;
1881 false, // VkBool32 rasterizerDiscardEnable;
1882 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
1883 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
1884 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
1885 false, // VkBool32 depthBiasEnable;
1886 0.0f, // float depthBiasConstantFactor;
1887 0.0f, // float depthBiasClamp;
1888 0.0f, // float depthBiasSlopeFactor;
1889 1.0f, // float lineWidth;
1892 const VkPipelineMultisampleStateCreateInfo multisampleStateParams =
1894 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
1895 DE_NULL, // const void* pNext;
1896 0u, // VkPipelineMultisampleStateCreateFlags flags;
1897 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
1898 VK_FALSE, // VkBool32 sampleShadingEnable;
1899 0.0f, // float minSampleShading;
1900 DE_NULL, // const VkSampleMask* pSampleMask;
1901 VK_FALSE, // VkBool32 alphaToCoverageEnable;
1902 VK_FALSE // VkBool32 alphaToOneEnable;
1905 const VkPipelineColorBlendAttachmentState colorBlendAttachmentState =
1907 false, // VkBool32 blendEnable;
1908 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendColor;
1909 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendColor;
1910 VK_BLEND_OP_ADD, // VkBlendOp blendOpColor;
1911 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendAlpha;
1912 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendAlpha;
1913 VK_BLEND_OP_ADD, // VkBlendOp blendOpAlpha;
1914 VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | // VkChannelFlags channelWriteMask;
1915 VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT
1918 const VkPipelineColorBlendStateCreateInfo colorBlendStateParams =
1920 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
1921 DE_NULL, // const void* pNext;
1922 0u, // VkPipelineColorBlendStateCreateFlags flags;
1923 false, // VkBool32 logicOpEnable;
1924 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
1925 1u, // deUint32 attachmentCount;
1926 &colorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
1927 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
1930 const VkPipelineDynamicStateCreateInfo dynamicStateInfo =
1932 VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, // VkStructureType sType;
1933 DE_NULL, // const void* pNext;
1934 0u, // VkPipelineDynamicStateCreateFlags flags;
1935 0u, // deUint32 dynamicStateCount;
1936 DE_NULL // const VkDynamicState* pDynamicStates;
1939 const VkGraphicsPipelineCreateInfo graphicsPipelineParams =
1941 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
1942 DE_NULL, // const void* pNext;
1943 0u, // VkPipelineCreateFlags flags;
1944 2u, // deUint32 stageCount;
1945 shaderStages, // const VkPipelineShaderStageCreateInfo* pStages;
1946 &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
1947 &inputAssemblyStateParams, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
1948 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
1949 &viewportStateParams, // const VkPipelineViewportStateCreateInfo* pViewportState;
1950 &rasterStateParams, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
1951 &multisampleStateParams, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
1952 DE_NULL, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
1953 &colorBlendStateParams, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
1954 &dynamicStateInfo, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
1955 pipelineLayout, // VkPipelineLayout layout;
1956 renderPass, // VkRenderPass renderPass;
1957 0u, // deUint32 subpass;
1958 0u, // VkPipeline basePipelineHandle;
1959 0u // deInt32 basePipelineIndex;
1962 return vk::createGraphicsPipeline(vk, vkDevice, DE_NULL, &graphicsPipelineParams);
1965 } // anonymous (utilities)
1967 // UniformBlockCase.
1969 UniformBlockCase::UniformBlockCase (tcu::TestContext& testCtx, const std::string& name, const std::string& description, BufferMode bufferMode)
1970 : TestCase (testCtx, name, description)
1971 , m_bufferMode (bufferMode)
1975 UniformBlockCase::~UniformBlockCase (void)
1979 void UniformBlockCase::initPrograms (vk::SourceCollections& programCollection) const
1981 DE_ASSERT(!m_vertShaderSource.empty());
1982 DE_ASSERT(!m_fragShaderSource.empty());
1984 programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
1985 programCollection.glslSources.add("frag") << glu::FragmentSource(m_fragShaderSource);
1988 TestInstance* UniformBlockCase::createInstance (Context& context) const
1990 return new UniformBlockCaseInstance(context, m_bufferMode, m_uniformLayout, m_blockPointers);
1993 void UniformBlockCase::init (void)
1995 // Compute reference layout.
1996 computeStd140Layout(m_uniformLayout, m_interface);
1998 // Assign storage for reference values.
2001 for (std::vector<BlockLayoutEntry>::const_iterator blockIter = m_uniformLayout.blocks.begin(); blockIter != m_uniformLayout.blocks.end(); blockIter++)
2002 totalSize += blockIter->size;
2003 m_data.resize(totalSize);
2005 // Pointers for each block.
2007 for (int blockNdx = 0; blockNdx < (int)m_uniformLayout.blocks.size(); blockNdx++)
2009 m_blockPointers[blockNdx] = &m_data[0] + curOffset;
2010 curOffset += m_uniformLayout.blocks[blockNdx].size;
2015 generateValues(m_uniformLayout, m_blockPointers, 1 /* seed */);
2017 // Generate shaders.
2018 m_vertShaderSource = generateVertexShader(m_interface, m_uniformLayout, m_blockPointers);
2019 m_fragShaderSource = generateFragmentShader(m_interface, m_uniformLayout, m_blockPointers);