1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
13 * http://www.apache.org/licenses/LICENSE-2.0
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
23 * \brief Uniform block case.
24 *//*--------------------------------------------------------------------*/
26 #include "vktUniformBlockCase.hpp"
28 #include "vkPrograms.hpp"
30 #include "gluVarType.hpp"
31 #include "tcuTestLog.hpp"
32 #include "tcuSurface.hpp"
33 #include "deRandom.hpp"
34 #include "deStringUtil.hpp"
36 #include "tcuTextureUtil.hpp"
37 #include "deSharedPtr.hpp"
39 #include "vkMemUtil.hpp"
40 #include "vkQueryUtil.hpp"
41 #include "vkTypeUtil.hpp"
43 #include "vkRefUtil.hpp"
44 #include "vkBuilderUtil.hpp"
56 // VarType implementation.
58 VarType::VarType (void)
64 VarType::VarType (const VarType& other)
71 VarType::VarType (glu::DataType basicType, deUint32 flags)
75 m_data.basicType = basicType;
78 VarType::VarType (const VarType& elementType, int arraySize)
82 m_data.array.size = arraySize;
83 m_data.array.elementType = new VarType(elementType);
86 VarType::VarType (const StructType* structPtr)
87 : m_type (TYPE_STRUCT)
90 m_data.structPtr = structPtr;
93 VarType::~VarType (void)
95 if (m_type == TYPE_ARRAY)
96 delete m_data.array.elementType;
99 VarType& VarType::operator= (const VarType& other)
102 return *this; // Self-assignment.
104 if (m_type == TYPE_ARRAY)
105 delete m_data.array.elementType;
107 m_type = other.m_type;
108 m_flags = other.m_flags;
111 if (m_type == TYPE_ARRAY)
113 m_data.array.elementType = new VarType(*other.m_data.array.elementType);
114 m_data.array.size = other.m_data.array.size;
117 m_data = other.m_data;
122 // StructType implementation.
124 void StructType::addMember (const std::string& name, const VarType& type, deUint32 flags)
126 m_members.push_back(StructMember(name, type, flags));
129 // Uniform implementation.
131 Uniform::Uniform (const std::string& name, const VarType& type, deUint32 flags)
138 // UniformBlock implementation.
140 UniformBlock::UniformBlock (const std::string& blockName)
141 : m_blockName (blockName)
147 std::ostream& operator<< (std::ostream& stream, const BlockLayoutEntry& entry)
149 stream << entry.name << " { name = " << entry.name
150 << ", size = " << entry.size
151 << ", activeUniformIndices = [";
153 for (std::vector<int>::const_iterator i = entry.activeUniformIndices.begin(); i != entry.activeUniformIndices.end(); i++)
155 if (i != entry.activeUniformIndices.begin())
164 std::ostream& operator<< (std::ostream& stream, const UniformLayoutEntry& entry)
166 stream << entry.name << " { type = " << glu::getDataTypeName(entry.type)
167 << ", size = " << entry.size
168 << ", blockNdx = " << entry.blockNdx
169 << ", offset = " << entry.offset
170 << ", arrayStride = " << entry.arrayStride
171 << ", matrixStride = " << entry.matrixStride
172 << ", isRowMajor = " << (entry.isRowMajor ? "true" : "false")
177 int UniformLayout::getUniformIndex (const std::string& name) const
179 for (int ndx = 0; ndx < (int)uniforms.size(); ndx++)
181 if (uniforms[ndx].name == name)
188 int UniformLayout::getBlockIndex (const std::string& name) const
190 for (int ndx = 0; ndx < (int)blocks.size(); ndx++)
192 if (blocks[ndx].name == name)
199 // ShaderInterface implementation.
201 ShaderInterface::ShaderInterface (void)
205 ShaderInterface::~ShaderInterface (void)
209 StructType& ShaderInterface::allocStruct (const std::string& name)
211 m_structs.push_back(StructTypeSP(new StructType(name)));
212 return *m_structs.back();
215 struct StructNameEquals
219 StructNameEquals (const std::string& name_) : name(name_) {}
221 bool operator() (const StructTypeSP type) const
223 return type->hasTypeName() && name == type->getTypeName();
227 void ShaderInterface::getNamedStructs (std::vector<const StructType*>& structs) const
229 for (std::vector<StructTypeSP>::const_iterator i = m_structs.begin(); i != m_structs.end(); i++)
231 if ((*i)->hasTypeName())
232 structs.push_back((*i).get());
236 UniformBlock& ShaderInterface::allocBlock (const std::string& name)
238 m_uniformBlocks.push_back(UniformBlockSP(new UniformBlock(name)));
239 return *m_uniformBlocks.back();
242 namespace // Utilities
245 struct PrecisionFlagsFmt
248 PrecisionFlagsFmt (deUint32 flags_) : flags(flags_) {}
251 std::ostream& operator<< (std::ostream& str, const PrecisionFlagsFmt& fmt)
254 DE_ASSERT(dePop32(fmt.flags & (PRECISION_LOW|PRECISION_MEDIUM|PRECISION_HIGH)) <= 1);
255 str << (fmt.flags & PRECISION_LOW ? "lowp" :
256 fmt.flags & PRECISION_MEDIUM ? "mediump" :
257 fmt.flags & PRECISION_HIGH ? "highp" : "");
261 struct LayoutFlagsFmt
264 LayoutFlagsFmt (deUint32 flags_) : flags(flags_) {}
267 std::ostream& operator<< (std::ostream& str, const LayoutFlagsFmt& fmt)
275 { LAYOUT_STD140, "std140" },
276 { LAYOUT_ROW_MAJOR, "row_major" },
277 { LAYOUT_COLUMN_MAJOR, "column_major" }
280 deUint32 remBits = fmt.flags;
281 for (int descNdx = 0; descNdx < DE_LENGTH_OF_ARRAY(bitDesc); descNdx++)
283 if (remBits & bitDesc[descNdx].bit)
285 if (remBits != fmt.flags)
287 str << bitDesc[descNdx].token;
288 remBits &= ~bitDesc[descNdx].bit;
291 DE_ASSERT(remBits == 0);
295 // Layout computation.
297 int getDataTypeByteSize (glu::DataType type)
299 return glu::getDataTypeScalarSize(type)*(int)sizeof(deUint32);
302 int getDataTypeByteAlignment (glu::DataType type)
306 case glu::TYPE_FLOAT:
309 case glu::TYPE_BOOL: return 1*(int)sizeof(deUint32);
311 case glu::TYPE_FLOAT_VEC2:
312 case glu::TYPE_INT_VEC2:
313 case glu::TYPE_UINT_VEC2:
314 case glu::TYPE_BOOL_VEC2: return 2*(int)sizeof(deUint32);
316 case glu::TYPE_FLOAT_VEC3:
317 case glu::TYPE_INT_VEC3:
318 case glu::TYPE_UINT_VEC3:
319 case glu::TYPE_BOOL_VEC3: // Fall-through to vec4
321 case glu::TYPE_FLOAT_VEC4:
322 case glu::TYPE_INT_VEC4:
323 case glu::TYPE_UINT_VEC4:
324 case glu::TYPE_BOOL_VEC4: return 4*(int)sizeof(deUint32);
332 deInt32 getminUniformBufferOffsetAlignment (Context &ctx)
334 VkPhysicalDeviceProperties properties;
335 ctx.getInstanceInterface().getPhysicalDeviceProperties(ctx.getPhysicalDevice(), &properties);
336 VkDeviceSize align = properties.limits.minUniformBufferOffsetAlignment;
337 DE_ASSERT(align == (VkDeviceSize)(deInt32)align);
338 return (deInt32)align;
341 int getDataTypeArrayStride (glu::DataType type)
343 DE_ASSERT(!glu::isDataTypeMatrix(type));
345 const int baseStride = getDataTypeByteSize(type);
346 const int vec4Alignment = (int)sizeof(deUint32)*4;
348 DE_ASSERT(baseStride <= vec4Alignment);
349 return de::max(baseStride, vec4Alignment); // Really? See rule 4.
//! Round a up to the nearest multiple of b (a >= 0, b > 0). Returns a unchanged
//! when it is already a multiple of b.
static inline int deRoundUp32 (int a, int b)
{
	int d = a/b;
	return d*b == a ? a : (d+1)*b;
}
358 int computeStd140BaseAlignment (const VarType& type)
360 const int vec4Alignment = (int)sizeof(deUint32)*4;
362 if (type.isBasicType())
364 glu::DataType basicType = type.getBasicType();
366 if (glu::isDataTypeMatrix(basicType))
368 bool isRowMajor = !!(type.getFlags() & LAYOUT_ROW_MAJOR);
369 int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
370 : glu::getDataTypeMatrixNumRows(basicType);
372 return getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
375 return getDataTypeByteAlignment(basicType);
377 else if (type.isArrayType())
379 int elemAlignment = computeStd140BaseAlignment(type.getElementType());
381 // Round up to alignment of vec4
382 return deRoundUp32(elemAlignment, vec4Alignment);
386 DE_ASSERT(type.isStructType());
388 int maxBaseAlignment = 0;
390 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
391 maxBaseAlignment = de::max(maxBaseAlignment, computeStd140BaseAlignment(memberIter->getType()));
393 return deRoundUp32(maxBaseAlignment, vec4Alignment);
397 inline deUint32 mergeLayoutFlags (deUint32 prevFlags, deUint32 newFlags)
399 const deUint32 packingMask = LAYOUT_STD140;
400 const deUint32 matrixMask = LAYOUT_ROW_MAJOR|LAYOUT_COLUMN_MAJOR;
402 deUint32 mergedFlags = 0;
404 mergedFlags |= ((newFlags & packingMask) ? newFlags : prevFlags) & packingMask;
405 mergedFlags |= ((newFlags & matrixMask) ? newFlags : prevFlags) & matrixMask;
410 void computeStd140Layout (UniformLayout& layout, int& curOffset, int curBlockNdx, const std::string& curPrefix, const VarType& type, deUint32 layoutFlags)
412 int baseAlignment = computeStd140BaseAlignment(type);
414 curOffset = deAlign32(curOffset, baseAlignment);
416 if (type.isBasicType())
418 glu::DataType basicType = type.getBasicType();
419 UniformLayoutEntry entry;
421 entry.name = curPrefix;
422 entry.type = basicType;
424 entry.arrayStride = 0;
425 entry.matrixStride = 0;
426 entry.blockNdx = curBlockNdx;
428 if (glu::isDataTypeMatrix(basicType))
430 // Array of vectors as specified in rules 5 & 7.
431 bool isRowMajor = !!(layoutFlags & LAYOUT_ROW_MAJOR);
432 int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
433 : glu::getDataTypeMatrixNumRows(basicType);
434 int numVecs = isRowMajor ? glu::getDataTypeMatrixNumRows(basicType)
435 : glu::getDataTypeMatrixNumColumns(basicType);
436 int stride = getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
438 entry.offset = curOffset;
439 entry.matrixStride = stride;
440 entry.isRowMajor = isRowMajor;
442 curOffset += numVecs*stride;
447 entry.offset = curOffset;
449 curOffset += getDataTypeByteSize(basicType);
452 layout.uniforms.push_back(entry);
454 else if (type.isArrayType())
456 const VarType& elemType = type.getElementType();
458 if (elemType.isBasicType() && !glu::isDataTypeMatrix(elemType.getBasicType()))
460 // Array of scalars or vectors.
461 glu::DataType elemBasicType = elemType.getBasicType();
462 UniformLayoutEntry entry;
463 int stride = getDataTypeArrayStride(elemBasicType);
465 entry.name = curPrefix + "[0]"; // Array uniforms are always postfixed with [0]
466 entry.type = elemBasicType;
467 entry.blockNdx = curBlockNdx;
468 entry.offset = curOffset;
469 entry.size = type.getArraySize();
470 entry.arrayStride = stride;
471 entry.matrixStride = 0;
473 curOffset += stride*type.getArraySize();
475 layout.uniforms.push_back(entry);
477 else if (elemType.isBasicType() && glu::isDataTypeMatrix(elemType.getBasicType()))
479 // Array of matrices.
480 glu::DataType elemBasicType = elemType.getBasicType();
481 bool isRowMajor = !!(layoutFlags & LAYOUT_ROW_MAJOR);
482 int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(elemBasicType)
483 : glu::getDataTypeMatrixNumRows(elemBasicType);
484 int numVecs = isRowMajor ? glu::getDataTypeMatrixNumRows(elemBasicType)
485 : glu::getDataTypeMatrixNumColumns(elemBasicType);
486 int stride = getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
487 UniformLayoutEntry entry;
489 entry.name = curPrefix + "[0]"; // Array uniforms are always postfixed with [0]
490 entry.type = elemBasicType;
491 entry.blockNdx = curBlockNdx;
492 entry.offset = curOffset;
493 entry.size = type.getArraySize();
494 entry.arrayStride = stride*numVecs;
495 entry.matrixStride = stride;
496 entry.isRowMajor = isRowMajor;
498 curOffset += numVecs*type.getArraySize()*stride;
500 layout.uniforms.push_back(entry);
504 DE_ASSERT(elemType.isStructType() || elemType.isArrayType());
506 for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
507 computeStd140Layout(layout, curOffset, curBlockNdx, curPrefix + "[" + de::toString(elemNdx) + "]", type.getElementType(), layoutFlags);
512 DE_ASSERT(type.isStructType());
514 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
515 computeStd140Layout(layout, curOffset, curBlockNdx, curPrefix + "." + memberIter->getName(), memberIter->getType(), layoutFlags);
517 curOffset = deAlign32(curOffset, baseAlignment);
521 void computeStd140Layout (UniformLayout& layout, const ShaderInterface& interface)
523 int numUniformBlocks = interface.getNumUniformBlocks();
525 for (int blockNdx = 0; blockNdx < numUniformBlocks; blockNdx++)
527 const UniformBlock& block = interface.getUniformBlock(blockNdx);
528 bool hasInstanceName = block.hasInstanceName();
529 std::string blockPrefix = hasInstanceName ? (block.getBlockName() + ".") : "";
531 int activeBlockNdx = (int)layout.blocks.size();
532 int firstUniformNdx = (int)layout.uniforms.size();
534 for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
536 const Uniform& uniform = *uniformIter;
537 computeStd140Layout(layout, curOffset, activeBlockNdx, blockPrefix + uniform.getName(), uniform.getType(), mergeLayoutFlags(block.getFlags(), uniform.getFlags()));
540 int uniformIndicesEnd = (int)layout.uniforms.size();
541 int blockSize = curOffset;
542 int numInstances = block.isArray() ? block.getArraySize() : 1;
544 // Create block layout entries for each instance.
545 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
547 // Allocate entry for instance.
548 layout.blocks.push_back(BlockLayoutEntry());
549 BlockLayoutEntry& blockEntry = layout.blocks.back();
551 blockEntry.name = block.getBlockName();
552 blockEntry.size = blockSize;
553 blockEntry.bindingNdx = blockNdx;
554 blockEntry.instanceNdx = instanceNdx;
556 // Compute active uniform set for block.
557 for (int uniformNdx = firstUniformNdx; uniformNdx < uniformIndicesEnd; uniformNdx++)
558 blockEntry.activeUniformIndices.push_back(uniformNdx);
561 blockEntry.name += "[" + de::toString(instanceNdx) + "]";
568 void generateValue (const UniformLayoutEntry& entry, void* basePtr, de::Random& rnd)
570 glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
571 int scalarSize = glu::getDataTypeScalarSize(entry.type);
572 bool isMatrix = glu::isDataTypeMatrix(entry.type);
573 int numVecs = isMatrix ? (entry.isRowMajor ? glu::getDataTypeMatrixNumRows(entry.type) : glu::getDataTypeMatrixNumColumns(entry.type)) : 1;
574 int vecSize = scalarSize / numVecs;
575 bool isArray = entry.size > 1;
576 const int compSize = sizeof(deUint32);
578 DE_ASSERT(scalarSize%numVecs == 0);
580 for (int elemNdx = 0; elemNdx < entry.size; elemNdx++)
582 deUint8* elemPtr = (deUint8*)basePtr + entry.offset + (isArray ? elemNdx*entry.arrayStride : 0);
584 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
586 deUint8* vecPtr = elemPtr + (isMatrix ? vecNdx*entry.matrixStride : 0);
588 for (int compNdx = 0; compNdx < vecSize; compNdx++)
590 deUint8* compPtr = vecPtr + compSize*compNdx;
594 case glu::TYPE_FLOAT: *((float*)compPtr) = (float)rnd.getInt(-9, 9); break;
595 case glu::TYPE_INT: *((int*)compPtr) = rnd.getInt(-9, 9); break;
596 case glu::TYPE_UINT: *((deUint32*)compPtr) = (deUint32)rnd.getInt(0, 9); break;
597 // \note Random bit pattern is used for true values. Spec states that all non-zero values are
598 // interpreted as true but some implementations fail this.
599 case glu::TYPE_BOOL: *((deUint32*)compPtr) = rnd.getBool() ? rnd.getUint32()|1u : 0u; break;
608 void generateValues (const UniformLayout& layout, const std::map<int, void*>& blockPointers, deUint32 seed)
610 de::Random rnd (seed);
611 int numBlocks = (int)layout.blocks.size();
613 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
615 void* basePtr = blockPointers.find(blockNdx)->second;
616 int numEntries = (int)layout.blocks[blockNdx].activeUniformIndices.size();
618 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
620 const UniformLayoutEntry& entry = layout.uniforms[layout.blocks[blockNdx].activeUniformIndices[entryNdx]];
621 generateValue(entry, basePtr, rnd);
628 const char* getCompareFuncForType (glu::DataType type)
632 case glu::TYPE_FLOAT: return "mediump float compare_float (highp float a, highp float b) { return abs(a - b) < 0.05 ? 1.0 : 0.0; }\n";
633 case glu::TYPE_FLOAT_VEC2: return "mediump float compare_vec2 (highp vec2 a, highp vec2 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y); }\n";
634 case glu::TYPE_FLOAT_VEC3: return "mediump float compare_vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z); }\n";
635 case glu::TYPE_FLOAT_VEC4: return "mediump float compare_vec4 (highp vec4 a, highp vec4 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z)*compare_float(a.w, b.w); }\n";
636 case glu::TYPE_FLOAT_MAT2: return "mediump float compare_mat2 (highp mat2 a, highp mat2 b) { return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1]); }\n";
637 case glu::TYPE_FLOAT_MAT2X3: return "mediump float compare_mat2x3 (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1]); }\n";
638 case glu::TYPE_FLOAT_MAT2X4: return "mediump float compare_mat2x4 (highp mat2x4 a, highp mat2x4 b){ return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1]); }\n";
639 case glu::TYPE_FLOAT_MAT3X2: return "mediump float compare_mat3x2 (highp mat3x2 a, highp mat3x2 b){ return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1])*compare_vec2(a[2], b[2]); }\n";
640 case glu::TYPE_FLOAT_MAT3: return "mediump float compare_mat3 (highp mat3 a, highp mat3 b) { return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1])*compare_vec3(a[2], b[2]); }\n";
641 case glu::TYPE_FLOAT_MAT3X4: return "mediump float compare_mat3x4 (highp mat3x4 a, highp mat3x4 b){ return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1])*compare_vec4(a[2], b[2]); }\n";
642 case glu::TYPE_FLOAT_MAT4X2: return "mediump float compare_mat4x2 (highp mat4x2 a, highp mat4x2 b){ return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1])*compare_vec2(a[2], b[2])*compare_vec2(a[3], b[3]); }\n";
643 case glu::TYPE_FLOAT_MAT4X3: return "mediump float compare_mat4x3 (highp mat4x3 a, highp mat4x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1])*compare_vec3(a[2], b[2])*compare_vec3(a[3], b[3]); }\n";
644 case glu::TYPE_FLOAT_MAT4: return "mediump float compare_mat4 (highp mat4 a, highp mat4 b) { return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1])*compare_vec4(a[2], b[2])*compare_vec4(a[3], b[3]); }\n";
645 case glu::TYPE_INT: return "mediump float compare_int (highp int a, highp int b) { return a == b ? 1.0 : 0.0; }\n";
646 case glu::TYPE_INT_VEC2: return "mediump float compare_ivec2 (highp ivec2 a, highp ivec2 b) { return a == b ? 1.0 : 0.0; }\n";
647 case glu::TYPE_INT_VEC3: return "mediump float compare_ivec3 (highp ivec3 a, highp ivec3 b) { return a == b ? 1.0 : 0.0; }\n";
648 case glu::TYPE_INT_VEC4: return "mediump float compare_ivec4 (highp ivec4 a, highp ivec4 b) { return a == b ? 1.0 : 0.0; }\n";
649 case glu::TYPE_UINT: return "mediump float compare_uint (highp uint a, highp uint b) { return a == b ? 1.0 : 0.0; }\n";
650 case glu::TYPE_UINT_VEC2: return "mediump float compare_uvec2 (highp uvec2 a, highp uvec2 b) { return a == b ? 1.0 : 0.0; }\n";
651 case glu::TYPE_UINT_VEC3: return "mediump float compare_uvec3 (highp uvec3 a, highp uvec3 b) { return a == b ? 1.0 : 0.0; }\n";
652 case glu::TYPE_UINT_VEC4: return "mediump float compare_uvec4 (highp uvec4 a, highp uvec4 b) { return a == b ? 1.0 : 0.0; }\n";
653 case glu::TYPE_BOOL: return "mediump float compare_bool (bool a, bool b) { return a == b ? 1.0 : 0.0; }\n";
654 case glu::TYPE_BOOL_VEC2: return "mediump float compare_bvec2 (bvec2 a, bvec2 b) { return a == b ? 1.0 : 0.0; }\n";
655 case glu::TYPE_BOOL_VEC3: return "mediump float compare_bvec3 (bvec3 a, bvec3 b) { return a == b ? 1.0 : 0.0; }\n";
656 case glu::TYPE_BOOL_VEC4: return "mediump float compare_bvec4 (bvec4 a, bvec4 b) { return a == b ? 1.0 : 0.0; }\n";
663 void getCompareDependencies (std::set<glu::DataType>& compareFuncs, glu::DataType basicType)
667 case glu::TYPE_FLOAT_VEC2:
668 case glu::TYPE_FLOAT_VEC3:
669 case glu::TYPE_FLOAT_VEC4:
670 compareFuncs.insert(glu::TYPE_FLOAT);
671 compareFuncs.insert(basicType);
674 case glu::TYPE_FLOAT_MAT2:
675 case glu::TYPE_FLOAT_MAT2X3:
676 case glu::TYPE_FLOAT_MAT2X4:
677 case glu::TYPE_FLOAT_MAT3X2:
678 case glu::TYPE_FLOAT_MAT3:
679 case glu::TYPE_FLOAT_MAT3X4:
680 case glu::TYPE_FLOAT_MAT4X2:
681 case glu::TYPE_FLOAT_MAT4X3:
682 case glu::TYPE_FLOAT_MAT4:
683 compareFuncs.insert(glu::TYPE_FLOAT);
684 compareFuncs.insert(glu::getDataTypeFloatVec(glu::getDataTypeMatrixNumRows(basicType)));
685 compareFuncs.insert(basicType);
689 compareFuncs.insert(basicType);
694 void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const VarType& type)
696 if (type.isStructType())
698 for (StructType::ConstIterator iter = type.getStruct().begin(); iter != type.getStruct().end(); ++iter)
699 collectUniqueBasicTypes(basicTypes, iter->getType());
701 else if (type.isArrayType())
702 collectUniqueBasicTypes(basicTypes, type.getElementType());
705 DE_ASSERT(type.isBasicType());
706 basicTypes.insert(type.getBasicType());
710 void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const UniformBlock& uniformBlock)
712 for (UniformBlock::ConstIterator iter = uniformBlock.begin(); iter != uniformBlock.end(); ++iter)
713 collectUniqueBasicTypes(basicTypes, iter->getType());
716 void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const ShaderInterface& interface)
718 for (int ndx = 0; ndx < interface.getNumUniformBlocks(); ++ndx)
719 collectUniqueBasicTypes(basicTypes, interface.getUniformBlock(ndx));
722 void generateCompareFuncs (std::ostream& str, const ShaderInterface& interface)
724 std::set<glu::DataType> types;
725 std::set<glu::DataType> compareFuncs;
727 // Collect unique basic types
728 collectUniqueBasicTypes(types, interface);
730 // Set of compare functions required
731 for (std::set<glu::DataType>::const_iterator iter = types.begin(); iter != types.end(); ++iter)
733 getCompareDependencies(compareFuncs, *iter);
736 for (int type = 0; type < glu::TYPE_LAST; ++type)
738 if (compareFuncs.find(glu::DataType(type)) != compareFuncs.end())
739 str << getCompareFuncForType(glu::DataType(type));
//! Helper printing 'level' tab characters via the operator<< below.
// NOTE(review): struct wrapper reconstructed — only the constructor was visible; confirm member list.
struct Indent
{
	int level;

	Indent (int level_) : level(level_) {}
};
749 std::ostream& operator<< (std::ostream& str, const Indent& indent)
751 for (int i = 0; i < indent.level; i++)
756 void generateDeclaration (std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 unusedHints);
757 void generateDeclaration (std::ostringstream& src, const Uniform& uniform, int indentLevel);
758 void generateDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
760 void generateLocalDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
761 void generateFullDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
763 void generateDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
765 DE_ASSERT(structType.hasTypeName());
766 generateFullDeclaration(src, structType, indentLevel);
770 void generateFullDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
773 if (structType.hasTypeName())
774 src << " " << structType.getTypeName();
775 src << "\n" << Indent(indentLevel) << "{\n";
777 for (StructType::ConstIterator memberIter = structType.begin(); memberIter != structType.end(); memberIter++)
779 src << Indent(indentLevel + 1);
780 generateDeclaration(src, memberIter->getType(), memberIter->getName(), indentLevel + 1, memberIter->getFlags() & UNUSED_BOTH);
783 src << Indent(indentLevel) << "}";
786 void generateLocalDeclaration (std::ostringstream& src, const StructType& structType, int /* indentLevel */)
788 src << structType.getTypeName();
791 void generateDeclaration (std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 unusedHints)
793 deUint32 flags = type.getFlags();
795 if ((flags & LAYOUT_MASK) != 0)
796 src << "layout(" << LayoutFlagsFmt(flags & LAYOUT_MASK) << ") ";
798 if ((flags & PRECISION_MASK) != 0)
799 src << PrecisionFlagsFmt(flags & PRECISION_MASK) << " ";
801 if (type.isBasicType())
802 src << glu::getDataTypeName(type.getBasicType()) << " " << name;
803 else if (type.isArrayType())
805 std::vector<int> arraySizes;
806 const VarType* curType = &type;
807 while (curType->isArrayType())
809 arraySizes.push_back(curType->getArraySize());
810 curType = &curType->getElementType();
813 if (curType->isBasicType())
815 if ((curType->getFlags() & PRECISION_MASK) != 0)
816 src << PrecisionFlagsFmt(curType->getFlags() & PRECISION_MASK) << " ";
817 src << glu::getDataTypeName(curType->getBasicType());
821 DE_ASSERT(curType->isStructType());
822 generateLocalDeclaration(src, curType->getStruct(), indentLevel+1);
827 for (std::vector<int>::const_iterator sizeIter = arraySizes.begin(); sizeIter != arraySizes.end(); sizeIter++)
828 src << "[" << *sizeIter << "]";
832 generateLocalDeclaration(src, type.getStruct(), indentLevel+1);
838 // Print out unused hints.
839 if (unusedHints != 0)
840 src << " // unused in " << (unusedHints == UNUSED_BOTH ? "both shaders" :
841 unusedHints == UNUSED_VERTEX ? "vertex shader" :
842 unusedHints == UNUSED_FRAGMENT ? "fragment shader" : "???");
847 void generateDeclaration (std::ostringstream& src, const Uniform& uniform, int indentLevel)
849 if ((uniform.getFlags() & LAYOUT_MASK) != 0)
850 src << "layout(" << LayoutFlagsFmt(uniform.getFlags() & LAYOUT_MASK) << ") ";
852 generateDeclaration(src, uniform.getType(), uniform.getName(), indentLevel, uniform.getFlags() & UNUSED_BOTH);
855 void generateDeclaration (std::ostringstream& src, int blockNdx, const UniformBlock& block)
857 src << "layout(set = 0, binding = " << blockNdx;
858 if ((block.getFlags() & LAYOUT_MASK) != 0)
859 src << ", " << LayoutFlagsFmt(block.getFlags() & LAYOUT_MASK);
862 src << "uniform " << block.getBlockName();
865 for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
868 generateDeclaration(src, *uniformIter, 1 /* indent level */);
873 if (block.hasInstanceName())
875 src << " " << block.getInstanceName();
877 src << "[" << block.getArraySize() << "]";
880 DE_ASSERT(!block.isArray());
885 void generateValueSrc (std::ostringstream& src, const UniformLayoutEntry& entry, const void* basePtr, int elementNdx)
887 glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
888 int scalarSize = glu::getDataTypeScalarSize(entry.type);
889 bool isArray = entry.size > 1;
890 const deUint8* elemPtr = (const deUint8*)basePtr + entry.offset + (isArray ? elementNdx * entry.arrayStride : 0);
891 const int compSize = sizeof(deUint32);
894 src << glu::getDataTypeName(entry.type) << "(";
896 if (glu::isDataTypeMatrix(entry.type))
898 int numRows = glu::getDataTypeMatrixNumRows(entry.type);
899 int numCols = glu::getDataTypeMatrixNumColumns(entry.type);
901 DE_ASSERT(scalarType == glu::TYPE_FLOAT);
903 // Constructed in column-wise order.
904 for (int colNdx = 0; colNdx < numCols; colNdx++)
906 for (int rowNdx = 0; rowNdx < numRows; rowNdx++)
908 const deUint8* compPtr = elemPtr + (entry.isRowMajor ? (rowNdx * entry.matrixStride + colNdx * compSize)
909 : (colNdx * entry.matrixStride + rowNdx * compSize));
911 if (colNdx > 0 || rowNdx > 0)
914 src << de::floatToString(*((const float*)compPtr), 1);
920 for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
922 const deUint8* compPtr = elemPtr + scalarNdx * compSize;
929 case glu::TYPE_FLOAT: src << de::floatToString(*((const float*)compPtr), 1); break;
930 case glu::TYPE_INT: src << *((const int*)compPtr); break;
931 case glu::TYPE_UINT: src << *((const deUint32*)compPtr) << "u"; break;
932 case glu::TYPE_BOOL: src << (*((const deUint32*)compPtr) != 0u ? "true" : "false"); break;
943 bool isMatrix (glu::DataType elementType)
945 return (elementType >= glu::TYPE_FLOAT_MAT2) && (elementType <= glu::TYPE_FLOAT_MAT4);
948 void writeMatrixTypeSrc (int columnCount,
951 std::string compareType,
952 std::ostringstream& src,
953 const std::string& srcName,
955 const UniformLayoutEntry& entry,
958 if (vector) // generateTestSrcMatrixPerVec
960 for (int colNdex = 0; colNdex < columnCount; colNdex++)
962 src << "\tresult *= " << compare + compareType << "(" << srcName << "[" << colNdex << "], ";
964 if (glu::isDataTypeMatrix(entry.type))
966 int scalarSize = glu::getDataTypeScalarSize(entry.type);
967 const deUint8* elemPtr = (const deUint8*)basePtr + entry.offset;
968 const int compSize = sizeof(deUint32);
971 src << compareType << "(";
972 for (int rowNdex = 0; rowNdex < rowCount; rowNdex++)
974 const deUint8* compPtr = elemPtr + (entry.isRowMajor ? (rowNdex * entry.matrixStride + colNdex * compSize)
975 : (colNdex * entry.matrixStride + rowNdex * compSize));
976 src << de::floatToString(*((const float*)compPtr), 1);
978 if (rowNdex < rowCount-1)
985 generateValueSrc(src, entry, basePtr, 0);
986 src << "[" << colNdex << "]);\n";
990 else // generateTestSrcMatrixPerElement
992 for (int colNdex = 0; colNdex < columnCount; colNdex++)
994 for (int rowNdex = 0; rowNdex < rowCount; rowNdex++)
996 src << "\tresult *= " << compare + compareType << "(" << srcName << "[" << colNdex << "][" << rowNdex << "], ";
997 if (glu::isDataTypeMatrix(entry.type))
999 const deUint8* elemPtr = (const deUint8*)basePtr + entry.offset;
1000 const int compSize = sizeof(deUint32);
1001 const deUint8* compPtr = elemPtr + (entry.isRowMajor ? (rowNdex * entry.matrixStride + colNdex * compSize)
1002 : (colNdex * entry.matrixStride + rowNdex * compSize));
1004 src << de::floatToString(*((const float*)compPtr), 1) << ");\n";
1008 generateValueSrc(src, entry, basePtr, 0);
1009 src << "[" << colNdex << "][" << rowNdex << "]);\n";
1016 void generateTestSrcMatrixPerVec (glu::DataType elementType,
1017 std::ostringstream& src,
1018 const std::string& srcName,
1019 const void* basePtr,
1020 const UniformLayoutEntry& entry,
1023 std::string compare = "compare_";
1024 switch (elementType)
1026 case glu::TYPE_FLOAT_MAT2:
1027 writeMatrixTypeSrc(2, 2, compare, "vec2", src, srcName, basePtr, entry, vector);
1030 case glu::TYPE_FLOAT_MAT2X3:
1031 writeMatrixTypeSrc(2, 3, compare, "vec3", src, srcName, basePtr, entry, vector);
1034 case glu::TYPE_FLOAT_MAT2X4:
1035 writeMatrixTypeSrc(2, 4, compare, "vec4", src, srcName, basePtr, entry, vector);
1038 case glu::TYPE_FLOAT_MAT3X4:
1039 writeMatrixTypeSrc(3, 4, compare, "vec4", src, srcName, basePtr, entry, vector);
1042 case glu::TYPE_FLOAT_MAT4:
1043 writeMatrixTypeSrc(4, 4, compare, "vec4", src, srcName, basePtr, entry, vector);
1046 case glu::TYPE_FLOAT_MAT4X2:
1047 writeMatrixTypeSrc(4, 2, compare, "vec2", src, srcName, basePtr, entry, vector);
1050 case glu::TYPE_FLOAT_MAT4X3:
1051 writeMatrixTypeSrc(4, 3, compare, "vec3", src, srcName, basePtr, entry, vector);
// Emits GLSL comparison code for a matrix uniform that reads the matrix one
// scalar element at a time ("float" compare per element) via writeMatrixTypeSrc.
// Mirrors generateTestSrcMatrixPerVec but with compareType = "float".
// NOTE(review): listing elided — `break;` statements and some cases are not
// visible here (source line numbers jump).
1059 void generateTestSrcMatrixPerElement (glu::DataType elementType,
1060 std::ostringstream& src,
1061 const std::string& srcName,
1062 const void* basePtr,
1063 const UniformLayoutEntry& entry,
1066 std::string compare = "compare_";
1067 std::string compareType = "float";
1068 switch (elementType)
1070 case glu::TYPE_FLOAT_MAT2:
1071 writeMatrixTypeSrc(2, 2, compare, compareType, src, srcName, basePtr, entry, vector);
1074 case glu::TYPE_FLOAT_MAT2X3:
1075 writeMatrixTypeSrc(2, 3, compare, compareType, src, srcName, basePtr, entry, vector);
1078 case glu::TYPE_FLOAT_MAT2X4:
1079 writeMatrixTypeSrc(2, 4, compare, compareType, src, srcName, basePtr, entry, vector);
1082 case glu::TYPE_FLOAT_MAT3X4:
1083 writeMatrixTypeSrc(3, 4, compare, compareType, src, srcName, basePtr, entry, vector);
1086 case glu::TYPE_FLOAT_MAT4:
1087 writeMatrixTypeSrc(4, 4, compare, compareType, src, srcName, basePtr, entry, vector);
1090 case glu::TYPE_FLOAT_MAT4X2:
1091 writeMatrixTypeSrc(4, 2, compare, compareType, src, srcName, basePtr, entry, vector);
1094 case glu::TYPE_FLOAT_MAT4X3:
1095 writeMatrixTypeSrc(4, 3, compare, compareType, src, srcName, basePtr, entry, vector);
// Emits the GLSL comparison for a single (non-array) basic-type uniform.
// With LOAD_FULL_MATRIX the whole value is compared in one compare_<type>()
// call; otherwise (matrices only) the comparison is split per column vector
// AND per scalar element to exercise partial matrix loads.
// NOTE(review): closing braces / else branch structure are elided in this
// listing (source line numbers jump).
1103 void generateSingleCompare (std::ostringstream& src,
1104 glu::DataType elementType,
1105 const std::string& srcName,
1106 const void* basePtr,
1107 const UniformLayoutEntry& entry,
1108 MatrixLoadFlags matrixLoadFlag)
1110 if (matrixLoadFlag == LOAD_FULL_MATRIX)
1112 const char* typeName = glu::getDataTypeName(elementType);
1114 src << "\tresult *= compare_" << typeName << "(" << srcName << ", ";
1115 generateValueSrc(src, entry, basePtr, 0);
1120 if (isMatrix(elementType))
1122 generateTestSrcMatrixPerVec (elementType, src, srcName, basePtr, entry, true);
1123 generateTestSrcMatrixPerElement (elementType, src, srcName, basePtr, entry, false);
// Recursively emits GLSL comparison code for one uniform of any VarType:
//  - basic type / array of basic types: compare each element against the
//    reference value read from basePtr via the layout entry;
//  - array of aggregates: recurse per element with "[i]" appended to names;
//  - struct: recurse per member with ".member" appended, skipping members
//    flagged by unusedMask.
// srcName is the GLSL-side expression, apiName the layout lookup key.
1128 void generateCompareSrc (std::ostringstream& src,
1129 const char* resultVar,
1130 const VarType& type,
1131 const std::string& srcName,
1132 const std::string& apiName,
1133 const UniformLayout& layout,
1134 const void* basePtr,
1135 deUint32 unusedMask,
1136 MatrixLoadFlags matrixLoadFlag)
1138 if (type.isBasicType() || (type.isArrayType() && type.getElementType().isBasicType()))
1140 // Basic type or array of basic types.
1141 bool isArray = type.isArrayType();
1142 glu::DataType elementType = isArray ? type.getElementType().getBasicType() : type.getBasicType();
1143 const char* typeName = glu::getDataTypeName(elementType);
1144 std::string fullApiName = std::string(apiName) + (isArray ? "[0]" : ""); // Arrays are always postfixed with [0]
1145 int uniformNdx = layout.getUniformIndex(fullApiName);
1146 const UniformLayoutEntry& entry = layout.uniforms[uniformNdx];
1150 for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
1152 src << "\tresult *= compare_" << typeName << "(" << srcName << "[" << elemNdx << "], ";
1153 generateValueSrc(src, entry, basePtr, elemNdx);
1159 generateSingleCompare(src, elementType, srcName, basePtr, entry, matrixLoadFlag);
1162 else if (type.isArrayType())
1164 const VarType& elementType = type.getElementType();
1166 for (int elementNdx = 0; elementNdx < type.getArraySize(); elementNdx++)
1168 std::string op = std::string("[") + de::toString(elementNdx) + "]";
1169 std::string elementSrcName = std::string(srcName) + op;
1170 std::string elementApiName = std::string(apiName) + op;
1171 generateCompareSrc(src, resultVar, elementType, elementSrcName, elementApiName, layout, basePtr, unusedMask, LOAD_FULL_MATRIX);
1176 DE_ASSERT(type.isStructType());
1178 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
1180 if (memberIter->getFlags() & unusedMask)
1181 continue; // Skip member.
1183 std::string op = std::string(".") + memberIter->getName();
1184 std::string memberSrcName = std::string(srcName) + op;
1185 std::string memberApiName = std::string(apiName) + op;
1186 generateCompareSrc(src, resultVar, memberIter->getType(), memberSrcName, memberApiName, layout, basePtr, unusedMask, LOAD_FULL_MATRIX);
// Top-level driver: emits comparison code for every uniform of every block
// declared for the given shader stage (isVertex selects DECLARE_VERTEX vs
// DECLARE_FRAGMENT and the matching UNUSED_* skip mask). Handles instanced
// block arrays by iterating instances and resolving each instance's base
// pointer from blockPointers via the active block index in the layout.
1191 void generateCompareSrc (std::ostringstream& src,
1192 const char* resultVar,
1193 const ShaderInterface& interface,
1194 const UniformLayout& layout,
1196 void*>& blockPointers,
1198 MatrixLoadFlags matrixLoadFlag)
1200 deUint32 unusedMask = isVertex ? UNUSED_VERTEX : UNUSED_FRAGMENT;
1202 for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
1204 const UniformBlock& block = interface.getUniformBlock(blockNdx);
1206 if ((block.getFlags() & (isVertex ? DECLARE_VERTEX : DECLARE_FRAGMENT)) == 0)
1209 bool hasInstanceName = block.hasInstanceName();
1210 bool isArray = block.isArray();
1211 int numInstances = isArray ? block.getArraySize() : 1;
1212 std::string apiPrefix = hasInstanceName ? block.getBlockName() + "." : std::string("");
1214 DE_ASSERT(!isArray || hasInstanceName);
1216 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
1218 std::string instancePostfix = isArray ? std::string("[") + de::toString(instanceNdx) + "]" : std::string("");
1219 std::string blockInstanceName = block.getBlockName() + instancePostfix;
1220 std::string srcPrefix = hasInstanceName ? block.getInstanceName() + instancePostfix + "." : std::string("");
1221 int activeBlockNdx = layout.getBlockIndex(blockInstanceName);
1222 void* basePtr = blockPointers.find(activeBlockNdx)->second;
1224 for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
1226 const Uniform& uniform = *uniformIter;
1228 if (uniform.getFlags() & unusedMask)
1229 continue; // Don't read from that uniform.
1231 std::string srcName = srcPrefix + uniform.getName();
1232 std::string apiName = apiPrefix + uniform.getName();
1233 generateCompareSrc(src, resultVar, uniform.getType(), srcName, apiName, layout, basePtr, unusedMask, matrixLoadFlag);
// Builds the GLSL 450 vertex shader: declares named structs and all blocks
// flagged DECLARE_VERTEX, emits the compare_* helpers, then a main() that
// multiplies all per-uniform comparison results into v_vtxResult
// (1.0 = every vertex-stage uniform matched its reference data).
1239 std::string generateVertexShader (const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers, MatrixLoadFlags matrixLoadFlag)
1241 std::ostringstream src;
1242 src << "#version 450\n";
1244 src << "layout(location = 0) in highp vec4 a_position;\n";
1245 src << "layout(location = 0) out mediump float v_vtxResult;\n";
1248 std::vector<const StructType*> namedStructs;
1249 interface.getNamedStructs(namedStructs);
1250 for (std::vector<const StructType*>::const_iterator structIter = namedStructs.begin(); structIter != namedStructs.end(); structIter++)
1251 generateDeclaration(src, **structIter, 0);
1253 for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
1255 const UniformBlock& block = interface.getUniformBlock(blockNdx);
1256 if (block.getFlags() & DECLARE_VERTEX)
1257 generateDeclaration(src, blockNdx, block);
1260 // Comparison utilities.
1262 generateCompareFuncs(src, interface);
1265 "void main (void)\n"
1267 " gl_Position = a_position;\n"
1268 " mediump float result = 1.0;\n"
1271 generateCompareSrc(src, "result", interface, layout, blockPointers, true, matrixLoadFlag);
1273 src << " v_vtxResult = result;\n"
// Builds the GLSL 450 fragment shader: mirrors generateVertexShader but for
// DECLARE_FRAGMENT blocks. Writes the vertex result into the G channel and
// the fragment result into the B channel, so a fully-matching frame is
// all-white (1,1,1,1) and any mismatch shows as a non-white pixel.
1279 std::string generateFragmentShader (const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers, MatrixLoadFlags matrixLoadFlag)
1281 std::ostringstream src;
1282 src << "#version 450\n";
1284 src << "layout(location = 0) in mediump float v_vtxResult;\n";
1285 src << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
1288 std::vector<const StructType*> namedStructs;
1289 interface.getNamedStructs(namedStructs);
1290 for (std::vector<const StructType*>::const_iterator structIter = namedStructs.begin(); structIter != namedStructs.end(); structIter++)
1291 generateDeclaration(src, **structIter, 0);
1293 for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
1295 const UniformBlock& block = interface.getUniformBlock(blockNdx);
1296 if (block.getFlags() & DECLARE_FRAGMENT)
1297 generateDeclaration(src, blockNdx, block);
1300 // Comparison utilities.
1302 generateCompareFuncs(src, interface);
1305 "void main (void)\n"
1307 " mediump float result = 1.0;\n"
1310 generateCompareSrc(src, "result", interface, layout, blockPointers, false, matrixLoadFlag);
1312 src << " dEQP_FragColor = vec4(1.0, v_vtxResult, result, 1.0);\n"
// Creates an exclusive-sharing VkBuffer of the given size/usage on the
// context's device, restricted to the universal queue family.
// Ownership is returned via Move<VkBuffer> (RAII handle).
1318 Move<VkBuffer> createBuffer (Context& context, VkDeviceSize bufferSize, vk::VkBufferUsageFlags usageFlags)
1320 const VkDevice vkDevice = context.getDevice();
1321 const DeviceInterface& vk = context.getDeviceInterface();
1322 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1324 const VkBufferCreateInfo bufferInfo =
1326 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1327 DE_NULL, // const void* pNext;
1328 0u, // VkBufferCreateFlags flags;
1329 bufferSize, // VkDeviceSize size;
1330 usageFlags, // VkBufferUsageFlags usage;
1331 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1332 1u, // deUint32 queueFamilyIndexCount;
1333 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
1336 return vk::createBuffer(vk, vkDevice, &bufferInfo);
// Creates a single-mip, single-layer 2D VkImage (1 sample, exclusive
// sharing on the universal queue family, UNDEFINED initial layout) with the
// requested format/tiling/usage. Ownership returned via Move<VkImage>.
1339 Move<vk::VkImage> createImage2D (Context& context, deUint32 width, deUint32 height, vk::VkFormat format, vk::VkImageTiling tiling, vk::VkImageUsageFlags usageFlags)
1341 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1342 const vk::VkImageCreateInfo params =
1344 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType
1345 DE_NULL, // const void* pNext
1346 0u, // VkImageCreateFlags flags
1347 vk::VK_IMAGE_TYPE_2D, // VkImageType imageType
1348 format, // VkFormat format
1349 { width, height, 1u }, // VkExtent3D extent
1350 1u, // deUint32 mipLevels
1351 1u, // deUint32 arrayLayers
1352 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples
1353 tiling, // VkImageTiling tiling
1354 usageFlags, // VkImageUsageFlags usage
1355 vk::VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode
1356 1u, // deUint32 queueFamilyIndexCount
1357 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices
1358 vk::VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout
1361 return vk::createImage(context.getDeviceInterface(), context.getDevice(), &params); // fixed mojibake: was "¶ms" (HTML-entity-decoded "&params")
// Allocates device memory satisfying the buffer's requirements (plus the
// caller's MemoryRequirement, e.g. HostVisible) from the default allocator
// and binds it to the buffer. The Allocation owns the memory (RAII).
// NOTE(review): the `return memory;` and closing brace are elided in this
// listing (source line numbers jump).
1364 de::MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs)
1366 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1367 const vk::VkMemoryRequirements bufReqs = vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
1368 de::MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(bufReqs, memReqs);
1370 vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
// Image overload of allocateAndBindMemory: same pattern as the buffer
// variant, using image memory requirements and vkBindImageMemory.
// NOTE(review): the `return memory;` and closing brace are elided in this
// listing (source line numbers jump).
1375 de::MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkImage image, vk::MemoryRequirement memReqs)
1377 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1378 const vk::VkMemoryRequirements imgReqs = vk::getImageMemoryRequirements(vkd, context.getDevice(), image);
1379 de::MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(imgReqs, memReqs);
1381 vkd.bindImageMemory(context.getDevice(), image, memory->getMemory(), memory->getOffset());
// Creates a 2D color-attachment image view over mip 0 / layer 0 of the
// given image, with identity (RGBA) component mapping.
1386 Move<vk::VkImageView> createAttachmentView (Context& context, vk::VkImage image, vk::VkFormat format)
1388 const vk::VkImageViewCreateInfo params =
1390 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
1394 vk::VK_IMAGE_VIEW_TYPE_2D, // viewType
1396 vk::makeComponentMappingRGBA(), // components
1397 { vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u,1u }, // subresourceRange
1400 return vk::createImageView(context.getDeviceInterface(), context.getDevice(), &params); // fixed mojibake: was "¶ms"
// Creates a pipeline layout containing exactly one descriptor set layout
// and no push-constant ranges.
1403 Move<vk::VkPipelineLayout> createPipelineLayout (Context& context, vk::VkDescriptorSetLayout descriptorSetLayout)
1405 const vk::VkPipelineLayoutCreateInfo params =
1407 vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
1410 1u, // setLayoutCount
1411 &descriptorSetLayout, // pSetLayouts
1412 0u, // pushConstantRangeCount
1413 DE_NULL, // pPushConstantRanges
1416 return vk::createPipelineLayout(context.getDeviceInterface(), context.getDevice(), &params); // fixed mojibake: was "¶ms"
// Creates a command pool for the universal queue family with the
// RESET_COMMAND_BUFFER flag so individual buffers can be re-recorded.
1419 Move<vk::VkCommandPool> createCmdPool (Context& context)
1421 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1422 const vk::VkCommandPoolCreateInfo params =
1424 vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
1426 vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // flags
1427 queueFamilyIndex, // queueFamilyIndex
1430 return vk::createCommandPool(context.getDeviceInterface(), context.getDevice(), &params); // fixed mojibake: was "¶ms"
// Allocates a single primary command buffer from the given pool.
1433 Move<vk::VkCommandBuffer> createCmdBuffer (Context& context, vk::VkCommandPool cmdPool)
1435 const vk::VkCommandBufferAllocateInfo params =
1437 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
1439 cmdPool, // commandPool
1440 vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1444 return vk::allocateCommandBuffer(context.getDeviceInterface(), context.getDevice(), &params); // fixed mojibake: was "¶ms"
1448 // UniformBlockCaseInstance
// Test instance that renders a full-screen quad whose shaders compare every
// uniform-block value against reference data; the frame must come out all
// white. Holds the layout and per-block reference pointers by reference --
// the owning UniformBlockCase must outlive this instance.
1450 class UniformBlockCaseInstance : public vkt::TestInstance
1453 UniformBlockCaseInstance (Context& context,
1454 UniformBlockCase::BufferMode bufferMode,
1455 const UniformLayout& layout,
1456 const std::map<int, void*>& blockPointers);
1457 virtual ~UniformBlockCaseInstance (void);
1458 virtual tcu::TestStatus iterate (void);
1464 RENDER_HEIGHT = 100,
1467 vk::Move<VkRenderPass> createRenderPass (vk::VkFormat format) const;
1468 vk::Move<VkFramebuffer> createFramebuffer (vk::VkRenderPass renderPass, vk::VkImageView colorImageView) const;
1469 vk::Move<VkDescriptorSetLayout> createDescriptorSetLayout (void) const;
1470 vk::Move<VkDescriptorPool> createDescriptorPool (void) const;
1471 vk::Move<VkPipeline> createPipeline (vk::VkShaderModule vtxShaderModule, vk::VkShaderModule fragShaderModule, vk::VkPipelineLayout pipelineLayout, vk::VkRenderPass renderPass) const;
1473 vk::VkDescriptorBufferInfo addUniformData (deUint32 size, const void* dataPtr);
1475 UniformBlockCase::BufferMode m_bufferMode;
1476 const UniformLayout& m_layout;
1477 const std::map<int, void*>& m_blockPointers;
1479 typedef de::SharedPtr<vk::Unique<vk::VkBuffer> > VkBufferSp;
1480 typedef de::SharedPtr<vk::Allocation> AllocationSp;
1482 std::vector<VkBufferSp> m_uniformBuffers;
1483 std::vector<AllocationSp> m_uniformAllocs;
// Constructor: stores references to the layout and reference-data map;
// no Vulkan work happens until iterate().
// NOTE(review): the m_layout initializer (original line 1492) is elided in
// this listing.
1486 UniformBlockCaseInstance::UniformBlockCaseInstance (Context& ctx,
1487 UniformBlockCase::BufferMode bufferMode,
1488 const UniformLayout& layout,
1489 const std::map<int, void*>& blockPointers)
1490 : vkt::TestInstance (ctx)
1491 , m_bufferMode (bufferMode)
1493 , m_blockPointers (blockPointers)
// Destructor (body elided in this listing); the SharedPtr/Unique members
// declared in the class release the Vulkan buffers and allocations.
1497 UniformBlockCaseInstance::~UniformBlockCaseInstance (void)
// Runs the whole test once:
//  1. upload quad geometry and the uniform reference data (one UBO per block,
//     or one packed UBO honoring minUniformBufferOffsetAlignment);
//  2. build render pass, framebuffer, pipeline and descriptor set;
//  3. record barriers + draw + image->buffer readback, submit and wait;
//  4. verify every pixel of the 100x100 frame is white; on failure, log the
//     image plus block/uniform layout details.
// Fixes in this revision: mojibake "©Params" / "©FinishBarrier"
// (HTML-entity-decoded "&copyParams" / "&copyFinishBarrier") restored.
1501 tcu::TestStatus UniformBlockCaseInstance::iterate (void)
1503 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
1504 const vk::VkDevice device = m_context.getDevice();
1505 const vk::VkQueue queue = m_context.getUniversalQueue();
1506 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1508 const float positions[] =
1510 -1.0f, -1.0f, 0.0f, 1.0f,
1511 -1.0f, +1.0f, 0.0f, 1.0f,
1512 +1.0f, -1.0f, 0.0f, 1.0f,
1513 +1.0f, +1.0f, 0.0f, 1.0f
1516 const deUint32 indices[] = { 0, 1, 2, 2, 1, 3 };
1518 vk::Unique<VkBuffer> positionsBuffer (createBuffer(m_context, sizeof(positions), vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1519 de::UniquePtr<Allocation> positionsAlloc (allocateAndBindMemory(m_context, *positionsBuffer, MemoryRequirement::HostVisible));
1520 vk::Unique<VkBuffer> indicesBuffer (createBuffer(m_context, sizeof(indices), vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT|vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1521 de::UniquePtr<Allocation> indicesAlloc (allocateAndBindMemory(m_context, *indicesBuffer, MemoryRequirement::HostVisible));
1523 int minUniformBufferOffsetAlignment = getminUniformBufferOffsetAlignment(m_context);
1525 // Upload attrbiutes data
1527 deMemcpy(positionsAlloc->getHostPtr(), positions, sizeof(positions));
1528 flushMappedMemoryRange(vk, device, positionsAlloc->getMemory(), positionsAlloc->getOffset(), sizeof(positions));
1530 deMemcpy(indicesAlloc->getHostPtr(), indices, sizeof(indices));
1531 flushMappedMemoryRange(vk, device, indicesAlloc->getMemory(), indicesAlloc->getOffset(), sizeof(indices));
1534 vk::Unique<VkImage> colorImage (createImage2D(m_context,
1537 vk::VK_FORMAT_R8G8B8A8_UNORM,
1538 vk::VK_IMAGE_TILING_OPTIMAL,
1539 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
1540 de::UniquePtr<Allocation> colorImageAlloc (allocateAndBindMemory(m_context, *colorImage, MemoryRequirement::Any));
1541 vk::Unique<VkImageView> colorImageView (createAttachmentView(m_context, *colorImage, vk::VK_FORMAT_R8G8B8A8_UNORM));
1543 vk::Unique<VkDescriptorSetLayout> descriptorSetLayout (createDescriptorSetLayout());
1544 vk::Unique<VkDescriptorPool> descriptorPool (createDescriptorPool());
1546 const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo =
1548 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
1549 DE_NULL, // const void* pNext;
1550 *descriptorPool, // VkDescriptorPool descriptorPool;
1551 1u, // deUint32 setLayoutCount;
1552 &descriptorSetLayout.get() // const VkDescriptorSetLayout* pSetLayouts;
1555 vk::Unique<VkDescriptorSet> descriptorSet(vk::allocateDescriptorSet(vk, device, &descriptorSetAllocateInfo));
1556 int numBlocks = (int)m_layout.blocks.size();
1557 std::vector<vk::VkDescriptorBufferInfo> descriptors(numBlocks);
1559 // Upload uniform data
1561 vk::DescriptorSetUpdateBuilder descriptorSetUpdateBuilder;
1563 if (m_bufferMode == UniformBlockCase::BUFFERMODE_PER_BLOCK)
1565 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1567 const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1568 const void* srcPtr = m_blockPointers.find(blockNdx)->second;
1570 descriptors[blockNdx] = addUniformData(block.size, srcPtr);
1571 descriptorSetUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::bindingArrayElement(block.bindingNdx, block.instanceNdx),
1572 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptors[blockNdx]);
1577 int currentOffset = 0;
1578 std::map<int, int> offsets;
1579 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1581 if (minUniformBufferOffsetAlignment > 0)
1582 currentOffset = deAlign32(currentOffset, minUniformBufferOffsetAlignment);
1583 offsets[blockNdx] = currentOffset;
1584 currentOffset += m_layout.blocks[blockNdx].size;
1587 deUint32 totalSize = currentOffset;
1589 // Make a copy of the data that satisfies the device's min uniform buffer alignment
1590 std::vector<deUint8> data;
1591 data.resize(totalSize);
1592 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1594 deMemcpy(&data[offsets[blockNdx]], m_blockPointers.find(blockNdx)->second, m_layout.blocks[blockNdx].size);
1597 vk::VkBuffer buffer = addUniformData(totalSize, &data[0]).buffer;
1599 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1601 const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1602 deUint32 size = block.size;
1604 const VkDescriptorBufferInfo descriptor =
1606 buffer, // VkBuffer buffer;
1607 (deUint32)offsets[blockNdx], // VkDeviceSize offset;
1608 size, // VkDeviceSize range;
1611 descriptors[blockNdx] = descriptor;
1612 descriptorSetUpdateBuilder.writeSingle(*descriptorSet,
1613 vk::DescriptorSetUpdateBuilder::Location::bindingArrayElement(block.bindingNdx, block.instanceNdx),
1614 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1615 &descriptors[blockNdx]);
1619 descriptorSetUpdateBuilder.update(vk, device);
1622 vk::Unique<VkRenderPass> renderPass (createRenderPass(vk::VK_FORMAT_R8G8B8A8_UNORM));
1623 vk::Unique<VkFramebuffer> framebuffer (createFramebuffer(*renderPass, *colorImageView));
1624 vk::Unique<VkPipelineLayout> pipelineLayout (createPipelineLayout(m_context, *descriptorSetLayout));
1626 vk::Unique<VkShaderModule> vtxShaderModule (vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
1627 vk::Unique<VkShaderModule> fragShaderModule (vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
1628 vk::Unique<VkPipeline> pipeline (createPipeline(*vtxShaderModule, *fragShaderModule, *pipelineLayout, *renderPass));
1629 vk::Unique<VkCommandPool> cmdPool (createCmdPool(m_context));
1630 vk::Unique<VkCommandBuffer> cmdBuffer (createCmdBuffer(m_context, *cmdPool));
1631 vk::Unique<VkBuffer> readImageBuffer (createBuffer(m_context, (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * 4), vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT));
1632 de::UniquePtr<Allocation> readImageAlloc (allocateAndBindMemory(m_context, *readImageBuffer, vk::MemoryRequirement::HostVisible));
1634 // Record command buffer
1635 const vk::VkCommandBufferBeginInfo beginInfo =
1637 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
1638 DE_NULL, // const void* pNext;
1639 0u, // VkCommandBufferUsageFlags flags;
1640 (const vk::VkCommandBufferInheritanceInfo*)DE_NULL,
1642 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &beginInfo));
1644 const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.125f, 0.25f, 0.75f, 1.0f);
1645 const vk::VkRenderPassBeginInfo passBeginInfo =
1647 vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
1648 DE_NULL, // const void* pNext;
1649 *renderPass, // VkRenderPass renderPass;
1650 *framebuffer, // VkFramebuffer framebuffer;
1651 { { 0, 0 }, { RENDER_WIDTH, RENDER_HEIGHT } }, // VkRect2D renderArea;
1652 1u, // deUint32 clearValueCount;
1653 &clearValue, // const VkClearValue* pClearValues;
1656 // Add barrier for initializing image state
1658 const vk::VkImageMemoryBarrier initializeBarrier =
1660 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1661 DE_NULL, // const void* pNext
1662 0, // VVkAccessFlags srcAccessMask;
1663 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
1664 vk::VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1665 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
1666 queueFamilyIndex, // deUint32 srcQueueFamilyIndex;
1667 queueFamilyIndex, // deUint32 dstQueueFamilyIndex;
1668 *colorImage, // VkImage image;
1670 vk::VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1671 0u, // deUint32 baseMipLevel;
1672 1u, // deUint32 mipLevels;
1673 0u, // deUint32 baseArraySlice;
1674 1u, // deUint32 arraySize;
1675 } // VkImageSubresourceRange subresourceRange
1678 vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (vk::VkDependencyFlags)0,
1679 0, (const vk::VkMemoryBarrier*)DE_NULL,
1680 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1681 1, &initializeBarrier);
1684 vk.cmdBeginRenderPass(*cmdBuffer, &passBeginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
1686 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
1687 vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
1689 const vk::VkDeviceSize offsets[] = { 0u };
1690 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &*positionsBuffer, offsets);
1691 vk.cmdBindIndexBuffer(*cmdBuffer, *indicesBuffer, (vk::VkDeviceSize)0, vk::VK_INDEX_TYPE_UINT32);
1693 vk.cmdDrawIndexed(*cmdBuffer, DE_LENGTH_OF_ARRAY(indices), 1u, 0u, 0u, 0u);
1694 vk.cmdEndRenderPass(*cmdBuffer);
1696 // Add render finish barrier
1698 const vk::VkImageMemoryBarrier renderFinishBarrier =
1700 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1701 DE_NULL, // const void* pNext
1702 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VVkAccessFlags srcAccessMask;
1703 vk::VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1704 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
1705 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1706 queueFamilyIndex, // deUint32 srcQueueFamilyIndex;
1707 queueFamilyIndex, // deUint32 dstQueueFamilyIndex;
1708 *colorImage, // VkImage image;
1710 vk::VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1711 0u, // deUint32 baseMipLevel;
1712 1u, // deUint32 mipLevels;
1713 0u, // deUint32 baseArraySlice;
1714 1u, // deUint32 arraySize;
1715 } // VkImageSubresourceRange subresourceRange
1718 vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0,
1719 0, (const vk::VkMemoryBarrier*)DE_NULL,
1720 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1721 1, &renderFinishBarrier);
1724 // Add Image->Buffer copy command
1726 const vk::VkBufferImageCopy copyParams =
1728 (vk::VkDeviceSize)0u, // VkDeviceSize bufferOffset;
1729 (deUint32)RENDER_WIDTH, // deUint32 bufferRowLength;
1730 (deUint32)RENDER_HEIGHT, // deUint32 bufferImageHeight;
1732 vk::VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspect aspect;
1733 0u, // deUint32 mipLevel;
1734 0u, // deUint32 arrayLayer;
1735 1u, // deUint32 arraySize;
1736 }, // VkImageSubresourceCopy imageSubresource
1737 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1738 { RENDER_WIDTH, RENDER_HEIGHT, 1u } // VkExtent3D imageExtent;
1741 vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, &copyParams); // fixed mojibake: was "©Params"
1744 // Add copy finish barrier
1746 const vk::VkBufferMemoryBarrier copyFinishBarrier =
1748 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1749 DE_NULL, // const void* pNext;
1750 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1751 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
1752 queueFamilyIndex, // deUint32 srcQueueFamilyIndex;
1753 queueFamilyIndex, // deUint32 destQueueFamilyIndex;
1754 *readImageBuffer, // VkBuffer buffer;
1755 0u, // VkDeviceSize offset;
1756 (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * 4)// VkDeviceSize size;
1759 vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0,
1760 0, (const vk::VkMemoryBarrier*)DE_NULL,
1761 1, &copyFinishBarrier, // fixed mojibake: was "©FinishBarrier"
1762 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
1765 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1767 // Submit the command buffer
1769 const vk::VkFenceCreateInfo fenceParams =
1771 vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
1772 DE_NULL, // const void* pNext;
1773 0u, // VkFenceCreateFlags flags;
1775 const Unique<vk::VkFence> fence(vk::createFence(vk, device, &fenceParams));
1777 const VkSubmitInfo submitInfo =
1779 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
1780 DE_NULL, // const void* pNext;
1781 0u, // deUint32 waitSemaphoreCount;
1782 DE_NULL, // const VkSemaphore* pWaitSemaphores;
1783 (const VkPipelineStageFlags*)DE_NULL,
1784 1u, // deUint32 commandBufferCount;
1785 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
1786 0u, // deUint32 signalSemaphoreCount;
1787 DE_NULL // const VkSemaphore* pSignalSemaphores;
1790 VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
1791 VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), DE_TRUE, ~0ull));
1794 // Read back the results
1795 tcu::Surface surface(RENDER_WIDTH, RENDER_HEIGHT);
1797 const tcu::TextureFormat textureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
1798 const tcu::ConstPixelBufferAccess imgAccess(textureFormat, RENDER_WIDTH, RENDER_HEIGHT, 1, readImageAlloc->getHostPtr());
1799 const vk::VkDeviceSize bufferSize = RENDER_WIDTH * RENDER_HEIGHT * 4;
1800 invalidateMappedMemoryRange(vk, device, readImageAlloc->getMemory(), readImageAlloc->getOffset(), bufferSize);
1802 tcu::copy(surface.getAccess(), imgAccess);
1805 // Check if the result image is all white
1806 tcu::RGBA white(tcu::RGBA::white());
1807 int numFailedPixels = 0;
1809 for (int y = 0; y < surface.getHeight(); y++)
1811 for (int x = 0; x < surface.getWidth(); x++)
1813 if (surface.getPixel(x, y) != white)
1814 numFailedPixels += 1;
1818 if (numFailedPixels > 0)
1820 tcu::TestLog& log = m_context.getTestContext().getLog();
1821 log << tcu::TestLog::Image("Image", "Rendered image", surface);
1822 log << tcu::TestLog::Message << "Image comparison failed, got " << numFailedPixels << " non-white pixels" << tcu::TestLog::EndMessage;
1824 for (size_t blockNdx = 0; blockNdx < m_layout.blocks.size(); blockNdx++)
1826 const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1827 log << tcu::TestLog::Message << "Block index: " << blockNdx << " infos: " << block << tcu::TestLog::EndMessage;
1830 for (size_t uniformNdx = 0; uniformNdx < m_layout.uniforms.size(); uniformNdx++)
1832 log << tcu::TestLog::Message << "Uniform index: " << uniformNdx << " infos: " << m_layout.uniforms[uniformNdx] << tcu::TestLog::EndMessage;
1835 return tcu::TestStatus::fail("Detected non-white pixels");
1838 return tcu::TestStatus::pass("Full white image ok");
// Creates a host-visible uniform buffer, copies `size` bytes of reference
// data into it, flushes the mapped range, and returns a descriptor covering
// the whole buffer. Buffer and allocation ownership is retained in
// m_uniformBuffers / m_uniformAllocs so they outlive command execution.
// NOTE(review): the `return descriptor;` and closing brace are elided in
// this listing (source line numbers jump).
1841 vk::VkDescriptorBufferInfo UniformBlockCaseInstance::addUniformData (deUint32 size, const void* dataPtr)
1843 const VkDevice vkDevice = m_context.getDevice();
1844 const DeviceInterface& vk = m_context.getDeviceInterface();
1846 Move<VkBuffer> buffer = createBuffer(m_context, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
1847 de::MovePtr<Allocation> alloc = allocateAndBindMemory(m_context, *buffer, vk::MemoryRequirement::HostVisible);
1849 deMemcpy(alloc->getHostPtr(), dataPtr, size);
1850 flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), size);
1852 const VkDescriptorBufferInfo descriptor =
1854 *buffer, // VkBuffer buffer;
1855 0u, // VkDeviceSize offset;
1856 size, // VkDeviceSize range;
1860 m_uniformBuffers.push_back(VkBufferSp(new vk::Unique<vk::VkBuffer>(buffer)));
1861 m_uniformAllocs.push_back(AllocationSp(alloc.release()));
// Creates a minimal single-subpass render pass with one cleared-and-stored
// color attachment of the given format, kept in COLOR_ATTACHMENT_OPTIMAL
// layout throughout (initial == final layout; transitions are done with
// explicit barriers in iterate()).
1866 vk::Move<VkRenderPass> UniformBlockCaseInstance::createRenderPass (vk::VkFormat format) const
1868 const VkDevice vkDevice = m_context.getDevice();
1869 const DeviceInterface& vk = m_context.getDeviceInterface();
1871 const VkAttachmentDescription attachmentDescription =
1873 0u, // VkAttachmentDescriptorFlags flags;
1874 format, // VkFormat format;
1875 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1876 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
1877 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
1878 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
1879 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
1880 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
1881 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
1884 const VkAttachmentReference attachmentReference =
1886 0u, // deUint32 attachment;
1887 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
1891 const VkSubpassDescription subpassDescription =
1893 0u, // VkSubpassDescriptionFlags flags;
1894 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
1895 0u, // deUint32 inputAttachmentCount;
1896 DE_NULL, // const VkAttachmentReference* pInputAttachments;
1897 1u, // deUint32 colorAttachmentCount;
1898 &attachmentReference, // const VkAttachmentReference* pColorAttachments;
1899 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
1900 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
1901 0u, // deUint32 preserveAttachmentCount;
1902 DE_NULL // const VkAttachmentReference* pPreserveAttachments;
1905 const VkRenderPassCreateInfo renderPassParams =
1907 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
1908 DE_NULL, // const void* pNext;
1909 0u, // VkRenderPassCreateFlags flags;
1910 1u, // deUint32 attachmentCount;
1911 &attachmentDescription, // const VkAttachmentDescription* pAttachments;
1912 1u, // deUint32 subpassCount;
1913 &subpassDescription, // const VkSubpassDescription* pSubpasses;
1914 0u, // deUint32 dependencyCount;
1915 DE_NULL // const VkSubpassDependency* pDependencies;
1918 return vk::createRenderPass(vk, vkDevice, &renderPassParams);
// Creates a RENDER_WIDTH x RENDER_HEIGHT single-layer framebuffer binding
// the color image view as the render pass's only attachment.
1921 vk::Move<VkFramebuffer> UniformBlockCaseInstance::createFramebuffer (vk::VkRenderPass renderPass, vk::VkImageView colorImageView) const
1923 const VkDevice vkDevice = m_context.getDevice();
1924 const DeviceInterface& vk = m_context.getDeviceInterface();
1926 const VkFramebufferCreateInfo framebufferParams =
1928 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
1929 DE_NULL, // const void* pNext;
1930 0u, // VkFramebufferCreateFlags flags;
1931 renderPass, // VkRenderPass renderPass;
1932 1u, // deUint32 attachmentCount;
1933 &colorImageView, // const VkImageView* pAttachments;
1934 RENDER_WIDTH, // deUint32 width;
1935 RENDER_HEIGHT, // deUint32 height;
1936 1u // deUint32 layers;
1939 return vk::createFramebuffer(vk, vkDevice, &framebufferParams);
// Builds the descriptor set layout: collapses consecutive layout blocks that
// share a bindingNdx (instances of an arrayed block) into one binding whose
// descriptor count is tracked in `lengths`, then emits an array binding or a
// single binding per entry, all VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER visible to
// all shader stages.
// NOTE(review): the branch that increments the current length for a repeated
// bindingNdx, and the length>1 test in the emission loop, are elided in this
// listing (source line numbers jump).
1942 vk::Move<VkDescriptorSetLayout> UniformBlockCaseInstance::createDescriptorSetLayout (void) const
1944 int numBlocks = (int)m_layout.blocks.size();
1945 int lastBindingNdx = -1;
1946 std::vector<int> lengths;
1948 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1950 const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1952 if (block.bindingNdx == lastBindingNdx)
1958 lengths.push_back(1);
1959 lastBindingNdx = block.bindingNdx;
1963 vk::DescriptorSetLayoutBuilder layoutBuilder;
1964 for (size_t i = 0; i < lengths.size(); i++)
1968 layoutBuilder.addArrayBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, lengths[i], vk::VK_SHADER_STAGE_ALL);
1972 layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_ALL);
1976 return layoutBuilder.build(m_context.getDeviceInterface(), m_context.getDevice());
1979 vk::Move<VkDescriptorPool> UniformBlockCaseInstance::createDescriptorPool (void) const
1981 vk::DescriptorPoolBuilder poolBuilder;
1984 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, (int)m_layout.blocks.size())
1985 .build(m_context.getDeviceInterface(), m_context.getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1988 vk::Move<VkPipeline> UniformBlockCaseInstance::createPipeline (vk::VkShaderModule vtxShaderModule, vk::VkShaderModule fragShaderModule, vk::VkPipelineLayout pipelineLayout, vk::VkRenderPass renderPass) const
1990 const VkDevice vkDevice = m_context.getDevice();
1991 const DeviceInterface& vk = m_context.getDeviceInterface();
1993 const VkVertexInputBindingDescription vertexBinding =
1995 0, // deUint32 binding;
1996 (deUint32)sizeof(float) * 4, // deUint32 strideInBytes;
1997 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
2000 const VkVertexInputAttributeDescription vertexAttribute =
2002 0, // deUint32 location;
2003 0, // deUint32 binding;
2004 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
2005 0u // deUint32 offset;
2008 const VkPipelineShaderStageCreateInfo shaderStages[2] =
2011 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
2012 DE_NULL, // const void* pNext;
2013 0u, // VkPipelineShaderStageCreateFlags flags;
2014 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage;
2015 vtxShaderModule, // VkShaderModule module;
2016 "main", // const char* pName;
2017 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
2020 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
2021 DE_NULL, // const void* pNext;
2022 0u, // VkPipelineShaderStageCreateFlags flags;
2023 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage;
2024 fragShaderModule, // VkShaderModule module;
2025 "main", // const char* pName;
2026 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
2030 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
2032 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2033 DE_NULL, // const void* pNext;
2034 0u, // VkPipelineVertexInputStateCreateFlags flags;
2035 1u, // deUint32 vertexBindingDescriptionCount;
2036 &vertexBinding, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
2037 1u, // deUint32 vertexAttributeDescriptionCount;
2038 &vertexAttribute, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
2041 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateParams =
2043 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,// VkStructureType sType;
2044 DE_NULL, // const void* pNext;
2045 0u, // VkPipelineInputAssemblyStateCreateFlags flags;
2046 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // VkPrimitiveTopology topology;
2047 false // VkBool32 primitiveRestartEnable;
2050 const VkViewport viewport =
2052 0.0f, // float originX;
2053 0.0f, // float originY;
2054 (float)RENDER_WIDTH, // float width;
2055 (float)RENDER_HEIGHT, // float height;
2056 0.0f, // float minDepth;
2057 1.0f // float maxDepth;
2061 const VkRect2D scissor =
2066 }, // VkOffset2D offset;
2068 RENDER_WIDTH, // deUint32 width;
2069 RENDER_HEIGHT, // deUint32 height;
2070 }, // VkExtent2D extent;
2073 const VkPipelineViewportStateCreateInfo viewportStateParams =
2075 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
2076 DE_NULL, // const void* pNext;
2077 0u, // VkPipelineViewportStateCreateFlags flags;
2078 1u, // deUint32 viewportCount;
2079 &viewport, // const VkViewport* pViewports;
2080 1u, // deUint32 scissorsCount;
2081 &scissor, // const VkRect2D* pScissors;
2084 const VkPipelineRasterizationStateCreateInfo rasterStateParams =
2086 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
2087 DE_NULL, // const void* pNext;
2088 0u, // VkPipelineRasterizationStateCreateFlags flags;
2089 false, // VkBool32 depthClampEnable;
2090 false, // VkBool32 rasterizerDiscardEnable;
2091 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
2092 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
2093 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
2094 false, // VkBool32 depthBiasEnable;
2095 0.0f, // float depthBiasConstantFactor;
2096 0.0f, // float depthBiasClamp;
2097 0.0f, // float depthBiasSlopeFactor;
2098 1.0f, // float lineWidth;
2101 const VkPipelineMultisampleStateCreateInfo multisampleStateParams =
2103 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
2104 DE_NULL, // const void* pNext;
2105 0u, // VkPipelineMultisampleStateCreateFlags flags;
2106 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
2107 VK_FALSE, // VkBool32 sampleShadingEnable;
2108 0.0f, // float minSampleShading;
2109 DE_NULL, // const VkSampleMask* pSampleMask;
2110 VK_FALSE, // VkBool32 alphaToCoverageEnable;
2111 VK_FALSE // VkBool32 alphaToOneEnable;
2114 const VkPipelineColorBlendAttachmentState colorBlendAttachmentState =
2116 false, // VkBool32 blendEnable;
2117 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendColor;
2118 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendColor;
2119 VK_BLEND_OP_ADD, // VkBlendOp blendOpColor;
2120 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendAlpha;
2121 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendAlpha;
2122 VK_BLEND_OP_ADD, // VkBlendOp blendOpAlpha;
2123 VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | // VkChannelFlags channelWriteMask;
2124 VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT
2127 const VkPipelineColorBlendStateCreateInfo colorBlendStateParams =
2129 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
2130 DE_NULL, // const void* pNext;
2131 0u, // VkPipelineColorBlendStateCreateFlags flags;
2132 false, // VkBool32 logicOpEnable;
2133 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
2134 1u, // deUint32 attachmentCount;
2135 &colorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
2136 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
2139 const VkGraphicsPipelineCreateInfo graphicsPipelineParams =
2141 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
2142 DE_NULL, // const void* pNext;
2143 0u, // VkPipelineCreateFlags flags;
2144 2u, // deUint32 stageCount;
2145 shaderStages, // const VkPipelineShaderStageCreateInfo* pStages;
2146 &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
2147 &inputAssemblyStateParams, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
2148 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
2149 &viewportStateParams, // const VkPipelineViewportStateCreateInfo* pViewportState;
2150 &rasterStateParams, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
2151 &multisampleStateParams, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
2152 DE_NULL, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
2153 &colorBlendStateParams, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
2154 (const VkPipelineDynamicStateCreateInfo*)DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
2155 pipelineLayout, // VkPipelineLayout layout;
2156 renderPass, // VkRenderPass renderPass;
2157 0u, // deUint32 subpass;
2158 0u, // VkPipeline basePipelineHandle;
2159 0u // deInt32 basePipelineIndex;
2162 return vk::createGraphicsPipeline(vk, vkDevice, DE_NULL, &graphicsPipelineParams);
2165 } // anonymous (utilities)
2167 // UniformBlockCase.
2169 UniformBlockCase::UniformBlockCase (tcu::TestContext& testCtx, const std::string& name, const std::string& description, BufferMode bufferMode, MatrixLoadFlags matrixLoadFlag)
2170 : TestCase (testCtx, name, description)
2171 , m_bufferMode (bufferMode)
2172 , m_matrixLoadFlag (matrixLoadFlag)
2176 UniformBlockCase::~UniformBlockCase (void)
2180 void UniformBlockCase::initPrograms (vk::SourceCollections& programCollection) const
2182 DE_ASSERT(!m_vertShaderSource.empty());
2183 DE_ASSERT(!m_fragShaderSource.empty());
2185 programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
2186 programCollection.glslSources.add("frag") << glu::FragmentSource(m_fragShaderSource);
2189 TestInstance* UniformBlockCase::createInstance (Context& context) const
2191 return new UniformBlockCaseInstance(context, m_bufferMode, m_uniformLayout, m_blockPointers);
2194 void UniformBlockCase::init (void)
2196 // Compute reference layout.
2197 computeStd140Layout(m_uniformLayout, m_interface);
2199 // Assign storage for reference values.
2202 for (std::vector<BlockLayoutEntry>::const_iterator blockIter = m_uniformLayout.blocks.begin(); blockIter != m_uniformLayout.blocks.end(); blockIter++)
2203 totalSize += blockIter->size;
2204 m_data.resize(totalSize);
2206 // Pointers for each block.
2208 for (int blockNdx = 0; blockNdx < (int)m_uniformLayout.blocks.size(); blockNdx++)
2210 m_blockPointers[blockNdx] = &m_data[0] + curOffset;
2211 curOffset += m_uniformLayout.blocks[blockNdx].size;
2216 generateValues(m_uniformLayout, m_blockPointers, 1 /* seed */);
2218 // Generate shaders.
2219 m_vertShaderSource = generateVertexShader(m_interface, m_uniformLayout, m_blockPointers, m_matrixLoadFlag);
2220 m_fragShaderSource = generateFragmentShader(m_interface, m_uniformLayout, m_blockPointers, m_matrixLoadFlag);