1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
8 * Copyright (c) 2018 The Khronos Group Inc.
10 * Licensed under the Apache License, Version 2.0 (the "License");
11 * you may not use this file except in compliance with the License.
12 * You may obtain a copy of the License at
14 * http://www.apache.org/licenses/LICENSE-2.0
16 * Unless required by applicable law or agreed to in writing, software
17 * distributed under the License is distributed on an "AS IS" BASIS,
18 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 * See the License for the specific language governing permissions and
20 * limitations under the License.
24 * \brief Vulkan Transform Feedback Fuzz Layout Tests
25 *//*--------------------------------------------------------------------*/
27 #include "vktTransformFeedbackFuzzLayoutCase.hpp"
29 #include "vkPrograms.hpp"
31 #include "gluVarType.hpp"
32 #include "tcuTestLog.hpp"
33 #include "tcuSurface.hpp"
34 #include "deRandom.hpp"
35 #include "deStringUtil.hpp"
37 #include "tcuTextureUtil.hpp"
38 #include "deSharedPtr.hpp"
39 #include "deFloat16.h"
41 #include "vkMemUtil.hpp"
42 #include "vkQueryUtil.hpp"
43 #include "vkTypeUtil.hpp"
45 #include "vkRefUtil.hpp"
46 #include "vkBuilderUtil.hpp"
47 #include "vkCmdUtil.hpp"
48 #include "vkObjUtil.hpp"
58 namespace TransformFeedback
// Maps an XFB buffer index to one per-buffer integer (stride, alignment, group id, ...).
63 typedef std::map<int, int> BufferGeneralMapping;
// Byte range [start, end) already occupied inside an XFB buffer (end is exclusive; see isOverlaped).
65 typedef std::pair<int, int> UsedRange;
66 typedef std::vector<UsedRange> UsedRangeList;
// Per-XFB-buffer list of occupied byte ranges, used to detect block overlap during layout.
67 typedef std::map<int, UsedRangeList> BufferUsedRangesMap;
69 // VarType implementation.
// Default constructor: builds an uninitialized/"last" type (members set elsewhere — body not visible here).
71 VarType::VarType (void)
// Copy constructor (delegates to operator=; body not fully visible in this view).
77 VarType::VarType (const VarType& other)
// Basic (scalar/vector/matrix) type constructor.
84 VarType::VarType (glu::DataType basicType, deUint32 flags)
88 m_data.basicType = basicType;
// Array type constructor: deep-copies the element type (owned; freed in the destructor).
91 VarType::VarType (const VarType& elementType, int arraySize)
95 m_data.array.size = arraySize;
96 m_data.array.elementType = new VarType(elementType);
// Struct type constructor: stores a non-owning pointer to a StructType managed by ShaderInterface.
99 VarType::VarType (const StructType* structPtr, deUint32 flags)
100 : m_type (TYPE_STRUCT)
103 m_data.structPtr = structPtr;
// Destructor: only array types own heap memory (the element type).
106 VarType::~VarType (void)
108 if (m_type == TYPE_ARRAY)
109 delete m_data.array.elementType;
112 VarType& VarType::operator= (const VarType& other)
115 return *this; // Self-assignment.
// Keep the old element pointer alive until the copy is done, then free it;
// this makes the assignment safe even when 'other' aliases our own element type.
117 VarType *oldElementType = m_type == TYPE_ARRAY ? m_data.array.elementType : DE_NULL;
119 m_type = other.m_type;
120 m_flags = other.m_flags;
123 if (m_type == TYPE_ARRAY)
125 m_data.array.elementType = new VarType(*other.m_data.array.elementType);
126 m_data.array.size = other.m_data.array.size;
129 m_data = other.m_data;
131 delete oldElementType;
136 // StructType implementation.
// Appends a named member (with field flags) to this struct type, preserving declaration order.
137 void StructType::addMember (const std::string& name, const VarType& type, deUint32 flags)
139 m_members.push_back(StructMember(name, type, flags));
142 // InterfaceBlockMember implementation.
// Interface block member: name + type + layout/field flags (initializer list not visible in this view).
143 InterfaceBlockMember::InterfaceBlockMember (const std::string& name, const VarType& type, deUint32 flags)
150 // InterfaceBlock implementation.
// Interface block: initially named only; instance name/array size/flags are set separately.
151 InterfaceBlock::InterfaceBlock (const std::string& blockName)
152 : m_blockName (blockName)
// Debug pretty-printer for a computed block layout entry (buffer/offset/size and active interfaces).
159 std::ostream& operator<< (std::ostream& stream, const BlockLayoutEntry& entry)
161 stream << entry.name << " { name = " << entry.name
162 << ", buffer = " << entry.xfbBuffer
163 << ", offset = " << entry.xfbOffset
164 << ", size = " << entry.xfbSize
165 << ", blockDeclarationNdx = " << entry.blockDeclarationNdx
166 << ", instanceNdx = " << entry.instanceNdx
167 << ", activeInterfaceIndices = [";
169 for (std::vector<int>::const_iterator i = entry.activeInterfaceIndices.begin(); i != entry.activeInterfaceIndices.end(); i++)
171 if (i != entry.activeInterfaceIndices.begin())
// Debug pretty-printer for a single interface (variable) layout entry.
180 std::ostream& operator<< (std::ostream& stream, const InterfaceLayoutEntry& entry)
182 stream << entry.name << " { type = " << glu::getDataTypeName(entry.type)
183 << ", arraySize = " << entry.arraySize
184 << ", blockNdx = " << entry.blockLayoutNdx
185 << ", offset = " << entry.offset
186 << ", arrayStride = " << entry.arrayStride
187 << ", matrixStride = " << entry.matrixStride
// Dumps the whole layout: all blocks first, then each block's active interfaces keyed "block:entry".
193 std::ostream& operator<< (std::ostream& str, const InterfaceLayout& layout)
195 const int numBlocks = (int)layout.blocks.size();
197 str << "Blocks:" << std::endl;
198 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
199 str << layout.blocks[blockNdx] << std::endl;
202 str << "Interfaces:" << std::endl;
203 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
205 int numEntries = (int)layout.blocks[blockNdx].activeInterfaceIndices.size();
207 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
209 const InterfaceLayoutEntry& entry = layout.interfaces[layout.blocks[blockNdx].activeInterfaceIndices[entryNdx]];
211 str << blockNdx << ":" << entryNdx << " " << entry << std::endl;
// Linear search for the interface entry with the given name belonging to declaration index
// 'blockNdx' (matched via the owning block's blockDeclarationNdx). Returns -1 on miss —
// the negative-return path is outside this view; TODO confirm against the full file.
219 int InterfaceLayout::getInterfaceLayoutIndex (int blockNdx, const std::string& name) const
221 for (int ndx = 0; ndx < (int)interfaces.size(); ndx++)
223 if (blocks[interfaces[ndx].blockLayoutNdx].blockDeclarationNdx == blockNdx && interfaces[ndx].name == name)
// Linear search for the block layout entry matching (declaration index, instance index).
230 int InterfaceLayout::getBlockLayoutIndex (int blockNdx, int instanceNdx) const
232 for (int ndx = 0; ndx < (int)blocks.size(); ndx++)
234 if (blocks[ndx].blockDeclarationNdx == blockNdx && blocks[ndx].instanceNdx == instanceNdx)
241 // ShaderInterface implementation.
243 ShaderInterface::ShaderInterface (void)
247 ShaderInterface::~ShaderInterface (void)
// Allocates a new struct type owned by this interface (shared-pointer storage keeps it alive
// for the lifetime of the interface; VarType only stores raw pointers to it).
251 StructType& ShaderInterface::allocStruct (const std::string& name)
253 m_structs.push_back(StructTypeSP(new StructType(name)));
254 return *m_structs.back();
// Predicate: matches a struct by its (optional) type name.
257 struct StructNameEquals
261 StructNameEquals (const std::string& name_) : name(name_) {}
263 bool operator() (const StructTypeSP type) const
265 return type->hasTypeName() && name == type->getTypeName();
// Collects raw pointers to all structs that have a type name (anonymous structs are skipped).
269 void ShaderInterface::getNamedStructs (std::vector<const StructType*>& structs) const
271 for (std::vector<StructTypeSP>::const_iterator i = m_structs.begin(); i != m_structs.end(); i++)
273 if ((*i)->hasTypeName())
274 structs.push_back((*i).get())
// Allocates a new interface block owned by this interface.
278 InterfaceBlock& ShaderInterface::allocBlock (const std::string& name)
280 m_interfaceBlocks.push_back(InterfaceBlockSP(new InterfaceBlock(name)));
282 return *m_interfaceBlocks.back();
285 namespace // Utilities
// Stream-formatting helper for precision qualifier flags (lowp/mediump/highp).
288 struct PrecisionFlagsFmt
291 PrecisionFlagsFmt (deUint32 flags_) : flags(flags_) {}
// Hex-dumps 'size' bytes in 16-byte rows; when 'dataMask' is given, bytes whose mask is 0
// are not printed (used to skip padding/unvalidated bytes in XFB buffer dumps).
294 void dumpBytes (std::ostream& str, const std::string& msg, const void* dataBytes, size_t size, const void* dataMask = DE_NULL)
296 const deUint8* data = (const deUint8*)dataBytes;
297 const deUint8* mask = (const deUint8*)dataMask;
298 std::ios::fmtflags flags;
// Save the stream's format flags so they can be restored after the dump.
302 flags = str.flags ( std::ios::hex | std::ios::uppercase );
304 for (size_t i = 0; i < size; i++)
306 if (i%16 == 0) str << std::endl << std::setfill('0') << std::setw(8) << i << ":";
307 else if (i%8 == 0) str << " ";
308 else if (i%4 == 0) str << " ";
310 str << " " << std::setfill('0') << std::setw(2);
312 if (mask == DE_NULL || mask[i] != 0)
313 str << (deUint32)data[i];
317 str << std::endl << std::endl;
// Prints at most one precision qualifier; the assert guarantees the flags are mutually exclusive.
322 std::ostream& operator<< (std::ostream& str, const PrecisionFlagsFmt& fmt)
325 DE_ASSERT(dePop32(fmt.flags & (PRECISION_LOW|PRECISION_MEDIUM|PRECISION_HIGH)) <= 1);
326 str << (fmt.flags & PRECISION_LOW ? "lowp" :
327 fmt.flags & PRECISION_MEDIUM ? "mediump" :
328 fmt.flags & PRECISION_HIGH ? "highp" : "");
// Stream-formatting helper for GLSL layout() qualifiers: carries the flag bits plus the
// xfb_buffer / xfb_stride / xfb_offset values to print alongside them.
332 struct LayoutFlagsFmt
339 LayoutFlagsFmt (const deUint32 flags_,
340 const deUint32 buffer_,
341 const deUint32 stride_,
342 const deUint32 offset_)
// Emits the layout qualifiers named by fmt.flags; each xfb_* token gets "= <value>" appended.
// The final assert catches any flag bit without a matching table entry.
351 std::ostream& operator<< (std::ostream& str, const LayoutFlagsFmt& fmt)
359 { LAYOUT_XFBBUFFER, "xfb_buffer" },
360 { LAYOUT_XFBOFFSET, "xfb_offset" },
361 { LAYOUT_XFBSTRIDE, "xfb_stride" },
364 deUint32 remBits = fmt.flags;
365 for (int descNdx = 0; descNdx < DE_LENGTH_OF_ARRAY(bitDesc); descNdx++)
367 if (remBits & bitDesc[descNdx].bit)
369 str << bitDesc[descNdx].token;
371 if (bitDesc[descNdx].bit == LAYOUT_XFBBUFFER) str << " = " << fmt.buffer;
372 if (bitDesc[descNdx].bit == LAYOUT_XFBOFFSET) str << " = " << fmt.offset;
373 if (bitDesc[descNdx].bit == LAYOUT_XFBSTRIDE) str << " = " << fmt.stride;
375 remBits &= ~bitDesc[descNdx].bit;
381 DE_ASSERT(remBits == 0);
// Prints a vector of VkDeviceSize values as "…, …]" (comma-separated, closing bracket).
385 std::ostream& operator<< (std::ostream& str, const DeviceSizeVector& vec)
389 for (size_t vecNdx = 0; vecNdx < vec.size(); vecNdx++)
390 str << (deUint64)vec[vecNdx] << (vecNdx + 1 < vec.size() ? ", " : "]");
395 // Layout computation.
// Size in bytes of a basic type when captured by transform feedback:
// scalar count * 8 for double-based types, scalar count * 4 for everything else.
397 int getDataTypeByteSize (glu::DataType type)
399 if (getDataTypeScalarType(type) == glu::TYPE_DOUBLE)
401 return glu::getDataTypeScalarSize(type)*(int)sizeof(deUint64);
405 return glu::getDataTypeScalarSize(type)*(int)sizeof(deUint32);
// XFB array stride of a non-matrix element: elements are tightly packed (stride == byte size).
409 int getDataTypeArrayStride (glu::DataType type)
411 DE_ASSERT(!glu::isDataTypeMatrix(type));
413 return getDataTypeByteSize(type);
// Stride used for *location* accounting: each element is padded up to a vec4 (16-byte) slot.
416 int getDataTypeArrayStrideForLocation (glu::DataType type)
418 DE_ASSERT(!glu::isDataTypeMatrix(type));
420 const int baseStride = getDataTypeByteSize(type);
421 const int vec4Alignment = (int)sizeof(deUint32) * 4;
423 return deAlign32(baseStride, vec4Alignment);
// XFB alignment requirement of a member type: 8 for double-based types, 4 otherwise.
// Arrays use their element's alignment; structs use the max over all members (recursive).
426 int computeInterfaceBlockMemberAlignment (const VarType& type)
428 if (type.isBasicType())
430 glu::DataType basicType = type.getBasicType();
// Reduce vectors/matrices to their scalar component type before the switch.
432 if (glu::isDataTypeMatrix(basicType) || isDataTypeVector(basicType))
433 basicType = glu::getDataTypeScalarType(basicType);
437 case glu::TYPE_FLOAT:
439 case glu::TYPE_UINT: return sizeof(deUint32);
440 case glu::TYPE_DOUBLE: return sizeof(deUint64);
441 default: TCU_THROW(InternalError, "Invalid type");
444 else if (type.isArrayType())
446 return computeInterfaceBlockMemberAlignment(type.getElementType());
448 else if (type.isStructType())
450 int maxAlignment = 0;
452 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
453 maxAlignment = de::max(maxAlignment, computeInterfaceBlockMemberAlignment(memberIter->getType()));
458 TCU_THROW(InternalError, "Invalid type");
// Marks in 'maskBasePtr' the bytes this interface entry actually writes, walking
// array elements -> matrix columns -> components with the entry's strides.
// 'basePtr0' is the start of the whole buffer; 'basePtr' the start of this block's data,
// so (compPtr - basePtr0) converts a component address into a buffer-wide mask offset.
461 void createMask (void* maskBasePtr, const InterfaceLayoutEntry& entry, const void* basePtr0, const void* basePtr)
463 const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
464 const int scalarSize = glu::getDataTypeScalarSize(entry.type);
465 const bool isMatrix = glu::isDataTypeMatrix(entry.type);
466 const int numVecs = isMatrix ? glu::getDataTypeMatrixNumColumns(entry.type) : 1;
467 const int vecSize = scalarSize / numVecs;
468 const bool isArray = entry.arraySize > 1;
469 const size_t compSize = getDataTypeByteSize(scalarType);
471 DE_ASSERT(scalarSize%numVecs == 0);
473 for (int elemNdx = 0; elemNdx < entry.arraySize; elemNdx++)
475 deUint8* elemPtr = (deUint8*)basePtr + entry.offset + (isArray ? elemNdx*entry.arrayStride : 0);
477 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
479 deUint8* vecPtr = elemPtr + (isMatrix ? vecNdx*entry.matrixStride : 0);
481 for (int compNdx = 0; compNdx < vecSize; compNdx++)
483 const deUint8* compPtr = vecPtr + compSize*compNdx;
484 const size_t offset = compPtr - (deUint8*)basePtr0;
485 deUint8* maskPtr = (deUint8*)maskBasePtr + offset;
// Switch on the scalar type; the per-type marking body is not visible in this view.
489 case glu::TYPE_DOUBLE:
490 case glu::TYPE_FLOAT:
494 for (size_t ndx = 0; ndx < compSize; ++ndx)
// Builds a byte mask (same size as the whole capture buffer) with non-zero bytes where
// any active interface of any block writes data; all other bytes stay 0 (ignored on compare).
507 std::vector<deUint8> createMask (const InterfaceLayout& layout, const std::map<int, void*>& blockPointers, const void* basePtr0, const size_t baseSize)
509 std::vector<deUint8> mask (baseSize, 0);
510 const int numBlocks ((int)layout.blocks.size());
512 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
514 void* basePtr = blockPointers.find(blockNdx)->second;
515 int numEntries = (int)layout.blocks[blockNdx].activeInterfaceIndices.size();
517 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
519 const InterfaceLayoutEntry& entry = layout.interfaces[layout.blocks[blockNdx].activeInterfaceIndices[entryNdx]];
522 createMask (&mask[0], entry, basePtr0, basePtr);
// Alignment of a whole interface block = max alignment over all of its members
// (i.e. 8 if any member contains a double, else 4 — see computeInterfaceBlockMemberAlignment).
529 int computeInterfaceBlockAlignment(const InterfaceBlock& interfaceBlock)
531 int baseAlignment = 0;
533 for (InterfaceBlock::ConstIterator memberIter = interfaceBlock.begin(); memberIter != interfaceBlock.end(); memberIter++)
535 const InterfaceBlockMember& member = *memberIter;
537 baseAlignment = std::max(baseAlignment, computeInterfaceBlockMemberAlignment(member.getType()));
540 return baseAlignment;
// True when [a1, b1) and [a2, b2) intersect. The end points are exclusive, hence the -1
// to get the last contained byte before the inclusive deInRange32 checks. All four checks
// are needed so that either range containing the other is also detected.
543 static inline bool isOverlaped(const int a1, const int b1, const int a2, const int b2)
545 DE_ASSERT(b1 > 0 && b2 > 0);
547 const int b1s = b1 - 1;
548 const int b2s = b2 - 1;
550 return deInRange32(a1, a2, b2s) ||
551 deInRange32(b1s, a2, b2s) ||
552 deInRange32(a2, a1, b1s) ||
553 deInRange32(b2s, a1, b1s);
// Recursively lays out one member type into 'layout', advancing the running byte offset
// ('curOffset') and location counter ('curLocation'). Locations are counted in vec4
// (16-byte) slots; offsets use tight XFB packing. Entries flagged FIELD_MISSING or
// FIELD_UNASSIGNED are still laid out but not validated later.
556 void computeXfbLayout (InterfaceLayout& layout, int& curOffset, int& curLocation, int curBlockNdx, const std::string& curPrefix, const VarType& type, deUint32 layoutFlags)
558 const int locationAlignSize = 16;
559 const bool validate = 0 == (layoutFlags & (FIELD_MISSING|FIELD_UNASSIGNED));
560 int baseAlignment = computeInterfaceBlockMemberAlignment(type);
562 DE_ASSERT(baseAlignment == sizeof(deUint32) || baseAlignment == sizeof(deUint64));
// Align the member start to 4 (or 8 for double-based types).
564 curOffset = deAlign32(curOffset, baseAlignment);
566 if (type.isBasicType())
568 const glu::DataType basicType = type.getBasicType();
570 int fieldSizeForLocation = 0;
571 InterfaceLayoutEntry entry;
573 entry.name = curPrefix;
574 entry.type = basicType;
576 entry.arrayStride = 0;
577 entry.matrixStride = 0;
578 entry.blockLayoutNdx = curBlockNdx;
579 entry.locationNdx = 0;
580 entry.validate = validate;
582 if (glu::isDataTypeMatrix(basicType))
// A matrix is captured as numVecs column vectors, each with the column vector's stride.
585 const int vecSize = glu::getDataTypeMatrixNumRows(basicType);
586 const int numVecs = glu::getDataTypeMatrixNumColumns(basicType);
587 const glu::DataType elemType = glu::getDataTypeScalarType(basicType);
588 const int stride = getDataTypeArrayStride(glu::getDataTypeVector(elemType, vecSize));
589 const int strideForLocation = getDataTypeArrayStrideForLocation(glu::getDataTypeVector(elemType, vecSize));
591 entry.matrixStride = stride;
593 fieldSize = numVecs * stride;
594 fieldSizeForLocation = numVecs * strideForLocation;
599 fieldSize = getDataTypeByteSize(basicType);
600 fieldSizeForLocation = deAlign32(fieldSize, locationAlignSize);
603 entry.offset = curOffset;
604 entry.locationNdx = curLocation;
606 curOffset += fieldSize;
607 curLocation += deDivRoundUp32(fieldSizeForLocation, locationAlignSize);
609 layout.interfaces.push_back(entry);
611 else if (type.isArrayType())
613 const VarType& elemType = type.getElementType();
615 if (elemType.isBasicType() && !glu::isDataTypeMatrix(elemType.getBasicType()))
617 // Array of scalars or vectors.
618 const glu::DataType elemBasicType = elemType.getBasicType();
619 const int stride = getDataTypeArrayStride(elemBasicType);
620 const int fieldSize = stride * type.getArraySize();
621 const int strideForLocation = getDataTypeArrayStrideForLocation(elemBasicType);
622 const int fieldSizeForLocation = strideForLocation * type.getArraySize();
623 InterfaceLayoutEntry entry;
625 entry.name = curPrefix + "[0]"; // Array interfaces are always postfixed with [0]
626 entry.type = elemBasicType;
627 entry.blockLayoutNdx = curBlockNdx;
628 entry.offset = curOffset;
629 entry.arraySize = type.getArraySize();
630 entry.arrayStride = stride;
631 entry.matrixStride = 0;
632 entry.locationNdx = curLocation;
633 entry.validate = validate;
635 curOffset += fieldSize;
636 curLocation += deDivRoundUp32(fieldSizeForLocation, locationAlignSize);
638 layout.interfaces.push_back(entry);
640 else if (elemType.isBasicType() && glu::isDataTypeMatrix(elemType.getBasicType()))
642 // Array of matrices.
643 const glu::DataType elemBasicType = elemType.getBasicType();
644 const glu::DataType scalarType = glu::getDataTypeScalarType(elemBasicType);
645 const int vecSize = glu::getDataTypeMatrixNumRows(elemBasicType);
646 const int numVecs = glu::getDataTypeMatrixNumColumns(elemBasicType);
647 const int stride = getDataTypeArrayStride(glu::getDataTypeVector(scalarType, vecSize));
648 const int fieldSize = numVecs * type.getArraySize() * stride;
649 const int strideForLocation = getDataTypeArrayStrideForLocation(glu::getDataTypeVector(scalarType, vecSize));
650 const int fieldSizeForLocation = numVecs * type.getArraySize() * strideForLocation;
651 InterfaceLayoutEntry entry;
653 entry.name = curPrefix + "[0]"; // Array interfaces are always postfixed with [0]
654 entry.type = elemBasicType;
655 entry.blockLayoutNdx = curBlockNdx;
656 entry.offset = curOffset;
657 entry.arraySize = type.getArraySize();
658 entry.arrayStride = stride*numVecs;
659 entry.matrixStride = stride;
660 entry.locationNdx = curLocation;
661 entry.validate = validate;
663 curOffset += fieldSize;
664 curLocation += deDivRoundUp32(fieldSizeForLocation, locationAlignSize);
666 layout.interfaces.push_back(entry);
// Arrays of structs / arrays of arrays: recurse per element with an indexed prefix.
670 DE_ASSERT(elemType.isStructType() || elemType.isArrayType());
672 for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
673 computeXfbLayout(layout, curOffset, curLocation, curBlockNdx, curPrefix + "[" + de::toString(elemNdx) + "]", type.getElementType(), layoutFlags)
// Structs: recurse per member with a dotted prefix; member flags are merged with the
// inherited flags but restricted to the FIELD_OPTIONS bits.
678 DE_ASSERT(type.isStructType());
680 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
681 computeXfbLayout(layout, curOffset, curLocation, curBlockNdx, curPrefix + "." + memberIter->getName(), memberIter->getType(), (memberIter->getFlags() | layoutFlags) & FIELD_OPTIONS);
// Pad the struct's tail so the aggregate occupies a multiple of its base alignment.
683 curOffset = deAlign32(curOffset, baseAlignment);
// Lays out every interface block of the shader interface into XFB buffers.
// Outputs: 'layout' (block + interface entries), 'perBufferXfbOffsets' (ends up holding the
// final per-buffer stride) and 'locationsUsed'. Blocks already placed in a buffer are tracked
// as used byte ranges so later blocks are pushed past any overlap. Buffers that share a
// stride group (instanced block arrays) are forced to a common stride and alignment.
687 void computeXfbLayout (InterfaceLayout& layout, ShaderInterface& shaderInterface, BufferGeneralMapping& perBufferXfbOffsets, deUint32& locationsUsed)
689 const int numInterfaceBlocks = shaderInterface.getNumInterfaceBlocks();
691 BufferGeneralMapping bufferAlignments;
692 BufferGeneralMapping buffersList;
693 BufferGeneralMapping bufferStrideGroup;
694 BufferUsedRangesMap bufferUsedRanges;
// Pass 1: collect the set of declared XFB buffers; each starts in its own stride group.
696 for (int blockNdx = 0; blockNdx < numInterfaceBlocks; blockNdx++)
698 const InterfaceBlock& interfaceBlock = shaderInterface.getInterfaceBlock(blockNdx);
699 const int xfbBuffer = interfaceBlock.getXfbBuffer();
701 buffersList[xfbBuffer] = 1;
702 bufferStrideGroup[xfbBuffer] = xfbBuffer;
// Pass 2: for each buffer, lay out every block assigned to it.
705 for (BufferGeneralMapping::const_iterator xfbBuffersIter = buffersList.begin(); xfbBuffersIter != buffersList.end(); xfbBuffersIter++)
707 const int xfbBufferAnalyzed = xfbBuffersIter->first;
709 for (int blockNdx = 0; blockNdx < numInterfaceBlocks; blockNdx++)
711 InterfaceBlock& interfaceBlock = shaderInterface.getInterfaceBlockForModify(blockNdx);
713 if (interfaceBlock.getXfbBuffer() == xfbBufferAnalyzed)
715 const bool hasInstanceName = interfaceBlock.hasInstanceName();
716 const std::string blockPrefix = hasInstanceName ? (interfaceBlock.getBlockName() + ".") : "";
717 const int numInstances = interfaceBlock.isArray() ? interfaceBlock.getArraySize() : 1;
718 int activeBlockNdx = (int)layout.blocks.size();
719 int startInterfaceNdx = (int)layout.interfaces.size();
720 int startLocationNdx = (int)curLocation;
721 int interfaceAlignement = computeInterfaceBlockAlignment(interfaceBlock);
// Placement loop: try an offset; if the block overlaps an already-used range,
// roll back the produced entries and retry past the obstacle.
727 const int xfbFirstInstanceBuffer = interfaceBlock.getXfbBuffer();
728 int& xfbFirstInstanceBufferOffset = perBufferXfbOffsets[xfbFirstInstanceBuffer];
729 const int savedLayoutInterfacesNdx = (int)layout.interfaces.size();
730 const int savedCurOffset = curOffset;
731 const int savedCurLocation = curLocation;
732 UsedRangeList& usedRanges = bufferUsedRanges[xfbFirstInstanceBuffer];
733 bool fitIntoBuffer = true;
736 // Further, if applied to an aggregate containing a double, the offset must also be a multiple of 8,
737 // and the space taken in the buffer will be a multiple of 8.
738 xfbFirstInstanceBufferOffset = deAlign32(xfbFirstInstanceBufferOffset, interfaceAlignement);
740 for (InterfaceBlock::ConstIterator memberIter = interfaceBlock.begin(); memberIter != interfaceBlock.end(); memberIter++)
742 const InterfaceBlockMember& member = *memberIter;
744 computeXfbLayout(layout, curOffset, curLocation, activeBlockNdx, blockPrefix + member.getName(), member.getType(), member.getFlags() & FIELD_OPTIONS);
748 // Further, if applied to an aggregate containing a double, the offset must also be a multiple of 8,
749 // and the space taken in the buffer will be a multiple of 8.
750 blockSize = deAlign32(curOffset, interfaceAlignement);
// Overlap check against every range already occupied in this buffer.
753 for (UsedRangeList::const_iterator usedRangeIt = usedRanges.begin();
754 usedRangeIt != usedRanges.end();
757 const int& usedRangeStart = usedRangeIt->first;
758 const int& usedRangeEnd = usedRangeIt->second;
759 const int genRangeStart = xfbFirstInstanceBufferOffset;
760 const int genRangeEnd = xfbFirstInstanceBufferOffset + blockSize;
762 // Validate if block has overlapping
763 if (isOverlaped(genRangeStart, genRangeEnd, usedRangeStart, usedRangeEnd))
765 // Restart from obstacle interface end
766 fitIntoBuffer = false;
768 DE_ASSERT(xfbFirstInstanceBufferOffset > usedRangeEnd);
770 // Bump up interface start to the end of used range
771 xfbFirstInstanceBufferOffset = usedRangeEnd;
// Roll back everything produced during the failed placement attempt.
774 curOffset = savedCurOffset;
775 curLocation = savedCurLocation;
777 layout.interfaces.resize(savedLayoutInterfacesNdx);
// Placement succeeded: materialize one BlockLayoutEntry per instance.
785 const int xfbFirstInstanceBuffer = interfaceBlock.getXfbBuffer();
786 const int xfbFirstInstanceBufferOffset = perBufferXfbOffsets[xfbFirstInstanceBuffer];
787 const int endInterfaceNdx = (int)layout.interfaces.size();
788 const int blockSizeInLocations = curLocation - startLocationNdx;
790 curLocation -= blockSizeInLocations;
792 if (numInstances > 1)
793 interfaceBlock.setFlag(LAYOUT_XFBSTRIDE);
795 // Create block layout entries for each instance.
796 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
798 // Allocate entry for instance.
799 layout.blocks.push_back(BlockLayoutEntry());
801 BlockLayoutEntry& blockEntry = layout.blocks.back();
// Each instance of an instanced block array lands in the next consecutive buffer.
802 const int xfbBuffer = xfbFirstInstanceBuffer + instanceNdx;
803 int& xfbBufferOffset = perBufferXfbOffsets[xfbBuffer];
805 DE_ASSERT(xfbBufferOffset <= xfbFirstInstanceBufferOffset);
807 xfbBufferOffset = xfbFirstInstanceBufferOffset;
809 blockEntry.name = interfaceBlock.getBlockName();
810 blockEntry.xfbBuffer = xfbBuffer;
811 blockEntry.xfbOffset = xfbBufferOffset;
812 blockEntry.xfbSize = blockSize;
813 blockEntry.blockDeclarationNdx = blockNdx;
814 blockEntry.instanceNdx = instanceNdx;
815 blockEntry.locationNdx = curLocation;
816 blockEntry.locationSize = blockSizeInLocations;
818 xfbBufferOffset += blockSize;
819 curLocation += blockSizeInLocations;
821 // Compute active interface set for block.
822 for (int interfaceNdx = startInterfaceNdx; interfaceNdx < endInterfaceNdx; interfaceNdx++)
823 blockEntry.activeInterfaceIndices.push_back(interfaceNdx);
825 if (interfaceBlock.isArray())
826 blockEntry.name += "[" + de::toString(instanceNdx) + "]";
828 bufferUsedRanges[xfbBuffer].push_back(UsedRange(blockEntry.xfbOffset, blockEntry.xfbOffset + blockEntry.xfbSize));
830 // Store maximum per-buffer alignment
831 bufferAlignments[xfbBuffer] = std::max(interfaceAlignement, bufferAlignments[xfbBuffer]);
833 // Buffers bound through instanced arrays must have same stride (and alignment)
834 bufferStrideGroup[xfbBuffer] = bufferStrideGroup[xfbFirstInstanceBuffer];
840 // All XFB buffers within group must have same stride
842 BufferGeneralMapping groupStride;
// Gather the maximum end offset (stride candidate) per stride group...
844 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin(); xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
846 const int xfbBuffer = xfbBuffersIter->first;
847 const int xfbStride = perBufferXfbOffsets[xfbBuffer];
848 const int group = bufferStrideGroup[xfbBuffer];
850 groupStride[group] = std::max(groupStride[group], xfbStride);
// ...then write the group maximum back to every buffer in the group.
853 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin(); xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
855 const int xfbBuffer = xfbBuffersIter->first;
856 const int group = bufferStrideGroup[xfbBuffer];
858 perBufferXfbOffsets[xfbBuffer] = groupStride[group];
862 // All XFB buffers within group must have same stride alignment
864 BufferGeneralMapping groupAlignment;
866 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin(); xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
868 const int xfbBuffer = xfbBuffersIter->first;
869 const int group = bufferStrideGroup[xfbBuffer];
870 const int xfbAlign = bufferAlignments[xfbBuffer];
872 groupAlignment[group] = std::max(groupAlignment[group], xfbAlign);
875 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin(); xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
877 const int xfbBuffer = xfbBuffersIter->first;
878 const int group = bufferStrideGroup[xfbBuffer];
880 bufferAlignments[xfbBuffer] = groupAlignment[group];
885 // If the buffer is capturing any outputs with double-precision components, the stride must be a multiple of 8, ...
886 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin(); xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
888 const int xfbBuffer = xfbBuffersIter->first;
889 const int xfbAlign = bufferAlignments[xfbBuffer];
890 int& xfbOffset = perBufferXfbOffsets[xfbBuffer];
892 xfbOffset = deAlign32(xfbOffset, xfbAlign);
895 // Keep stride in interface blocks
896 for (int blockNdx = 0; blockNdx < (int)layout.blocks.size(); blockNdx++)
897 layout.blocks[blockNdx].xfbStride = perBufferXfbOffsets[layout.blocks[blockNdx].xfbBuffer];
899 locationsUsed = static_cast<deUint32>(curLocation);
// Fills one interface entry's storage with small random values (magnitude 1..127, random sign;
// unsigned values are kept non-negative), walking arrays/columns/components with the entry's
// strides — the same traversal order as createMask.
904 void generateValue (const InterfaceLayoutEntry& entry, void* basePtr, de::Random& rnd)
906 const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
907 const int scalarSize = glu::getDataTypeScalarSize(entry.type);
908 const bool isMatrix = glu::isDataTypeMatrix(entry.type);
909 const int numVecs = isMatrix ? glu::getDataTypeMatrixNumColumns(entry.type) : 1;
910 const int vecSize = scalarSize / numVecs;
911 const bool isArray = entry.arraySize > 1;
912 const size_t compSize = getDataTypeByteSize(scalarType);
914 DE_ASSERT(scalarSize%numVecs == 0);
916 for (int elemNdx = 0; elemNdx < entry.arraySize; elemNdx++)
918 deUint8* elemPtr = (deUint8*)basePtr + entry.offset + (isArray ? elemNdx*entry.arrayStride : 0);
920 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
922 deUint8* vecPtr = elemPtr + (isMatrix ? vecNdx*entry.matrixStride : 0);
924 for (int compNdx = 0; compNdx < vecSize; compNdx++)
926 deUint8* compPtr = vecPtr + compSize*compNdx;
927 const int sign = rnd.getBool() ? +1 : -1;
928 const int value = rnd.getInt(1, 127);
932 case glu::TYPE_DOUBLE: *((double*)compPtr) = (double) (sign * value); break;
933 case glu::TYPE_FLOAT: *((float*)compPtr) = (float) (sign * value); break;
934 case glu::TYPE_INT: *((deInt32*)compPtr) = (deInt32) (sign * value); break;
935 case glu::TYPE_UINT: *((deUint32*)compPtr) = (deUint32)( value); break;
// Seeds deterministic random data for every active interface of every block in the layout.
944 void generateValues (const InterfaceLayout& layout, const std::map<int, void*>& blockPointers, deUint32 seed)
946 de::Random rnd (seed);
947 int numBlocks = (int)layout.blocks.size();
949 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
951 void* basePtr = blockPointers.find(blockNdx)->second;
952 int numEntries = (int)layout.blocks[blockNdx].activeInterfaceIndices.size();
954 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
956 const InterfaceLayoutEntry& entry = layout.interfaces[layout.blocks[blockNdx].activeInterfaceIndices[entryNdx]];
959 generateValue(entry, basePtr, rnd);
// Indentation helper: streams one indent unit per level (unit character not visible in this view).
969 Indent (int level_) : level(level_) {}
972 std::ostream& operator<< (std::ostream& str, const Indent& indent)
974 for (int i = 0; i < indent.level; i++)
// Forward declarations for the mutually-referencing GLSL source generators below.
979 void generateDeclaration (std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 unusedHints, deUint32 flagsMask, deUint32 buffer, deUint32 stride, deUint32 offset);
980 void generateDeclaration (std::ostringstream& src, const InterfaceBlockMember& member, int indentLevel, deUint32 buffer, deUint32 stride, deUint32 offset);
981 void generateDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
983 void generateLocalDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
984 void generateFullDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
// Emits a named struct's full definition (only named structs may be declared at file scope).
986 void generateDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
988 DE_ASSERT(structType.hasTypeName());
989 generateFullDeclaration(src, structType, indentLevel);
// Emits "struct [Name] { members... }" with one member declaration per line.
993 void generateFullDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
996 if (structType.hasTypeName())
997 src << " " << structType.getTypeName();
998 src << "\n" << Indent(indentLevel) << "{\n";
1000 for (StructType::ConstIterator memberIter = structType.begin(); memberIter != structType.end(); memberIter++)
1002 src << Indent(indentLevel + 1);
// ~LAYOUT_MASK: struct members never carry layout() qualifiers of their own.
1003 generateDeclaration(src, memberIter->getType(), memberIter->getName(), indentLevel + 1, memberIter->getFlags() & FIELD_OPTIONS, ~LAYOUT_MASK, 0u, 0u, 0u);
1006 src << Indent(indentLevel) << "}";
// Emits just the struct's type name (for use inside a variable declaration).
1009 void generateLocalDeclaration (std::ostringstream& src, const StructType& structType, int /* indentLevel */)
1011 src << structType.getTypeName();
// Emits optional "layout(...)" and precision qualifiers for the given flag bits.
1014 void generateLayoutAndPrecisionDeclaration (std::ostringstream& src, deUint32 flags, deUint32 buffer, deUint32 stride, deUint32 offset)
1016 if ((flags & LAYOUT_MASK) != 0)
1017 src << "layout(" << LayoutFlagsFmt(flags & LAYOUT_MASK, buffer, stride, offset) << ") ";
1019 if ((flags & PRECISION_MASK) != 0)
1020 src << PrecisionFlagsFmt(flags & PRECISION_MASK) << " ";
// Emits one variable declaration for an arbitrary VarType. FIELD_MISSING fields are
// deliberately omitted from the shader source (the early return below) — that is the
// "fuzz" aspect this test exercises. Array sizes are printed innermost-last after
// unwrapping nested array types.
1023 void generateDeclaration (std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 fieldHints, deUint32 flagsMask, deUint32 buffer, deUint32 stride, deUint32 offset)
1025 if (fieldHints & FIELD_MISSING)
1028 generateLayoutAndPrecisionDeclaration(src, type.getFlags() & flagsMask, buffer, stride, offset);
1030 if (type.isBasicType())
1031 src << glu::getDataTypeName(type.getBasicType()) << " " << name;
1032 else if (type.isArrayType())
1034 std::vector<int> arraySizes;
1035 const VarType* curType = &type;
// Collect all nested array dimensions, outermost first.
1036 while (curType->isArrayType())
1038 arraySizes.push_back(curType->getArraySize());
1039 curType = &curType->getElementType();
1042 generateLayoutAndPrecisionDeclaration(src, curType->getFlags() & flagsMask, buffer, stride, offset);
1044 if (curType->isBasicType())
1045 src << glu::getDataTypeName(curType->getBasicType());
1048 DE_ASSERT(curType->isStructType());
1049 generateLocalDeclaration(src, curType->getStruct(), indentLevel+1);
1054 for (std::vector<int>::const_iterator sizeIter = arraySizes.begin(); sizeIter != arraySizes.end(); sizeIter++)
1055 src << "[" << *sizeIter << "]";
1059 generateLocalDeclaration(src, type.getStruct(), indentLevel+1);
1065 // Print out unused hints.
1066 if (fieldHints & FIELD_MISSING)
1067 src << " // missing field";
1068 else if (fieldHints & FIELD_UNASSIGNED)
1069 src << " // unassigned";
// Emits an interface block member: its own layout() qualifiers first, then the declaration.
1074 void generateDeclaration (std::ostringstream& src, const InterfaceBlockMember& member, int indentLevel, deUint32 buffer, deUint32 stride, deUint32 offset)
1076 if ((member.getFlags() & LAYOUT_MASK) != 0)
1077 src << "layout(" << LayoutFlagsFmt(member.getFlags() & LAYOUT_MASK, buffer, stride, offset) << ") ";
1079 generateDeclaration(src, member.getType(), member.getName(), indentLevel, member.getFlags() & FIELD_OPTIONS, ~0u, buffer, stride, offset);
// Returns the XFB byte offset of a block member by reconstructing the name under which its
// first scalar component was registered in the layout: "[Block.]member[0].firstField..." —
// descending through arrays (first element) and structs (first member) until a basic type.
1082 deUint32 getBlockMemberOffset (int blockNdx, const InterfaceBlock& block, const InterfaceBlockMember& member, const InterfaceLayout& layout)
1084 std::ostringstream name;
1085 const VarType* curType = &member.getType();
1087 if (block.getInstanceName().length() != 0)
1088 name << block.getBlockName() << "."; // \note InterfaceLayoutEntry uses block name rather than instance name
1090 name << member.getName();
1092 while (!curType->isBasicType())
1094 if (curType->isArrayType())
1097 curType = &curType->getElementType();
1100 if (curType->isStructType())
1102 const StructType::ConstIterator firstMember = curType->getStruct().begin();
1104 name << "." << firstMember->getName();
1105 curType = &firstMember->getType();
1109 const int interfaceLayoutNdx = layout.getInterfaceLayoutIndex(blockNdx, name.str());
1110 DE_ASSERT(interfaceLayoutNdx >= 0);
1112 return layout.interfaces[interfaceLayoutNdx].offset;
template<typename T>
// Deterministically reorders v as: last, first, second-to-last, second, ...
// ("semi shuffle") so generated shaders are reproducible across runs.
void semiShuffle (std::vector<T>& v)
{
	const std::vector<T>	original	= v;
	int						pos			= -1;
	int						remaining	= static_cast<int>(original.size());

	v.clear();

	while (remaining)
	{
		pos += remaining;
		v.push_back(original[pos]);

		// Flip the step direction while shrinking its magnitude by one.
		remaining = (remaining > 0 ? 1 - remaining : -1 - remaining);
	}
}
template<typename T>
//! \note Stores pointers to original elements
class Traverser
{
public:
	// Collects pointers to every element of [beg, end); when 'shuffled' is set
	// the visit order is semi-shuffled (deterministically).
	template<typename Iter>
	Traverser (const Iter beg, const Iter end, const bool shuffled)
	{
		for (Iter iter = beg; iter != end; ++iter)
			m_elements.push_back(&(*iter));

		if (shuffled)
			semiShuffle(m_elements);

		m_next = m_elements.begin();
	}

	// Returns the next element, or null once the sequence is exhausted.
	T* next (void)
	{
		if (m_next != m_elements.end())
			return *m_next++;

		return nullptr;
	}

private:
	std::vector<T*>								m_elements;
	typename std::vector<T*>::const_iterator	m_next;
};
// Emits the GLSL declaration of one "out" interface block: the block-level
// layout(location/xfb_*) qualifier, all members (optionally in semi-shuffled
// order), and the trailing instance name / array size.
1162 void generateDeclaration (std::ostringstream& src, int blockNdx, const InterfaceBlock& block, const InterfaceLayout& layout, bool shuffleUniformMembers)
1164 const int indentOne = 1;
// Layout data for instance 0 drives the block-level qualifiers below.
1165 const int ndx = layout.getBlockLayoutIndex(blockNdx, 0);
1166 const int locationNdx = layout.blocks[ndx].locationNdx;
1167 const int xfbOffset = layout.blocks[ndx].xfbOffset;
1168 const int xfbBuffer = layout.blocks[ndx].xfbBuffer;
1169 const int xfbStride = layout.blocks[ndx].xfbStride;
1172 src << "location = " << locationNdx;
1173 if ((block.getFlags() & LAYOUT_MASK) != 0)
1174 src << ", " << LayoutFlagsFmt(block.getFlags() & LAYOUT_MASK, xfbBuffer, xfbStride, xfbOffset);
1175 src << ") out " << block.getBlockName();
// Sizes are appended as a comment in the generated shader text (debug aid).
1178 << " sizeInBytes=" << layout.blocks[ndx].xfbSize
1179 << " sizeInLocations=" << layout.blocks[ndx].locationSize;
// Traverser yields members one by one, semi-shuffled when requested;
// each member's xfb_offset is the block offset plus the member offset.
1183 Traverser<const InterfaceBlockMember> interfaces(block.begin(), block.end(), shuffleUniformMembers);
1185 while (const InterfaceBlockMember* pUniform = interfaces.next())
1187 src << Indent(indentOne);
1188 generateDeclaration(src, *pUniform, indentOne, xfbBuffer, xfbStride, xfbOffset + getBlockMemberOffset(blockNdx, block, *pUniform, layout));
1193 if (block.hasInstanceName())
1195 src << " " << block.getInstanceName();
1196 if (block.isArray())
1197 src << "[" << block.getArraySize() << "]";
// A block without an instance name cannot be arrayed.
1200 DE_ASSERT(!block.isArray());
// Prints a GLSL constructor expression (e.g. "vec4(1.0f, ...)") for the
// reference value of 'entry' at array element 'elementNdx', read back from the
// host-side reference data at basePtr. Returns the byte offset of the element
// within basePtr (elemPtr - basePtr).
1205 int generateValueSrc (std::ostringstream& src, const InterfaceLayoutEntry& entry, const void* basePtr, int elementNdx)
1207 const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
1208 const int scalarSize = glu::getDataTypeScalarSize(entry.type);
1209 const bool isArray = entry.arraySize > 1;
// Element address honors the entry's arrayStride for array members.
1210 const deUint8* elemPtr = (const deUint8*)basePtr + entry.offset + (isArray ? elementNdx * entry.arrayStride : 0);
1211 const size_t compSize = getDataTypeByteSize(scalarType);
1214 src << glu::getDataTypeName(entry.type) << "(";
1216 if (glu::isDataTypeMatrix(entry.type))
1218 const int numRows = glu::getDataTypeMatrixNumRows(entry.type);
1219 const int numCols = glu::getDataTypeMatrixNumColumns(entry.type);
1221 DE_ASSERT(scalarType == glu::TYPE_FLOAT || scalarType == glu::TYPE_DOUBLE);
1223 // Constructed in column-wise order.
1224 for (int colNdx = 0; colNdx < numCols; colNdx++)
1226 for (int rowNdx = 0; rowNdx < numRows; rowNdx++)
// Component address: column via matrixStride, row via component size.
1228 const deUint8* compPtr = elemPtr + (colNdx * entry.matrixStride + rowNdx * compSize);
1229 const float compVal = (scalarType == glu::TYPE_FLOAT) ? *((const float*)compPtr)
1230 : (scalarType == glu::TYPE_DOUBLE) ? (float)*((const double*)compPtr)
1233 if (colNdx > 0 || rowNdx > 0)
1236 src << de::floatToString(compVal, 1);
// Non-matrix path: emit each scalar component with a type-appropriate literal
// suffix ("f" for float, "u" for uint).
1242 for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
1244 const deUint8* compPtr = elemPtr + scalarNdx * compSize;
1251 case glu::TYPE_DOUBLE: src << de::floatToString((float)(*((const double*)compPtr)), 1); break;
1252 case glu::TYPE_FLOAT: src << de::floatToString(*((const float*)compPtr), 1) << "f"; break;
1253 case glu::TYPE_INT: src << *((const int*)compPtr); break;
1254 case glu::TYPE_UINT: src << *((const deUint32*)compPtr) << "u"; break;
1255 default: DE_ASSERT(false && "Invalid type"); break;
1263 return static_cast<int>(elemPtr - static_cast<const deUint8*>(basePtr));
// Emits GLSL assignments that write a matrix member piecewise:
// - 'vector' true: one assignment per column ("name[col] = vecN(...)"),
// - 'vector' false: one assignment per component ("name[col][row] = ...").
// Values come from the host-side reference data at basePtr.
// NOTE(review): the rowCount/typeName parameters and the 'vector' flag are
// declared on lines not visible in this view — confirm exact signature.
1266 void writeMatrixTypeSrc (int columnCount,
1269 std::ostringstream& src,
1270 const std::string& srcName,
1271 const void* basePtr,
1272 const InterfaceLayoutEntry& entry,
1275 if (vector) // generateTestSrcMatrixPerVec
1277 for (int colNdx = 0; colNdx < columnCount; colNdx++)
1279 src << "\t" << srcName << "[" << colNdx << "] = ";
1281 if (glu::isDataTypeMatrix(entry.type))
1283 const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
1284 const int scalarSize = glu::getDataTypeScalarSize(entry.type);
1285 const deUint8* compPtr = (const deUint8*)basePtr + entry.offset;
1290 for (int rowNdx = 0; rowNdx < rowCount; rowNdx++)
1292 const float compVal = (scalarType == glu::TYPE_FLOAT) ? *((const float*)compPtr)
1293 : (scalarType == glu::TYPE_DOUBLE) ? (float)*((const double*)compPtr)
1296 src << de::floatToString(compVal, 1);
1298 if (rowNdx < rowCount-1)
// Non-matrix entry: emit the whole constructor value and index a column.
1306 generateValueSrc(src, entry, basePtr, 0);
1307 src << "[" << colNdx << "];\n";
1311 else // generateTestSrcMatrixPerElement
1313 const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
1315 for (int colNdx = 0; colNdx < columnCount; colNdx++)
1317 for (int rowNdx = 0; rowNdx < rowCount; rowNdx++)
1319 src << "\t" << srcName << "[" << colNdx << "][" << rowNdx << "] = ";
1320 if (glu::isDataTypeMatrix(entry.type))
1322 const deUint8* elemPtr = (const deUint8*)basePtr + entry.offset;
1323 const size_t compSize = getDataTypeByteSize(scalarType);
// Component address: column via matrixStride, row via component size.
1324 const deUint8* compPtr = elemPtr + (colNdx * entry.matrixStride + rowNdx * compSize);
1325 const float compVal = (scalarType == glu::TYPE_FLOAT) ? *((const float*)compPtr)
1326 : (scalarType == glu::TYPE_DOUBLE) ? (float)*((const double*)compPtr)
1329 src << de::floatToString(compVal, 1) << ";\n";
1333 generateValueSrc(src, entry, basePtr, 0);
1334 src << "[" << colNdx << "][" << rowNdx << "];\n";
1341 void generateTestSrcMatrixPerVec (std::ostringstream& src,
1342 glu::DataType elementType,
1343 const std::string& srcName,
1344 const void* basePtr,
1345 const InterfaceLayoutEntry& entry)
1347 switch (elementType)
1349 case glu::TYPE_FLOAT_MAT2: writeMatrixTypeSrc(2, 2, "vec2", src, srcName, basePtr, entry, true); break;
1350 case glu::TYPE_FLOAT_MAT2X3: writeMatrixTypeSrc(2, 3, "vec3", src, srcName, basePtr, entry, true); break;
1351 case glu::TYPE_FLOAT_MAT2X4: writeMatrixTypeSrc(2, 4, "vec4", src, srcName, basePtr, entry, true); break;
1352 case glu::TYPE_FLOAT_MAT3X4: writeMatrixTypeSrc(3, 4, "vec4", src, srcName, basePtr, entry, true); break;
1353 case glu::TYPE_FLOAT_MAT4: writeMatrixTypeSrc(4, 4, "vec4", src, srcName, basePtr, entry, true); break;
1354 case glu::TYPE_FLOAT_MAT4X2: writeMatrixTypeSrc(4, 2, "vec2", src, srcName, basePtr, entry, true); break;
1355 case glu::TYPE_FLOAT_MAT4X3: writeMatrixTypeSrc(4, 3, "vec3", src, srcName, basePtr, entry, true); break;
1356 default: DE_ASSERT(false && "Invalid type"); break;
1360 void generateTestSrcMatrixPerElement (std::ostringstream& src,
1361 glu::DataType elementType,
1362 const std::string& srcName,
1363 const void* basePtr,
1364 const InterfaceLayoutEntry& entry)
1366 std::string type = "float";
1367 switch (elementType)
1369 case glu::TYPE_FLOAT_MAT2: writeMatrixTypeSrc(2, 2, type, src, srcName, basePtr, entry, false); break;
1370 case glu::TYPE_FLOAT_MAT2X3: writeMatrixTypeSrc(2, 3, type, src, srcName, basePtr, entry, false); break;
1371 case glu::TYPE_FLOAT_MAT2X4: writeMatrixTypeSrc(2, 4, type, src, srcName, basePtr, entry, false); break;
1372 case glu::TYPE_FLOAT_MAT3X4: writeMatrixTypeSrc(3, 4, type, src, srcName, basePtr, entry, false); break;
1373 case glu::TYPE_FLOAT_MAT4: writeMatrixTypeSrc(4, 4, type, src, srcName, basePtr, entry, false); break;
1374 case glu::TYPE_FLOAT_MAT4X2: writeMatrixTypeSrc(4, 2, type, src, srcName, basePtr, entry, false); break;
1375 case glu::TYPE_FLOAT_MAT4X3: writeMatrixTypeSrc(4, 3, type, src, srcName, basePtr, entry, false); break;
1376 default: DE_ASSERT(false && "Invalid type"); break;
// Emits the assignment of one basic-typed output: either a single full-value
// assignment (LOAD_FULL_MATRIX) or, for matrices, piecewise per-column and
// per-component assignments.
1380 void generateSingleAssignment (std::ostringstream& src,
1381 glu::DataType elementType,
1382 const std::string& srcName,
1383 const void* basePtr,
1384 const InterfaceLayoutEntry& entry,
1385 MatrixLoadFlags matrixLoadFlag)
1387 if (matrixLoadFlag == LOAD_FULL_MATRIX)
// Whole value written with a single constructor expression.
1389 src << "\t" << srcName << " = ";
1390 generateValueSrc(src, entry, basePtr, 0);
// Piecewise path for matrices: both per-column and per-element writes are
// generated (the later writes overwrite the same components with equal values).
1395 if (glu::isDataTypeMatrix(elementType))
1397 generateTestSrcMatrixPerVec (src, elementType, srcName, basePtr, entry);
1398 generateTestSrcMatrixPerElement (src, elementType, srcName, basePtr, entry);
// Recursively emits assignments for one member of an interface block:
// basic types (and arrays of them) are written directly from the layout
// entry; arrays of aggregates and structs recurse per element/member.
// NOTE(review): the blockNdx parameter is declared on a line not visible in
// this view — confirm exact signature.
1403 void generateAssignment (std::ostringstream& src,
1404 const InterfaceLayout& layout,
1405 const VarType& type,
1406 const std::string& srcName,
1407 const std::string& apiName,
1409 const void* basePtr,
1410 MatrixLoadFlags matrixLoadFlag)
1412 if (type.isBasicType() || (type.isArrayType() && type.getElementType().isBasicType()))
1414 // Basic type or array of basic types.
1415 bool isArray = type.isArrayType();
1416 glu::DataType elementType = isArray ? type.getElementType().getBasicType() : type.getBasicType();
1417 std::string fullApiName = std::string(apiName) + (isArray ? "[0]" : ""); // Arrays are always postfixed with [0]
1418 int interfaceLayoutNdx = layout.getInterfaceLayoutIndex(blockNdx, fullApiName);
1419 const InterfaceLayoutEntry& entry = layout.interfaces[interfaceLayoutNdx];
// Arrays of basic types: one assignment per element from the same entry.
1423 for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
1425 src << "\t" << srcName << "[" << elemNdx << "] = ";
1426 generateValueSrc(src, entry, basePtr, elemNdx);
1432 generateSingleAssignment(src, elementType, srcName, basePtr, entry, matrixLoadFlag);
1435 else if (type.isArrayType())
// Array of aggregates: recurse per element with indexed names.
1437 const VarType& elementType = type.getElementType();
1439 for (int elementNdx = 0; elementNdx < type.getArraySize(); elementNdx++)
1441 const std::string op = std::string("[") + de::toString(elementNdx) + "]";
1442 const std::string elementSrcName = std::string(srcName) + op;
1443 const std::string elementApiName = std::string(apiName) + op;
1445 generateAssignment(src, layout, elementType, elementSrcName, elementApiName, blockNdx, basePtr, LOAD_FULL_MATRIX);
// Struct: recurse per member, skipping members flagged as missing/unassigned.
1450 DE_ASSERT(type.isStructType());
1452 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
1454 const StructMember& member = *memberIter;
1455 const std::string op = std::string(".") + member.getName();
1456 const std::string memberSrcName = std::string(srcName) + op;
1457 const std::string memberApiName = std::string(apiName) + op;
1459 if (0 == (member.getFlags() & (FIELD_UNASSIGNED | FIELD_MISSING)))
1460 generateAssignment(src, layout, memberIter->getType(), memberSrcName, memberApiName, blockNdx, basePtr, LOAD_FULL_MATRIX);
1465 void generateAssignment (std::ostringstream& src,
1466 const InterfaceLayout& layout,
1467 const ShaderInterface& shaderInterface,
1468 const std::map<int, void*>& blockPointers,
1469 MatrixLoadFlags matrixLoadFlag)
1471 for (int blockNdx = 0; blockNdx < shaderInterface.getNumInterfaceBlocks(); blockNdx++)
1473 const InterfaceBlock& block = shaderInterface.getInterfaceBlock(blockNdx);
1475 bool hasInstanceName = block.hasInstanceName();
1476 bool isArray = block.isArray();
1477 int numInstances = isArray ? block.getArraySize() : 1;
1478 std::string apiPrefix = hasInstanceName ? block.getBlockName() + "." : std::string("");
1480 DE_ASSERT(!isArray || hasInstanceName);
1482 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
1484 std::string instancePostfix = isArray ? std::string("[") + de::toString(instanceNdx) + "]" : std::string("");
1485 std::string blockInstanceName = block.getBlockName() + instancePostfix;
1486 std::string srcPrefix = hasInstanceName ? block.getInstanceName() + instancePostfix + "." : std::string("");
1487 int blockLayoutNdx = layout.getBlockLayoutIndex(blockNdx, instanceNdx);
1488 void* basePtr = blockPointers.find(blockLayoutNdx)->second;
1490 for (InterfaceBlock::ConstIterator interfaceMemberIter = block.begin(); interfaceMemberIter != block.end(); interfaceMemberIter++)
1492 const InterfaceBlockMember& interfaceMember = *interfaceMemberIter;
1494 if ((interfaceMember.getFlags() & (FIELD_MISSING | FIELD_UNASSIGNED)) == 0)
1496 std::string srcName = srcPrefix + interfaceMember.getName();
1497 std::string apiName = apiPrefix + interfaceMember.getName();
1499 generateAssignment(src, layout, interfaceMember.getType(), srcName, apiName, blockNdx, basePtr, matrixLoadFlag);
// Builds a minimal GLSL 4.50 vertex shader with an empty main(); used as the
// vertex stage when the transform-feedback outputs are produced by the
// geometry stage instead.
1506 std::string generatePassthroughShader ()
1508 std::ostringstream src;
1510 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n";
1513 "void main (void)\n"
// Builds the GLSL source for the stage under test: version header, geometry
// in/out layout (geometry stage only), named struct declarations, all xfb
// "out" interface block declarations, and a main() that assigns every
// reference value. Geometry shaders additionally emit one point primitive.
1520 std::string generateTestShader (const ShaderInterface& shaderInterface, const InterfaceLayout& layout, const std::map<int, void*>& blockPointers, MatrixLoadFlags matrixLoadFlag, TestStageFlags testStageFlags, bool shuffleUniformMembers)
1522 std::ostringstream src;
1523 std::vector<const StructType*> namedStructs;
1525 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n\n";
// Geometry stage: point-in / single-point-out topology.
1527 if (testStageFlags == TEST_STAGE_GEOMETRY)
1529 src << "layout(points) in;\n"
1530 << "layout(points, max_vertices = 1) out;\n\n";
// Struct declarations must precede the blocks that reference them.
1533 shaderInterface.getNamedStructs(namedStructs);
1534 for (std::vector<const StructType*>::const_iterator structIter = namedStructs.begin(); structIter != namedStructs.end(); structIter++)
1535 generateDeclaration(src, **structIter, 0);
1537 for (int blockNdx = 0; blockNdx < shaderInterface.getNumInterfaceBlocks(); blockNdx++)
1539 const InterfaceBlock& block = shaderInterface.getInterfaceBlock(blockNdx);
1541 generateDeclaration(src, blockNdx, block, layout, shuffleUniformMembers);
1545 "void main (void)\n"
// Write every captured output from the host-side reference data.
1548 generateAssignment(src, layout, shaderInterface, blockPointers, matrixLoadFlag);
// Geometry stage must emit a vertex for the captured values to be recorded.
1550 if (testStageFlags == TEST_STAGE_GEOMETRY)
1553 << "\tEmitVertex();\n"
1554 << "\tEndPrimitive();\n";
1562 Move<VkPipeline> makeGraphicsPipeline (const DeviceInterface& vk,
1563 const VkDevice device,
1564 const VkPipelineLayout pipelineLayout,
1565 const VkRenderPass renderPass,
1566 const VkShaderModule vertexModule,
1567 const VkShaderModule geometryModule,
1568 const VkExtent2D renderSize)
1570 const std::vector<VkViewport> viewports (1, makeViewport(renderSize));
1571 const std::vector<VkRect2D> scissors (1, makeRect2D(renderSize));
1572 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo =
1574 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType
1575 DE_NULL, // const void* pNext
1576 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags
1577 0u, // deUint32 vertexBindingDescriptionCount
1578 DE_NULL, // const VkVertexInputBindingDescription* pVertexBindingDescriptions
1579 0u, // deUint32 vertexAttributeDescriptionCount
1580 DE_NULL, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions
1583 return makeGraphicsPipeline(vk, // const DeviceInterface& vk
1584 device, // const VkDevice device
1585 pipelineLayout, // const VkPipelineLayout pipelineLayout
1586 vertexModule, // const VkShaderModule vertexShaderModule
1587 DE_NULL, // const VkShaderModule tessellationControlModule
1588 DE_NULL, // const VkShaderModule tessellationEvalModule
1589 geometryModule, // const VkShaderModule geometryShaderModule
1590 DE_NULL, // const VkShaderModule m_maxGeometryBlocksShaderModule
1591 renderPass, // const VkRenderPass renderPass
1592 viewports, // const std::vector<VkViewport>& viewports
1593 scissors, // const std::vector<VkRect2D>& scissors
1594 VK_PRIMITIVE_TOPOLOGY_POINT_LIST, // const VkPrimitiveTopology topology
1595 0u, // const deUint32 subpass
1596 0u, // const deUint32 patchControlPoints
1597 &vertexInputStateCreateInfo); // const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo
1600 // InterfaceBlockCaseInstance
// Test instance: records the shader's interface-block outputs via transform
// feedback and compares the captured bytes against the host-side reference
// data (m_data), using the precomputed interface layout.
1602 class InterfaceBlockCaseInstance : public vkt::TestInstance
1605 InterfaceBlockCaseInstance (Context& context,
1606 const InterfaceLayout& layout,
1607 const std::map<int, void*>& blockPointers,
1608 const std::vector<deUint8>& data,
1609 const std::vector<VkDeviceSize>& tfBufBindingOffsets,
1610 const std::vector<VkDeviceSize>& tfBufBindingSizes,
1611 const deUint32 locationsRequired,
1612 const TestStageFlags testStageFlags)
1614 virtual ~InterfaceBlockCaseInstance (void);
1615 virtual tcu::TestStatus iterate (void);
// Returns the "geom" module for geometry-stage tests, or a null module.
1618 Move<VkShaderModule> getGeometryShaderModule (const DeviceInterface& vk,
1619 const VkDevice device);
// True when any layout entry uses a double-based type (needs shaderFloat64).
1621 bool usesFloat64 (void);
// Per-entry / whole-capture comparison; returns an error message or "".
1622 std::string validateValue (const InterfaceLayoutEntry& entry, const void* basePtr0, const void* basePtr, const void* receivedBasePtr);
1623 std::string validateValues (const void* recievedDataPtr);
1625 typedef de::SharedPtr<vk::Unique<vk::VkBuffer> > VkBufferSp;
1626 typedef de::SharedPtr<vk::Allocation> AllocationSp;
// References are owned by the InterfaceBlockCase that created this instance.
1628 const InterfaceLayout& m_layout;
1629 const std::vector<deUint8>& m_data;
1630 const DeviceSizeVector& m_tfBufBindingOffsets;
1631 const DeviceSizeVector& m_tfBufBindingSizes;
1632 const std::map<int, void*>& m_blockPointers;
1633 const deUint32 m_locationsRequired;
1634 const TestStageFlags m_testStageFlags;
1635 const VkExtent2D m_imageExtent2D;
// Constructor: stores references to the precomputed test data and checks that
// the device supports everything the generated shaders require; throws
// NotSupportedError otherwise.
1638 InterfaceBlockCaseInstance::InterfaceBlockCaseInstance (Context& ctx,
1639 const InterfaceLayout& layout,
1640 const std::map<int, void*>& blockPointers,
1641 const std::vector<deUint8>& data,
1642 const std::vector<VkDeviceSize>& tfBufBindingOffsets,
1643 const std::vector<VkDeviceSize>& tfBufBindingSizes,
1644 const deUint32 locationsRequired,
1645 const TestStageFlags testStageFlags)
1646 : vkt::TestInstance (ctx)
// NOTE(review): m_layout/m_data initializers are on lines not visible in this
// view — confirm they bind 'layout' and 'data'.
1649 , m_tfBufBindingOffsets (tfBufBindingOffsets)
1650 , m_tfBufBindingSizes (tfBufBindingSizes)
1651 , m_blockPointers (blockPointers)
1652 , m_locationsRequired (locationsRequired)
1653 , m_testStageFlags (testStageFlags)
1654 , m_imageExtent2D (makeExtent2D(256u, 256u))
// Each output location carries up to 4 components.
1656 const deUint32 componentsPerLocation = 4u;
1657 const deUint32 componentsRequired = m_locationsRequired * componentsPerLocation;
1658 const InstanceInterface& vki = m_context.getInstanceInterface();
1659 const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
1660 const VkPhysicalDeviceTransformFeedbackFeaturesEXT& transformFeedbackFeatures = m_context.getTransformFeedbackFeatures();
1661 const VkPhysicalDeviceLimits limits = getPhysicalDeviceProperties(vki, physDevice).limits;
1662 VkPhysicalDeviceTransformFeedbackPropertiesEXT transformFeedbackProperties;
1663 VkPhysicalDeviceProperties2 deviceProperties2;
1665 if (transformFeedbackFeatures.transformFeedback == DE_FALSE)
1666 TCU_THROW(NotSupportedError, "transformFeedback feature is not supported");
// Query EXT transform-feedback properties via the pNext chain.
1668 deMemset(&deviceProperties2, 0, sizeof(deviceProperties2));
1669 deMemset(&transformFeedbackProperties, 0x00, sizeof(transformFeedbackProperties));
1671 deviceProperties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
1672 deviceProperties2.pNext = &transformFeedbackProperties;
1674 transformFeedbackProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT;
1675 transformFeedbackProperties.pNext = DE_NULL;
1677 vki.getPhysicalDeviceProperties2(physDevice, &deviceProperties2);
// Device must support as many xfb buffer bindings and as much capture data
// as the test uses.
1679 if (transformFeedbackProperties.maxTransformFeedbackBuffers < tfBufBindingSizes.size())
1680 TCU_THROW(NotSupportedError, "maxTransformFeedbackBuffers=" + de::toString(transformFeedbackProperties.maxTransformFeedbackBuffers) + " is less than required (" + de::toString(tfBufBindingSizes.size()) + ")");
1682 if (transformFeedbackProperties.maxTransformFeedbackBufferDataSize < m_data.size())
1683 TCU_THROW(NotSupportedError, "maxTransformFeedbackBufferDataSize=" + de::toString(transformFeedbackProperties.maxTransformFeedbackBufferDataSize) + " is less than required (" + de::toString(m_data.size()) + ")");
// Output-component limits are checked for the stage that produces the outputs.
1685 if (m_testStageFlags == TEST_STAGE_VERTEX)
1687 if (limits.maxVertexOutputComponents < componentsRequired)
1688 TCU_THROW(NotSupportedError, "maxVertexOutputComponents=" + de::toString(limits.maxVertexOutputComponents) + " is less than required (" + de::toString(componentsRequired) + ")");
1691 if (m_testStageFlags == TEST_STAGE_GEOMETRY)
1693 if (limits.maxGeometryOutputComponents < componentsRequired)
1694 TCU_THROW(NotSupportedError, "maxGeometryOutputComponents=" + de::toString(limits.maxGeometryOutputComponents) + " is less than required (" + de::toString(componentsRequired) + ")");
// NOTE(review): presumably guarded by usesFloat64() on a line not visible in
// this view — confirm.
1698 m_context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_FLOAT64);
1701 InterfaceBlockCaseInstance::~InterfaceBlockCaseInstance (void)
1705 bool InterfaceBlockCaseInstance::usesFloat64 (void)
1707 for (size_t layoutNdx = 0; layoutNdx< m_layout.interfaces.size(); ++layoutNdx)
1708 if (isDataTypeDoubleType(m_layout.interfaces[layoutNdx].type))
1714 Move<VkShaderModule> InterfaceBlockCaseInstance::getGeometryShaderModule (const DeviceInterface& vk,
1715 const VkDevice device)
1717 if (m_testStageFlags == TEST_STAGE_GEOMETRY)
1718 return createShaderModule(vk, device, m_context.getBinaryCollection().get("geom"), 0u);
1720 return Move<VkShaderModule>();
// Records and submits one point draw with transform feedback active, then
// reads the capture buffer back and compares it against the reference data.
1723 tcu::TestStatus InterfaceBlockCaseInstance::iterate (void)
1725 const DeviceInterface& vk = m_context.getDeviceInterface();
1726 const VkDevice device = m_context.getDevice();
1727 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1728 const VkQueue queue = m_context.getUniversalQueue();
1729 Allocator& allocator = m_context.getDefaultAllocator();
// Pipeline objects: no color attachment (VK_FORMAT_UNDEFINED render pass),
// optional geometry stage.
1731 const Move<VkShaderModule> vertModule (createShaderModule (vk, device, m_context.getBinaryCollection().get("vert"), 0u));
1732 const Move<VkShaderModule> geomModule (getGeometryShaderModule(vk, device));
1733 const Move<VkRenderPass> renderPass (makeRenderPass (vk, device, VK_FORMAT_UNDEFINED));
1734 const Move<VkFramebuffer> framebuffer (makeFramebuffer (vk, device, *renderPass, 0u, DE_NULL, m_imageExtent2D.width, m_imageExtent2D.height));
1735 const Move<VkPipelineLayout> pipelineLayout (makePipelineLayout (vk, device));
1736 const Move<VkPipeline> pipeline (makeGraphicsPipeline (vk, device, *pipelineLayout, *renderPass, *vertModule, *geomModule, m_imageExtent2D));
1737 const Move<VkCommandPool> cmdPool (createCommandPool (vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1738 const Move<VkCommandBuffer> cmdBuffer (allocateCommandBuffer (vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// One host-visible buffer backs all xfb bindings; bindings differ only in
// offset/size.
1740 const VkBufferCreateInfo tfBufCreateInfo = makeBufferCreateInfo(m_data.size(), VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT);
1741 const Move<VkBuffer> tfBuf = createBuffer(vk, device, &tfBufCreateInfo);
1742 const de::MovePtr<Allocation> tfBufAllocation = allocator.allocate(getBufferMemoryRequirements(vk, device, *tfBuf), MemoryRequirement::HostVisible);
1743 const deUint32 tfBufBindingCount = static_cast<deUint32>(m_tfBufBindingOffsets.size());
1744 const std::vector<VkBuffer> tfBufBindings (tfBufBindingCount, *tfBuf);
1746 DE_ASSERT(tfBufBindings.size() == tfBufBindingCount);
1748 VK_CHECK(vk.bindBufferMemory(device, *tfBuf, tfBufAllocation->getMemory(), tfBufAllocation->getOffset()));
// Zero-fill so untouched bytes are deterministic before capture.
1750 deMemset(tfBufAllocation->getHostPtr(), 0, m_data.size());
1751 flushMappedMemoryRange(vk, device, tfBufAllocation->getMemory(), tfBufAllocation->getOffset(), VK_WHOLE_SIZE);
1753 beginCommandBuffer(vk, *cmdBuffer);
1755 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(m_imageExtent2D));
1757 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
1759 vk.cmdBindTransformFeedbackBuffersEXT(*cmdBuffer, 0, tfBufBindingCount, &tfBufBindings[0], &m_tfBufBindingOffsets[0], &m_tfBufBindingSizes[0]);
1761 vk.cmdBeginTransformFeedbackEXT(*cmdBuffer, 0, 0, DE_NULL, DE_NULL);
// Single point is enough: all outputs are written once per vertex.
1763 vk.cmdDraw(*cmdBuffer, 1u, 1u, 0u, 0u);
1765 vk.cmdEndTransformFeedbackEXT(*cmdBuffer, 0, 0, DE_NULL, DE_NULL);
1767 endRenderPass(vk, *cmdBuffer);
1769 endCommandBuffer(vk, *cmdBuffer);
1770 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
// Make device writes visible to the host before validation.
1772 invalidateMappedMemoryRange(vk, device, tfBufAllocation->getMemory(), tfBufAllocation->getOffset(), VK_WHOLE_SIZE);
1774 std::string result = validateValues(tfBufAllocation->getHostPtr());
1776 if (!result.empty())
1777 return tcu::TestStatus::fail(result);
1779 return tcu::TestStatus::pass("Pass");
// Compares one layout entry's captured bytes against the reference data,
// walking array elements, matrix columns and vector components. Returns an
// error message for the first mismatch, or an empty string on success.
// basePtr0 is the start of the whole reference blob (used to derive the byte
// offset into the received capture), basePtr the entry's block start.
1782 std::string InterfaceBlockCaseInstance::validateValue (const InterfaceLayoutEntry& entry, const void* basePtr0, const void* basePtr, const void* receivedBasePtr)
1784 const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
1785 const int scalarSize = glu::getDataTypeScalarSize(entry.type);
1786 const bool isMatrix = glu::isDataTypeMatrix(entry.type);
1787 const int numVecs = isMatrix ? glu::getDataTypeMatrixNumColumns(entry.type) : 1;
1788 const int vecSize = scalarSize / numVecs;
1789 const bool isArray = entry.arraySize > 1;
1790 const size_t compSize = getDataTypeByteSize(scalarType);
1793 DE_ASSERT(scalarSize%numVecs == 0);
1795 for (int elemNdx = 0; elemNdx < entry.arraySize; elemNdx++)
1797 deUint8* elemPtr = (deUint8*)basePtr + entry.offset + (isArray ? elemNdx*entry.arrayStride : 0);
1799 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
1801 deUint8* vecPtr = elemPtr + (isMatrix ? vecNdx*entry.matrixStride : 0);
1803 for (int compNdx = 0; compNdx < vecSize; compNdx++)
1805 const deUint8* compPtr = vecPtr + compSize*compNdx;
// Same byte offset in reference blob and received capture.
1806 const size_t offset = compPtr - (deUint8*)basePtr0;
1807 const deUint8* receivedPtr = (deUint8*)receivedBasePtr + offset;
// Floating-point types use a 0.05 absolute tolerance; integers must match
// exactly.
1811 case glu::TYPE_DOUBLE:
1813 const double expected = *((double*)compPtr);
1814 const double received = *((double*)receivedPtr);
1816 if (deAbs(received - expected) > 0.05)
1817 result = "Mismatch at offset " + de::toString(offset) + " expected " + de::toString(expected) + " received " + de::toString(received);
1821 case glu::TYPE_FLOAT:
1823 const float expected = *((float*)compPtr);
1824 const float received = *((float*)receivedPtr);
1826 if (deAbs(received - expected) > 0.05)
1827 result = "Mismatch at offset " + de::toString(offset) + " expected " + de::toString(expected) + " received " + de::toString(received);
1833 const deInt32 expected = *((deInt32*)compPtr);
1834 const deInt32 received = *((deInt32*)receivedPtr);
1836 if (received != expected)
1837 result = "Mismatch at offset " + de::toString(offset) + " expected " + de::toString(expected) + " received " + de::toString(received);
1841 case glu::TYPE_UINT:
1843 const deUint32 expected = *((deUint32*)compPtr);
1844 const deUint32 received = *((deUint32*)receivedPtr);
1846 if (received != expected)
1847 result = "Mismatch at offset " + de::toString(offset) + " expected " + de::toString(expected) + " received " + de::toString(received);
// Annotate the failing coordinate for easier debugging.
1855 if (!result.empty())
1857 result += " (elemNdx=" + de::toString(elemNdx) + " vecNdx=" + de::toString(vecNdx) + " compNdx=" + de::toString(compNdx) + ")";
// Validates every active interface entry of every block against the received
// capture. On the first failure, logs a detailed report (masked and
// unfiltered hex dumps of expected vs. retrieved bytes plus binding info).
// Returns an empty string on success.
1868 std::string InterfaceBlockCaseInstance::validateValues (const void* recievedDataPtr)
1870 const int numBlocks = (int)m_layout.blocks.size();
1872 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1874 void* basePtr = m_blockPointers.find(blockNdx)->second;
1875 int numEntries = (int)m_layout.blocks[blockNdx].activeInterfaceIndices.size();
1877 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
// Entries flagged as not validatable are skipped.
1879 const InterfaceLayoutEntry& entry = m_layout.interfaces[m_layout.blocks[blockNdx].activeInterfaceIndices[entryNdx]];
1880 const std::string result = entry.validate ? validateValue(entry, &m_data[0], basePtr, recievedDataPtr) : "";
1882 if (!result.empty())
1884 tcu::TestLog& log = m_context.getTestContext().getLog();
// Mask marks bytes that belong to validated entries.
1885 std::vector<deUint8> mask = createMask(m_layout, m_blockPointers, &m_data[0], m_data.size());
1886 std::ostringstream str;
1888 str << "Error at entry '" << entry.name << "' block '" << m_layout.blocks[blockNdx].name << "'" << std::endl;
1889 str << result << std::endl;
1893 str << "Xfb buffer offsets: " << m_tfBufBindingOffsets << std::endl;
1894 str << "Xfb buffer sizes: " << m_tfBufBindingSizes << std::endl << std::endl;
1896 dumpBytes(str, "Expected:", &m_data[0], m_data.size(), &mask[0]);
1897 dumpBytes(str, "Retrieved:", recievedDataPtr, m_data.size(), &mask[0]);
1899 dumpBytes(str, "Expected (unfiltered):", &m_data[0], m_data.size());
1900 dumpBytes(str, "Retrieved (unfiltered):", recievedDataPtr, m_data.size());
1902 log << tcu::TestLog::Message << str.str() << tcu::TestLog::EndMessage;
1909 return std::string();
1912 } // anonymous (utilities)
1914 // InterfaceBlockCase.
1916 InterfaceBlockCase::InterfaceBlockCase (tcu::TestContext& testCtx,
1917 const std::string& name,
1918 const std::string& description,
1919 MatrixLoadFlags matrixLoadFlag,
1920 TestStageFlags testStageFlags,
1921 bool shuffleInterfaceMembers)
1922 : TestCase (testCtx, name, description)
1923 , m_matrixLoadFlag (matrixLoadFlag)
1924 , m_testStageFlags (testStageFlags)
1925 , m_shuffleInterfaceMembers (shuffleInterfaceMembers)
1926 , m_locationsRequired (0)
1930 InterfaceBlockCase::~InterfaceBlockCase (void)
1934 void InterfaceBlockCase::initPrograms (vk::SourceCollections& programCollection) const
1936 DE_ASSERT(!m_vertShaderSource.empty());
1938 programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
1940 if (!m_geomShaderSource.empty())
1941 programCollection.glslSources.add("geom") << glu::GeometrySource(m_geomShaderSource);
1944 TestInstance* InterfaceBlockCase::createInstance (Context& context) const
1946 return new InterfaceBlockCaseInstance(context, m_interfaceLayout, m_blockPointers, m_data, m_tfBufBindingOffsets, m_tfBufBindingSizes, m_locationsRequired, m_testStageFlags);
// Deferred initialization: computes the xfb reference layout, allocates and
// fills the reference data blob, derives per-xfb-buffer binding offsets and
// sizes, validates that no entries overlap, and generates the shader sources.
1949 void InterfaceBlockCase::init (void)
1951 BufferGeneralMapping xfbBufferSize;
1952 std::string notSupportedComment;
1954 // Compute reference layout.
1955 computeXfbLayout(m_interfaceLayout, m_interface, xfbBufferSize, m_locationsRequired);
1957 // Assign storage for reference values.
1958 // m_data contains all xfb buffers starting with all interfaces of first xfb_buffer, then all interfaces of next xfb_buffer
1960 BufferGeneralMapping xfbBufferOffsets;
// Lay the xfb buffers out back-to-back inside m_data, tracking the highest
// used xfb buffer index.
1964 for (BufferGeneralMapping::const_iterator xfbBuffersIter = xfbBufferSize.begin(); xfbBuffersIter != xfbBufferSize.end(); xfbBuffersIter++)
1966 xfbBufferOffsets[xfbBuffersIter->first] = totalSize;
1967 totalSize += xfbBuffersIter->second;
1968 maxXfb = std::max(maxXfb, xfbBuffersIter->first);
1970 m_data.resize(totalSize);
1972 DE_ASSERT(de::inBounds(maxXfb, 0, 256)); // Not correlated with spec: just make sure vectors won't be huge
// Binding arrays are indexed by xfb buffer number, hence maxXfb+1 entries.
1974 m_tfBufBindingSizes.resize(maxXfb + 1);
1975 for (BufferGeneralMapping::const_iterator xfbBuffersIter = xfbBufferSize.begin(); xfbBuffersIter != xfbBufferSize.end(); xfbBuffersIter++)
1976 m_tfBufBindingSizes[xfbBuffersIter->first] = xfbBuffersIter->second;
1978 m_tfBufBindingOffsets.resize(maxXfb + 1);
1979 for (BufferGeneralMapping::const_iterator xfbBuffersIter = xfbBufferOffsets.begin(); xfbBuffersIter != xfbBufferOffsets.end(); xfbBuffersIter++)
1980 m_tfBufBindingOffsets[xfbBuffersIter->first] = xfbBuffersIter->second;
1982 // Pointers for each block.
1983 for (int blockNdx = 0; blockNdx < (int)m_interfaceLayout.blocks.size(); blockNdx++)
1985 const int dataXfbBufferStartOffset = xfbBufferOffsets[m_interfaceLayout.blocks[blockNdx].xfbBuffer];
1986 const int offset = dataXfbBufferStartOffset + m_interfaceLayout.blocks[blockNdx].xfbOffset;
1988 m_blockPointers[blockNdx] = &m_data[0] + offset;
// Fixed seed keeps the reference data reproducible across runs.
1993 generateValues(m_interfaceLayout, m_blockPointers, 1 /* seed */);
1995 // Overlap validation
// Every byte of the reference blob must be claimed by at most one entry.
1997 std::vector<deUint8> mask = createMask(m_interfaceLayout, m_blockPointers, &m_data[0], m_data.size());
1999 for (size_t maskNdx = 0; maskNdx < mask.size(); ++maskNdx)
2000 DE_ASSERT(mask[maskNdx] <= 1);
// Vertex-stage tests capture directly from the vertex shader; geometry-stage
// tests pair a passthrough vertex shader with the generated geometry shader.
2003 if (m_testStageFlags == TEST_STAGE_VERTEX)
2005 m_vertShaderSource = generateTestShader(m_interface, m_interfaceLayout, m_blockPointers, m_matrixLoadFlag, m_testStageFlags, m_shuffleInterfaceMembers);
2006 m_geomShaderSource = "";
2008 else if (m_testStageFlags == TEST_STAGE_GEOMETRY)
2010 m_vertShaderSource = generatePassthroughShader();
2011 m_geomShaderSource = generateTestShader(m_interface, m_interfaceLayout, m_blockPointers, m_matrixLoadFlag, m_testStageFlags, m_shuffleInterfaceMembers);
2015 DE_ASSERT(false && "Unknown test stage specified");
2019 } // TransformFeedback