c_m23: offset 16, type 8b67, size 1, index 2\r
c_scalarAfterm23: offset 64, type 1404, size 1, index 2\r
scalarBeforeArray: offset 96, type 1404, size 1, index 0\r
-floatArray: offset 112, type 1406, size 3, index 0\r
+floatArray: offset 112, type 1406, size 5, index 0\r
scalarAfterArray: offset 192, type 1404, size 1, index 0\r
ablock.memvec2: offset 48, type 8b50, size 1, index 1\r
ablock.memf1: offset 56, type 1406, size 1, index 1\r
nest.foo.n2.b: offset 16, type 1406, size 1, index 3\r
nest.foo.n2.c: offset 20, type 1406, size 1, index 3\r
nest.foo.n2.d: offset 24, type 1406, size 1, index 3\r
+deepA[0].d2.d1[2].va: offset -1, type 8b50, size 2, index -1\r
+deepA[1].d2.d1[2].va: offset -1, type 8b50, size 2, index -1\r
+deepB[1].d2.d1[0].va: offset -1, type 8b50, size 2, index -1\r
+deepB[1].d2.d1[1].va: offset -1, type 8b50, size 2, index -1\r
+deepB[1].d2.d1[2].va: offset -1, type 8b50, size 2, index -1\r
+deepB[1].d2.d1[3].va: offset -1, type 8b50, size 2, index -1\r
+deepB[0].d2.d1[0].va: offset -1, type 8b50, size 2, index -1\r
+deepB[0].d2.d1[1].va: offset -1, type 8b50, size 2, index -1\r
+deepB[0].d2.d1[2].va: offset -1, type 8b50, size 2, index -1\r
+deepB[0].d2.d1[3].va: offset -1, type 8b50, size 2, index -1\r
+deepC[1].iv4: offset -1, type 8b52, size 1, index -1\r
+deepC[1].d2.i: offset -1, type 1404, size 1, index -1\r
+deepC[1].d2.d1[0].va: offset -1, type 8b50, size 3, index -1\r
+deepC[1].d2.d1[0].b: offset -1, type 8b56, size 1, index -1\r
+deepC[1].d2.d1[1].va: offset -1, type 8b50, size 3, index -1\r
+deepC[1].d2.d1[1].b: offset -1, type 8b56, size 1, index -1\r
+deepC[1].d2.d1[2].va: offset -1, type 8b50, size 3, index -1\r
+deepC[1].d2.d1[2].b: offset -1, type 8b56, size 1, index -1\r
+deepC[1].d2.d1[3].va: offset -1, type 8b50, size 3, index -1\r
+deepC[1].d2.d1[3].b: offset -1, type 8b56, size 1, index -1\r
+deepC[1].v3: offset -1, type 8b54, size 1, index -1\r
+deepD[0].iv4: offset -1, type 8b52, size 1, index -1\r
+deepD[0].d2.i: offset -1, type 1404, size 1, index -1\r
+deepD[0].d2.d1[0].va: offset -1, type 8b50, size 3, index -1\r
+deepD[0].d2.d1[0].b: offset -1, type 8b56, size 1, index -1\r
+deepD[0].d2.d1[1].va: offset -1, type 8b50, size 3, index -1\r
+deepD[0].d2.d1[1].b: offset -1, type 8b56, size 1, index -1\r
+deepD[0].d2.d1[2].va: offset -1, type 8b50, size 3, index -1\r
+deepD[0].d2.d1[2].b: offset -1, type 8b56, size 1, index -1\r
+deepD[0].d2.d1[3].va: offset -1, type 8b50, size 3, index -1\r
+deepD[0].d2.d1[3].b: offset -1, type 8b56, size 1, index -1\r
+deepD[0].v3: offset -1, type 8b54, size 1, index -1\r
+deepD[1].iv4: offset -1, type 8b52, size 1, index -1\r
+deepD[1].d2.i: offset -1, type 1404, size 1, index -1\r
+deepD[1].d2.d1[0].va: offset -1, type 8b50, size 3, index -1\r
+deepD[1].d2.d1[0].b: offset -1, type 8b56, size 1, index -1\r
+deepD[1].d2.d1[1].va: offset -1, type 8b50, size 3, index -1\r
+deepD[1].d2.d1[1].b: offset -1, type 8b56, size 1, index -1\r
+deepD[1].d2.d1[2].va: offset -1, type 8b50, size 3, index -1\r
+deepD[1].d2.d1[2].b: offset -1, type 8b56, size 1, index -1\r
+deepD[1].d2.d1[3].va: offset -1, type 8b50, size 3, index -1\r
+deepD[1].d2.d1[3].b: offset -1, type 8b56, size 1, index -1\r
+deepD[1].v3: offset -1, type 8b54, size 1, index -1\r
anonMember1: offset 0, type 8b51, size 1, index 0\r
uf1: offset -1, type 1406, size 1, index -1\r
uf2: offset -1, type 1406, size 1, index -1\r
\r
Uniform block reflection:\r
nameless: offset -1, type ffffffff, size 496, index -1\r
-ablock: offset -1, type ffffffff, size 304, index -1\r
+named: offset -1, type ffffffff, size 304, index -1\r
c_nameless: offset -1, type ffffffff, size 112, index -1\r
-nest: offset -1, type ffffffff, size 28, index -1\r
+nested: offset -1, type ffffffff, size 28, index -1\r
\r
}\r
}\r
\r
- // Add a simple uniform variable reference to the uniform database, no dereference involved.
- void addUniform(const TIntermSymbol& symbol)
+ // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
+ // However, no dereference doesn't mean simple... it could be a complex aggregate.
+ void addUniform(const TIntermSymbol& base)
{
- if (reflection.nameToIndex.find(symbol.getName()) == reflection.nameToIndex.end()) {
- if (isReflectionGranularity(symbol.getType())) {
- reflection.nameToIndex[symbol.getName()] = reflection.indexToUniform.size();
- reflection.indexToUniform.push_back(TObjectReflection(symbol.getName(), -1, mapToGlType(symbol.getType()), mapToGlArraySize(symbol.getType()), -1));
- }
+ if (processedDerefs.find(&base) == processedDerefs.end()) {
+ processedDerefs.insert(&base);
+
+ // Use a degenerate (empty) set of dereferences to immediately put us at the end of
+ // the dereference chain expected by blowUpActiveAggregate.
+ TList<TIntermBinary*> derefs;
+ blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0);
}
}
\r
return size;\r
}\r
\r
+ // Traverse the provided deref chain, including the base, and
+ // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
+ // - recursively expand any variable array index in the middle of that traversal
+ // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
+ //
+ // arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
+ // A value of 0 for arraySize will mean to use the full array's size.
+ void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs, 
+ TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize)
+ {
+ TString name = baseName;
+ const TType* terminalType = &baseType;
+ for (; deref != derefs.end(); ++deref) {
+ TIntermBinary* visitNode = *deref;
+ terminalType = &visitNode->getType();
+ int index;
+ switch (visitNode->getOp()) {
+ case EOpIndexIndirect:
+ // Visit all the indices of this array, and for each one, then add on the remaining dereferencing
+ for (int i = 0; i < visitNode->getLeft()->getType().getArraySize(); ++i) {
+ TString newBaseName = name;
+ newBaseName.append(TString("[") + String(i) + "]");
+ TList<TIntermBinary*>::const_iterator nextDeref = deref;
+ ++nextDeref;
+ TType derefType(*terminalType, 0);
+ blowUpActiveAggregate(derefType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize);
+ }
+
+ // it was all completed in the recursive calls above
+ return;
+ case EOpIndexDirect:
+ index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ name.append(TString("[") + String(index) + "]");
+ break;
+ case EOpIndexDirectStruct:
+ index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (offset >= 0)
+ offset += getBlockMemberOffset(visitNode->getLeft()->getType(), index);
+ if (name.size() > 0)
+ name.append(".");
+ name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
+ break;
+ default:
+ break;
+ }
+ }
+ 
+ // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
+ if (! isReflectionGranularity(*terminalType)) {
+ if (terminalType->isArray()) {
+ // Visit all the indices of this array, and for each one, 
+ // fully explode the remaining aggregate to dereference
+ for (int i = 0; i < terminalType->getArraySize(); ++i) {
+ TString newBaseName = name;
+ newBaseName.append(TString("[") + String(i) + "]");
+ TType derefType(*terminalType, 0);
+ blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
+ }
+ } else {
+ // Visit all members of this aggregate, and for each one, 
+ // fully explode the remaining aggregate to dereference
+ const TTypeList& typeList = *terminalType->getStruct();
+ for (size_t i = 0; i < typeList.size(); ++i) {
+ TString newBaseName = name;
+ newBaseName.append(TString(".") + typeList[i].type->getFieldName());
+ TType derefType(*terminalType, i);
+ blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
+ }
+ }
+
+ // it was all completed in the recursive calls above
+ return;
+ }
+
+ // Finally, add a full string to the reflection database, and update the array size if necessary.
+ // If the dereferenced entity to record is an array, compute the size and update the maximum size.
+
+ // there might not be a final array dereference, it could have been copied as an array object
+ if (arraySize == 0)
+ arraySize = mapToGlArraySize(*terminalType);
+
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
+ if (it == reflection.nameToIndex.end()) {
+ reflection.nameToIndex[name] = reflection.indexToUniform.size(); 
+ reflection.indexToUniform.push_back(TObjectReflection(name, offset, mapToGlType(*terminalType), arraySize, blockIndex));
+ } else if (arraySize > 1) {
+ int& reflectedArraySize = reflection.indexToUniform[it->second].size;
+ reflectedArraySize = std::max(arraySize, reflectedArraySize);
+ }
+ }
+\r
// Add a uniform dereference where blocks/struct/arrays are involved in the access.\r
// Handles the situation where the left node is at the correct or too coarse a\r
// granularity for reflection. (That is, further dereferences up the tree will be \r
if (block) {\r
// TODO: how is an array of blocks handled differently?\r
anonymous = base->getName().compare(0, 6, "__anon") == 0;\r
- const TString& blockName = anonymous ? base->getType().getTypeName() : base->getName();\r
+ const TString& blockName = anonymous ? base->getType().getTypeName() : base->getType().getTypeName();\r
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(blockName);\r
if (it == reflection.nameToIndex.end()) {\r
blockIndex = reflection.indexToUniformBlock.size();\r
reflection.indexToUniformBlock.push_back(TObjectReflection(blockName, offset, -1, getBlockSize(base->getType()), -1));\r
} else\r
blockIndex = it->second;\r
+ offset = 0;\r
}\r
\r
- // If the derefenced entity to record is an array, note the maximum array size.\r
- int maxArraySize;\r
- const TType* reflectionType;\r
- if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {\r
- reflectionType = &topNode->getLeft()->getType();\r
- switch (topNode->getOp()) {\r
- case EOpIndexIndirect:\r
- maxArraySize = topNode->getLeft()->getType().getArraySize();\r
- break;\r
- case EOpIndexDirect:\r
- maxArraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;\r
- break;\r
- default:\r
- assert(0);\r
- maxArraySize = 1;\r
- break;\r
- }\r
- } else {\r
- reflectionType = &topNode->getType();\r
- maxArraySize = 1;\r
- }\r
-\r
- // TODO: fully expand a partially dereferenced aggregate\r
-\r
- // Process the dereference chain, backward, accumulating the pieces on a stack.\r
- // If the topNode is a simple array dereference, don't include that.\r
- if (block)\r
- offset = 0;\r
- std::list<TString> derefs;\r
+ // Process the dereference chain, backward, accumulating the pieces for later forward traversal.\r
+ // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.\r
+ TList<TIntermBinary*> derefs;\r
for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {\r
+ if (isReflectionGranularity(visitNode->getLeft()->getType()))\r
+ continue;\r
+\r
+ derefs.push_front(visitNode);\r
processedDerefs.insert(visitNode);\r
- int index;\r
- switch (visitNode->getOp()) {\r
- case EOpIndexIndirect:\r
- // TODO handle indirect references in mid-chain: enumerate all possibilities?\r
- if (! isReflectionGranularity(visitNode->getLeft()->getType()))\r
- derefs.push_back(TString("[") + String(0) + "]");\r
- break;\r
- case EOpIndexDirect:\r
- if (! isReflectionGranularity(visitNode->getLeft()->getType())) {\r
- index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();\r
- derefs.push_back(TString("[") + String(index) + "]");\r
- }\r
- break;\r
- case EOpIndexDirectStruct:\r
- index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();\r
- if (block)\r
- offset += getBlockMemberOffset(visitNode->getLeft()->getType(), index);\r
- derefs.push_back(TString(""));\r
- if (visitNode->getLeft()->getAsSymbolNode() != base || ! anonymous)\r
- derefs.back().append(".");\r
- derefs.back().append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName().c_str());\r
- break;\r
- default:\r
- break;\r
- }\r
}\r
+ processedDerefs.insert(base);\r
\r
- // Put the dereference chain together, forward (reversing the stack)\r
- TString name;\r
- if (! anonymous)\r
- name = base->getName();\r
- while (! derefs.empty()) {\r
- name += derefs.back();\r
- derefs.pop_back();\r
+ // See if we have a specific array size to stick to while enumerating the explosion of the aggregate\r
+ int arraySize = 0;\r
+ if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {\r
+ if (topNode->getOp() == EOpIndexDirect)\r
+ arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;\r
}\r
\r
- if (name.size() > 0) {\r
- if (reflection.nameToIndex.find(name) == reflection.nameToIndex.end()) {\r
- reflection.nameToIndex[name] = reflection.indexToUniform.size(); \r
- reflection.indexToUniform.push_back(TObjectReflection(name, offset, mapToGlType(*reflectionType), maxArraySize, blockIndex));\r
- }\r
- }\r
+ // Put the dereference chain together, forward\r
+ TString baseName;\r
+ if (! anonymous)\r
+ baseName = base->getName();\r
+ blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize);\r
}\r
\r
//\r
// Return 0 if the topology does not fit this situation.\r
TIntermSymbol* findBase(const TIntermBinary* node)\r
{\r
- TIntermSymbol *symbol = node->getLeft()->getAsSymbolNode();\r
- if (symbol)\r
- return symbol;\r
+ TIntermSymbol *base = node->getLeft()->getAsSymbolNode();\r
+ if (base)\r
+ return base;\r
TIntermBinary* left = node->getLeft()->getAsBinaryNode();\r
if (! left)\r
return 0;\r
TFunctionStack functions;\r
const TIntermediate& intermediate;\r
TReflection& reflection;\r
- std::set<TIntermNode*> processedDerefs;\r
+ std::set<const TIntermNode*> processedDerefs;\r
};\r
\r
const int TLiveTraverser::baseAlignmentVec4Std140 = 16;\r
}\r
\r
// To reflect non-dereferenced objects.\r
-void LiveSymbol(TIntermSymbol* symbol, TIntermTraverser* it)\r
+void LiveSymbol(TIntermSymbol* base, TIntermTraverser* it)\r
{\r
TLiveTraverser* oit = static_cast<TLiveTraverser*>(it);\r
\r
- if (symbol->getQualifier().storage == EvqUniform)\r
- oit->addUniform(*symbol);\r
+ if (base->getQualifier().storage == EvqUniform)\r
+ oit->addUniform(*base);\r
}\r
\r
// To prune semantically dead paths.\r