// POSSIBILITY OF SUCH DAMAGE.
//
-#ifndef GLSLANG_WEB
+#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
#include "../Include/Common.h"
#include "reflection.h"
// This is in the glslang namespace directly so it can be a friend of TReflection.
//
-class TReflectionTraverser : public TLiveTraverser {
+class TReflectionTraverser : public TIntermTraverser {
public:
TReflectionTraverser(const TIntermediate& i, TReflection& r) :
- TLiveTraverser(i), reflection(r) { }
+ TIntermTraverser(), intermediate(i), reflection(r), updateStageMasks(true) { }
virtual bool visitBinary(TVisit, TIntermBinary* node);
virtual void visitSymbol(TIntermSymbol* base);
if (processedDerefs.find(&base) == processedDerefs.end()) {
processedDerefs.insert(&base);
+ int blockIndex = -1;
+ int offset = -1;
+ TList<TIntermBinary*> derefs;
+ TString baseName = base.getName();
+
+ if (base.getType().getBasicType() == EbtBlock) {
+ offset = 0;
+ bool anonymous = IsAnonymous(baseName);
+ const TString& blockName = base.getType().getTypeName();
+
+ if (!anonymous)
+ baseName = blockName;
+ else
+ baseName = "";
+
+ blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
+ }
+
// Use a degenerate (empty) set of dereferences to immediately put as at the end of
// the dereference change expected by blowUpActiveAggregate.
- TList<TIntermBinary*> derefs;
- blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0, 0,
- base.getQualifier().storage, true);
+ blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, -1, 0,
+ base.getQualifier().storage, updateStageMasks);
}
}
if (it == ioMapper.end()) {
// seperate pipe i/o params from uniforms and blocks
// in is only for input in first stage as out is only for last stage. check traverse in call stack.
- ioMapper[name.c_str()] = ioItems.size();
+ ioMapper[name.c_str()] = static_cast<int>(ioItems.size());
ioItems.push_back(
TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
EShLanguageMask& stages = ioItems.back().stages;
void getOffsets(const TType& type, TVector<int>& offsets)
{
const TTypeList& memberList = *type.getStruct();
-
int memberSize = 0;
int offset = 0;
+
for (size_t m = 0; m < offsets.size(); ++m) {
// if the user supplied an offset, snap to it now
if (memberList[m].type->getQualifier().hasOffset())
// A value of 0 for arraySize will mean to use the full array's size.
void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
- int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
+ int topLevelArraySize, int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
{
// when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
// Broadly:
// Visit all the indices of this array, and for each one add on the remaining dereferencing
for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
TString newBaseName = name;
- if (strictArraySuffix && blockParent)
+ if (terminalType->getBasicType() == EbtBlock) {}
+ else if (strictArraySuffix && blockParent)
newBaseName.append(TString("[0]"));
else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
newBaseName.append(TString("[") + String(i) + "]");
TList<TIntermBinary*>::const_iterator nextDeref = deref;
++nextDeref;
blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
- topLevelArrayStride, baseStorage, active);
+ topLevelArraySize, topLevelArrayStride, baseStorage, active);
if (offset >= 0)
offset += stride;
int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
- if (strictArraySuffix && blockParent) {
+ if (terminalType->getBasicType() == EbtBlock) {}
+ else if (strictArraySuffix && blockParent)
name.append(TString("[0]"));
- } else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
+ else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
name.append(TString("[") + String(index) + "]");
if (offset >= 0)
if (topLevelArrayStride == 0)
topLevelArrayStride = stride;
- blockParent = false;
+ // expand top-level arrays in blocks with [0] suffix
+ if (topLevelArrayStride != 0 && visitNode->getLeft()->getType().isArray()) {
+ blockParent = false;
+ }
break;
}
case EOpIndexDirectStruct:
if (name.size() > 0)
name.append(".");
name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
+
+ // expand non top-level arrays with [x] suffix
+ if (visitNode->getLeft()->getType().getBasicType() != EbtBlock && terminalType->isArray())
+ {
+ blockParent = false;
+ }
break;
default:
break;
if (offset >= 0)
stride = getArrayStride(baseType, *terminalType);
- if (topLevelArrayStride == 0)
- topLevelArrayStride = stride;
-
int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
// for top-level arrays in blocks, only expand [0] to avoid explosion of items
- if (strictArraySuffix && blockParent)
+ if ((strictArraySuffix && blockParent) ||
+ ((topLevelArraySize == arrayIterateSize) && (topLevelArrayStride == 0))) {
arrayIterateSize = 1;
+ }
+
+ if (topLevelArrayStride == 0)
+ topLevelArrayStride = stride;
for (int i = 0; i < arrayIterateSize; ++i) {
TString newBaseName = name;
- newBaseName.append(TString("[") + String(i) + "]");
+ if (terminalType->getBasicType() != EbtBlock)
+ newBaseName.append(TString("[") + String(i) + "]");
TType derefType(*terminalType, 0);
if (offset >= 0)
offset = baseOffset + stride * i;
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
- topLevelArrayStride, baseStorage, active);
+ topLevelArraySize, topLevelArrayStride, baseStorage, active);
}
} else {
// Visit all members of this aggregate, and for each one,
arrayStride = getArrayStride(baseType, derefType);
}
- blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
- arrayStride, baseStorage, active);
+ if (topLevelArraySize == -1 && arrayStride == 0 && blockParent)
+ topLevelArraySize = 1;
+
+ if (strictArraySuffix && blockParent) {
+ // if this member is an array, store the top-level array stride but start the explosion from
+ // the inner struct type.
+ if (derefType.isArray() && derefType.isStruct()) {
+ newBaseName.append("[0]");
+ auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
+ blowUpActiveAggregate(TType(derefType, 0), newBaseName, derefs, derefs.end(), memberOffsets[i],
+ blockIndex, 0, dimSize, arrayStride, terminalType->getQualifier().storage, false);
+ }
+ else if (derefType.isArray()) {
+ auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
+ blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
+ 0, dimSize, 0, terminalType->getQualifier().storage, false);
+ }
+ else {
+ blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
+ 0, 1, 0, terminalType->getQualifier().storage, false);
+ }
+ } else {
+ blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+ topLevelArraySize, arrayStride, baseStorage, active);
+ }
}
}
if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
reflection.atomicCounterUniformIndices.push_back(uniformIndex);
+ variables.back().topLevelArraySize = topLevelArraySize;
variables.back().topLevelArrayStride = topLevelArrayStride;
if ((reflection.options & EShReflectionAllBlockVariables) && active) {
if (! anonymous)
baseName = blockName;
- if (base->getType().isArray()) {
- TType derefType(base->getType(), 0);
-
- assert(! anonymous);
- for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
- blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
- intermediate.getBlockSize(base->getType()));
- baseName.append(TString("[0]"));
- } else
- blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
+ blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
if (reflection.options & EShReflectionAllBlockVariables) {
// Use a degenerate (empty) set of dereferences to immediately put as at the end of
// the dereference change expected by blowUpActiveAggregate.
TList<TIntermBinary*> derefs;
- // because we don't have any derefs, the first thing blowUpActiveAggregate will do is iterate over each
- // member in the struct definition. This will lose any information about whether the parent was a buffer
- // block. So if we're using strict array rules which don't expand the first child of a buffer block we
- // instead iterate over the children here.
- const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
- bool blockParent = (base->getType().getBasicType() == EbtBlock && base->getQualifier().storage == EvqBuffer);
-
- if (strictArraySuffix && blockParent) {
- TType structDerefType(base->getType(), 0);
-
- const TType &structType = base->getType().isArray() ? structDerefType : base->getType();
- const TTypeList& typeList = *structType.getStruct();
-
- TVector<int> memberOffsets;
-
- memberOffsets.resize(typeList.size());
- getOffsets(structType, memberOffsets);
-
- for (int i = 0; i < (int)typeList.size(); ++i) {
- TType derefType(structType, i);
- TString name = baseName;
- if (name.size() > 0)
- name.append(".");
- name.append(typeList[i].type->getFieldName());
-
- // if this member is an array, store the top-level array stride but start the explosion from
- // the inner struct type.
- if (derefType.isArray() && derefType.isStruct()) {
- name.append("[0]");
- blowUpActiveAggregate(TType(derefType, 0), name, derefs, derefs.end(), memberOffsets[i],
- blockIndex, 0, getArrayStride(structType, derefType),
- base->getQualifier().storage, false);
- } else {
- blowUpActiveAggregate(derefType, name, derefs, derefs.end(), memberOffsets[i], blockIndex,
- 0, 0, base->getQualifier().storage, false);
- }
- }
- } else {
- // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
- // expanding root arrays anyway, just start the iteration from the base block type.
- blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, 0,
+ // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
+ // expanding root arrays anyway, just start the iteration from the base block type.
+ blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, -1, 0,
base->getQualifier().storage, false);
- }
}
}
else
baseName = base->getName();
}
- blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, 0,
+ blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, -1, 0,
base->getQualifier().storage, true);
}
int addBlockName(const TString& name, const TType& type, int size)
{
- TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
+ int blockIndex = 0;
+ if (type.isArray()) {
+ TType derefType(type, 0);
+ for (int e = 0; e < type.getOuterArraySize(); ++e) {
+ int memberBlockIndex = addBlockName(name + "[" + String(e) + "]", derefType, size);
+ if (e == 0)
+ blockIndex = memberBlockIndex;
+ }
+ } else {
+ TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
- int blockIndex;
- TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
- if (reflection.nameToIndex.find(name.c_str()) == reflection.nameToIndex.end()) {
- blockIndex = (int)blocks.size();
- reflection.nameToIndex[name.c_str()] = blockIndex;
- blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, -1));
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+ if (reflection.nameToIndex.find(name.c_str()) == reflection.nameToIndex.end()) {
+ blockIndex = (int)blocks.size();
+ reflection.nameToIndex[name.c_str()] = blockIndex;
+ blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, blockIndex));
- blocks.back().numMembers = countAggregateMembers(type);
+ blocks.back().numMembers = countAggregateMembers(type);
- EShLanguageMask& stages = blocks.back().stages;
- stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
- } else {
- blockIndex = it->second;
-
- EShLanguageMask& stages = blocks[blockIndex].stages;
- stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ if (updateStageMasks) {
+ EShLanguageMask& stages = blocks.back().stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ }
+ else {
+ blockIndex = it->second;
+ if (updateStageMasks) {
+ EShLanguageMask& stages = blocks[blockIndex].stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ }
}
return blockIndex;
case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
case EbtInt: return GL_INT_VEC2 + offset;
case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
- case EbtInt64: return GL_INT64_ARB + offset;
- case EbtUint64: return GL_UNSIGNED_INT64_ARB + offset;
+ case EbtInt64: return GL_INT64_VEC2_ARB + offset;
+ case EbtUint64: return GL_UNSIGNED_INT64_VEC2_ARB + offset;
case EbtBool: return GL_BOOL_VEC2 + offset;
case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
default: return 0;
return type.isArray() ? type.getOuterArraySize() : 1;
}
+ const TIntermediate& intermediate;
TReflection& reflection;
std::set<const TIntermNode*> processedDerefs;
+ bool updateStageMasks;
protected:
TReflectionTraverser(TReflectionTraverser&);
// To reflect non-dereferenced objects.
void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
{
- if (base->getQualifier().storage == EvqUniform)
+ if (base->getQualifier().storage == EvqUniform) {
+ if (base->getBasicType() == EbtBlock) {
+ if (reflection.options & EShReflectionSharedStd140UBO) {
+ addUniform(*base);
+ }
+ } else {
+ addUniform(*base);
+ }
+ }
+
+ // TODO: add std140/shared layout "active" rules for SSBOs, same as for UBOs.
+ // Storage buffer blocks will be collected and expanding in this part.
+ if((reflection.options & EShReflectionSharedStd140SSBO) &&
+ (base->getQualifier().storage == EvqBuffer && base->getBasicType() == EbtBlock &&
+ (base->getQualifier().layoutPacking == ElpStd140 || base->getQualifier().layoutPacking == ElpShared)))
addUniform(*base);
if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
if (index >= 0)
indexToUniformBlock[i].counterIndex = index;
}
+#else
+ (void)intermediate;
#endif
}
TReflectionTraverser it(intermediate, *this);
- // put the entry point on the list of functions to process
- it.pushFunction(intermediate.getEntryPointMangledName().c_str());
-
- // process all the functions
- while (! it.functions.empty()) {
- TIntermNode* function = it.functions.back();
- it.functions.pop_back();
- function->traverse(&it);
+ for (auto& sequnence : intermediate.getTreeRoot()->getAsAggregate()->getSequence()) {
+ if (sequnence->getAsAggregate() != nullptr) {
+ if (sequnence->getAsAggregate()->getOp() == glslang::EOpLinkerObjects) {
+ it.updateStageMasks = false;
+ TIntermAggregate* linkerObjects = sequnence->getAsAggregate();
+ for (auto& sequnence : linkerObjects->getSequence()) {
+ auto pNode = sequnence->getAsSymbolNode();
+ if (pNode != nullptr) {
+ if ((pNode->getQualifier().storage == EvqUniform &&
+ (options & EShReflectionSharedStd140UBO)) ||
+ (pNode->getQualifier().storage == EvqBuffer &&
+ (options & EShReflectionSharedStd140SSBO))) {
+ // collect std140 and shared uniform blocks from the AST
+ if ((pNode->getBasicType() == EbtBlock) &&
+ ((pNode->getQualifier().layoutPacking == ElpStd140) ||
+ (pNode->getQualifier().layoutPacking == ElpShared))) {
+ pNode->traverse(&it);
+ }
+ }
+ else if ((options & EShReflectionAllIOVariables) &&
+ (pNode->getQualifier().isPipeInput() || pNode->getQualifier().isPipeOutput()))
+ {
+ pNode->traverse(&it);
+ }
+ }
+ }
+ } else {
+ // This traverser will traverse all functions in the AST.
+ // If we want to reflect uncalled functions, we need to set the link message EShMsgKeepUncalled.
+ // When EShMsgKeepUncalled is set to true, all functions are kept in the AST, even uncalled ones.
+ // This keeps some uniform variables in the reflection if those uniform variables are used only in uncalled functions.
+ //
+ // If we want to reflect only live nodes, we can use the default link messages or leave EShMsgKeepUncalled unset.
+ // When the link message EShMsgKeepUncalled is not set, the linker won't keep uncalled functions in the AST.
+ // So traversing all function nodes is equivalent to traversing only live functions.
+ it.updateStageMasks = true;
+ sequnence->getAsAggregate()->traverse(&it);
+ }
+ }
}
+ it.updateStageMasks = true;
buildCounterIndices(intermediate);
buildUniformStageMask(intermediate);
for (int dim=0; dim<3; ++dim)
if (getLocalSize(dim) > 1)
- printf("Local size %s: %d\n", axis[dim], getLocalSize(dim));
+ printf("Local size %s: %u\n", axis[dim], getLocalSize(dim));
printf("\n");
}
} // end namespace glslang
-#endif // GLSLANG_WEB
+#endif // !GLSLANG_WEB && !GLSLANG_ANGLE