2 // Copyright (C) 2013-2016 LunarG, Inc.
4 // All rights reserved.
6 // Redistribution and use in source and binary forms, with or without
7 // modification, are permitted provided that the following conditions
10 // Redistributions of source code must retain the above copyright
11 // notice, this list of conditions and the following disclaimer.
13 // Redistributions in binary form must reproduce the above
14 // copyright notice, this list of conditions and the following
15 // disclaimer in the documentation and/or other materials provided
16 // with the distribution.
18 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
19 // contributors may be used to endorse or promote products derived
20 // from this software without specific prior written permission.
22 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 // POSSIBILITY OF SUCH DAMAGE.
36 #if !defined(GLSLANG_WEB)
38 #include "../Include/Common.h"
39 #include "reflection.h"
40 #include "LiveTraverser.h"
41 #include "localintermediate.h"
46 // Grow the reflection database through a friend traverser class of TReflection and a
47 // collection of functions to do a liveness traversal that notes which uniforms are used
48 // in semantically non-dead code.
50 // Can be used multiple times, once per stage, to grow a program reflection.
52 // High-level algorithm for one stage:
54 // 1. Put the entry point on the list of live functions.
56 // 2. Traverse any live function, while skipping if-tests with a compile-time constant
57 // condition of false, and while adding any encountered function calls to the live
60 // Repeat until the live function list is empty.
62 // 3. Add any encountered uniform variables and blocks to the reflection database.
64 // Can be attempted with a failed link, but will return false if recursion was detected, or
65 // there wasn't exactly one entry point.
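//
// Illustrative example (hypothetical fragment shader): given
//
//     uniform float liveU, deadU;
//     out vec4 color;
//     void unused() { color = vec4(deadU); }   // never called, so deadU stays dead
//     void main()   { color = vec4(liveU); }
//
// with default link messages only 'liveU' ends up in the reflection database, because
// 'unused' is never added to the live-function list.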
71 // The traverser: mostly pass through, except
72 // - processing binary nodes to see if they are dereferences of aggregates to track
73 // - processing symbol nodes to see if they are non-aggregate objects to track
75 // This ignores semantically dead code by using TLiveTraverser.
77 // This is in the glslang namespace directly so it can be a friend of TReflection.
80 class TReflectionTraverser : public TIntermTraverser {
82 TReflectionTraverser(const TIntermediate& i, TReflection& r) :
83 TIntermTraverser(), intermediate(i), reflection(r), updateStageMasks(true) { }
85 virtual bool visitBinary(TVisit, TIntermBinary* node);
86 virtual void visitSymbol(TIntermSymbol* base);
88 // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
89 // However, no dereference doesn't mean simple... it could be a complex aggregate.
90 void addUniform(const TIntermSymbol& base)
92 if (processedDerefs.find(&base) == processedDerefs.end()) {
93 processedDerefs.insert(&base);
97 TList<TIntermBinary*> derefs;
98 TString baseName = base.getName();
100 if (base.getType().getBasicType() == EbtBlock) {
102 bool anonymous = IsAnonymous(baseName);
103 const TString& blockName = base.getType().getTypeName();
106 baseName = blockName;
110 blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
113 // Use a degenerate (empty) set of dereferences to immediately put us at the end of
114 // the dereference chain expected by blowUpActiveAggregate.
115 blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, -1, 0,
116 base.getQualifier().storage, updateStageMasks);
120 void addPipeIOVariable(const TIntermSymbol& base)
122 if (processedDerefs.find(&base) == processedDerefs.end()) {
123 processedDerefs.insert(&base);
125 const TString &name = base.getName();
126 const TType &type = base.getType();
127 const bool input = base.getQualifier().isPipeInput();
129 TReflection::TMapIndexToReflection &ioItems =
130 input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
133 TReflection::TNameToIndex &ioMapper =
134 input ? reflection.pipeInNameToIndex : reflection.pipeOutNameToIndex;
136 if (reflection.options & EShReflectionUnwrapIOBlocks) {
137 bool anonymous = IsAnonymous(name);
140 if (type.getBasicType() == EbtBlock) {
141 baseName = anonymous ? TString() : type.getTypeName();
143 baseName = anonymous ? TString() : name;
146 // by convention if this is an arrayed block we ignore the array in the reflection
147 if (type.isArray() && type.getBasicType() == EbtBlock) {
148 blowUpIOAggregate(input, baseName, TType(type, 0));
150 blowUpIOAggregate(input, baseName, type);
153 TReflection::TNameToIndex::const_iterator it = ioMapper.find(name.c_str());
154 if (it == ioMapper.end()) {
155 // separate pipe i/o params from uniforms and blocks
156 // 'in' variables are reflected only for the first stage and 'out' variables only for the last stage; see the traversal in the call stack.
157 ioMapper[name.c_str()] = static_cast<int>(ioItems.size());
159 TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
160 EShLanguageMask& stages = ioItems.back().stages;
161 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
163 EShLanguageMask& stages = ioItems[it->second].stages;
164 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
170 // Look up or calculate the offsets of all block members at once, using the recursively
171 // defined block offset rules.
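//
// For example (illustrative, assuming std140 packing): for a block containing
//     float a; vec3 b; float c;
// this would produce offsets 0, 16, and 28, since a vec3 is aligned to 16 bytes
// and the trailing float packs into the next free 4-byte slot.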
172 void getOffsets(const TType& type, TVector<int>& offsets)
174 const TTypeList& memberList = *type.getStruct();
178 for (size_t m = 0; m < offsets.size(); ++m) {
179 // if the user supplied an offset, snap to it now
180 if (memberList[m].type->getQualifier().hasOffset())
181 offset = memberList[m].type->getQualifier().layoutOffset;
183 // calculate the offset of the next member and align the current offset to this member
184 intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);
186 // save the offset of this member
189 // update for the next member
190 offset += memberSize;
194 // Calculate the stride of an array type
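// For example (illustrative): under std140 rules a 'float x[4]' member has an array
// stride of 16 bytes (each element is padded out to a vec4), while under std430 the
// same member would have a stride of 4.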
195 int getArrayStride(const TType& baseType, const TType& type)
200 // consider blocks to have 0 stride, so that all offsets are relative to the start of their block
201 if (type.getBasicType() == EbtBlock)
204 TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix;
205 intermediate.getMemberAlignment(type, dummySize, stride,
206 baseType.getQualifier().layoutPacking,
207 subMatrixLayout != ElmNone
208 ? subMatrixLayout == ElmRowMajor
209 : baseType.getQualifier().layoutMatrix == ElmRowMajor);
214 // count the total number of leaf members reachable by iterating through a block type
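// For example (illustrative, non-strict suffix rules): a block containing 'vec4 a; S s[2];',
// where struct S has 3 leaf members, counts 1 + 2*3 = 7 members; an array of a basic type
// still counts as a single member.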
215 int countAggregateMembers(const TType& parentType)
217 if (! parentType.isStruct())
220 const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
222 bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);
224 const TTypeList &memberList = *parentType.getStruct();
228 for (size_t i = 0; i < memberList.size(); i++)
230 const TType &memberType = *memberList[i].type;
231 int numMembers = countAggregateMembers(memberType);
232 // for sized arrays of structs, apply the same expansion logic we would apply below in
233 // blowUpActiveAggregate
234 if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
235 if (! strictArraySuffix || ! blockParent)
236 numMembers *= memberType.getArraySizes()->getCumulativeSize();
244 // Traverse the provided deref chain, including the base, and
245 // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
246 // - recursively expand any variable array index in the middle of that traversal
247 // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
249 // arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
250 // A value of 0 for arraySize will mean to use the full array's size.
251 void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
252 TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
253 int topLevelArraySize, int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
255 // when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
257 // * arrays-of-structs always have a [x] suffix.
258 // * with array-of-struct variables in the root of a buffer block, only ever return [0].
259 // * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
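// For illustration (hypothetical declarations): under these rules, a struct array 's[4]'
// declared directly in a buffer block is reported only as 's[0].member', while a struct
// array nested one level down, e.g. 'outer.t[2]', gets an entry per element:
// 'outer.t[0].member' and 'outer.t[1].member'.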
260 const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
262 // Is this variable inside a buffer block? This flag is set back to false after we iterate inside the first array element.
263 bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);
265 // process the part of the dereference chain that was explicit in the shader
266 TString name = baseName;
267 const TType* terminalType = &baseType;
268 for (; deref != derefs.end(); ++deref) {
269 TIntermBinary* visitNode = *deref;
270 terminalType = &visitNode->getType();
272 switch (visitNode->getOp()) {
273 case EOpIndexIndirect: {
274 int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
276 if (topLevelArrayStride == 0)
277 topLevelArrayStride = stride;
279 // Visit all the indices of this array, and for each one add on the remaining dereferencing
280 for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
281 TString newBaseName = name;
282 if (terminalType->getBasicType() == EbtBlock) {}
283 else if (strictArraySuffix && blockParent)
284 newBaseName.append(TString("[0]"));
285 else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
286 newBaseName.append(TString("[") + String(i) + "]");
287 TList<TIntermBinary*>::const_iterator nextDeref = deref;
289 blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
290 topLevelArraySize, topLevelArrayStride, baseStorage, active);
296 // it was all completed in the recursive calls above
299 case EOpIndexDirect: {
300 int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
302 index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
303 if (terminalType->getBasicType() == EbtBlock) {}
304 else if (strictArraySuffix && blockParent)
305 name.append(TString("[0]"));
306 else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
307 name.append(TString("[") + String(index) + "]");
310 offset += stride * index;
313 if (topLevelArrayStride == 0)
314 topLevelArrayStride = stride;
316 // expand top-level arrays in blocks with [0] suffix
317 if (topLevelArrayStride != 0 && visitNode->getLeft()->getType().isArray()) {
322 case EOpIndexDirectStruct:
323 index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
325 offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
328 name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
330 // expand non-top-level arrays with [x] suffix
331 if (visitNode->getLeft()->getType().getBasicType() != EbtBlock && terminalType->isArray())
341 // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
342 if (! isReflectionGranularity(*terminalType)) {
343 // the base offset of this node, that children are relative to
344 int baseOffset = offset;
346 if (terminalType->isArray()) {
347 // Visit all the indices of this array, and for each one,
348 // fully explode the remaining aggregate to dereference
352 stride = getArrayStride(baseType, *terminalType);
354 int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
356 // for top-level arrays in blocks, only expand [0] to avoid explosion of items
357 if ((strictArraySuffix && blockParent) ||
358 ((topLevelArraySize == arrayIterateSize) && (topLevelArrayStride == 0))) {
359 arrayIterateSize = 1;
362 if (topLevelArrayStride == 0)
363 topLevelArrayStride = stride;
365 for (int i = 0; i < arrayIterateSize; ++i) {
366 TString newBaseName = name;
367 if (terminalType->getBasicType() != EbtBlock)
368 newBaseName.append(TString("[") + String(i) + "]");
369 TType derefType(*terminalType, 0);
371 offset = baseOffset + stride * i;
373 blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
374 topLevelArraySize, topLevelArrayStride, baseStorage, active);
377 // Visit all members of this aggregate, and for each one,
378 // fully explode the remaining aggregate to dereference
379 const TTypeList& typeList = *terminalType->getStruct();
381 TVector<int> memberOffsets;
383 if (baseOffset >= 0) {
384 memberOffsets.resize(typeList.size());
385 getOffsets(*terminalType, memberOffsets);
388 for (int i = 0; i < (int)typeList.size(); ++i) {
389 TString newBaseName = name;
390 if (newBaseName.size() > 0)
391 newBaseName.append(".");
392 newBaseName.append(typeList[i].type->getFieldName());
393 TType derefType(*terminalType, i);
395 offset = baseOffset + memberOffsets[i];
397 int arrayStride = topLevelArrayStride;
398 if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
399 derefType.isArray()) {
400 arrayStride = getArrayStride(baseType, derefType);
403 if (topLevelArraySize == -1 && arrayStride == 0 && blockParent)
404 topLevelArraySize = 1;
406 if (strictArraySuffix && blockParent) {
407 // if this member is an array, store the top-level array stride but start the explosion from
408 // the inner struct type.
409 if (derefType.isArray() && derefType.isStruct()) {
410 newBaseName.append("[0]");
411 auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
412 blowUpActiveAggregate(TType(derefType, 0), newBaseName, derefs, derefs.end(), memberOffsets[i],
413 blockIndex, 0, dimSize, arrayStride, terminalType->getQualifier().storage, false);
415 else if (derefType.isArray()) {
416 auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
417 blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
418 0, dimSize, 0, terminalType->getQualifier().storage, false);
421 blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
422 0, 1, 0, terminalType->getQualifier().storage, false);
425 blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
426 topLevelArraySize, arrayStride, baseStorage, active);
431 // it was all completed in the recursive calls above
435 if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
436 name.append(TString("[0]"));
439 // Finally, add a full string to the reflection database, and update the array size if necessary.
440 // If the dereferenced entity to record is an array, compute the size and update the maximum size.
442 // There might not be a final array dereference; it could have been copied as an array object.
444 arraySize = mapToGlArraySize(*terminalType);
446 TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);
448 TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
449 if (it == reflection.nameToIndex.end()) {
450 int uniformIndex = (int)variables.size();
451 reflection.nameToIndex[name.c_str()] = uniformIndex;
452 variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
453 arraySize, blockIndex));
454 if (terminalType->isArray()) {
455 variables.back().arrayStride = getArrayStride(baseType, *terminalType);
456 if (topLevelArrayStride == 0)
457 topLevelArrayStride = variables.back().arrayStride;
460 if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
461 reflection.atomicCounterUniformIndices.push_back(uniformIndex);
463 variables.back().topLevelArraySize = topLevelArraySize;
464 variables.back().topLevelArrayStride = topLevelArrayStride;
466 if ((reflection.options & EShReflectionAllBlockVariables) && active) {
467 EShLanguageMask& stages = variables.back().stages;
468 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
472 int& reflectedArraySize = variables[it->second].size;
473 reflectedArraySize = std::max(arraySize, reflectedArraySize);
476 if ((reflection.options & EShReflectionAllBlockVariables) && active) {
477 EShLanguageMask& stages = variables[it->second].stages;
478 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
483 // similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
484 void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
486 TString name = baseName;
488 // if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
489 if (! isReflectionGranularity(type)) {
490 if (type.isArray()) {
491 // Visit all the indices of this array, and for each one,
492 // fully explode the remaining aggregate to dereference
493 for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
494 TString newBaseName = name;
495 newBaseName.append(TString("[") + String(i) + "]");
496 TType derefType(type, 0);
498 blowUpIOAggregate(input, newBaseName, derefType);
501 // Visit all members of this aggregate, and for each one,
502 // fully explode the remaining aggregate to dereference
503 const TTypeList& typeList = *type.getStruct();
505 for (int i = 0; i < (int)typeList.size(); ++i) {
506 TString newBaseName = name;
507 if (newBaseName.size() > 0)
508 newBaseName.append(".");
509 newBaseName.append(typeList[i].type->getFieldName());
510 TType derefType(type, i);
512 blowUpIOAggregate(input, newBaseName, derefType);
516 // it was all completed in the recursive calls above
520 if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
521 name.append(TString("[0]"));
524 TReflection::TMapIndexToReflection &ioItems =
525 input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
527 std::string namespacedName = input ? "in " : "out ";
528 namespacedName += name.c_str();
530 TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
531 if (it == reflection.nameToIndex.end()) {
532 reflection.nameToIndex[namespacedName] = (int)ioItems.size();
534 TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
536 EShLanguageMask& stages = ioItems.back().stages;
537 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
539 EShLanguageMask& stages = ioItems[it->second].stages;
540 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
544 // Add a uniform dereference where blocks/struct/arrays are involved in the access.
545 // Handles the situation where the left node is at the correct or too coarse a
546 // granularity for reflection. (That is, further dereferences up the tree will be
547 // skipped.) Earlier dereferences, down the tree, will be handled
548 // at the same time, and logged to prevent reprocessing as the tree is traversed.
550 // Note: Other things like the following must be caught elsewhere:
551 // - a simple non-array, non-struct variable (no dereference even conceivable)
552 // - an aggregate consumed en masse, without a dereference
554 // So, this code is for cases like
555 // - a struct/block dereferencing a member (whether the member is array or not)
556 // - an array of struct
557 // - structs/arrays containing the above
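//
// For example (hypothetical shader): with 'uniform Foo { Bar bar[3]; } foo;' where Bar is a
// struct, an access such as 'foo.bar[i].v' is handled here; the dereference chain is collected
// below and handed to blowUpActiveAggregate to be expanded into reflection entries.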
559 void addDereferencedUniform(TIntermBinary* topNode)
561 // See if too fine-grained to process (wait to get further down the tree)
562 const TType& leftType = topNode->getLeft()->getType();
563 if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
566 // We have an array or structure or block dereference, see if it's a uniform
567 // based dereference (if not, skip it).
568 TIntermSymbol* base = findBase(topNode);
569 if (! base || ! base->getQualifier().isUniformOrBuffer())
572 // See if we've already processed this (e.g., in the middle of something
573 // we did earlier), and if so skip it
574 if (processedDerefs.find(topNode) != processedDerefs.end())
577 // Process this uniform dereference
581 bool anonymous = false;
583 // See if we need to record the block itself
584 bool block = base->getBasicType() == EbtBlock;
587 anonymous = IsAnonymous(base->getName());
589 const TString& blockName = base->getType().getTypeName();
593 baseName = blockName;
595 blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
597 if (reflection.options & EShReflectionAllBlockVariables) {
598 // Use a degenerate (empty) set of dereferences to immediately put us at the end of
599 // the dereference chain expected by blowUpActiveAggregate.
600 TList<TIntermBinary*> derefs;
602 // otherwise - if we're not using strict array suffix rules, or this isn't a block (so we are
603 // expanding root arrays anyway), just start the iteration from the base block type.
604 blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, -1, 0,
605 base->getQualifier().storage, false);
609 // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
610 // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
611 TList<TIntermBinary*> derefs;
612 for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
613 if (isReflectionGranularity(visitNode->getLeft()->getType()))
616 derefs.push_front(visitNode);
617 processedDerefs.insert(visitNode);
619 processedDerefs.insert(base);
621 // See if we have a specific array size to stick to while enumerating the explosion of the aggregate
623 if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
624 if (topNode->getOp() == EOpIndexDirect)
625 arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
628 // Put the dereference chain together, forward
632 baseName = base->getType().getTypeName();
634 baseName = base->getName();
636 blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, -1, 0,
637 base->getQualifier().storage, true);
640 int addBlockName(const TString& name, const TType& type, int size)
643 if (type.isArray()) {
644 TType derefType(type, 0);
645 for (int e = 0; e < type.getOuterArraySize(); ++e) {
646 int memberBlockIndex = addBlockName(name + "[" + String(e) + "]", derefType, size);
648 blockIndex = memberBlockIndex;
651 TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
653 TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
654 if (reflection.nameToIndex.find(name.c_str()) == reflection.nameToIndex.end()) {
655 blockIndex = (int)blocks.size();
656 reflection.nameToIndex[name.c_str()] = blockIndex;
657 blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, blockIndex));
659 blocks.back().numMembers = countAggregateMembers(type);
661 if (updateStageMasks) {
662 EShLanguageMask& stages = blocks.back().stages;
663 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
667 blockIndex = it->second;
668 if (updateStageMasks) {
669 EShLanguageMask& stages = blocks[blockIndex].stages;
670 stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
678 // Are we at a level in a dereference chain at which individual active uniform queries are made?
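// For example, float, vec4, mat3, and float[4] are at reflection granularity, while a
// struct, a block, or an array of arrays is not and gets expanded further by
// blowUpActiveAggregate.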
679 bool isReflectionGranularity(const TType& type)
681 return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
684 // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
685 // Return 0 if the topology does not fit this situation.
686 TIntermSymbol* findBase(const TIntermBinary* node)
688 TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
691 TIntermBinary* left = node->getLeft()->getAsBinaryNode();
695 return findBase(left);
699 // Translate a glslang sampler type into the GL API #define number.
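// For example, a non-arrayed, non-shadow, non-multisample float sampler2D maps to
// GL_SAMPLER_2D, and a usampler2DArray maps to GL_UNSIGNED_INT_SAMPLER_2D_ARRAY.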
701 int mapSamplerToGlType(TSampler sampler)
703 if (! sampler.image) {
705 switch (sampler.type) {
707 switch ((int)sampler.dim) {
709 switch ((int)sampler.shadow) {
710 case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
711 case true: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
714 switch ((int)sampler.ms) {
716 switch ((int)sampler.shadow) {
717 case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
718 case true: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
720 case true: return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
723 return GL_SAMPLER_3D;
725 switch ((int)sampler.shadow) {
726 case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
727 case true: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
730 return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
732 return GL_SAMPLER_BUFFER;
735 switch ((int)sampler.dim) {
737 switch ((int)sampler.shadow) {
738 case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_AMD : GL_FLOAT16_SAMPLER_1D_AMD;
739 case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_1D_SHADOW_AMD;
742 switch ((int)sampler.ms) {
744 switch ((int)sampler.shadow) {
745 case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_AMD;
746 case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_SHADOW_AMD;
748 case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD;
751 return GL_FLOAT16_SAMPLER_3D_AMD;
753 switch ((int)sampler.shadow) {
754 case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_SAMPLER_CUBE_AMD;
755 case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD;
758 return sampler.shadow ? GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_RECT_AMD;
760 return GL_FLOAT16_SAMPLER_BUFFER_AMD;
763 switch ((int)sampler.dim) {
765 return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D;
767 switch ((int)sampler.ms) {
768 case false: return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D;
769 case true: return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
770 : GL_INT_SAMPLER_2D_MULTISAMPLE;
773 return GL_INT_SAMPLER_3D;
775 return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE;
777 return GL_INT_SAMPLER_2D_RECT;
779 return GL_INT_SAMPLER_BUFFER;
782 switch ((int)sampler.dim) {
784 return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D;
786 switch ((int)sampler.ms) {
787 case false: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D;
788 case true: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
789 : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE;
792 return GL_UNSIGNED_INT_SAMPLER_3D;
794 return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE;
796 return GL_UNSIGNED_INT_SAMPLER_2D_RECT;
798 return GL_UNSIGNED_INT_SAMPLER_BUFFER;
805 switch (sampler.type) {
807 switch ((int)sampler.dim) {
809 return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D;
811 switch ((int)sampler.ms) {
812 case false: return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D;
813 case true: return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE;
818 return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE;
820 return GL_IMAGE_2D_RECT;
822 return GL_IMAGE_BUFFER;
825 switch ((int)sampler.dim) {
827 return sampler.arrayed ? GL_FLOAT16_IMAGE_1D_ARRAY_AMD : GL_FLOAT16_IMAGE_1D_AMD;
829 switch ((int)sampler.ms) {
830 case false: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_AMD;
831 case true: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD;
834 return GL_FLOAT16_IMAGE_3D_AMD;
836 return sampler.arrayed ? GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_IMAGE_CUBE_AMD;
838 return GL_FLOAT16_IMAGE_2D_RECT_AMD;
840 return GL_FLOAT16_IMAGE_BUFFER_AMD;
843 switch ((int)sampler.dim) {
845 return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D;
847 switch ((int)sampler.ms) {
848 case false: return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D;
849 case true: return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE;
852 return GL_INT_IMAGE_3D;
854 return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE;
856 return GL_INT_IMAGE_2D_RECT;
858 return GL_INT_IMAGE_BUFFER;
861 switch ((int)sampler.dim) {
863 return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D;
865 switch ((int)sampler.ms) {
866 case false: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D;
867 case true: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY
868 : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE;
871 return GL_UNSIGNED_INT_IMAGE_3D;
873 return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE;
875 return GL_UNSIGNED_INT_IMAGE_2D_RECT;
877 return GL_UNSIGNED_INT_IMAGE_BUFFER;
886 // Translate a glslang type into the GL API #define number.
887 // Ignores arrayness.
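// For example, vec3 maps to GL_FLOAT_VEC3, ivec2 to GL_INT_VEC2, and a float mat4x3 to
// GL_FLOAT_MAT4x3; a float[8] maps to GL_FLOAT, with the array size reported separately
// by mapToGlArraySize below.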
889 int mapToGlType(const TType& type)
891 switch (type.getBasicType()) {
893 return mapSamplerToGlType(type.getSampler());
902 if (type.isVector()) {
903 int offset = type.getVectorSize() - 2;
904 switch (type.getBasicType()) {
905 case EbtFloat: return GL_FLOAT_VEC2 + offset;
906 case EbtDouble: return GL_DOUBLE_VEC2 + offset;
907 case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
908 case EbtInt: return GL_INT_VEC2 + offset;
909 case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
910 case EbtInt64: return GL_INT64_VEC2_ARB + offset;
911 case EbtUint64: return GL_UNSIGNED_INT64_VEC2_ARB + offset;
912 case EbtBool: return GL_BOOL_VEC2 + offset;
913 case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
917 if (type.isMatrix()) {
918 switch (type.getBasicType()) {
920 switch (type.getMatrixCols()) {
922 switch (type.getMatrixRows()) {
923 case 2: return GL_FLOAT_MAT2;
924 case 3: return GL_FLOAT_MAT2x3;
925 case 4: return GL_FLOAT_MAT2x4;
929 switch (type.getMatrixRows()) {
930 case 2: return GL_FLOAT_MAT3x2;
931 case 3: return GL_FLOAT_MAT3;
932 case 4: return GL_FLOAT_MAT3x4;
936 switch (type.getMatrixRows()) {
937 case 2: return GL_FLOAT_MAT4x2;
938 case 3: return GL_FLOAT_MAT4x3;
939 case 4: return GL_FLOAT_MAT4;
944 switch (type.getMatrixCols()) {
946 switch (type.getMatrixRows()) {
947 case 2: return GL_DOUBLE_MAT2;
948 case 3: return GL_DOUBLE_MAT2x3;
949 case 4: return GL_DOUBLE_MAT2x4;
953 switch (type.getMatrixRows()) {
954 case 2: return GL_DOUBLE_MAT3x2;
955 case 3: return GL_DOUBLE_MAT3;
956 case 4: return GL_DOUBLE_MAT3x4;
960 switch (type.getMatrixRows()) {
961 case 2: return GL_DOUBLE_MAT4x2;
962 case 3: return GL_DOUBLE_MAT4x3;
963 case 4: return GL_DOUBLE_MAT4;
968 switch (type.getMatrixCols()) {
970 switch (type.getMatrixRows()) {
971 case 2: return GL_FLOAT16_MAT2_AMD;
972 case 3: return GL_FLOAT16_MAT2x3_AMD;
973 case 4: return GL_FLOAT16_MAT2x4_AMD;
977 switch (type.getMatrixRows()) {
978 case 2: return GL_FLOAT16_MAT3x2_AMD;
979 case 3: return GL_FLOAT16_MAT3_AMD;
980 case 4: return GL_FLOAT16_MAT3x4_AMD;
984 switch (type.getMatrixRows()) {
985 case 2: return GL_FLOAT16_MAT4x2_AMD;
986 case 3: return GL_FLOAT16_MAT4x3_AMD;
987 case 4: return GL_FLOAT16_MAT4_AMD;
995 if (type.getVectorSize() == 1) {
996 switch (type.getBasicType()) {
997 case EbtFloat: return GL_FLOAT;
998 case EbtDouble: return GL_DOUBLE;
999 case EbtFloat16: return GL_FLOAT16_NV;
1000 case EbtInt: return GL_INT;
1001 case EbtUint: return GL_UNSIGNED_INT;
1002 case EbtInt64: return GL_INT64_ARB;
1003 case EbtUint64: return GL_UNSIGNED_INT64_ARB;
1004 case EbtBool: return GL_BOOL;
1005 case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
1013 int mapToGlArraySize(const TType& type)
1015 return type.isArray() ? type.getOuterArraySize() : 1;
1018 const TIntermediate& intermediate;
1019 TReflection& reflection;
1020 std::set<const TIntermNode*> processedDerefs;
1021 bool updateStageMasks;
1024 TReflectionTraverser(TReflectionTraverser&);
1025 TReflectionTraverser& operator=(TReflectionTraverser&);
1029 // Implement the traversal functions of interest.
1032 // To catch dereferenced aggregates that must be reflected.
1033 // This catches them at the highest level possible in the tree.
1034 bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
1036 switch (node->getOp()) {
1037 case EOpIndexDirect:
1038 case EOpIndexIndirect:
1039 case EOpIndexDirectStruct:
1040 addDereferencedUniform(node);
1046 // still need to visit everything below, which could contain sub-expressions
1047 // containing different uniforms
1051 // To reflect non-dereferenced objects.
1052 void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
1054 if (base->getQualifier().storage == EvqUniform) {
1055 if (base->getBasicType() == EbtBlock) {
1056 if (reflection.options & EShReflectionSharedStd140UBO) {
1064 // TODO: add std140/layout active rules for SSBOs, same as for UBOs.
1065 // Storage buffer blocks are collected and expanded in this section.
1066 if((reflection.options & EShReflectionSharedStd140SSBO) &&
1067 (base->getQualifier().storage == EvqBuffer && base->getBasicType() == EbtBlock &&
1068 (base->getQualifier().layoutPacking == ElpStd140 || base->getQualifier().layoutPacking == ElpShared)))
1071 if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
1072 (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
1073 addPipeIOVariable(*base);
1077 // Implement TObjectReflection methods.
1080 TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
1081 int pSize, int pIndex)
1082 : name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
1083 numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
1087 int TObjectReflection::getBinding() const
1089 if (type == nullptr || !type->getQualifier().hasBinding())
1091 return type->getQualifier().layoutBinding;
1094 void TObjectReflection::dump() const
1096 printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
1097 index, getBinding(), stages);
1099 if (counterIndex != -1)
1100 printf(", counter %d", counterIndex);
1102 if (numMembers != -1)
1103 printf(", numMembers %d", numMembers);
1105 if (arrayStride != 0)
1106 printf(", arrayStride %d", arrayStride);
1108 if (topLevelArrayStride != 0)
1109 printf(", topLevelArrayStride %d", topLevelArrayStride);
1115 // Implement TReflection methods.
1118 // Track any required attribute reflection, such as compute shader numthreads.
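// For example, a compute shader declaring 'layout(local_size_x = 8, local_size_y = 4, local_size_z = 1) in;'
// yields localSize = { 8, 4, 1 }.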
1120 void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate)
1122 if (stage == EShLangCompute) {
1123 // Remember thread dimensions
1124 for (int dim=0; dim<3; ++dim)
1125 localSize[dim] = intermediate.getLocalSize(dim);
1129 // build counter block index associations for buffers
1130 void TReflection::buildCounterIndices(const TIntermediate& intermediate)
1133 // search for ones that have counters
1134 for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
1135 const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
1136 const int index = getIndex(counterName);
1139 indexToUniformBlock[i].counterIndex = index;
1146 // build Shader Stages mask for all uniforms
1147 void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
1149 if (options & EShReflectionAllBlockVariables)
1152 for (int i = 0; i < int(indexToUniform.size()); ++i) {
1153 indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
1156 for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
1157 indexToBufferVariable[i].stages =
1158 static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
1162 // Merge live symbols from 'intermediate' into the existing reflection database.
1164 // Returns false if the input is too malformed to do this.
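//
// Typical usage (sketch, via the public wrappers in ShaderLang.h):
//
//     glslang::TProgram program;
//     // ... add shaders and link ...
//     if (program.buildReflection())   // calls addStage() once per linked stage
//         program.dumpReflection();    // prints the tables built by this file
//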
1165 bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
1167 if (intermediate.getTreeRoot() == nullptr ||
1168 intermediate.getNumEntryPoints() != 1 ||
1169 intermediate.isRecursive())
1172 buildAttributeReflection(stage, intermediate);
1174 TReflectionTraverser it(intermediate, *this);
1176 for (auto& sequence : intermediate.getTreeRoot()->getAsAggregate()->getSequence()) {
1177 if (sequence->getAsAggregate() != nullptr) {
1178 if (sequence->getAsAggregate()->getOp() == glslang::EOpLinkerObjects) {
1179 it.updateStageMasks = false;
1180 TIntermAggregate* linkerObjects = sequence->getAsAggregate();
1181 for (auto& linkerObject : linkerObjects->getSequence()) {
1182 auto pNode = linkerObject->getAsSymbolNode();
1183 if (pNode != nullptr) {
1184 if ((pNode->getQualifier().storage == EvqUniform &&
1185 (options & EShReflectionSharedStd140UBO)) ||
1186 (pNode->getQualifier().storage == EvqBuffer &&
1187 (options & EShReflectionSharedStd140SSBO))) {
1188 // collect std140 and shared uniform blocks from the AST
1189 if ((pNode->getBasicType() == EbtBlock) &&
1190 ((pNode->getQualifier().layoutPacking == ElpStd140) ||
1191 (pNode->getQualifier().layoutPacking == ElpShared))) {
1192 pNode->traverse(&it);
1195 else if ((options & EShReflectionAllIOVariables) &&
1196 (pNode->getQualifier().isPipeInput() || pNode->getQualifier().isPipeOutput()))
1198 pNode->traverse(&it);
1203 // This traverser will traverse all functions in the AST.
1204 // To reflect uncalled functions as well, set the link message EShMsgKeepUncalled.
1205 // When EShMsgKeepUncalled is set, all functions are kept in the AST, even uncalled ones,
1206 // so uniform variables referenced only in uncalled functions still appear in the reflection.
1208 // To reflect only live nodes, use the default link messages (EShMsgKeepUncalled not set);
1209 // the linker then drops uncalled functions from the AST,
1210 // so traversing all function nodes is equivalent to traversing only the live ones.
1211 it.updateStageMasks = true;
1212 sequence->getAsAggregate()->traverse(&it);
1216 it.updateStageMasks = true;
1218 buildCounterIndices(intermediate);
1219 buildUniformStageMask(intermediate);
1224 void TReflection::dump()
1226 printf("Uniform reflection:\n");
1227 for (size_t i = 0; i < indexToUniform.size(); ++i)
1228 indexToUniform[i].dump();
1231 printf("Uniform block reflection:\n");
1232 for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
1233 indexToUniformBlock[i].dump();
1236 printf("Buffer variable reflection:\n");
1237 for (size_t i = 0; i < indexToBufferVariable.size(); ++i)
1238 indexToBufferVariable[i].dump();
1241 printf("Buffer block reflection:\n");
1242 for (size_t i = 0; i < indexToBufferBlock.size(); ++i)
1243 indexToBufferBlock[i].dump();
1246 printf("Pipeline input reflection:\n");
1247 for (size_t i = 0; i < indexToPipeInput.size(); ++i)
1248 indexToPipeInput[i].dump();
1251 printf("Pipeline output reflection:\n");
1252 for (size_t i = 0; i < indexToPipeOutput.size(); ++i)
1253 indexToPipeOutput[i].dump();
1256 if (getLocalSize(0) > 1) {
1257 static const char* axis[] = { "X", "Y", "Z" };
1259 for (int dim=0; dim<3; ++dim)
1260 if (getLocalSize(dim) > 1)
1261 printf("Local size %s: %u\n", axis[dim], getLocalSize(dim));
1266 // printf("Live names\n");
1267 // for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
1268 // printf("%s: %d\n", it->first.c_str(), it->second);
1272 } // end namespace glslang
1274 #endif // !GLSLANG_WEB