2 // Copyright (C) 2013 LunarG, Inc.
3 // Copyright (C) 2017 ARM Limited.
4 // Copyright (C) 2015-2018 Google, Inc.
6 // All rights reserved.
8 // Redistribution and use in source and binary forms, with or without
9 // modification, are permitted provided that the following conditions
12 // Redistributions of source code must retain the above copyright
13 // notice, this list of conditions and the following disclaimer.
15 // Redistributions in binary form must reproduce the above
16 // copyright notice, this list of conditions and the following
17 // disclaimer in the documentation and/or other materials provided
18 // with the distribution.
20 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
21 // contributors may be used to endorse or promote products derived
22 // from this software without specific prior written permission.
24 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
34 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 // POSSIBILITY OF SUCH DAMAGE.
39 // Do link-time merging and validation of intermediate representations.
41 // Basic model is that during compilation, each compilation unit (shader) is
42 // compiled into one TIntermediate instance. Then, at link time, multiple
43 // units for the same stage can be merged together, which can generate errors.
44 // Then, after all merging, a single instance of TIntermediate represents
45 // the whole stage. A final error check can be done on the resulting stage,
46 // even if no merging was done (i.e., the stage was only one compilation unit).
49 #include "localintermediate.h"
50 #include "../Include/InfoSink.h"
51 #include "SymbolTable.h"
//
// Link-time error emitter.
// Writes "Linking <stage> stage: <message>" to the info sink with an error prefix.
//
void TIntermediate::error(TInfoSink& infoSink, const char* message)
    infoSink.info.prefix(EPrefixError);
    infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
// Link-time warning emitter: same message format as error(), but with a
// warning prefix, so the link can still succeed.
void TIntermediate::warn(TInfoSink& infoSink, const char* message)
    infoSink.info.prefix(EPrefixWarning);
    infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
77 // TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block
78 // name must have the exact same set of members qualified with offset and their integral-constant
79 // expression values must be the same, or a link-time error results."
// Merge the information from 'unit' into 'this'.
// Order matters: entry points/call graphs first, then per-stage modes,
// then the ASTs themselves.
void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
    // Merging is compiled out entirely for web/ANGLE minimal builds.
    mergeCallGraphs(infoSink, unit);
    mergeModes(infoSink, unit);
    mergeTrees(infoSink, unit);
// Check that the uniform/buffer link objects match between stages, and merge them.
void TIntermediate::mergeUniformObjects(TInfoSink& infoSink, TIntermediate& unit) {
    if (unit.treeRoot == nullptr || treeRoot == nullptr)

    // Get the linker-object lists.
    // 'linkerObjects' is a live reference into this tree; 'unitLinkerObjects' is a
    // copy, because it gets filtered (resized) below without disturbing 'unit'.
    TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
    TIntermSequence unitLinkerObjects = unit.findLinkerObjects()->getSequence();

    // filter unitLinkerObjects to only contain uniforms (EvqUniform or EvqBuffer storage)
    auto end = std::remove_if(unitLinkerObjects.begin(), unitLinkerObjects.end(),
        [](TIntermNode* node) {return node->getAsSymbolNode()->getQualifier().storage != EvqUniform &&
                                      node->getAsSymbolNode()->getQualifier().storage != EvqBuffer; });
    unitLinkerObjects.resize(end - unitLinkerObjects.begin());

    // merge uniforms and do error checking
    bool mergeExistingOnly = false;
    mergeGlobalUniformBlocks(infoSink, unit, mergeExistingOnly);
    mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects, unit.getStage());
// Do error checking on the shader boundary in / out vars:
// match each 'out' of 'this' (the upstream stage) against the 'in's of 'unit'
// (the downstream stage).
void TIntermediate::checkStageIO(TInfoSink& infoSink, TIntermediate& unit) {
    if (unit.treeRoot == nullptr || treeRoot == nullptr)

    // Get copies of the linker-object lists (copies, since both are filtered below)
    TIntermSequence linkerObjects = findLinkerObjects()->getSequence();
    TIntermSequence unitLinkerObjects = unit.findLinkerObjects()->getSequence();

    // filter linkerObjects to only contain out variables
    auto end = std::remove_if(linkerObjects.begin(), linkerObjects.end(),
        [](TIntermNode* node) {return node->getAsSymbolNode()->getQualifier().storage != EvqVaryingOut; });
    linkerObjects.resize(end - linkerObjects.begin());

    // filter unitLinkerObjects to only contain in variables
    auto unitEnd = std::remove_if(unitLinkerObjects.begin(), unitLinkerObjects.end(),
        [](TIntermNode* node) {return node->getAsSymbolNode()->getQualifier().storage != EvqVaryingIn; });
    unitLinkerObjects.resize(unitEnd - unitLinkerObjects.begin());

    // do matching and error checking
    mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects, unit.getStage());

    // TODO: final check; make sure that any statically used `in` have matching `out` written to
// Merge the entry-point bookkeeping and call graphs of 'unit' into 'this'.
// Only a single entry point per stage is supported.
void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
    if (unit.getNumEntryPoints() > 0) {
        if (getNumEntryPoints() > 0)
            error(infoSink, "can't handle multiple entry points per stage");
            // Adopt the unit's entry point (plain and mangled names).
            entryPointName = unit.getEntryPointName();
            entryPointMangledName = unit.getEntryPointMangledName();
    numEntryPoints += unit.getNumEntryPoints();

    // Concatenate the caller/callee call-graph edges.
    callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)

// Helpers for mergeModes(): take the max of a numeric member across the two
// units, or adopt the unit's member when it is set/true.
// NOTE(review): MERGE_TRUE expands to a bare 'if' statement — do not use it as
// the body of an outer if/else (dangling-else hazard).
#define MERGE_MAX(member) member = std::max(member, unit.member)
#define MERGE_TRUE(member) if (unit.member) member = unit.member;
// Merge all the per-stage "mode" state (profile/version, layout qualifiers,
// execution modes, HLSL mapping options, ...) of 'unit' into 'this',
// reporting contradictions through 'infoSink'.
void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
    // Stage and source language must agree before anything else is merged.
    if (language != unit.language)
        error(infoSink, "stages must match when linking into a single stage");

    if (getSource() == EShSourceNone)
        setSource(unit.getSource());
    if (getSource() != unit.getSource())
        error(infoSink, "can't link compilation units from different source languages");

    // Profile/version/extensions: adopt the unit's wholesale if this tree is
    // still empty; otherwise reconcile (max version, union of extensions).
    if (treeRoot == nullptr) {
        profile = unit.profile;
        version = unit.version;
        requestedExtensions = unit.requestedExtensions;
        if ((isEsProfile()) != (unit.isEsProfile()))
            error(infoSink, "Cannot cross link ES and desktop profiles");
        else if (unit.profile == ECompatibilityProfile)
            profile = ECompatibilityProfile;
        version = std::max(version, unit.version);
        requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());

    // SPIR-V / client-version targets: take the maximum of each.
    MERGE_MAX(spvVersion.spv);
    MERGE_MAX(spvVersion.vulkanGlsl);
    MERGE_MAX(spvVersion.vulkan);
    MERGE_MAX(spvVersion.openGl);
    MERGE_TRUE(spvVersion.vulkanRelaxed);

    numErrors += unit.getNumErrors();
    // Only one push_constant is allowed, mergeLinkerObjects() will ensure the push_constant
    // is the same for all units.
    if (numPushConstants > 1 || unit.numPushConstants > 1)
        error(infoSink, "Only one push_constant block is allowed per stage");
    numPushConstants = std::min(numPushConstants + unit.numPushConstants, 1);

    // Geometry-shader invocation count.
    if (unit.invocations != TQualifier::layoutNotSet) {
        if (invocations == TQualifier::layoutNotSet)
            invocations = unit.invocations;
        else if (invocations != unit.invocations)
            error(infoSink, "number of invocations must match between compilation units");

    // vertices: geometry/mesh max_vertices, or tess-control vertices.
    if (vertices == TQualifier::layoutNotSet)
        vertices = unit.vertices;
    else if (unit.vertices != TQualifier::layoutNotSet && vertices != unit.vertices) {
        if (language == EShLangGeometry || language == EShLangMeshNV)
            error(infoSink, "Contradictory layout max_vertices values");
        else if (language == EShLangTessControl)
            error(infoSink, "Contradictory layout vertices values");

    // mesh-shader max_primitives.
    if (primitives == TQualifier::layoutNotSet)
        primitives = unit.primitives;
    else if (primitives != unit.primitives) {
        if (language == EShLangMeshNV)
            error(infoSink, "Contradictory layout max_primitives values");

    // input/output primitive layouts.
    if (inputPrimitive == ElgNone)
        inputPrimitive = unit.inputPrimitive;
    else if (unit.inputPrimitive != ElgNone && inputPrimitive != unit.inputPrimitive)
        error(infoSink, "Contradictory input layout primitives");

    if (outputPrimitive == ElgNone)
        outputPrimitive = unit.outputPrimitive;
    else if (unit.outputPrimitive != ElgNone && outputPrimitive != unit.outputPrimitive)
        error(infoSink, "Contradictory output layout primitives");

    if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
        error(infoSink, "gl_FragCoord redeclarations must match across shaders");

    // tessellation spacing/ordering.
    if (vertexSpacing == EvsNone)
        vertexSpacing = unit.vertexSpacing;
    else if (vertexSpacing != unit.vertexSpacing)
        error(infoSink, "Contradictory input vertex spacing");

    if (vertexOrder == EvoNone)
        vertexOrder = unit.vertexOrder;
    else if (vertexOrder != unit.vertexOrder)
        error(infoSink, "Contradictory triangle ordering");

    MERGE_TRUE(pointMode);

    // Compute/task/mesh local workgroup size and its specialization ids, per axis.
    for (int i = 0; i < 3; ++i) {
        if (unit.localSizeNotDefault[i]) {
            if (!localSizeNotDefault[i]) {
                localSize[i] = unit.localSize[i];
                localSizeNotDefault[i] = true;
            else if (localSize[i] != unit.localSize[i])
                error(infoSink, "Contradictory local size");

        if (localSizeSpecId[i] == TQualifier::layoutNotSet)
            localSizeSpecId[i] = unit.localSizeSpecId[i];
        else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
            error(infoSink, "Contradictory local size specialization ids");

    // Fragment-stage execution modes.
    MERGE_TRUE(earlyFragmentTests);
    MERGE_TRUE(postDepthCoverage);

    if (depthLayout == EldNone)
        depthLayout = unit.depthLayout;
    else if (depthLayout != unit.depthLayout)
        error(infoSink, "Contradictory depth layouts");

    MERGE_TRUE(depthReplacing);
    MERGE_TRUE(hlslFunctionality1);

    blendEquations |= unit.blendEquations;

    // Transform-feedback buffer state: reconcile strides and contained-type flags.
    for (size_t b = 0; b < xfbBuffers.size(); ++b) {
        if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
            xfbBuffers[b].stride = unit.xfbBuffers[b].stride;
        else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride)
            error(infoSink, "Contradictory xfb_stride");
        xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
        if (unit.xfbBuffers[b].contains64BitType)
            xfbBuffers[b].contains64BitType = true;
        if (unit.xfbBuffers[b].contains32BitType)
            xfbBuffers[b].contains32BitType = true;
        if (unit.xfbBuffers[b].contains16BitType)
            xfbBuffers[b].contains16BitType = true;
        // TODO: 4.4 link: enhanced layouts: compare ranges

    MERGE_TRUE(multiStream);
    MERGE_TRUE(layoutOverrideCoverage);
    MERGE_TRUE(geoPassthroughEXT);

    // HLSL register shifts, per resource type (and per descriptor set).
    for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
        if (unit.shiftBinding[i] > 0)
            setShiftBinding((TResourceType)i, unit.shiftBinding[i]);

    for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) {
        for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it)
            setShiftBindingForSet((TResourceType)i, it->second, it->first);

    resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end());

    // Remaining boolean mapping/legalization options: sticky-true across units.
    MERGE_TRUE(autoMapBindings);
    MERGE_TRUE(autoMapLocations);
    MERGE_TRUE(flattenUniformArrays);
    MERGE_TRUE(useUnknownFormat);
    MERGE_TRUE(hlslOffsets);
    MERGE_TRUE(useStorageBuffer);
    MERGE_TRUE(invariantAll);
    MERGE_TRUE(hlslIoMapping);
    MERGE_TRUE(needToLegalize);
    MERGE_TRUE(binaryDoubleOutput);
    MERGE_TRUE(usePhysicalStorageBuffer);
//
// Merge the 'unit' AST into 'this' AST.
// That includes rationalizing the unique IDs, which were set up independently,
// and might have overlaps that are not the same symbol, or might have different
// IDs for what should be the same shared symbol.
//
void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
    // Nothing to merge from an empty unit.
    if (unit.treeRoot == nullptr)

    // If 'this' has no tree yet, just adopt the unit's tree wholesale.
    if (treeRoot == nullptr) {
        treeRoot = unit.treeRoot;

    // Getting this far means we have two existing trees to merge...
    numShaderRecordBlocks += unit.numShaderRecordBlocks;
    numTaskNVBlocks += unit.numTaskNVBlocks;

    // Get the top-level globals of each unit
    TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
    TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence();

    // Get the linker-object lists
    TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
    const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();

    // Map by global name to unique ID to rationalize the same object having
    // differing IDs in different trees: seed the maps from 'this', then remap
    // 'unit' into that ID space (idShift + 1 keeps non-shared IDs unique).
    seedIdMap(idMaps, idShift);
    remapIds(idMaps, idShift + 1, unit);

    mergeBodies(infoSink, globals, unitGlobals);
    bool mergeExistingOnly = false;
    mergeGlobalUniformBlocks(infoSink, unit, mergeExistingOnly);
    mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects, unit.getStage());
    ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
375 static const TString& getNameForIdMap(TIntermSymbol* symbol)
377 TShaderInterface si = symbol->getType().getShaderInterface();
379 return symbol->getName();
381 return symbol->getType().getTypeName();
// Traverser that seeds an ID map with all built-ins, and tracks the
// maximum ID used, currently using (maximum ID + 1) as new symbol id shift seed.
// Level id will keep same after shifting.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TBuiltInIdTraverser : public TIntermTraverser {
    TBuiltInIdTraverser(TIdMaps& idMaps) : idMaps(idMaps), idShift(0) { }
    // If it's a built in, add it to the map.
    virtual void visitSymbol(TIntermSymbol* symbol)
        const TQualifier& qualifier = symbol->getType().getQualifier();
        if (qualifier.builtIn != EbvNone) {
            TShaderInterface si = symbol->getType().getShaderInterface();
            idMaps[si][getNameForIdMap(symbol)] = symbol->getId();
        // Track the running maximum of the unique-ID bits while leaving the
        // level bits (above uniqueIdMask) untouched.
        idShift = (symbol->getId() & ~TSymbolTable::uniqueIdMask) |
                std::max(idShift & TSymbolTable::uniqueIdMask,
                         symbol->getId() & TSymbolTable::uniqueIdMask);
    // Seed value for shifting new (non-shared) symbol IDs.
    long long getIdShift() const { return idShift; }
    TBuiltInIdTraverser(TBuiltInIdTraverser&);
    TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&);
// Traverser that seeds an ID map with non-builtins.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TUserIdTraverser : public TIntermTraverser {
    TUserIdTraverser(TIdMaps& idMaps) : idMaps(idMaps) { }
    // If it's a non-built-in global, add it to the map.
    virtual void visitSymbol(TIntermSymbol* symbol)
        const TQualifier& qualifier = symbol->getType().getQualifier();
        if (qualifier.builtIn == EbvNone) {
            TShaderInterface si = symbol->getType().getShaderInterface();
            idMaps[si][getNameForIdMap(symbol)] = symbol->getId();
    TUserIdTraverser(TUserIdTraverser&);
    TUserIdTraverser& operator=(TUserIdTraverser&);
    TIdMaps& idMaps; // the name-to-ID maps being seeded
// Initialize the ID map with what we know of 'this' AST.
void TIntermediate::seedIdMap(TIdMaps& idMaps, long long& idShift)
    // all built-ins everywhere need to align on IDs and contribute to the max ID
    TBuiltInIdTraverser builtInIdTraverser(idMaps);
    treeRoot->traverse(&builtInIdTraverser);
    // keep only the unique-ID bits for use as the shift seed
    idShift = builtInIdTraverser.getIdShift() & TSymbolTable::uniqueIdMask;

    // user variables in the linker object list need to align on ids
    TUserIdTraverser userIdTraverser(idMaps);
    findLinkerObjects()->traverse(&userIdTraverser);
// Traverser to map an AST ID to what was known from the seeding AST.
// (It would be nice to put this in a function, but that causes warnings
// on having no bodies for the copy-constructor/operator=.)
class TRemapIdTraverser : public TIntermTraverser {
    TRemapIdTraverser(const TIdMaps& idMaps, long long idShift) : idMaps(idMaps), idShift(idShift) { }
    // - if the same symbol, adopt the 'this' ID
    // - otherwise, ensure a unique ID by shifting to a new space
    virtual void visitSymbol(TIntermSymbol* symbol)
        const TQualifier& qualifier = symbol->getType().getQualifier();
        bool remapped = false;
        if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) {
            TShaderInterface si = symbol->getType().getShaderInterface();
            auto it = idMaps[si].find(getNameForIdMap(symbol));
            if (it != idMaps[si].end()) {
                // Adopt the seeded unique-ID bits, keeping the symbol's level bits.
                uint64_t id = (symbol->getId() & ~TSymbolTable::uniqueIdMask) |
                    (it->second & TSymbolTable::uniqueIdMask);
                symbol->changeId(id);
        // Not a shared symbol: shift into a fresh, collision-free ID space.
        symbol->changeId(symbol->getId() + idShift);
    TRemapIdTraverser(TRemapIdTraverser&);
    TRemapIdTraverser& operator=(TRemapIdTraverser&);
    const TIdMaps& idMaps;
482 void TIntermediate::remapIds(const TIdMaps& idMaps, long long idShift, TIntermediate& unit)
484 // Remap all IDs to either share or be unique, as dictated by the idMap and idShift.
485 TRemapIdTraverser idTraverser(idMaps, idShift);
486 unit.getTreeRoot()->traverse(&idTraverser);
//
// Merge the function bodies and global-level initializers from unitGlobals into globals.
// Will error check duplication of function bodies for the same signature.
//
void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals)
    // TODO: link-time performance: Processing in alphabetical order will be faster

    // Error check the global objects, not including the linker objects
    // (the linker-objects aggregate is the last child — hence the "- 1" bounds).
    for (unsigned int child = 0; child < globals.size() - 1; ++child) {
        for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) {
            TIntermAggregate* body = globals[child]->getAsAggregate();
            TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate();
            // Two function definitions with the same mangled name is a link error.
            if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) {
                error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:");
                infoSink.info << " " << globals[child]->getAsAggregate()->getName() << "\n";

    // Merge the global objects, just in front of the linker objects
    globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1);
513 static inline bool isSameInterface(TIntermSymbol* symbol, EShLanguage stage, TIntermSymbol* unitSymbol, EShLanguage unitStage) {
514 return // 1) same stage and same shader interface
515 (stage == unitStage && symbol->getType().getShaderInterface() == unitSymbol->getType().getShaderInterface()) ||
516 // 2) accross stages and both are uniform or buffer
517 (symbol->getQualifier().storage == EvqUniform && unitSymbol->getQualifier().storage == EvqUniform) ||
518 (symbol->getQualifier().storage == EvqBuffer && unitSymbol->getQualifier().storage == EvqBuffer) ||
519 // 3) in/out matched across stage boundary
520 (stage < unitStage && symbol->getQualifier().storage == EvqVaryingOut && unitSymbol->getQualifier().storage == EvqVaryingIn) ||
521 (unitStage < stage && symbol->getQualifier().storage == EvqVaryingIn && unitSymbol->getQualifier().storage == EvqVaryingOut);
// The global uniform block stores any default uniforms (i.e. uniforms without a block).
// If two linked stages declare the same member, they are meant to be the same uniform
// and need to be in the same block.
// Merge the members of different stages to allow them to be linked properly.
// Merge the default ("global") uniform blocks of 'unit' into this intermediate.
// With mergeExistingOnly set, unit blocks that have no counterpart here are
// not appended to this linker-object list.
void TIntermediate::mergeGlobalUniformBlocks(TInfoSink& infoSink, TIntermediate& unit, bool mergeExistingOnly)
    TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
    TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();

    // build lists of default blocks from the intermediates
    TIntermSequence defaultBlocks;
    TIntermSequence unitDefaultBlocks;

    // Keep only symbols whose qualifier marks them as a default block.
    auto filter = [](TIntermSequence& list, TIntermNode* node) {
        if (node->getAsSymbolNode()->getQualifier().defaultBlock) {
            list.push_back(node);

    std::for_each(linkerObjects.begin(), linkerObjects.end(),
        [&defaultBlocks, &filter](TIntermNode* node) {
            filter(defaultBlocks, node);
    std::for_each(unitLinkerObjects.begin(), unitLinkerObjects.end(),
        [&unitDefaultBlocks, &filter](TIntermNode* node) {
            filter(unitDefaultBlocks, node);

    // For each default block of the unit, look for a matching block here.
    auto itUnitBlock = unitDefaultBlocks.begin();
    for (; itUnitBlock != unitDefaultBlocks.end(); itUnitBlock++) {

        bool add = !mergeExistingOnly;
        auto itBlock = defaultBlocks.begin();

        for (; itBlock != defaultBlocks.end(); itBlock++) {
            TIntermSymbol* block = (*itBlock)->getAsSymbolNode();
            TIntermSymbol* unitBlock = (*itUnitBlock)->getAsSymbolNode();

            assert(block && unitBlock);

            // if the two default blocks match, then merge their definitions
            if (block->getType().getTypeName() == unitBlock->getType().getTypeName() &&
                block->getQualifier().storage == unitBlock->getQualifier().storage) {
                mergeBlockDefinitions(infoSink, block, unitBlock, &unit);
        // push back on original list; won't change the size of the list we're iterating over
        linkerObjects.push_back(*itUnitBlock);
// Merge the member list of 'unitBlock' into 'block' (matched by member name,
// declaration order does not matter), then rewrite the unit's tree so its
// member-index dereferences point at the merged member positions.
void TIntermediate::mergeBlockDefinitions(TInfoSink& infoSink, TIntermSymbol* block, TIntermSymbol* unitBlock, TIntermediate* unit) {
    // Identical types: nothing to reconcile.
    if (block->getType() == unitBlock->getType()) {

    if (block->getType().getTypeName() != unitBlock->getType().getTypeName() ||
        block->getType().getBasicType() != unitBlock->getType().getBasicType() ||
        block->getQualifier().storage != unitBlock->getQualifier().storage ||
        block->getQualifier().layoutSet != unitBlock->getQualifier().layoutSet) {
        // different block names likely means different blocks

    // order of declarations doesn't matter and they matched based on member name
    TTypeList* memberList = block->getType().getWritableStruct();
    TTypeList* unitMemberList = unitBlock->getType().getWritableStruct();

    // keep track of which members have changed position
    // so we don't have to search the array again
    std::map<unsigned int, unsigned int> memberIndexUpdates;

    // For each unit member, find the same-named member here; unmatched unit
    // members are appended to the end of this block's member list.
    size_t memberListStartSize = memberList->size();
    for (unsigned int i = 0; i < unitMemberList->size(); ++i) {
        for (unsigned int j = 0; j < memberListStartSize; ++j) {
            if ((*memberList)[j].type->getFieldName() == (*unitMemberList)[i].type->getFieldName()) {
                const TType* memberType = (*memberList)[j].type;
                const TType* unitMemberType = (*unitMemberList)[i].type;

                // don't need as many checks as when merging symbols, since
                // initializers and most qualifiers are stripped when the member is moved into the block
                if ((*memberType) != (*unitMemberType)) {
                    error(infoSink, "Types must match:");
                    infoSink.info << " " << memberType->getFieldName() << ": ";
                    infoSink.info << "\"" << memberType->getCompleteString() << "\" versus ";
                    infoSink.info << "\"" << unitMemberType->getCompleteString() << "\"\n";
                memberIndexUpdates[i] = j;
        memberList->push_back((*unitMemberList)[i]);
        memberIndexUpdates[i] = (unsigned int)memberList->size() - 1;

    // Snapshot of the unit block's type, used by the traverser below to
    // recognize nodes that still reference the unit block.
    unitType.shallowCopy(unitBlock->getType());

    // update symbol node in unit tree,
    // and other nodes that may reference it
    class TMergeBlockTraverser : public TIntermTraverser {
        TMergeBlockTraverser(const glslang::TType &type, const glslang::TType& unitType,
             glslang::TIntermediate& unit,
             const std::map<unsigned int, unsigned int>& memberIdxUpdates) :
            newType(type), unitType(unitType), unit(unit), memberIndexUpdates(memberIdxUpdates)
        virtual ~TMergeBlockTraverser() { }

        const glslang::TType& newType;          // type with modifications
        const glslang::TType& unitType;         // copy of original type
        glslang::TIntermediate& unit;           // intermediate that is being updated
        const std::map<unsigned int, unsigned int>& memberIndexUpdates;

        virtual void visitSymbol(TIntermSymbol* symbol)
            glslang::TType& symType = symbol->getWritableType();

            if (symType == unitType) {
                // each symbol node has a local copy of the unitType
                // if merging involves changing properties that aren't shared objects
                // they should be updated in all instances

                // e.g. the struct list is a ptr to an object, so it can be updated
                // once, outside the traverser
                //*symType.getWritableStruct() = *newType.getStruct();

        virtual bool visitBinary(TVisit, glslang::TIntermBinary* node)
            if (node->getOp() == EOpIndexDirectStruct && node->getLeft()->getType() == unitType) {
                // this is a dereference to a member of the block; since the
                // member list changed, need to update this to point to the
                // new member index
                assert(node->getRight()->getAsConstantUnion());

                glslang::TIntermConstantUnion* constNode = node->getRight()->getAsConstantUnion();
                unsigned int memberIdx = constNode->getConstArray()[0].getUConst();
                unsigned int newIdx = memberIndexUpdates.at(memberIdx);
                TIntermTyped* newConstNode = unit.addConstantUnion(newIdx, node->getRight()->getLoc());

                node->setRight(newConstNode);

    } finalLinkTraverser(block->getType(), unitType, *unit, memberIndexUpdates);

    // update the tree to use the new type
    unit->getTreeRoot()->traverse(&finalLinkTraverser);

    // update the member list
    (*unitMemberList) = (*memberList);
//
// Merge the linker objects from unitLinkerObjects into linkerObjects.
// Duplication is expected and filtered out, but contradictions are an error.
//
void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects, EShLanguage unitStage)
    // Error check and merge the linker objects (duplicates should not be created)
    std::size_t initialNumLinkerObjects = linkerObjects.size();
    for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) {
        for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
            TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
            TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
            assert(symbol && unitSymbol);

            bool isSameSymbol = false;
            // If they are both blocks in the same shader interface,
            // match by the block-name, not the identifier name.
            if (symbol->getType().getBasicType() == EbtBlock && unitSymbol->getType().getBasicType() == EbtBlock) {
                if (isSameInterface(symbol, getStage(), unitSymbol, unitStage)) {
                    isSameSymbol = symbol->getType().getTypeName() == unitSymbol->getType().getTypeName();
            else if (symbol->getName() == unitSymbol->getName())

            // but if one has an initializer and the other does not, update
            // the initializer from the unit's symbol
            if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty())
                symbol->setConstArray(unitSymbol->getConstArray());

            // Similarly for binding
            if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding())
                symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding;

            // Similarly for location
            if (!symbol->getQualifier().hasLocation() && unitSymbol->getQualifier().hasLocation()) {
                symbol->getQualifier().layoutLocation = unitSymbol->getQualifier().layoutLocation;

            // Update implicit array sizes
            mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType());

            // Check for consistent types/qualification/initializers etc.
            mergeErrorCheck(infoSink, *symbol, *unitSymbol, unitStage);

            // If different symbols, verify they aren't push_constant since there can only be one per stage
            else if (symbol->getQualifier().isPushConstant() && unitSymbol->getQualifier().isPushConstant() && getStage() == unitStage)
                error(infoSink, "Only one push_constant block is allowed per stage");

        linkerObjects.push_back(unitLinkerObjects[unitLinkObj]);

        // for anonymous blocks, check that their members don't conflict with other names
        if (unitLinkerObjects[unitLinkObj]->getAsSymbolNode()->getBasicType() == EbtBlock &&
            IsAnonymous(unitLinkerObjects[unitLinkObj]->getAsSymbolNode()->getName())) {
            for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
                TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
                TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
                assert(symbol && unitSymbol);

                // Report 'name' if it collides with any member of the anonymous unit block.
                auto checkName = [this, unitSymbol, &infoSink](const TString& name) {
                    for (unsigned int i = 0; i < unitSymbol->getType().getStruct()->size(); ++i) {
                        if (name == (*unitSymbol->getType().getStruct())[i].type->getFieldName()) {
                            error(infoSink, "Anonymous member name used for global variable or other anonymous member: ");
                            infoSink.info << (*unitSymbol->getType().getStruct())[i].type->getCompleteString() << "\n";

                if (isSameInterface(symbol, getStage(), unitSymbol, unitStage)) {
                    checkName(symbol->getName());

                    // check members of other anonymous blocks
                    if (symbol->getBasicType() == EbtBlock && IsAnonymous(symbol->getName())) {
                        for (unsigned int i = 0; i < symbol->getType().getStruct()->size(); ++i) {
                            checkName((*symbol->getType().getStruct())[i].type->getFieldName());
785 // TODO 4.5 link functionality: cull distance array size checking
// Recursively merge the implicit array sizes through the objects' respective type trees.
void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
    if (type.isUnsizedArray()) {
        if (unitType.isUnsizedArray()) {
            // Both unsized: grow this type's implicit size to cover the unit's,
            // and propagate variable-indexing.
            type.updateImplicitArraySize(unitType.getImplicitArraySize());
            if (unitType.isArrayVariablyIndexed())
                type.setArrayVariablyIndexed();
        } else if (unitType.isSizedArray())
            // Unit has an explicit size: adopt it for this unsized array.
            type.changeOuterArraySize(unitType.getOuterArraySize());

    // Type mismatches are caught and reported after this, just be careful for now.
    if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size())

    // Recurse member-wise through matching struct layouts.
    for (int i = 0; i < (int)type.getStruct()->size(); ++i)
        mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type);
808 // Compare two global objects from two compilation units and see if they match
809 // well enough. Rules can be different for intra- vs. cross-stage matching.
811 // This function only does one of intra- or cross-stage matching per call.
// Compare 'symbol' and 'unitSymbol' — the same-named global object from two
// compilation units (same stage, or across an in/out stage boundary per
// 'unitStage') — and report mismatches in type, storage/precision/memory/
// layout qualifiers, and initializers. Errors set 'writeTypeComparison' so
// both complete type strings are printed once at the end.
// Compiled out for the web/ANGLE builds.
813 void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, EShLanguage unitStage)
815 #if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
816 bool crossStage = getStage() != unitStage;
817 bool writeTypeComparison = false;
819 // Types have to match
821 // but, we make an exception if one is an implicit array and the other is sized
822 // or if the array sizes differ because of the extra array dimension on some in/out boundaries
823 bool arraysMatch = false;
824 if (isIoResizeArray(symbol.getType(), getStage()) || isIoResizeArray(unitSymbol.getType(), unitStage)) {
825 // if the arrays have an extra dimension because of the stage.
826 // compare dimensions while ignoring the outer dimension
827 unsigned int firstDim = isIoResizeArray(symbol.getType(), getStage()) ? 1 : 0;
828 unsigned int numDim = symbol.getArraySizes()
829 ? symbol.getArraySizes()->getNumDims() : 0;
830 unsigned int unitFirstDim = isIoResizeArray(unitSymbol.getType(), unitStage) ? 1 : 0;
831 unsigned int unitNumDim = unitSymbol.getArraySizes()
832 ? unitSymbol.getArraySizes()->getNumDims() : 0;
833 arraysMatch = (numDim - firstDim) == (unitNumDim - unitFirstDim);
834 // check that array sizes match as well
835 for (unsigned int i = 0; i < (numDim - firstDim) && arraysMatch; i++) {
836 if (symbol.getArraySizes()->getDimSize(firstDim + i) !=
837 unitSymbol.getArraySizes()->getDimSize(unitFirstDim + i)) {
// Non-io-resize case: arrayness must match exactly, except one side may
// still be an unsized array.
844 arraysMatch = symbol.getType().sameArrayness(unitSymbol.getType()) ||
845 (symbol.getType().isArray() && unitSymbol.getType().isArray() &&
846 (symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()));
849 if (!symbol.getType().sameElementType(unitSymbol.getType()) ||
850 !symbol.getType().sameTypeParameters(unitSymbol.getType()) ||
852 writeTypeComparison = true;
853 error(infoSink, "Types must match:");
857 // Interface block member-wise layout qualifiers have to match
858 if (symbol.getType().getBasicType() == EbtBlock && unitSymbol.getType().getBasicType() == EbtBlock &&
859 symbol.getType().getStruct() && unitSymbol.getType().getStruct() &&
860 symbol.getType().sameStructType(unitSymbol.getType())) {
861 for (unsigned int i = 0; i < symbol.getType().getStruct()->size(); ++i) {
862 const TQualifier& qualifier = (*symbol.getType().getStruct())[i].type->getQualifier();
863 const TQualifier& unitQualifier = (*unitSymbol.getType().getStruct())[i].type->getQualifier();
864 if (qualifier.layoutMatrix != unitQualifier.layoutMatrix ||
865 qualifier.layoutOffset != unitQualifier.layoutOffset ||
866 qualifier.layoutAlign != unitQualifier.layoutAlign ||
867 qualifier.layoutLocation != unitQualifier.layoutLocation ||
868 qualifier.layoutComponent != unitQualifier.layoutComponent) {
869 error(infoSink, "Interface block member layout qualifiers must match:");
870 writeTypeComparison = true;
// True when this is an out->in match across a stage boundary; several of the
// checks below are relaxed for that case.
875 bool isInOut = crossStage &&
876 ((symbol.getQualifier().storage == EvqVaryingIn && unitSymbol.getQualifier().storage == EvqVaryingOut) ||
877 (symbol.getQualifier().storage == EvqVaryingOut && unitSymbol.getQualifier().storage == EvqVaryingIn));
879 // Qualifiers have to (almost) match
881 if (!isInOut && symbol.getQualifier().storage != unitSymbol.getQualifier().storage) {
882 error(infoSink, "Storage qualifiers must match:");
883 writeTypeComparison = true;
886 // Uniform and buffer blocks must either both have an instance name, or
887 // must both be anonymous. The names don't need to match though.
888 if (symbol.getQualifier().isUniformOrBuffer() &&
889 (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()))) {
890 error(infoSink, "Matched Uniform or Storage blocks must all be anonymous,"
891 " or all be named:");
892 writeTypeComparison = true;
// Differing instance names are only a warning, not an error.
895 if (symbol.getQualifier().storage == unitSymbol.getQualifier().storage &&
896 (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()) ||
897 (!IsAnonymous(symbol.getName()) && symbol.getName() != unitSymbol.getName()))) {
898 warn(infoSink, "Matched shader interfaces are using different instance names.");
899 writeTypeComparison = true;
903 if (!isInOut && symbol.getQualifier().precision != unitSymbol.getQualifier().precision) {
904 error(infoSink, "Precision qualifiers must match:");
905 writeTypeComparison = true;
// invariant and precise are only checked for intra-stage matches.
909 if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) {
910 error(infoSink, "Presence of invariant qualifier must match:");
911 writeTypeComparison = true;
915 if (! crossStage && symbol.getQualifier().isNoContraction() != unitSymbol.getQualifier().isNoContraction()) {
916 error(infoSink, "Presence of precise qualifier must match:");
917 writeTypeComparison = true;
920 // Auxiliary and interpolation...
921 // "interpolation qualification (e.g., flat) and auxiliary qualification (e.g. centroid) may differ.
922 // These mismatches are allowed between any pair of stages ...
923 // those provided in the fragment shader supersede those provided in previous stages."
925 (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid ||
926 symbol.getQualifier().smooth != unitSymbol.getQualifier().smooth ||
927 symbol.getQualifier().flat != unitSymbol.getQualifier().flat ||
928 symbol.getQualifier().isSample()!= unitSymbol.getQualifier().isSample() ||
929 symbol.getQualifier().isPatch() != unitSymbol.getQualifier().isPatch() ||
930 symbol.getQualifier().isNonPerspective() != unitSymbol.getQualifier().isNonPerspective())) {
931 error(infoSink, "Interpolation and auxiliary storage qualifiers must match:");
932 writeTypeComparison = true;
// Memory qualifiers must match exactly in all cases.
936 if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent ||
937 symbol.getQualifier().devicecoherent != unitSymbol.getQualifier().devicecoherent ||
938 symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent ||
939 symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent ||
940 symbol.getQualifier().subgroupcoherent != unitSymbol.getQualifier().subgroupcoherent ||
941 symbol.getQualifier().shadercallcoherent!= unitSymbol.getQualifier().shadercallcoherent ||
942 symbol.getQualifier().nonprivate != unitSymbol.getQualifier().nonprivate ||
943 symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil ||
944 symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict ||
945 symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly ||
946 symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) {
947 error(infoSink, "Memory qualifiers must match:");
948 writeTypeComparison = true;
952 // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
953 // requires separate user-supplied offset from actual computed offset, but
954 // current implementation only has one offset.
955 if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix ||
956 symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking ||
957 symbol.getQualifier().layoutLocation != unitSymbol.getQualifier().layoutLocation ||
958 symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent ||
959 symbol.getQualifier().layoutIndex != unitSymbol.getQualifier().layoutIndex ||
960 symbol.getQualifier().layoutBinding != unitSymbol.getQualifier().layoutBinding ||
961 (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) {
962 error(infoSink, "Layout qualification must match:");
963 writeTypeComparison = true;
966 // Initializers have to match, if both are present, and if we don't already know the types don't match
967 if (! writeTypeComparison) {
968 if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) {
969 if (symbol.getConstArray() != unitSymbol.getConstArray()) {
970 error(infoSink, "Initializers must match:");
971 infoSink.info << "    " << symbol.getName() << "\n";
// If anything mismatched above, print both complete types once, for context.
976 if (writeTypeComparison) {
977 infoSink.info << "    " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus ";
978 if (symbol.getName() != unitSymbol.getName())
979 infoSink.info << unitSymbol.getName() << ": ";
981 infoSink.info << "\"" << unitSymbol.getType().getCompleteString() << "\"\n";
// Scan all linker objects and report an error if 'shared' variables appear
// both inside a block and outside any block — the two usages cannot be mixed.
986 void TIntermediate::sharedBlockCheck(TInfoSink& infoSink)
988 bool has_shared_block = false;
989 bool has_shared_non_block = false;
990 TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
991 for (size_t i = 0; i < linkObjects.size(); ++i) {
992 const TType& type = linkObjects[i]->getAsTyped()->getType();
993 const TQualifier& qualifier = type.getQualifier();
994 if (qualifier.storage == glslang::EvqShared) {
// Track whether shared storage was declared as a block vs. a loose variable.
995 if (type.getBasicType() == glslang::EbtBlock)
996 has_shared_block = true;
998 has_shared_non_block = true;
1001 if (has_shared_block && has_shared_non_block)
1002 error(infoSink, "cannot mix use of shared variables inside and outside blocks");
1006 // Do final link-time error checking of a complete (merged) intermediate representation.
1007 // (Much error checking was done during merging).
1009 // Also, lock in defaults of things not set, including array sizes.
// Do final link-time error checking of the complete (merged) intermediate:
// entry-point presence, call-graph cycles/bodies, I/O locations, push
// constants, xfb stride rules, and per-stage layout requirements; it also
// locks in defaults (e.g. tessellation spacing/order, implicit xfb strides,
// implicit array sizes via the traverser at the end).
1011 void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
1013 if (getTreeRoot() == nullptr)
// Each stage needs exactly one entry point; HLSL sources only warn here.
1016 if (numEntryPoints < 1) {
1017 if (getSource() == EShSourceGlsl)
1018 error(infoSink, "Missing entry point: Each stage requires one entry point");
1020 warn(infoSink, "Entry point not found");
1023 // recursion and missing body checking
1024 checkCallGraphCycles(infoSink);
1025 checkCallGraphBodies(infoSink, keepUncalled);
1027 // overlap/alias/missing I/O, etc.
1028 inOutLocationCheck(infoSink);
1031 if (getNumPushConstants() > 1)
1032 error(infoSink, "Only one push_constant block is allowed per stage");
1035 if (invocations == TQualifier::layoutNotSet)
// Mutually exclusive built-in usage checks.
1038 if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex"))
1039 error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
1040 if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex"))
1041 error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
1043 if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData")))
1044 error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs");
1045 if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData"))
1046 error(infoSink, "Cannot use both gl_FragColor and gl_FragData");
// Transform feedback: round each buffer's implicit stride up to the
// alignment required by the widest component type it captures.
1048 for (size_t b = 0; b < xfbBuffers.size(); ++b) {
1049 if (xfbBuffers[b].contains64BitType)
1050 RoundToPow2(xfbBuffers[b].implicitStride, 8);
1051 else if (xfbBuffers[b].contains32BitType)
1052 RoundToPow2(xfbBuffers[b].implicitStride, 4);
1053 else if (xfbBuffers[b].contains16BitType)
1054 RoundToPow2(xfbBuffers[b].implicitStride, 2);
1056 // "It is a compile-time or link-time error to have
1057 // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
1058 // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a
1059 // compile-time or link-time error to have different values specified for the stride for the same buffer."
1060 if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) {
1061 error(infoSink, "xfb_stride is too small to hold all buffer entries:");
1062 infoSink.info.prefix(EPrefixError);
1063 infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n";
// No explicit stride given: lock in the implicitly computed one.
1065 if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
1066 xfbBuffers[b].stride = xfbBuffers[b].implicitStride;
1068 // "If the buffer is capturing any
1069 // outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a
1070 // multiple of 4, or a compile-time or link-time error results."
1071 if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
1072 error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
1073 infoSink.info.prefix(EPrefixError);
1074 infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
1075 } else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
1076 error(infoSink, "xfb_stride must be multiple of 4:");
1077 infoSink.info.prefix(EPrefixError);
1078 infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
1080 // "If the buffer is capturing any
1081 // outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
1082 else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
1083 error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:");
1084 infoSink.info.prefix(EPrefixError);
1085 infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
1088 // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
1089 // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
1090 if (xfbBuffers[b].stride > (unsigned int)(4 * resources->maxTransformFeedbackInterleavedComponents)) {
1091 error(infoSink, "xfb_stride is too large:");
1092 infoSink.info.prefix(EPrefixError);
1093 infoSink.info << "    xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources->maxTransformFeedbackInterleavedComponents << "\n";
// Per-stage layout requirements and defaults.
1100 case EShLangTessControl:
1101 if (vertices == TQualifier::layoutNotSet)
1102 error(infoSink, "At least one shader must specify an output layout(vertices=...)");
1104 case EShLangTessEvaluation:
1105 if (getSource() == EShSourceGlsl) {
1106 if (inputPrimitive == ElgNone)
1107 error(infoSink, "At least one shader must specify an input layout primitive");
// Missing spacing/order default to equal spacing, counter-clockwise.
1108 if (vertexSpacing == EvsNone)
1109 vertexSpacing = EvsEqual;
1110 if (vertexOrder == EvoNone)
1111 vertexOrder = EvoCcw;
1114 case EShLangGeometry:
1115 if (inputPrimitive == ElgNone)
1116 error(infoSink, "At least one shader must specify an input layout primitive");
1117 if (outputPrimitive == ElgNone)
1118 error(infoSink, "At least one shader must specify an output layout primitive");
1119 if (vertices == TQualifier::layoutNotSet)
1120 error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
1122 case EShLangFragment:
1123 // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
1124 // ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
1125 // requiring explicit early_fragment_tests
1126 if (getPostDepthCoverage() && !getEarlyFragmentTests())
1127 error(infoSink, "post_depth_coverage requires early_fragment_tests");
1129 case EShLangCompute:
1130 sharedBlockCheck(infoSink);
// Ray-tracing stages: at most one shaderRecordNV block.
1133 case EShLangIntersect:
1135 case EShLangClosestHit:
1137 case EShLangCallable:
1138 if (numShaderRecordBlocks > 1)
1139 error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage");
1142 // NV_mesh_shader doesn't allow use of both single-view and per-view builtins.
1143 if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV"))
1144 error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV");
1145 if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV"))
1146 error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV");
1147 if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV"))
1148 error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV");
1149 if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV"))
1150 error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV");
1151 if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV"))
1152 error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV");
1153 if (outputPrimitive == ElgNone)
1154 error(infoSink, "At least one shader must specify an output layout primitive");
1155 if (vertices == TQualifier::layoutNotSet)
1156 error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
1157 if (primitives == TQualifier::layoutNotSet)
1158 error(infoSink, "At least one shader must specify a layout(max_primitives = value)");
1161 if (numTaskNVBlocks > 1)
1162 error(infoSink, "Only one taskNV interface block is allowed per shader");
1163 sharedBlockCheck(infoSink);
1166 error(infoSink, "Unknown Stage.");
1170 // Process the tree for any node-specific work.
// Local traverser: visits every symbol to finalize implicit array sizes.
1171 class TFinalLinkTraverser : public TIntermTraverser {
1173 TFinalLinkTraverser() { }
1174 virtual ~TFinalLinkTraverser() { }
1176 virtual void visitSymbol(TIntermSymbol* symbol)
1178 // Implicitly size arrays.
1179 // If an unsized array is left as unsized, it effectively
1180 // becomes run-time sized.
1181 symbol->getWritableType().adoptImplicitArraySizes(false);
1183 } finalLinkTraverser;
1185 treeRoot->traverse(&finalLinkTraverser);
1190 // See if the call graph contains any static recursion, which is disallowed
1191 // by the specification.
// Detect static recursion in the call graph (disallowed by the spec) using an
// explicit-stack depth-first search: 'currentPath' marks nodes on the current
// DFS stack, so finding a callee already on the path is a back edge, i.e.
// recursion. 'errorGiven' prevents reporting the same cycle twice.
1193 void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink)
1195 // Clear fields we'll use for this.
1196 for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1197 call->visited = false;
1198 call->currentPath = false;
1199 call->errorGiven = false;
1203 // Loop, looking for a new connected subgraph.  One subgraph is handled per loop iteration.
1208 // See if we have unvisited parts of the graph.
1210 for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1211 if (! call->visited) {
1217 // If not, we are done.
1221 // Otherwise, we found a new subgraph, process it:
1222 // See what all can be reached by this new root, and if any of
1223 // that is recursive.  This is done by depth-first traversals, seeing
1224 // if a new call is found that was already in the currentPath (a back edge),
1225 // thereby detecting recursion.
1226 std::list<TCall*> stack;
1227 newRoot->currentPath = true; // currentPath will be true iff it is on the stack
1228 stack.push_back(newRoot);
1229 while (! stack.empty()) {
1231 TCall* call = stack.back();
1233 // Add to the stack just one callee.
1234 // This algorithm always terminates, because only !visited and !currentPath causes a push
1235 // and all pushes change currentPath to true, and all pops change visited to true.
1236 TGraph::iterator child = callGraph.begin();
1237 for (; child != callGraph.end(); ++child) {
1239 // If we already visited this node, its whole subgraph has already been processed, so skip it.
// An edge exists when the top-of-stack call's callee is this node's caller.
1243 if (call->callee == child->caller) {
1244 if (child->currentPath) {
1245 // Then, we found a back edge
1246 if (! child->errorGiven) {
1247 error(infoSink, "Recursion detected:");
1248 infoSink.info << "    " << call->callee << " calling " << child->callee << "\n";
1249 child->errorGiven = true;
// Not on the path yet: descend into this callee.
1253 child->currentPath = true;
1254 stack.push_back(&(*child));
1259 if (child == callGraph.end()) {
1260 // no more callees, we bottomed out, never look at this node again
1261 stack.back()->currentPath = false;
1262 stack.back()->visited = true;
1265 } // end while, meaning nothing left to process in this subtree
1267 } while (newRoot);  // redundant loop check; should always exit via the 'break' above
1271 // See which functions are reachable from the entry point and which have bodies.
1272 // Reachable ones with missing bodies are errors.
1273 // Unreachable bodies are dead code.
// Determine which functions are reachable from the entry point and which have
// bodies in the AST. Reachable calls with no body are errors; unreachable
// bodies are dead code and are removed unless 'keepUncalled' is set.
1275 void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled)
1277 // Clear fields we'll use for this.
1278 for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1279 call->visited = false;
// -1 means "no body found" until matched against the AST below.
1280 call->calleeBodyPosition = -1;
1283 // The top level of the AST includes function definitions (bodies).
1284 // Compare these to function calls in the call graph.
1285 // We'll end up knowing which have bodies, and if so,
1286 // how to map the call-graph node to the location in the AST.
1287 TIntermSequence &functionSequence = getTreeRoot()->getAsAggregate()->getSequence();
1288 std::vector<bool> reachable(functionSequence.size(), true); // so that non-functions are reachable
1289 for (int f = 0; f < (int)functionSequence.size(); ++f) {
1290 glslang::TIntermAggregate*  node = functionSequence[f]->getAsAggregate();
1291 if (node && (node->getOp() == glslang::EOpFunction)) {
1292 if (node->getName().compare(getEntryPointMangledName().c_str()) != 0)
1293 reachable[f] = false; // so that function bodies are unreachable, until proven otherwise
1294 for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1295 if (call->callee == node->getName())
1296 call->calleeBodyPosition = f;
1301 // Start call-graph traversal by visiting the entry point nodes.
1302 for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1303 if (call->caller.compare(getEntryPointMangledName().c_str()) == 0)
1304 call->visited = true;
1307 // Propagate 'visited' through the call-graph to every part of the graph it
1308 // can reach (seeded with the entry-point setting above).
1312 for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) {
1313 if (call1->visited) {
1314 for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) {
1315 if (! call2->visited) {
1316 if (call1->callee == call2->caller) {
1318 call2->visited = true;
1326 // Any call-graph node set to visited but without a callee body is an error.
1327 for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1328 if (call->visited) {
1329 if (call->calleeBodyPosition == -1) {
1330 error(infoSink, "No function definition (body) found: ");
1331 infoSink.info << "    " << call->callee << "\n";
// Body exists and is reached: mark it live so it survives the pruning below.
1333 reachable[call->calleeBodyPosition] = true;
1337 // Bodies in the AST not reached by the call graph are dead;
1338 // clear them out, since they can't be reached and also can't
1339 // be translated further due to possibility of being ill defined.
1340 if (! keepUncalled) {
1341 for (int f = 0; f < (int)functionSequence.size(); ++f) {
1343 functionSequence[f] = nullptr;
// Compact the sequence after nulling out dead bodies (erase-remove idiom).
1345 functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr), functionSequence.end());
1350 // Satisfy rules for location qualifiers on inputs and outputs
// Enforce location-qualifier rules on pipeline inputs/outputs: under the ES
// profile, if a fragment shader has more than one user-defined output, every
// output must carry a location qualifier.
1352 void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
1354 // ES 3.0 requires all outputs to have location qualifiers if there is more than one output
1355 bool fragOutWithNoLocation = false;
1358 // TODO: linker functionality: location collision checking
1360 TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
1361 for (size_t i = 0; i < linkObjects.size(); ++i) {
1362 const TType& type = linkObjects[i]->getAsTyped()->getType();
1363 const TQualifier& qualifier = type.getQualifier();
1364 if (language == EShLangFragment) {
// Only user-declared (non-built-in) fragment outputs are counted.
1365 if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) {
1367 if (!qualifier.hasAnyLocation())
1368 fragOutWithNoLocation = true;
1373 if (isEsProfile()) {
1374 if (numFragOut > 1 && fragOutWithNoLocation)
1375 error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
// Return the aggregate holding the linker objects, which by construction is
// the last member of the top-level AST sequence (asserted below).
1379 TIntermAggregate* TIntermediate::findLinkerObjects() const
1381 // Get the top-level globals
1382 TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
1384 // Get the last member of the sequences, expected to be the linker-object lists
1385 assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects);
1387 return globals.back()->getAsAggregate();
1390 // See if a variable was both a user-declared output and used.
1391 // Note: the spec discusses writing to one, but this looks at read or write, which
1392 // is more useful, and perhaps the spec should be changed to reflect that.
// Return whether any user-declared (non-"gl_") pipeline output was actually
// accessed (read or written) in this stage.
1393 bool TIntermediate::userOutputUsed() const
1395 const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
1398 for (size_t i = 0; i < linkerObjects.size(); ++i) {
1399 const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode();
// A user output is: output storage, name not starting with "gl_", and I/O-accessed.
1400 if (symbolNode.getQualifier().storage == EvqVaryingOut &&
1401 symbolNode.getName().compare(0, 3, "gl_") != 0 &&
1402 inIoAccessed(symbolNode.getName())) {
1411 // Accumulate locations used for inputs, outputs, and uniforms, payload and callable data
1412 // and check for collisions as the accumulation is done.
1414 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1416 // typeCollision is set to true if there is no direct collision, but the types in the same location
// Record the location range consumed by 'type'/'qualifier' in the proper set
// (input, output, uniform, buffer, payload, callable), checking for
// collisions as it goes.
// Returns < 0 for no collision, otherwise a colliding location value;
// 'typeCollision' is set when only the aliased basic types conflict.
1419 int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision)
1421 typeCollision = false;
// Select which used-location set this object belongs to, by storage class.
1425 if (qualifier.isPipeInput())
1427 else if (qualifier.isPipeOutput())
1429 else if (qualifier.storage == EvqUniform)
1431 else if (qualifier.storage == EvqBuffer)
1433 else if (qualifier.isAnyPayload())
1435 else if (qualifier.isAnyCallable())
// Compute 'size', the number of locations/slots this object consumes.
1441 if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) {
1443 } else if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
1444 if (type.isSizedArray())
1445 size = type.getCumulativeArraySize();
1449 // Strip off the outer array dimension for those having an extra one.
1450 if (type.isArray() && qualifier.isArrayedIo(language)) {
1451 TType elementType(type, 0);
1452 size = computeTypeLocationSize(elementType, language);
1454 size = computeTypeLocationSize(type, language);
1457 // Locations, and components within locations.
1459 // Almost always, dealing with components means a single location is involved.
1460 // The exception is a dvec3. From the spec:
1462 // "A dvec3 will consume all four components of the first location and components 0 and 1 of
1463 // the second location. This leaves components 2 and 3 available for other component-qualified
1466 // That means, without ever mentioning a component, a component range
1467 // for a different location gets specified, if it's not a vertex shader input. (!)
1468 // (A vertex shader input will show using only one location, even for a dvec3/4.)
1470 // So, for the case of dvec3, we need two independent ioRanges.
1472 // For raytracing IO (payloads and callabledata) each declaration occupies a single
1473 // slot irrespective of type.
1474 int collision = -1; // no collision
1476 if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) {
// Ray-tracing payload/callable: a single slot, tracked in usedIoRT.
1477 TRange range(qualifier.layoutLocation, qualifier.layoutLocation);
1478 collision = checkLocationRT(setRT, qualifier.layoutLocation);
1480 usedIoRT[setRT].push_back(range);
1481 } else if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
1482 (qualifier.isPipeInput() || qualifier.isPipeOutput())) {
1483 // Dealing with dvec3 in/out split across two locations.
1484 // Need two io-ranges.
1485 // The case where the dvec3 doesn't start at component 0 was previously caught as overflow.
// First location: all four components.
1488 TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation);
1489 TRange componentRange(0, 3);
1490 TIoRange range(locationRange, componentRange, type.getBasicType(), 0);
1492 // check for collisions
1493 collision = checkLocationRange(set, range, type, typeCollision);
1494 if (collision < 0) {
1495 usedIo[set].push_back(range);
// Second location: only components 0-1 (rest stays available).
1498 TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1);
1499 TRange componentRange2(0, 1);
1500 TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0);
1502 // check for collisions
1503 collision = checkLocationRange(set, range2, type, typeCollision);
1505 usedIo[set].push_back(range2);
1510 // Not a dvec3 in/out split across two locations, generic path.
1511 // Need a single IO-range block.
1513 TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1);
1514 TRange componentRange(0, 3);
1515 if (qualifier.hasComponent() || type.getVectorSize() > 0) {
// Doubles consume two components each.
1516 int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1);
1517 if (qualifier.hasComponent())
1518 componentRange.start = qualifier.layoutComponent;
1519 componentRange.last  = componentRange.start + consumedComponents - 1;
1522 // combine location and component ranges
1523 TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.getIndex() : 0);
1525 // check for collisions, except for vertex inputs on desktop targeting OpenGL
1526 if (! (!isEsProfile() && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
1527 collision = checkLocationRange(set, range, type, typeCollision);
1530 usedIo[set].push_back(range);
1536 // Compare a new (the passed in) 'range' against the existing set, and see
1537 // if there are any collisions.
1539 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
// Compare 'range' against every recorded range in set 'set'.
// Returns < 0 for no collision; otherwise a colliding location value.
// If only the locations overlap with a different basic type (aliasing),
// 'typeCollision' is set instead of a hard overlap.
1541 int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision)
1543 for (size_t r = 0; r < usedIo[set].size(); ++r) {
1544 if (range.overlap(usedIo[set][r])) {
1545 // there is a collision; pick one
1546 return std::max(range.location.start, usedIo[set][r].location.start);
1547 } else if (range.location.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) {
1548 // aliased-type mismatch
1549 typeCollision = true;
1550 return std::max(range.location.start, usedIo[set][r].location.start);
1554 return -1; // no collision
// Check a single ray-tracing payload/callable location against the recorded
// ranges for set 'set'; returns -1 when it does not collide.
1557 int TIntermediate::checkLocationRT(int set, int location) {
1558 TRange range(location, location);
1559 for (size_t r = 0; r < usedIoRT[set].size(); ++r) {
1560 if (range.overlap(usedIoRT[set][r])) {
1564 return -1; // no collision
1567 // Accumulate bindings and offsets, and check for collisions
1568 // as the accumulation is done.
1570 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
// Record an atomic-counter (binding, offset..offset+numOffsets-1) range and
// check it against all previously recorded ranges.
// Returns < 0 for no collision, otherwise a colliding offset value.
1572 int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets)
1574 TRange bindingRange(binding, binding);
1575 TRange offsetRange(offset, offset + numOffsets - 1);
1576 TOffsetRange range(bindingRange, offsetRange);
1578 // check for collisions, except for vertex inputs on desktop
1579 for (size_t r = 0; r < usedAtomics.size(); ++r) {
1580 if (range.overlap(usedAtomics[r])) {
1581 // there is a collision; pick one
1582 return std::max(offset, usedAtomics[r].offset.start);
// No overlap found: remember this range for future checks.
1586 usedAtomics.push_back(range);
1588 return -1; // no collision
1591 // Accumulate used constant_id values.
1593 // Return false is one was already used.
// Record a specialization constant_id; reports (returns false) if the id was
// already used, otherwise records it.
1594 bool TIntermediate::addUsedConstantId(int id)
1596 if (usedConstantId.find(id) != usedConstantId.end())
1599 usedConstantId.insert(id);
1604 // Recursively figure out how many locations are used up by an input or output type.
1605 // Return the size of type, as measured by "locations".
// Recursively compute how many "locations" an input/output of 'type' consumes
// in 'stage', per the GLSL location-assignment rules quoted inline below:
// arrays multiply, structs sum their members, matrices count as arrays of
// column vectors, and dvec3/dvec4 take two locations except as vertex inputs.
1606 int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
1608 // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
1609 // consecutive locations..."
1610 if (type.isArray()) {
1611 // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1612 // TODO: are there valid cases of having an unsized array with a location? If so, running this code too early.
1613 TType elementType(type, 0);
1614 if (type.isSizedArray() && !type.getQualifier().isPerView())
1615 return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
1618 // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
1619 elementType.getQualifier().perViewNV = false;
1621 return computeTypeLocationSize(elementType, stage);
1625 // "The locations consumed by block and structure members are determined by applying the rules above
1627 if (type.isStruct()) {
// Sum the locations used by each member.
1629 for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
1630 TType memberType(type, member);
1631 size += computeTypeLocationSize(memberType, stage);
1636 // ES: "If a shader input is any scalar or vector type, it will consume a single location."
1638 // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
1639 // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
1640 // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
1641 // consume only a single location, in all stages."
1642 if (type.isScalar())
1644 if (type.isVector()) {
// Vertex-shader inputs always take one location, even dvec3/dvec4.
1645 if (stage == EShLangVertex && type.getQualifier().isPipeInput())
1647 if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
1653 // "If the declared input is an n x m single- or double-precision matrix, ...
1654 // The number of locations assigned for each matrix will be the same as
1655 // for an n-element array of m-component vectors..."
1656 if (type.isMatrix()) {
1657 TType columnType(type, 0);
1658 return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
1665 // Same as computeTypeLocationSize but for uniforms
1666 int TIntermediate::computeTypeUniformLocationSize(const TType& type)
1668 // "Individual elements of a uniform array are assigned
1669 // consecutive locations with the first element taking location
1671 if (type.isArray()) {
1672 // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1673 TType elementType(type, 0);
1674 if (type.isSizedArray()) {
1675 return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType);
1677 // TODO: are there valid cases of having an implicitly-sized array with a location? If so, running this code too early.
1678 return computeTypeUniformLocationSize(elementType);
1682 // "Each subsequent inner-most member or element gets incremental
1683 // locations for the entire structure or array."
1684 if (type.isStruct()) {
1686 for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
1687 TType memberType(type, member);
1688 size += computeTypeUniformLocationSize(memberType);
1698 // Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
1700 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1702 int TIntermediate::addXfbBufferOffset(const TType& type)
1704 const TQualifier& qualifier = type.getQualifier();
1706 assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
1707 TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
1709 // compute the range
1710 unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
1711 buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
1712 TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
1714 // check for collisions
1715 for (size_t r = 0; r < buffer.ranges.size(); ++r) {
1716 if (range.overlap(buffer.ranges[r])) {
1717 // there is a collision; pick an example to return
1718 return std::max(range.start, buffer.ranges[r].start);
1722 buffer.ranges.push_back(range);
1724 return -1; // no collision
1727 // Recursively figure out how many bytes of xfb buffer are used by the given type.
1728 // Return the size of type, in bytes.
1729 // Sets contains64BitType to true if the type contains a 64-bit data type.
1730 // Sets contains32BitType to true if the type contains a 32-bit data type.
1731 // Sets contains16BitType to true if the type contains a 16-bit data type.
1732 // N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
1733 unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
1735 // "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
1736 // and the space taken in the buffer will be a multiple of 8.
1737 // ...within the qualified entity, subsequent components are each
1738 // assigned, in order, to the next available offset aligned to a multiple of
1739 // that component's size. Aggregate types are flattened down to the component
1740 // level to get this sequence of components."
1742 if (type.isSizedArray()) {
1743 // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1744 // Unsized array use to xfb should be a compile error.
1745 TType elementType(type, 0);
1746 return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains16BitType, contains16BitType);
1749 if (type.isStruct()) {
1750 unsigned int size = 0;
1751 bool structContains64BitType = false;
1752 bool structContains32BitType = false;
1753 bool structContains16BitType = false;
1754 for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
1755 TType memberType(type, member);
1756 // "... if applied to
1757 // an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
1758 // and the space taken in the buffer will be a multiple of 8."
1759 bool memberContains64BitType = false;
1760 bool memberContains32BitType = false;
1761 bool memberContains16BitType = false;
1762 int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
1763 if (memberContains64BitType) {
1764 structContains64BitType = true;
1765 RoundToPow2(size, 8);
1766 } else if (memberContains32BitType) {
1767 structContains32BitType = true;
1768 RoundToPow2(size, 4);
1769 } else if (memberContains16BitType) {
1770 structContains16BitType = true;
1771 RoundToPow2(size, 2);
1776 if (structContains64BitType) {
1777 contains64BitType = true;
1778 RoundToPow2(size, 8);
1779 } else if (structContains32BitType) {
1780 contains32BitType = true;
1781 RoundToPow2(size, 4);
1782 } else if (structContains16BitType) {
1783 contains16BitType = true;
1784 RoundToPow2(size, 2);
1790 if (type.isScalar())
1792 else if (type.isVector())
1793 numComponents = type.getVectorSize();
1794 else if (type.isMatrix())
1795 numComponents = type.getMatrixCols() * type.getMatrixRows();
1801 if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
1802 contains64BitType = true;
1803 return 8 * numComponents;
1804 } else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
1805 contains16BitType = true;
1806 return 2 * numComponents;
1807 } else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8)
1808 return numComponents;
1810 contains32BitType = true;
1811 return 4 * numComponents;
1817 const int baseAlignmentVec4Std140 = 16;
1819 // Return the size and alignment of a component of the given type.
1820 // The size is returned in the 'size' parameter
1821 // Return value is the alignment..
1822 int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
1828 switch (type.getBasicType()) {
1831 case EbtDouble: size = 8; return 8;
1832 case EbtFloat16: size = 2; return 2;
1834 case EbtUint8: size = 1; return 1;
1836 case EbtUint16: size = 2; return 2;
1837 case EbtReference: size = 8; return 8;
1838 default: size = 4; return 4;
1842 // Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout
1843 // Operates recursively.
1845 // If std140 is true, it does the rounding up to vec4 size required by std140,
1846 // otherwise it does not, yielding std430 rules.
1848 // The size is returned in the 'size' parameter
1850 // The stride is only non-0 for arrays or matrices, and is the stride of the
1851 // top-level object nested within the type. E.g., for an array of matrices,
1852 // it is the distances needed between matrices, despite the rules saying the
1853 // stride comes from the flattening down to vectors.
1855 // Return value is the alignment of the type.
1856 int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
1860 bool std140 = layoutPacking == glslang::ElpStd140;
1861 // When using the std140 storage layout, structures will be laid out in buffer
1862 // storage with its members stored in monotonically increasing order based on their
1863 // location in the declaration. A structure and each structure member have a base
1864 // offset and a base alignment, from which an aligned offset is computed by rounding
1865 // the base offset up to a multiple of the base alignment. The base offset of the first
1866 // member of a structure is taken from the aligned offset of the structure itself. The
1867 // base offset of all other structure members is derived by taking the offset of the
1868 // last basic machine unit consumed by the previous member and adding one. Each
1869 // structure member is stored in memory at its aligned offset. The members of a top-
1870 // level uniform block are laid out in buffer storage by treating the uniform block as
1871 // a structure with a base offset of zero.
1873 // 1. If the member is a scalar consuming N basic machine units, the base alignment is N.
1875 // 2. If the member is a two- or four-component vector with components consuming N basic
1876 // machine units, the base alignment is 2N or 4N, respectively.
1878 // 3. If the member is a three-component vector with components consuming N
1879 // basic machine units, the base alignment is 4N.
1881 // 4. If the member is an array of scalars or vectors, the base alignment and array
1882 // stride are set to match the base alignment of a single array element, according
1883 // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
1884 // array may have padding at the end; the base offset of the member following
1885 // the array is rounded up to the next multiple of the base alignment.
1887 // 5. If the member is a column-major matrix with C columns and R rows, the
1888 // matrix is stored identically to an array of C column vectors with R
1889 // components each, according to rule (4).
1891 // 6. If the member is an array of S column-major matrices with C columns and
1892 // R rows, the matrix is stored identically to a row of S X C column vectors
1893 // with R components each, according to rule (4).
1895 // 7. If the member is a row-major matrix with C columns and R rows, the matrix
1896 // is stored identically to an array of R row vectors with C components each,
1897 // according to rule (4).
1899 // 8. If the member is an array of S row-major matrices with C columns and R
1900 // rows, the matrix is stored identically to a row of S X R row vectors with C
1901 // components each, according to rule (4).
1903 // 9. If the member is a structure, the base alignment of the structure is N , where
1904 // N is the largest base alignment value of any of its members, and rounded
1905 // up to the base alignment of a vec4. The individual members of this substructure
1906 // are then assigned offsets by applying this set of rules recursively,
1907 // where the base offset of the first member of the sub-structure is equal to the
1908 // aligned offset of the structure. The structure may have padding at the end;
1909 // the base offset of the member following the sub-structure is rounded up to
1910 // the next multiple of the base alignment of the structure.
1912 // 10. If the member is an array of S structures, the S elements of the array are laid
1913 // out in order, according to rule (9).
1915 // Assuming, for rule 10: The stride is the same as the size of an element.
1920 // rules 4, 6, 8, and 10
1921 if (type.isArray()) {
1922 // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
1923 TType derefType(type, 0);
1924 alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
1926 alignment = std::max(baseAlignmentVec4Std140, alignment);
1927 RoundToPow2(size, alignment);
1928 stride = size; // uses full matrix size for stride of an array of matrices (not quite what rule 6/8, but what's expected)
1929 // uses the assumption for rule 10 in the comment above
1930 // use one element to represent the last member of SSBO which is unsized array
1931 int arraySize = (type.isUnsizedArray() && (type.getOuterArraySize() == 0)) ? 1 : type.getOuterArraySize();
1932 size = stride * arraySize;
1937 if (type.getBasicType() == EbtStruct || type.getBasicType() == EbtBlock) {
1938 const TTypeList& memberList = *type.getStruct();
1941 int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
1942 for (size_t m = 0; m < memberList.size(); ++m) {
1944 // modify just the children's view of matrix layout, if there is one for this member
1945 TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
1946 int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
1947 (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
1948 maxAlignment = std::max(maxAlignment, memberAlignment);
1949 RoundToPow2(size, memberAlignment);
1953 // The structure may have padding at the end; the base offset of
1954 // the member following the sub-structure is rounded up to the next
1955 // multiple of the base alignment of the structure.
1956 RoundToPow2(size, maxAlignment);
1958 return maxAlignment;
1962 if (type.isScalar())
1963 return getBaseAlignmentScalar(type, size);
1966 if (type.isVector()) {
1967 int scalarAlign = getBaseAlignmentScalar(type, size);
1968 switch (type.getVectorSize()) {
1969 case 1: // HLSL has this, GLSL does not
1973 return 2 * scalarAlign;
1975 size *= type.getVectorSize();
1976 return 4 * scalarAlign;
1981 if (type.isMatrix()) {
1982 // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
1983 TType derefType(type, 0, rowMajor);
1985 alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
1987 alignment = std::max(baseAlignmentVec4Std140, alignment);
1988 RoundToPow2(size, alignment);
1989 stride = size; // use intra-matrix stride for stride of a just a matrix
1991 size = stride * type.getMatrixRows();
1993 size = stride * type.getMatrixCols();
1998 assert(0); // all cases should be covered above
1999 size = baseAlignmentVec4Std140;
2000 return baseAlignmentVec4Std140;
2003 // To aid the basic HLSL rule about crossing vec4 boundaries.
2004 bool TIntermediate::improperStraddle(const TType& type, int size, int offset)
2006 if (! type.isVector() || type.isArray())
2009 return size <= 16 ? offset / 16 != (offset + size - 1) / 16
2013 int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
2020 if (type.isArray()) {
2021 TType derefType(type, 0);
2022 alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
2025 RoundToPow2(stride, alignment);
2027 size = stride * (type.getOuterArraySize() - 1) + size;
2031 if (type.getBasicType() == EbtStruct) {
2032 const TTypeList& memberList = *type.getStruct();
2035 int maxAlignment = 0;
2036 for (size_t m = 0; m < memberList.size(); ++m) {
2038 // modify just the children's view of matrix layout, if there is one for this member
2039 TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
2040 int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
2041 (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
2042 maxAlignment = std::max(maxAlignment, memberAlignment);
2043 RoundToPow2(size, memberAlignment);
2047 return maxAlignment;
2050 if (type.isScalar())
2051 return getBaseAlignmentScalar(type, size);
2053 if (type.isVector()) {
2054 int scalarAlign = getBaseAlignmentScalar(type, size);
2056 size *= type.getVectorSize();
2060 if (type.isMatrix()) {
2061 TType derefType(type, 0, rowMajor);
2063 alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
2065 stride = size; // use intra-matrix stride for stride of a just a matrix
2067 size = stride * type.getMatrixRows();
2069 size = stride * type.getMatrixCols();
2074 assert(0); // all cases should be covered above
2079 int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
2081 if (layoutPacking == glslang::ElpScalar) {
2082 return getScalarAlignment(type, size, stride, rowMajor);
2084 return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
2088 // shared calculation by getOffset and getOffsets
2089 void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
2093 // modify just the children's view of matrix layout, if there is one for this member
2094 TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
2095 int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
2096 parentType.getQualifier().layoutPacking,
2097 subMatrixLayout != ElmNone
2098 ? subMatrixLayout == ElmRowMajor
2099 : parentType.getQualifier().layoutMatrix == ElmRowMajor);
2100 RoundToPow2(offset, memberAlignment);
2103 // Lookup or calculate the offset of a block member, using the recursively
2104 // defined block offset rules.
2105 int TIntermediate::getOffset(const TType& type, int index)
2107 const TTypeList& memberList = *type.getStruct();
2109 // Don't calculate offset if one is present, it could be user supplied
2110 // and different than what would be calculated. That is, this is faster,
2111 // but not just an optimization.
2112 if (memberList[index].type->getQualifier().hasOffset())
2113 return memberList[index].type->getQualifier().layoutOffset;
2117 for (int m = 0; m <= index; ++m) {
2118 updateOffset(type, *memberList[m].type, offset, memberSize);
2121 offset += memberSize;
2127 // Calculate the block data size.
2128 // Block arrayness is not taken into account, each element is backed by a separate buffer.
2129 int TIntermediate::getBlockSize(const TType& blockType)
2131 const TTypeList& memberList = *blockType.getStruct();
2132 int lastIndex = (int)memberList.size() - 1;
2133 int lastOffset = getOffset(blockType, lastIndex);
2137 getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
2138 blockType.getQualifier().layoutPacking,
2139 blockType.getQualifier().layoutMatrix == ElmRowMajor);
2141 return lastOffset + lastMemberSize;
2144 int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
2146 assert(type.isReference());
2147 int size = getBlockSize(*type.getReferentType());
2149 int align = type.getBufferReferenceAlignment();
2152 size = (size + align - 1) & ~(align-1);
2159 bool TIntermediate::isIoResizeArray(const TType& type, EShLanguage language) {
2160 return type.isArray() &&
2161 ((language == EShLangGeometry && type.getQualifier().storage == EvqVaryingIn) ||
2162 (language == EShLangTessControl && (type.getQualifier().storage == EvqVaryingIn || type.getQualifier().storage == EvqVaryingOut) &&
2163 ! type.getQualifier().patch) ||
2164 (language == EShLangTessEvaluation && type.getQualifier().storage == EvqVaryingIn) ||
2165 (language == EShLangFragment && type.getQualifier().storage == EvqVaryingIn &&
2166 type.getQualifier().pervertexNV) ||
2167 (language == EShLangMeshNV && type.getQualifier().storage == EvqVaryingOut &&
2168 !type.getQualifier().perTaskNV));
2170 #endif // not GLSLANG_WEB
2172 } // end namespace glslang