2 // Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
3 // Copyright (C) 2012-2015 LunarG, Inc.
4 // Copyright (C) 2015-2020 Google, Inc.
5 // Copyright (C) 2017 ARM Limited.
7 // All rights reserved.
9 // Redistribution and use in source and binary forms, with or without
10 // modification, are permitted provided that the following conditions
13 // Redistributions of source code must retain the above copyright
14 // notice, this list of conditions and the following disclaimer.
16 // Redistributions in binary form must reproduce the above
17 // copyright notice, this list of conditions and the following
18 // disclaimer in the documentation and/or other materials provided
19 // with the distribution.
21 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
22 // contributors may be used to endorse or promote products derived
23 // from this software without specific prior written permission.
25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
33 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
35 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 // POSSIBILITY OF SUCH DAMAGE.
40 // Build the intermediate representation.
43 #include "localintermediate.h"
44 #include "RemoveTree.h"
45 #include "SymbolTable.h"
46 #include "propagateNoContraction.h"
54 ////////////////////////////////////////////////////////////////////////////
56 // First set of functions are to help build the intermediate representation.
57 // These functions are not member functions of the nodes.
58 // They are called from parser productions.
60 /////////////////////////////////////////////////////////////////////////////
63 // Add a terminal node for an identifier in an expression.
65 // Returns the added node.
// Build a terminal TIntermSymbol node for an identifier, carrying an optional
// front-end constant value (constArray) and/or a constant-folding subtree
// (constSubtree). The overloads below all funnel into this one.
// NOTE(review): this dump elides interior lines (braces, setLoc/return, etc.);
// comments describe only the visible statements.
68 TIntermSymbol* TIntermediate::addSymbol(long long id, const TString& name, const TType& type, const TConstUnionArray& constArray,
69 TIntermTyped* constSubtree, const TSourceLoc& loc)
71 TIntermSymbol* node = new TIntermSymbol(id, name, type);
73 node->setConstArray(constArray);
74 node->setConstSubtree(constSubtree);
// Overload: clone-style add from an existing symbol node — forwards every
// visible property (id, name, type, constant data, location) to the primary overload.
79 TIntermSymbol* TIntermediate::addSymbol(const TIntermSymbol& intermSymbol)
81 return addSymbol(intermSymbol.getId(),
82 intermSymbol.getName(),
83 intermSymbol.getType(),
84 intermSymbol.getConstArray(),
85 intermSymbol.getConstSubtree(),
86 intermSymbol.getLoc())
89 TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable)
// Default-constructed TSourceLoc serves as a "no location" sentinel.
91 glslang::TSourceLoc loc; // just a null location
94 return addSymbol(variable, loc);
// Overload: add a symbol for a front-end TVariable at an explicit location.
97 TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable, const TSourceLoc& loc)
99 return addSymbol(variable.getUniqueId(), variable.getName(), variable.getType(), variable.getConstArray(), variable.getConstSubtree(), loc);
// Overload: anonymous symbol of a given type — id 0, empty name, null constant.
102 TIntermSymbol* TIntermediate::addSymbol(const TType& type, const TSourceLoc& loc)
104 TConstUnionArray unionArray; // just a null constant
106 return addSymbol(0, "", type, unionArray, nullptr, loc);
110 // Connect two nodes with a new parent that does a binary operation on the nodes.
112 // Returns the added node.
114 // Returns nullptr if the working conversions and promotions could not be found.
// Connect 'left' and 'right' under a new binary-operator node for 'op',
// performing implicit type/shape conversions, constant folding, and
// spec-constant/nonuniform qualifier propagation.
// Returns nullptr (per the header comment above) when no working
// conversion/promotion exists.
// NOTE(review): interior lines are elided in this view (early returns, braces);
// comments describe only the visible statements.
116 TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc)
118 // No operations work on blocks
119 if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
// Buffer-reference (GL_EXT_buffer_reference) pointer arithmetic: lower
// "reference +/- int" and "reference - reference" to 64-bit integer math.
122 // Convert "reference +/- int" and "reference - reference" to integer math
123 if (op == EOpAdd || op == EOpSub) {
125 // No addressing math on struct with unsized array.
126 if ((left->isReference() && left->getType().getReferentType()->containsUnsizedArray()) ||
127 (right->isReference() && right->getType().getReferentType()->containsUnsizedArray())) {
// Case 1: reference op int  ->  (ref-as-uint64) op (int64(int) * sizeof(referent)),
// then cast the result back to the original reference type.
131 if (left->isReference() && isTypeInt(right->getBasicType())) {
132 const TType& referenceType = left->getType();
133 TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
134 left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
136 right = createConversion(EbtInt64, right);
137 right = addBinaryMath(EOpMul, right, size, loc);
139 TIntermTyped *node = addBinaryMath(op, left, right, loc);
140 node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
// Case 2: int + reference (commuted form; only addition is commutative here).
145 if (op == EOpAdd && right->isReference() && isTypeInt(left->getBasicType())) {
146 const TType& referenceType = right->getType();
147 TIntermConstantUnion* size =
148 addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true);
149 right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
151 left = createConversion(EbtInt64, left);
152 left = addBinaryMath(EOpMul, left, size, loc);
154 TIntermTyped *node = addBinaryMath(op, left, right, loc);
155 node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
// Case 3: reference - reference  ->  signed element-count difference:
// (int64(ref) - int64(ref)) / sizeof(referent). Note the signed size constant.
159 if (op == EOpSub && left->isReference() && right->isReference()) {
160 TIntermConstantUnion* size =
161 addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
163 left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
164 right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
166 left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64));
167 right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64));
169 left = addBinaryMath(EOpSub, left, right, loc);
171 TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc);
175 // No other math operators supported on references
176 if (left->isReference() || right->isReference())
179 // Try converting the children's base types to compatible types.
180 auto children = addPairConversion(op, left, right);
181 left = std::get<0>(children);
182 right = std::get<1>(children);
// Either operand may come back nullptr when no implicit conversion exists.
184 if (left == nullptr || right == nullptr)
187 // Convert the children's type shape to be compatible.
// NOTE(review): addBiShapeConversion presumably updates left/right through
// references — confirm against its declaration; the nullptr re-check below
// implies it can fail.
188 addBiShapeConversion(op, left, right);
189 if (left == nullptr || right == nullptr)
193 // Need a new node holding things together. Make
194 // one and promote it to the right type.
196 TIntermBinary* node = addBinaryNode(op, left, right, loc);
200 node->updatePrecision();
203 // If they are both (non-specialization) constants, they must be folded.
204 // (Unless it's the sequence (comma) operator, but that's handled in addComma().)
206 TIntermConstantUnion *leftTempConstant = node->getLeft()->getAsConstantUnion();
207 TIntermConstantUnion *rightTempConstant = node->getRight()->getAsConstantUnion();
208 if (leftTempConstant && rightTempConstant) {
209 TIntermTyped* folded = leftTempConstant->fold(node->getOp(), rightTempConstant);
214 // If can propagate spec-constantness and if the operation is an allowed
215 // specialization-constant operation, make a spec-constant.
216 if (specConstantPropagates(*node->getLeft(), *node->getRight()) && isSpecializationOperation(*node))
217 node->getWritableType().getQualifier().makeSpecConstant();
219 // If must propagate nonuniform, make a nonuniform.
220 if ((node->getLeft()->getQualifier().isNonUniform() || node->getRight()->getQualifier().isNonUniform()) &&
221 isNonuniformPropagating(node->getOp()))
222 node->getWritableType().getQualifier().nonUniform = true;
228 // Low level: add binary node (no promotions or other argument modifications)
// Callers that need conversions/promotion use addBinaryMath()/addAssign() instead.
// NOTE(review): setLeft and the returns are among the lines elided in this view.
230 TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right,
231 const TSourceLoc& loc) const
234 TIntermBinary* node = new TIntermBinary(op);
// A zero line number marks a "null" location; fall back to the left child's.
235 node->setLoc(loc.line != 0 ? loc : left->getLoc());
237 node->setRight(right);
243 // like non-type form, but sets node's type.
245 TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right,
246 const TSourceLoc& loc, const TType& type) const
248 TIntermBinary* node = addBinaryNode(op, left, right, loc);
254 // Low level: add unary node (no promotions or other argument modifications)
256 TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc& loc) const
258 TIntermUnary* node = new TIntermUnary(op);
// Same null-location fallback as the binary form, using the operand's location.
259 node->setLoc(loc.line != 0 ? loc : child->getLoc());
260 node->setOperand(child);
266 // like non-type form, but sets node's type.
268 TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc& loc, const TType& type)
271 TIntermUnary* node = addUnaryNode(op, child, loc);
277 // Connect two nodes through an assignment.
279 // Returns the added node.
281 // Returns nullptr if the 'right' type could not be converted to match the 'left' type,
282 // or the resulting operation cannot be properly promoted.
// NOTE(review): early-return/brace lines are elided in this view; comments
// describe only the visible statements.
284 TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right,
285 const TSourceLoc& loc)
287 // No block assignment
288 if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
291 // Convert "reference += int" to "reference = reference + int". We need this because the
292 // "reference + int" calculation involves a cast back to the original type, which makes it
294 if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference()) {
// Only a scalar integer right-hand side is accepted for reference +=/-=.
295 if (!(right->getType().isScalar() && right->getType().isIntegerDomain()))
// Build "reference + int" / "reference - int" via the pointer-math path in addBinaryMath().
298 TIntermTyped* node = addBinaryMath(op == EOpAddAssign ? EOpAdd : EOpSub, left, right, loc);
// Re-add the LHS symbol so the assignment target is a fresh node, not the one
// already consumed inside the addition above.
302 TIntermSymbol* symbol = left->getAsSymbolNode();
303 left = addSymbol(*symbol);
305 node = addAssign(EOpAssign, left, node, loc);
310 // Like adding binary math, except the conversion can only go
311 // from right to left.
314 // convert base types, nullptr return means not possible
315 right = addConversion(op, left->getType(), right);
316 if (right == nullptr)
// Shape (scalar/vector/matrix) conversion is one-directional here as well.
320 right = addUniShapeConversion(op, left->getType(), right);
323 TIntermBinary* node = addBinaryNode(op, left, right, loc);
328 node->updatePrecision();
334 // Connect two nodes through an index operator, where the left node is the base
335 // of an array or struct, and the right node is a direct or indirect offset.
337 // Returns the added node.
338 // The caller should set the type of the returned node.
// Thin wrapper over the low-level addBinaryNode(); no conversions are applied
// and the result's type is deliberately left for the caller to set.
340 TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index,
341 const TSourceLoc& loc)
343 // caller should set the type
344 return addBinaryNode(op, base, index, loc);
348 // Add one node as the parent of another that it operates on.
350 // Returns the added node.
// Handles operator-specific operand validation, promotion for the
// EOpConstruct* conversion operators, constant folding, and
// spec-constant/nonuniform qualifier propagation.
// NOTE(review): the surrounding switch headers, early returns, and braces are
// elided in this view; comments describe only the visible statements.
352 TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child,
353 const TSourceLoc& loc)
355 if (child == nullptr)
// Unary ops don't apply to blocks either.
358 if (child->getType().getBasicType() == EbtBlock)
// Visible fragment of an operator switch: logical-not validation.
363 if (getSource() == EShSourceHlsl) {
364 break; // HLSL can promote logical not
// GLSL's logical not requires a scalar bool operand.
367 if (child->getType().getBasicType() != EbtBool || child->getType().isMatrix() || child->getType().isArray() || child->getType().isVector()) {
// ++/-- are rejected on structs and arrays.
372 case EOpPostIncrement:
373 case EOpPreIncrement:
374 case EOpPostDecrement:
375 case EOpPreDecrement:
377 if (child->getType().getBasicType() == EbtStruct || child->getType().isArray())
379 default: break; // some compilers want this
383 // Do we need to promote the operand?
// Map each scalar-conversion constructor to its destination basic type;
// EbtVoid below means "no promotion needed".
385 TBasicType newType = EbtVoid;
387 case EOpConstructBool: newType = EbtBool; break;
388 case EOpConstructFloat: newType = EbtFloat; break;
389 case EOpConstructInt: newType = EbtInt; break;
390 case EOpConstructUint: newType = EbtUint; break;
392 case EOpConstructInt8: newType = EbtInt8; break;
393 case EOpConstructUint8: newType = EbtUint8; break;
394 case EOpConstructInt16: newType = EbtInt16; break;
395 case EOpConstructUint16: newType = EbtUint16; break;
396 case EOpConstructInt64: newType = EbtInt64; break;
397 case EOpConstructUint64: newType = EbtUint64; break;
398 case EOpConstructDouble: newType = EbtDouble; break;
399 case EOpConstructFloat16: newType = EbtFloat16; break;
401 default: break; // some compilers want this
404 if (newType != EbtVoid) {
// Promote to the target basic type while preserving the child's shape
// (vector size / matrix dimensions).
405 child = addConversion(op, TType(newType, EvqTemporary, child->getVectorSize(),
406 child->getMatrixCols(),
407 child->getMatrixRows(),
410 if (child == nullptr)
415 // For constructors, we are now done, it was all in the conversion.
416 // TODO: but, did this bypass constant folding?
419 case EOpConstructInt8:
420 case EOpConstructUint8:
421 case EOpConstructInt16:
422 case EOpConstructUint16:
423 case EOpConstructInt:
424 case EOpConstructUint:
425 case EOpConstructInt64:
426 case EOpConstructUint64:
427 case EOpConstructBool:
428 case EOpConstructFloat:
429 case EOpConstructDouble:
430 case EOpConstructFloat16: {
// If the conversion produced a unary node, refresh its precision qualifier.
431 TIntermUnary* unary_node = child->getAsUnaryNode();
432 if (unary_node != nullptr)
433 unary_node->updatePrecision();
436 default: break; // some compilers want this
440 // Make a new node for the operator.
442 TIntermUnary* node = addUnaryNode(op, child, loc);
447 node->updatePrecision();
449 // If it's a (non-specialization) constant, it must be folded.
450 if (node->getOperand()->getAsConstantUnion())
451 return node->getOperand()->getAsConstantUnion()->fold(op, node->getType());
453 // If it's a specialization constant, the result is too,
454 // if the operation is allowed for specialization constants.
455 if (node->getOperand()->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node))
456 node->getWritableType().getQualifier().makeSpecConstant();
458 // If must propagate nonuniform, make a nonuniform.
459 if (node->getOperand()->getQualifier().isNonUniform() && isNonuniformPropagating(node->getOp()))
460 node->getWritableType().getQualifier().nonUniform = true;
// Build a call to a built-in mapped directly to operator 'op'.
// 'unary' selects between a single-operand node and an aggregate
// (multi-argument) call; 'returnType' is the built-in's declared result type.
// NOTE(review): the 'if (unary)' branch line itself is elided in this view.
465 TIntermTyped* TIntermediate::addBuiltInFunctionCall(const TSourceLoc& loc, TOperator op, bool unary,
466 TIntermNode* childNode, const TType& returnType)
470 // Treat it like a unary operator.
471 // addUnaryMath() should get the type correct on its own;
472 // including constness (which would differ from the prototype).
474 TIntermTyped* child = childNode->getAsTyped();
475 if (child == nullptr)
// Fold now if the single operand is a compile-time constant.
478 if (child->getAsConstantUnion()) {
479 TIntermTyped* folded = child->getAsConstantUnion()->fold(op, returnType);
484 return addUnaryNode(op, child, child->getLoc(), returnType);
486 // setAggregateOperater() calls fold() for constant folding
487 TIntermTyped* node = setAggregateOperator(childNode, op, returnType, loc);
494 // This is the safe way to change the operator on an aggregate, as it
495 // does lots of error checking and fixing. Especially for establishing
496 // a function call's operation on its set of parameters. Sequences
497 // of instructions are also aggregates, but they just directly set
498 // their operator to EOpSequence.
500 // Returns an aggregate node, which could be the one passed in if
501 // it was already an aggregate.
// NOTE(review): some brace/else lines are elided in this view.
503 TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type,
504 const TSourceLoc& loc)
506 TIntermAggregate* aggNode;
509 // Make sure we have an aggregate. If not turn it into one.
511 if (node != nullptr) {
512 aggNode = node->getAsAggregate();
// Only reuse the existing aggregate if its operator is still unset (EOpNull);
// otherwise wrap 'node' in a fresh aggregate so we don't clobber an operator.
513 if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
515 // Make an aggregate containing this node.
517 aggNode = new TIntermAggregate();
518 aggNode->getSequence().push_back(node);
// No node given at all: start with an empty aggregate.
521 aggNode = new TIntermAggregate();
526 aggNode->setOperator(op);
// Prefer the explicit location; fall back to the node's own when loc is null.
527 if (loc.line != 0 || node != nullptr)
528 aggNode->setLoc(loc.line != 0 ? loc : node->getLoc());
530 aggNode->setType(type);
// fold() gets a chance to constant-fold the finished aggregate.
532 return fold(aggNode);
// Policy check: may operator 'op' implicitly convert a node of this basic type
// at all? Mostly about rejecting opaque types (samplers, etc.).
// NOTE(review): the switch's case labels and return statements are elided in
// this view; only the opaque-type special cases are visible.
535 bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const
538 // Does the base type even allow the operation?
540 switch (node->getBasicType()) {
546 // opaque types can be passed to functions
547 if (op == EOpFunction)
550 // HLSL can assign samplers directly (no constructor)
551 if (getSource() == EShSourceHlsl && node->getBasicType() == EbtSampler)
554 // samplers can get assigned via a sampler constructor
555 // (well, not yet, but code in the rest of this function is ready for it)
556 if (node->getBasicType() == EbtSampler && op == EOpAssign &&
557 node->getAsOperator() != nullptr && node->getAsOperator()->getOp() == EOpConstructTextureSampler)
560 // otherwise, opaque types can't even be operated on, let alone converted
// Map a (src -> dst) basic-type pair to the corresponding EOpConv* operator,
// writing it through 'newOp'. Returns whether a conversion operator exists
// (per the call site in createConversion(), a false return aborts conversion).
// NOTE(review): the outer switch(dst) / inner switch(src) headers, 'return'
// lines, and default cases are elided in this view; the case groups below are
// identified by the destination type named in each EOpConv*To* operator.
569 bool TIntermediate::buildConvertOp(TBasicType dst, TBasicType src, TOperator& newOp) const
// dst == EbtDouble
575 case EbtUint: newOp = EOpConvUintToDouble; break;
576 case EbtBool: newOp = EOpConvBoolToDouble; break;
577 case EbtFloat: newOp = EOpConvFloatToDouble; break;
578 case EbtInt: newOp = EOpConvIntToDouble; break;
579 case EbtInt8: newOp = EOpConvInt8ToDouble; break;
580 case EbtUint8: newOp = EOpConvUint8ToDouble; break;
581 case EbtInt16: newOp = EOpConvInt16ToDouble; break;
582 case EbtUint16: newOp = EOpConvUint16ToDouble; break;
583 case EbtFloat16: newOp = EOpConvFloat16ToDouble; break;
584 case EbtInt64: newOp = EOpConvInt64ToDouble; break;
585 case EbtUint64: newOp = EOpConvUint64ToDouble; break;
// dst == EbtFloat
593 case EbtInt: newOp = EOpConvIntToFloat; break;
594 case EbtUint: newOp = EOpConvUintToFloat; break;
595 case EbtBool: newOp = EOpConvBoolToFloat; break;
597 case EbtDouble: newOp = EOpConvDoubleToFloat; break;
598 case EbtInt8: newOp = EOpConvInt8ToFloat; break;
599 case EbtUint8: newOp = EOpConvUint8ToFloat; break;
600 case EbtInt16: newOp = EOpConvInt16ToFloat; break;
601 case EbtUint16: newOp = EOpConvUint16ToFloat; break;
602 case EbtFloat16: newOp = EOpConvFloat16ToFloat; break;
603 case EbtInt64: newOp = EOpConvInt64ToFloat; break;
604 case EbtUint64: newOp = EOpConvUint64ToFloat; break;
// dst == EbtFloat16
613 case EbtInt8: newOp = EOpConvInt8ToFloat16; break;
614 case EbtUint8: newOp = EOpConvUint8ToFloat16; break;
615 case EbtInt16: newOp = EOpConvInt16ToFloat16; break;
616 case EbtUint16: newOp = EOpConvUint16ToFloat16; break;
617 case EbtInt: newOp = EOpConvIntToFloat16; break;
618 case EbtUint: newOp = EOpConvUintToFloat16; break;
619 case EbtBool: newOp = EOpConvBoolToFloat16; break;
620 case EbtFloat: newOp = EOpConvFloatToFloat16; break;
621 case EbtDouble: newOp = EOpConvDoubleToFloat16; break;
622 case EbtInt64: newOp = EOpConvInt64ToFloat16; break;
623 case EbtUint64: newOp = EOpConvUint64ToFloat16; break;
// dst == EbtBool
631 case EbtInt: newOp = EOpConvIntToBool; break;
632 case EbtUint: newOp = EOpConvUintToBool; break;
633 case EbtFloat: newOp = EOpConvFloatToBool; break;
635 case EbtDouble: newOp = EOpConvDoubleToBool; break;
636 case EbtInt8: newOp = EOpConvInt8ToBool; break;
637 case EbtUint8: newOp = EOpConvUint8ToBool; break;
638 case EbtInt16: newOp = EOpConvInt16ToBool; break;
639 case EbtUint16: newOp = EOpConvUint16ToBool; break;
640 case EbtFloat16: newOp = EOpConvFloat16ToBool; break;
641 case EbtInt64: newOp = EOpConvInt64ToBool; break;
642 case EbtUint64: newOp = EOpConvUint64ToBool; break;
// dst == EbtInt8
651 case EbtUint8: newOp = EOpConvUint8ToInt8; break;
652 case EbtInt16: newOp = EOpConvInt16ToInt8; break;
653 case EbtUint16: newOp = EOpConvUint16ToInt8; break;
654 case EbtInt: newOp = EOpConvIntToInt8; break;
655 case EbtUint: newOp = EOpConvUintToInt8; break;
656 case EbtInt64: newOp = EOpConvInt64ToInt8; break;
657 case EbtUint64: newOp = EOpConvUint64ToInt8; break;
658 case EbtBool: newOp = EOpConvBoolToInt8; break;
659 case EbtFloat: newOp = EOpConvFloatToInt8; break;
660 case EbtDouble: newOp = EOpConvDoubleToInt8; break;
661 case EbtFloat16: newOp = EOpConvFloat16ToInt8; break;
// dst == EbtUint8
668 case EbtInt8: newOp = EOpConvInt8ToUint8; break;
669 case EbtInt16: newOp = EOpConvInt16ToUint8; break;
670 case EbtUint16: newOp = EOpConvUint16ToUint8; break;
671 case EbtInt: newOp = EOpConvIntToUint8; break;
672 case EbtUint: newOp = EOpConvUintToUint8; break;
673 case EbtInt64: newOp = EOpConvInt64ToUint8; break;
674 case EbtUint64: newOp = EOpConvUint64ToUint8; break;
675 case EbtBool: newOp = EOpConvBoolToUint8; break;
676 case EbtFloat: newOp = EOpConvFloatToUint8; break;
677 case EbtDouble: newOp = EOpConvDoubleToUint8; break;
678 case EbtFloat16: newOp = EOpConvFloat16ToUint8; break;
// dst == EbtInt16
686 case EbtUint8: newOp = EOpConvUint8ToInt16; break;
687 case EbtInt8: newOp = EOpConvInt8ToInt16; break;
688 case EbtUint16: newOp = EOpConvUint16ToInt16; break;
689 case EbtInt: newOp = EOpConvIntToInt16; break;
690 case EbtUint: newOp = EOpConvUintToInt16; break;
691 case EbtInt64: newOp = EOpConvInt64ToInt16; break;
692 case EbtUint64: newOp = EOpConvUint64ToInt16; break;
693 case EbtBool: newOp = EOpConvBoolToInt16; break;
694 case EbtFloat: newOp = EOpConvFloatToInt16; break;
695 case EbtDouble: newOp = EOpConvDoubleToInt16; break;
696 case EbtFloat16: newOp = EOpConvFloat16ToInt16; break;
// dst == EbtUint16
703 case EbtInt8: newOp = EOpConvInt8ToUint16; break;
704 case EbtUint8: newOp = EOpConvUint8ToUint16; break;
705 case EbtInt16: newOp = EOpConvInt16ToUint16; break;
706 case EbtInt: newOp = EOpConvIntToUint16; break;
707 case EbtUint: newOp = EOpConvUintToUint16; break;
708 case EbtInt64: newOp = EOpConvInt64ToUint16; break;
709 case EbtUint64: newOp = EOpConvUint64ToUint16; break;
710 case EbtBool: newOp = EOpConvBoolToUint16; break;
711 case EbtFloat: newOp = EOpConvFloatToUint16; break;
712 case EbtDouble: newOp = EOpConvDoubleToUint16; break;
713 case EbtFloat16: newOp = EOpConvFloat16ToUint16; break;
// dst == EbtInt
722 case EbtUint: newOp = EOpConvUintToInt; break;
723 case EbtBool: newOp = EOpConvBoolToInt; break;
724 case EbtFloat: newOp = EOpConvFloatToInt; break;
726 case EbtInt8: newOp = EOpConvInt8ToInt; break;
727 case EbtUint8: newOp = EOpConvUint8ToInt; break;
728 case EbtInt16: newOp = EOpConvInt16ToInt; break;
729 case EbtUint16: newOp = EOpConvUint16ToInt; break;
730 case EbtDouble: newOp = EOpConvDoubleToInt; break;
731 case EbtFloat16: newOp = EOpConvFloat16ToInt; break;
732 case EbtInt64: newOp = EOpConvInt64ToInt; break;
733 case EbtUint64: newOp = EOpConvUint64ToInt; break;
// dst == EbtUint
741 case EbtInt: newOp = EOpConvIntToUint; break;
742 case EbtBool: newOp = EOpConvBoolToUint; break;
743 case EbtFloat: newOp = EOpConvFloatToUint; break;
745 case EbtInt8: newOp = EOpConvInt8ToUint; break;
746 case EbtUint8: newOp = EOpConvUint8ToUint; break;
747 case EbtInt16: newOp = EOpConvInt16ToUint; break;
748 case EbtUint16: newOp = EOpConvUint16ToUint; break;
749 case EbtDouble: newOp = EOpConvDoubleToUint; break;
750 case EbtFloat16: newOp = EOpConvFloat16ToUint; break;
751 case EbtInt64: newOp = EOpConvInt64ToUint; break;
752 case EbtUint64: newOp = EOpConvUint64ToUint; break;
// dst == EbtInt64
761 case EbtInt8: newOp = EOpConvInt8ToInt64; break;
762 case EbtUint8: newOp = EOpConvUint8ToInt64; break;
763 case EbtInt16: newOp = EOpConvInt16ToInt64; break;
764 case EbtUint16: newOp = EOpConvUint16ToInt64; break;
765 case EbtInt: newOp = EOpConvIntToInt64; break;
766 case EbtUint: newOp = EOpConvUintToInt64; break;
767 case EbtBool: newOp = EOpConvBoolToInt64; break;
768 case EbtFloat: newOp = EOpConvFloatToInt64; break;
769 case EbtDouble: newOp = EOpConvDoubleToInt64; break;
770 case EbtFloat16: newOp = EOpConvFloat16ToInt64; break;
771 case EbtUint64: newOp = EOpConvUint64ToInt64; break;
// dst == EbtUint64
778 case EbtInt8: newOp = EOpConvInt8ToUint64; break;
779 case EbtUint8: newOp = EOpConvUint8ToUint64; break;
780 case EbtInt16: newOp = EOpConvInt16ToUint64; break;
781 case EbtUint16: newOp = EOpConvUint16ToUint64; break;
782 case EbtInt: newOp = EOpConvIntToUint64; break;
783 case EbtUint: newOp = EOpConvUintToUint64; break;
784 case EbtBool: newOp = EOpConvBoolToUint64; break;
785 case EbtFloat: newOp = EOpConvFloatToUint64; break;
786 case EbtDouble: newOp = EOpConvDoubleToUint64; break;
787 case EbtFloat16: newOp = EOpConvFloat16ToUint64; break;
788 case EbtInt64: newOp = EOpConvInt64ToUint64; break;
800 // This is 'mechanism' here, it does any conversion told.
801 // It is about basic type, not about shape.
802 // The policy comes from the shader or the calling code.
// Returns a new unary conversion node (or a folded constant); the gating
// below rejects 8/16-bit arithmetic when the corresponding extensions'
// arithmetic support is not enabled.
// NOTE(review): error paths / returns inside the gating 'if's are elided in
// this view.
803 TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped* node) const
806 // Add a new newNode for the conversion.
// Classify source and destination as integer vs. floating families; used to
// decide when 8/16-bit arithmetic capability is actually required.
810 bool convertToIntTypes = (convertTo == EbtInt8 || convertTo == EbtUint8 ||
811 convertTo == EbtInt16 || convertTo == EbtUint16 ||
812 convertTo == EbtInt || convertTo == EbtUint ||
813 convertTo == EbtInt64 || convertTo == EbtUint64);
815 bool convertFromIntTypes = (node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8 ||
816 node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16 ||
817 node->getBasicType() == EbtInt || node->getBasicType() == EbtUint ||
818 node->getBasicType() == EbtInt64 || node->getBasicType() == EbtUint64);
820 bool convertToFloatTypes = (convertTo == EbtFloat16 || convertTo == EbtFloat || convertTo == EbtDouble);
822 bool convertFromFloatTypes = (node->getBasicType() == EbtFloat16 ||
823 node->getBasicType() == EbtFloat ||
824 node->getBasicType() == EbtDouble);
// int8/uint8 conversions crossing the integer family need int8 arithmetic support.
826 if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) ||
827 ((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes)) {
828 if (! getArithemeticInt8Enabled()) {
// Same gating for int16/uint16.
833 if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! convertFromIntTypes) ||
834 ((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! convertToIntTypes)) {
835 if (! getArithemeticInt16Enabled()) {
// And for float16 crossing the float family.
840 if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) ||
841 (node->getBasicType() == EbtFloat16 && ! convertToFloatTypes)) {
842 if (! getArithemeticFloat16Enabled()) {
848 TIntermUnary* newNode = nullptr;
849 TOperator newOp = EOpNull;
// buildConvertOp() supplies the EOpConv* operator; failure aborts.
850 if (!buildConvertOp(convertTo, node->getBasicType(), newOp)) {
// New type keeps the source's shape (vector size / matrix dims).
854 TType newType(convertTo, EvqTemporary, node->getVectorSize(), node->getMatrixCols(), node->getMatrixRows());
855 newNode = addUnaryNode(newOp, node, node->getLoc(), newType);
857 if (node->getAsConstantUnion()) {
859 // 8/16-bit storage extensions don't support 8/16-bit constants, so don't fold conversions
861 if ((getArithemeticInt8Enabled() || !(convertTo == EbtInt8 || convertTo == EbtUint8)) &&
862 (getArithemeticInt16Enabled() || !(convertTo == EbtInt16 || convertTo == EbtUint16)) &&
863 (getArithemeticFloat16Enabled() || !(convertTo == EbtFloat16)))
866 TIntermTyped* folded = node->getAsConstantUnion()->fold(newOp, newType);
872 // Propagate specialization-constant-ness, if allowed
873 if (node->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*newNode))
874 newNode->getWritableType().getQualifier().makeSpecConstant();
// Public wrapper: basic-type conversion is pure mechanism, so it just
// delegates to createConversion().
879 TIntermTyped* TIntermediate::addConversion(TBasicType convertTo, TIntermTyped* node) const
881 return createConversion(convertTo, node);
884 // For converting a pair of operands to a binary operation to compatible
885 // types with each other, relative to the operation in 'op'.
886 // This does not cover assignment operations, which is asymmetric in that the
887 // left type is not changeable.
888 // See addConversion(op, type, node) for assignments and unary operation
891 // Generally, this is focused on basic type conversion, not shape conversion.
892 // See addShapeConversion() for shape conversions.
894 // Returns the converted pair of nodes.
895 // Returns <nullptr, nullptr> when there is no conversion.
// NOTE(review): the operator-switch header, many case labels, and default
// branches are elided in this view; comments describe only the visible lines.
896 std::tuple<TIntermTyped*, TIntermTyped*>
897 TIntermediate::addPairConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1)
899 if (!isConversionAllowed(op, node0) || !isConversionAllowed(op, node1))
900 return std::make_tuple(nullptr, nullptr);
902 if (node0->getType() != node1->getType()) {
903 // If differing structure, then no conversions.
904 if (node0->isStruct() || node1->isStruct())
905 return std::make_tuple(nullptr, nullptr);
907 // If differing arrays, then no conversions.
908 if (node0->getType().isArray() || node1->getType().isArray())
909 return std::make_tuple(nullptr, nullptr);
911 // No implicit conversions for operations involving cooperative matrices
912 if (node0->getType().isCoopMat() || node1->getType().isCoopMat())
913 return std::make_tuple(node0, node1);
// EbtNumTypes doubles as a "no destination chosen" sentinel here.
916 auto promoteTo = std::make_tuple(EbtNumTypes, EbtNumTypes);
920 // List all the binary ops that can implicitly convert one operand to the other's type;
921 // This implements the 'policy' for implicit type conversion.
925 case EOpLessThanEqual:
926 case EOpGreaterThanEqual:
936 case EOpVectorTimesScalar:
937 case EOpVectorTimesMatrix:
938 case EOpMatrixTimesVector:
939 case EOpMatrixTimesScalar:
945 case EOpSequence: // used by ?:
// Matching basic types need no promotion at all.
947 if (node0->getBasicType() == node1->getBasicType())
948 return std::make_tuple(node0, node1);
// Ask the policy layer which common type to promote both operands to.
950 promoteTo = getConversionDestinationType(node0->getBasicType(), node1->getBasicType(), op);
951 if (std::get<0>(promoteTo) == EbtNumTypes || std::get<1>(promoteTo) == EbtNumTypes)
952 return std::make_tuple(nullptr, nullptr);
// Visible fragment of the logical-op cases: HLSL promotes both sides to bool.
959 if (getSource() == EShSourceHlsl)
960 promoteTo = std::make_tuple(EbtBool, EbtBool);
962 return std::make_tuple(node0, node1);
965 // There are no conversions needed for GLSL; the shift amount just needs to be an
966 // integer type, as does the base.
967 // HLSL can promote bools to ints to make this work.
970 if (getSource() == EShSourceHlsl) {
971 TBasicType node0BasicType = node0->getBasicType();
972 if (node0BasicType == EbtBool)
973 node0BasicType = EbtInt;
974 if (node1->getBasicType() == EbtBool)
975 promoteTo = std::make_tuple(node0BasicType, EbtInt);
977 promoteTo = std::make_tuple(node0BasicType, node1->getBasicType());
// GLSL shifts: both operands just have to already be integer types.
979 if (isTypeInt(node0->getBasicType()) && isTypeInt(node1->getBasicType()))
980 return std::make_tuple(node0, node1);
982 return std::make_tuple(nullptr, nullptr);
// Default policy (visible fragment): types must match exactly.
987 if (node0->getType() == node1->getType())
988 return std::make_tuple(node0, node1);
990 return std::make_tuple(nullptr, nullptr);
993 TIntermTyped* newNode0;
994 TIntermTyped* newNode1;
// Apply the chosen promotions; constants go through promoteConstantUnion so
// they stay foldable, everything else gets a conversion node.
996 if (std::get<0>(promoteTo) != node0->getType().getBasicType()) {
997 if (node0->getAsConstantUnion())
998 newNode0 = promoteConstantUnion(std::get<0>(promoteTo), node0->getAsConstantUnion());
1000 newNode0 = createConversion(std::get<0>(promoteTo), node0);
1004 if (std::get<1>(promoteTo) != node1->getType().getBasicType()) {
1005 if (node1->getAsConstantUnion())
1006 newNode1 = promoteConstantUnion(std::get<1>(promoteTo), node1->getAsConstantUnion());
1008 newNode1 = createConversion(std::get<1>(promoteTo), node1);
1012 return std::make_tuple(newNode0, newNode1);
1016 // Convert the node's type to the given type, as allowed by the operation involved: 'op'.
1017 // For implicit conversions, 'op' is not the requested conversion, it is the explicit
1018 // operation requiring the implicit conversion.
1020 // Binary operation conversions should be handled by addConversion(op, node, node), not here.
1022 // Returns a node representing the conversion, which could be the same
1023 // node passed in if no conversion was needed.
1025 // Generally, this is focused on basic type conversion, not shape conversion.
1026 // See addShapeConversion() for shape conversions.
1028 // Return nullptr if a conversion can't be done.
// Convert 'node' to basic type 'type' as permitted by operation 'op': explicit
// constructor ops convert outright; the listed operators trigger implicit
// conversion via canImplicitlyPromote(). Constants are folded through
// promoteConstantUnion(); otherwise a conversion node is built by createConversion().
// NOTE(review): this excerpt is decimated — gaps in the embedded line numbers show
// that braces, returns and breaks between the visible lines are elided.
1030 TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TIntermTyped* node)
// Bail out early for operations where conversion is disallowed.
1032 if (!isConversionAllowed(op, node))
1035 // Otherwise, if types are identical, no problem
1036 if (type == node->getType())
1039 // If one's a structure, then no conversions.
1040 if (type.isStruct() || node->isStruct())
1043 // If one's an array, then no conversions.
1044 if (type.isArray() || node->getType().isArray())
1047 // Note: callers are responsible for other aspects of shape,
1048 // like vector and matrix sizes.
1052 // Explicit conversions (unary operations)
1054 case EOpConstructBool:
1055 case EOpConstructFloat:
1056 case EOpConstructInt:
1057 case EOpConstructUint:
1059 case EOpConstructDouble:
1060 case EOpConstructFloat16:
1061 case EOpConstructInt8:
1062 case EOpConstructUint8:
1063 case EOpConstructInt16:
1064 case EOpConstructUint16:
1065 case EOpConstructInt64:
1066 case EOpConstructUint64:
1072 // Implicit conversions
1076 case EOpFunctionCall:
1083 case EOpVectorTimesScalarAssign:
1084 case EOpMatrixTimesScalarAssign:
1088 case EOpInclusiveOrAssign:
1089 case EOpExclusiveOrAssign:
1097 case EOpFaceForward:
1114 case EOpConstructStruct:
1115 case EOpConstructCooperativeMatrix:
// References never convert: only an exact type match may be assigned.
1117 if (type.isReference() || node->getType().isReference()) {
1118 // types must match to assign a reference
1119 if (type == node->getType())
1125 if (type.getBasicType() == node->getType().getBasicType())
1128 if (! canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op))
1132 // For GLSL, there are no conversions needed; the shift amount just needs to be an
1133 // integer type, as do the base/result.
1134 // HLSL can convert the shift from a bool to an int.
1135 case EOpLeftShiftAssign:
1136 case EOpRightShiftAssign:
1138 if (!(getSource() == EShSourceHlsl && node->getType().getBasicType() == EbtBool)) {
1139 if (isTypeInt(type.getBasicType()) && isTypeInt(node->getBasicType()))
1148 // default is to require a match; all exceptions should have case statements above
1150 if (type.getBasicType() == node->getType().getBasicType())
// Whether a constant operand may be folded directly to the target type.
1156 bool canPromoteConstant = true;
1158 // GL_EXT_shader_16bit_storage can't do OpConstantComposite with
1159 // 16-bit types, so disable promotion for those types.
1160 // Many issues with this, from JohnK:
1161 // - this isn't really right to discuss SPIR-V here
1162 // - this could easily be entirely about scalars, so is overstepping
1163 // - we should be looking at what the shader asked for, and saying whether or
1164 // not it can be done, in the parser, by calling requireExtensions(), not
1165 // changing language semantics on the fly by asking what extensions are in use
1166 // - at the time of this writing (14-Aug-2020), no test results are changed by this.
1168 case EOpConstructFloat16:
1169 canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
1170 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float16);
1172 case EOpConstructInt8:
1173 case EOpConstructUint8:
1174 canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
1175 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int8);
1177 case EOpConstructInt16:
1178 case EOpConstructUint16:
1179 canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
1180 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int16);
// Fold constants when permitted; otherwise build an explicit conversion node.
1187 if (canPromoteConstant && node->getAsConstantUnion())
1188 return promoteConstantUnion(type.getBasicType(), node->getAsConstantUnion());
1191 // Add a new newNode for the conversion.
1193 TIntermTyped* newNode = createConversion(type.getBasicType(), node);
1198 // Convert the node's shape of type for the given type, as allowed by the
1199 // operation involved: 'op'. This is for situations where there is only one
1200 // direction to consider doing the shape conversion.
1202 // This implements policy; it calls addShapeConversion() for the mechanism.
1204 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
1205 // for GLSL. Bad shapes are caught in conversion or promotion.
1207 // Return 'node' if no conversion was done. Promotion handles final shape
// One-directional shape conversion: convert only 'node' toward 'type', as the
// operation 'op' allows; delegates the mechanism to addShapeConversion().
// NOTE(review): excerpt is decimated — case labels / returns between visible lines are elided.
1210 TIntermTyped* TIntermediate::addUniShapeConversion(TOperator op, const TType& type, TIntermTyped* node)
1212 // some source languages don't do this
1213 switch (getSource()) {
1221 // some operations don't do this
1223 case EOpFunctionCall:
1228 // want to support vector *= scalar native ops in AST and lower, not smear, similarly for
1229 // matrix *= scalar, etc.
1235 case EOpInclusiveOrAssign:
1236 case EOpExclusiveOrAssign:
1237 case EOpRightShiftAssign:
1238 case EOpLeftShiftAssign:
// Keep a scalar (vec1) rhs as-is for the ops above rather than smearing it.
1239 if (node->getVectorSize() == 1)
1253 return addShapeConversion(type, node);
1256 // Convert the nodes' shapes to be compatible for the operation 'op'.
1258 // This implements policy; it calls addShapeConversion() for the mechanism.
1260 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
1261 // for GLSL. Bad shapes are caught in conversion or promotion.
// Bidirectional shape conversion: make the shapes of 'lhsNode' and 'rhsNode'
// compatible for 'op', mutating the node pointers in place. Assignment-like ops
// fall back to unidirectional conversion since the lhs cannot change.
// NOTE(review): excerpt is decimated — case labels / returns between visible lines are elided.
1263 void TIntermediate::addBiShapeConversion(TOperator op, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode)
1265 // some source languages don't do this
1266 switch (getSource()) {
1274 // some operations don't do this
1275 // 'break' will mean attempt bidirectional conversion
1283 case EOpInclusiveOrAssign:
1284 case EOpExclusiveOrAssign:
1285 case EOpRightShiftAssign:
1286 case EOpLeftShiftAssign:
1287 // switch to unidirectional conversion (the lhs can't change)
1288 rhsNode = addUniShapeConversion(op, lhsNode->getType(), rhsNode);
1292 // matrix multiply does not change shapes
1293 if (lhsNode->isMatrix() && rhsNode->isMatrix())
1298 // want to support vector * scalar native ops in AST and lower, not smear, similarly for
1299 // matrix * vector, etc.
1300 if (lhsNode->getVectorSize() == 1 || rhsNode->getVectorSize() == 1)
1306 // can natively support the right operand being a scalar and the left a vector,
1307 // but not the reverse
1308 if (rhsNode->getVectorSize() == 1)
1313 case EOpGreaterThan:
1314 case EOpLessThanEqual:
1315 case EOpGreaterThanEqual:
1325 case EOpInclusiveOr:
1326 case EOpExclusiveOr:
1335 // Do bidirectional conversions
1336 if (lhsNode->getType().isScalarOrVec1() || rhsNode->getType().isScalarOrVec1()) {
1337 if (lhsNode->getType().isScalarOrVec1())
1338 lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
1340 rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
1342 lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
1343 rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
1346 // Convert the node's shape of type for the given type, as allowed by the
1347 // operation involved: 'op'.
1349 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
1350 // for GLSL. Bad shapes are caught in conversion or promotion.
1352 // Return 'node' if no conversion was done. Promotion handles final shape
// The mechanism for shape conversion: wrap 'node' in a constructor aggregate
// targeting 'type' (smear scalar -> vector, truncate vector/matrix, and the
// HLSL-specific rules enumerated inline below). Returns 'node' unchanged when
// no conversion is needed or possible.
// NOTE(review): excerpt is decimated — returns/braces between visible lines are elided.
1355 TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped* node)
1357 // no conversion needed
1358 if (node->getType() == type)
1361 // structures and arrays don't change shape, either to or from
1362 if (node->getType().isStruct() || node->getType().isArray() ||
1363 type.isStruct() || type.isArray())
1366 // The new node that handles the conversion
1367 TOperator constructorOp = mapTypeToConstructorOp(type);
1369 if (getSource() == EShSourceHlsl) {
1370 // HLSL rules for scalar, vector and matrix conversions:
1371 // 1) scalar can become anything, initializing every component with its value
1372 // 2) vector and matrix can become scalar, first element is used (warning: truncation)
1373 // 3) matrix can become matrix with less rows and/or columns (warning: truncation)
1374 // 4) vector can become vector with less rows size (warning: truncation)
1375 // 5a) vector 4 can become 2x2 matrix (special case) (same packing layout, its a reinterpret)
1376 // 5b) 2x2 matrix can become vector 4 (special case) (same packing layout, its a reinterpret)
1378 const TType &sourceType = node->getType();
1380 // rule 1 for scalar to matrix is special
1381 if (sourceType.isScalarOrVec1() && type.isMatrix()) {
1383 // HLSL semantics: the scalar (or vec1) is replicated to every component of the matrix. Left to its
1384 // own devices, the constructor from a scalar would populate the diagonal. This forces replication
1385 // to every matrix element.
1387 // Note that if the node is complex (e.g, a function call), we don't want to duplicate it here
1388 // repeatedly, so we copy it to a temp, then use the temp.
1389 const int matSize = type.computeNumComponents();
1390 TIntermAggregate* rhsAggregate = new TIntermAggregate();
1392 const bool isSimple = (node->getAsSymbolNode() != nullptr) || (node->getAsConstantUnion() != nullptr);
1395 assert(0); // TODO: use node replicator service when available.
// Replicate the (simple) scalar node once per matrix component.
1398 for (int x = 0; x < matSize; ++x)
1399 rhsAggregate->getSequence().push_back(node);
1401 return setAggregateOperator(rhsAggregate, constructorOp, type, node->getLoc());
// Scalar <-> non-scalar conversions wrap the node in a constructor.
1405 if ((sourceType.isScalar() && !type.isScalar()) || (!sourceType.isScalar() && type.isScalar()))
1406 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc())
1409 if (sourceType.isMatrix()) {
// Rule 3: matrix may shrink to a matrix with fewer rows and/or columns.
1411 if (type.isMatrix()) {
1412 if ((sourceType.getMatrixCols() != type.getMatrixCols() || sourceType.getMatrixRows() != type.getMatrixRows()) &&
1413 sourceType.getMatrixCols() >= type.getMatrixCols() && sourceType.getMatrixRows() >= type.getMatrixRows())
1414 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
// Rule 5b: 2x2 matrix reinterpreted as a 4-vector.
1416 } else if (type.isVector()) {
1417 if (type.getVectorSize() == 4 && sourceType.getMatrixCols() == 2 && sourceType.getMatrixRows() == 2)
1418 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1423 if (sourceType.isVector()) {
// Rule 4: vector may shrink to a smaller vector.
1425 if (type.isVector())
1427 if (sourceType.getVectorSize() > type.getVectorSize())
1428 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
// Rule 5a: 4-vector reinterpreted as a 2x2 matrix.
1430 } else if (type.isMatrix()) {
1431 if (sourceType.getVectorSize() == 4 && type.getMatrixCols() == 2 && type.getMatrixRows() == 2)
1432 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1437 // scalar -> vector or vec1 -> vector or
1438 // vector -> scalar or
1439 // bigger vector -> smaller vector
1440 if ((node->getType().isScalarOrVec1() && type.isVector()) ||
1441 (node->getType().isVector() && type.isScalar()) ||
1442 (node->isVector() && type.isVector() && node->getVectorSize() > type.getVectorSize()))
1443 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
// True when 'from' -> 'to' is an integral promotion (body elided in this excerpt).
1448 bool TIntermediate::isIntegralPromotion(TBasicType from, TBasicType to) const
1450 // integral promotions
// True when 'from' -> 'to' is a floating-point promotion; only widening to
// double is considered here (rest of body elided in this excerpt).
1465 bool TIntermediate::isFPPromotion(TBasicType from, TBasicType to) const
1467 // floating-point promotions
1468 if (to == EbtDouble) {
// True when 'from' -> 'to' is an allowed integral conversion; gated on GLSL
// version >= 400 or HLSL source for some pairs (most of body elided in this excerpt).
1480 bool TIntermediate::isIntegralConversion(TBasicType from, TBasicType to) const
1490 return version >= 400 || getSource() == EShSourceHlsl;
1554 if (to == EbtUint64) {
// True when 'from' -> 'to' is an allowed floating-point conversion, e.g.
// float16 -> float (rest of body elided in this excerpt).
1564 bool TIntermediate::isFPConversion(TBasicType from, TBasicType to) const
1570 if (to == EbtFloat && from == EbtFloat16) {
// True when 'from' (an integer type) -> 'to' (a floating type) is an allowed
// conversion (most of body elided in this excerpt).
1577 bool TIntermediate::isFPIntegralConversion(TBasicType from, TBasicType to) const
1606 if (to == EbtDouble) {
1618 // See if the 'from' type is allowed to be implicitly converted to the
1619 // 'to' type. This is not about vector/array/struct, only about basic type.
// Whether basic type 'from' may be implicitly converted to 'to' for operation
// 'op'. Checks profile/version gates, HLSL's broader rules, the promotion/
// conversion helpers above, and extension-controlled numeric features.
// NOTE(review): excerpt is decimated — switch headers, case labels and returns
// between the visible lines are elided.
1621 bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op) const
// No implicit conversions at all in ES before 310 or desktop 110.
1623 if ((isEsProfile() && version < 310 ) || version == 110)
1629 // TODO: Move more policies into language-specific handlers.
1630 // Some languages allow more general (or potentially, more specific) conversions under some conditions.
1631 if (getSource() == EShSourceHlsl) {
1632 const bool fromConvertable = (from == EbtFloat || from == EbtDouble || from == EbtInt || from == EbtUint || from == EbtBool);
1633 const bool toConvertable = (to == EbtFloat || to == EbtDouble || to == EbtInt || to == EbtUint || to == EbtBool);
1635 if (fromConvertable && toConvertable) {
1637 case EOpAndAssign: // assignments can perform arbitrary conversions
1638 case EOpInclusiveOrAssign: // ...
1639 case EOpExclusiveOrAssign: // ...
1640 case EOpAssign: // ...
1641 case EOpAddAssign: // ...
1642 case EOpSubAssign: // ...
1643 case EOpMulAssign: // ...
1644 case EOpVectorTimesScalarAssign: // ...
1645 case EOpMatrixTimesScalarAssign: // ...
1646 case EOpDivAssign: // ...
1647 case EOpModAssign: // ...
1648 case EOpReturn: // function returns can also perform arbitrary conversions
1649 case EOpFunctionCall: // conversion of a calling parameter
1654 case EOpConstructStruct:
// HLSL also allows bool to convert to the arithmetic scalar types.
1662 if (getSource() == EShSourceHlsl) {
1664 if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat))
// Standard promotion/conversion rules, then extension-based gating below.
1668 if (isIntegralPromotion(from, to) ||
1669 isFPPromotion(from, to) ||
1670 isIntegralConversion(from, to) ||
1671 isFPConversion(from, to) ||
1672 isFPIntegralConversion(from, to)) {
1674 if (numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
1675 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int8) ||
1676 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int16) ||
1677 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int32) ||
1678 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int64) ||
1679 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float16) ||
1680 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float32) ||
1681 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float64)) {
1687 if (isEsProfile()) {
1693 return numericFeatures.contains(TNumericFeatures::shader_implicit_conversions);
1700 return numericFeatures.contains(TNumericFeatures::shader_implicit_conversions);
1716 return version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64);
1719 return (version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64)) &&
1720 numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1722 return (version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64)) &&
1723 numericFeatures.contains(TNumericFeatures::gpu_shader_half_float);
1733 return getSource() == EShSourceHlsl;
1736 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1738 return numericFeatures.contains(TNumericFeatures::gpu_shader_half_float) ||
1739 getSource() == EShSourceHlsl;
1746 return version >= 400 || getSource() == EShSourceHlsl || IsRequestedExtension(E_GL_ARB_gpu_shader5);
1748 return getSource() == EShSourceHlsl;
1751 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1758 return getSource() == EShSourceHlsl;
1760 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1772 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1781 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1789 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1797 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
// File-local helper: whether signed type 'sintType' can represent every value
// of unsigned type 'uintType' (body elided in this excerpt).
1810 static bool canSignedIntTypeRepresentAllUnsignedValues(TBasicType sintType, TBasicType uintType)
// File-local helper: map a signed integer basic type to its unsigned
// counterpart (most of body elided in this excerpt).
1874 static TBasicType getCorrespondingUnsignedType(TBasicType type)
1877 assert(type == EbtInt);
1896 // Implements the following rules
1897 // - If either operand has type float64_t or derived from float64_t,
1898 // the other shall be converted to float64_t or derived type.
1899 // - Otherwise, if either operand has type float32_t or derived from
1900 // float32_t, the other shall be converted to float32_t or derived type.
1901 // - Otherwise, if either operand has type float16_t or derived from
1902 // float16_t, the other shall be converted to float16_t or derived type.
1903 // - Otherwise, if both operands have integer types the following rules
1904 // shall be applied to the operands:
1905 // - If both operands have the same type, no further conversion
1907 // - Otherwise, if both operands have signed integer types or both
1908 // have unsigned integer types, the operand with the type of lesser
1909 // integer conversion rank shall be converted to the type of the
1910 // operand with greater rank.
1911 // - Otherwise, if the operand that has unsigned integer type has rank
1912 // greater than or equal to the rank of the type of the other
1913 // operand, the operand with signed integer type shall be converted
1914 // to the type of the operand with unsigned integer type.
1915 // - Otherwise, if the type of the operand with signed integer type can
1916 // represent all of the values of the type of the operand with
1917 // unsigned integer type, the operand with unsigned integer type
1918 // shall be converted to the type of the operand with signed
1920 // - Otherwise, both operands shall be converted to the unsigned
1921 // integer type corresponding to the type of the operand with signed
// Pick the common destination basic types for a binary operation on 'type0'
// and 'type1', per the usual-arithmetic-conversion rules documented above this
// function. Returns (EbtNumTypes, EbtNumTypes) when no conversion is possible.
// NOTE(review): excerpt is decimated — assignments to res0/res1 and closing
// braces between the visible lines are elided.
1924 std::tuple<TBasicType, TBasicType> TIntermediate::getConversionDestinationType(TBasicType type0, TBasicType type1, TOperator op) const
1926 TBasicType res0 = EbtNumTypes;
1927 TBasicType res1 = EbtNumTypes;
// ES without the implicit-conversions feature does no conversions at all.
1929 if ((isEsProfile() &&
1930 (version < 310 || !numericFeatures.contains(TNumericFeatures::shader_implicit_conversions))) ||
1932 return std::make_tuple(res0, res1);
// HLSL: promote whichever side can reach the other's type.
1934 if (getSource() == EShSourceHlsl) {
1935 if (canImplicitlyPromote(type1, type0, op)) {
1938 } else if (canImplicitlyPromote(type0, type1, op)) {
1942 return std::make_tuple(res0, res1);
// Floating types win in order double > float > float16.
1945 if ((type0 == EbtDouble && canImplicitlyPromote(type1, EbtDouble, op)) ||
1946 (type1 == EbtDouble && canImplicitlyPromote(type0, EbtDouble, op)) ) {
1949 } else if ((type0 == EbtFloat && canImplicitlyPromote(type1, EbtFloat, op)) ||
1950 (type1 == EbtFloat && canImplicitlyPromote(type0, EbtFloat, op)) ) {
1953 } else if ((type0 == EbtFloat16 && canImplicitlyPromote(type1, EbtFloat16, op)) ||
1954 (type1 == EbtFloat16 && canImplicitlyPromote(type0, EbtFloat16, op)) ) {
// Integer/integer: rank and signedness rules (see comment block above).
1957 } else if (isTypeInt(type0) && isTypeInt(type1) &&
1958 (canImplicitlyPromote(type0, type1, op) || canImplicitlyPromote(type1, type0, op))) {
1959 if ((isTypeSignedInt(type0) && isTypeSignedInt(type1)) ||
1960 (isTypeUnsignedInt(type0) && isTypeUnsignedInt(type1))) {
1961 if (getTypeRank(type0) < getTypeRank(type1)) {
1968 } else if (isTypeUnsignedInt(type0) && (getTypeRank(type0) > getTypeRank(type1))) {
1971 } else if (isTypeUnsignedInt(type1) && (getTypeRank(type1) > getTypeRank(type0))) {
1974 } else if (isTypeSignedInt(type0)) {
1975 if (canSignedIntTypeRepresentAllUnsignedValues(type0, type1)) {
1979 res0 = getCorrespondingUnsignedType(type0);
1980 res1 = getCorrespondingUnsignedType(type0);
1982 } else if (isTypeSignedInt(type1)) {
1983 if (canSignedIntTypeRepresentAllUnsignedValues(type1, type0)) {
1987 res0 = getCorrespondingUnsignedType(type1);
1988 res1 = getCorrespondingUnsignedType(type1);
1993 return std::make_tuple(res0, res1);
1997 // Given a type, find what operation would fully construct it.
// Map a fully-specified type to the constructor operator that builds it,
// dispatching on basic type, then matrix cols/rows or vector size.
// Returns EOpNull for combinations not covered (visible default is elided).
// NOTE(review): excerpt is decimated — the outer case labels (EbtStruct,
// EbtSampler, EbtFloat, ...) and many braces/breaks are elided; the visible
// assignments below make each section's basic type evident.
1999 TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const
2001 TOperator op = EOpNull;
// Nonuniform and cooperative-matrix types have dedicated constructors.
2003 if (type.getQualifier().isNonUniform())
2004 return EOpConstructNonuniform;
2006 if (type.isCoopMat())
2007 return EOpConstructCooperativeMatrix;
2009 switch (type.getBasicType()) {
2011 op = EOpConstructStruct;
2014 if (type.getSampler().isCombined())
2015 op = EOpConstructTextureSampler;
// float matrices and vectors
2018 if (type.isMatrix()) {
2019 switch (type.getMatrixCols()) {
2021 switch (type.getMatrixRows()) {
2022 case 2: op = EOpConstructMat2x2; break;
2023 case 3: op = EOpConstructMat2x3; break;
2024 case 4: op = EOpConstructMat2x4; break;
2025 default: break; // some compilers want this
2029 switch (type.getMatrixRows()) {
2030 case 2: op = EOpConstructMat3x2; break;
2031 case 3: op = EOpConstructMat3x3; break;
2032 case 4: op = EOpConstructMat3x4; break;
2033 default: break; // some compilers want this
2037 switch (type.getMatrixRows()) {
2038 case 2: op = EOpConstructMat4x2; break;
2039 case 3: op = EOpConstructMat4x3; break;
2040 case 4: op = EOpConstructMat4x4; break;
2041 default: break; // some compilers want this
2044 default: break; // some compilers want this
2047 switch(type.getVectorSize()) {
2048 case 1: op = EOpConstructFloat; break;
2049 case 2: op = EOpConstructVec2; break;
2050 case 3: op = EOpConstructVec3; break;
2051 case 4: op = EOpConstructVec4; break;
2052 default: break; // some compilers want this
// signed int matrices and vectors
2057 if (type.getMatrixCols()) {
2058 switch (type.getMatrixCols()) {
2060 switch (type.getMatrixRows()) {
2061 case 2: op = EOpConstructIMat2x2; break;
2062 case 3: op = EOpConstructIMat2x3; break;
2063 case 4: op = EOpConstructIMat2x4; break;
2064 default: break; // some compilers want this
2068 switch (type.getMatrixRows()) {
2069 case 2: op = EOpConstructIMat3x2; break;
2070 case 3: op = EOpConstructIMat3x3; break;
2071 case 4: op = EOpConstructIMat3x4; break;
2072 default: break; // some compilers want this
2076 switch (type.getMatrixRows()) {
2077 case 2: op = EOpConstructIMat4x2; break;
2078 case 3: op = EOpConstructIMat4x3; break;
2079 case 4: op = EOpConstructIMat4x4; break;
2080 default: break; // some compilers want this
2085 switch(type.getVectorSize()) {
2086 case 1: op = EOpConstructInt; break;
2087 case 2: op = EOpConstructIVec2; break;
2088 case 3: op = EOpConstructIVec3; break;
2089 case 4: op = EOpConstructIVec4; break;
2090 default: break; // some compilers want this
// unsigned int matrices and vectors
2095 if (type.getMatrixCols()) {
2096 switch (type.getMatrixCols()) {
2098 switch (type.getMatrixRows()) {
2099 case 2: op = EOpConstructUMat2x2; break;
2100 case 3: op = EOpConstructUMat2x3; break;
2101 case 4: op = EOpConstructUMat2x4; break;
2102 default: break; // some compilers want this
2106 switch (type.getMatrixRows()) {
2107 case 2: op = EOpConstructUMat3x2; break;
2108 case 3: op = EOpConstructUMat3x3; break;
2109 case 4: op = EOpConstructUMat3x4; break;
2110 default: break; // some compilers want this
2114 switch (type.getMatrixRows()) {
2115 case 2: op = EOpConstructUMat4x2; break;
2116 case 3: op = EOpConstructUMat4x3; break;
2117 case 4: op = EOpConstructUMat4x4; break;
2118 default: break; // some compilers want this
2123 switch(type.getVectorSize()) {
2124 case 1: op = EOpConstructUint; break;
2125 case 2: op = EOpConstructUVec2; break;
2126 case 3: op = EOpConstructUVec3; break;
2127 case 4: op = EOpConstructUVec4; break;
2128 default: break; // some compilers want this
// bool matrices and vectors
2133 if (type.getMatrixCols()) {
2134 switch (type.getMatrixCols()) {
2136 switch (type.getMatrixRows()) {
2137 case 2: op = EOpConstructBMat2x2; break;
2138 case 3: op = EOpConstructBMat2x3; break;
2139 case 4: op = EOpConstructBMat2x4; break;
2140 default: break; // some compilers want this
2144 switch (type.getMatrixRows()) {
2145 case 2: op = EOpConstructBMat3x2; break;
2146 case 3: op = EOpConstructBMat3x3; break;
2147 case 4: op = EOpConstructBMat3x4; break;
2148 default: break; // some compilers want this
2152 switch (type.getMatrixRows()) {
2153 case 2: op = EOpConstructBMat4x2; break;
2154 case 3: op = EOpConstructBMat4x3; break;
2155 case 4: op = EOpConstructBMat4x4; break;
2156 default: break; // some compilers want this
2161 switch(type.getVectorSize()) {
2162 case 1: op = EOpConstructBool; break;
2163 case 2: op = EOpConstructBVec2; break;
2164 case 3: op = EOpConstructBVec3; break;
2165 case 4: op = EOpConstructBVec4; break;
2166 default: break; // some compilers want this
// double matrices and vectors
2172 if (type.getMatrixCols()) {
2173 switch (type.getMatrixCols()) {
2175 switch (type.getMatrixRows()) {
2176 case 2: op = EOpConstructDMat2x2; break;
2177 case 3: op = EOpConstructDMat2x3; break;
2178 case 4: op = EOpConstructDMat2x4; break;
2179 default: break; // some compilers want this
2183 switch (type.getMatrixRows()) {
2184 case 2: op = EOpConstructDMat3x2; break;
2185 case 3: op = EOpConstructDMat3x3; break;
2186 case 4: op = EOpConstructDMat3x4; break;
2187 default: break; // some compilers want this
2191 switch (type.getMatrixRows()) {
2192 case 2: op = EOpConstructDMat4x2; break;
2193 case 3: op = EOpConstructDMat4x3; break;
2194 case 4: op = EOpConstructDMat4x4; break;
2195 default: break; // some compilers want this
2200 switch(type.getVectorSize()) {
2201 case 1: op = EOpConstructDouble; break;
2202 case 2: op = EOpConstructDVec2; break;
2203 case 3: op = EOpConstructDVec3; break;
2204 case 4: op = EOpConstructDVec4; break;
2205 default: break; // some compilers want this
// float16 matrices and vectors
2210 if (type.getMatrixCols()) {
2211 switch (type.getMatrixCols()) {
2213 switch (type.getMatrixRows()) {
2214 case 2: op = EOpConstructF16Mat2x2; break;
2215 case 3: op = EOpConstructF16Mat2x3; break;
2216 case 4: op = EOpConstructF16Mat2x4; break;
2217 default: break; // some compilers want this
2221 switch (type.getMatrixRows()) {
2222 case 2: op = EOpConstructF16Mat3x2; break;
2223 case 3: op = EOpConstructF16Mat3x3; break;
2224 case 4: op = EOpConstructF16Mat3x4; break;
2225 default: break; // some compilers want this
2229 switch (type.getMatrixRows()) {
2230 case 2: op = EOpConstructF16Mat4x2; break;
2231 case 3: op = EOpConstructF16Mat4x3; break;
2232 case 4: op = EOpConstructF16Mat4x4; break;
2233 default: break; // some compilers want this
2239 switch (type.getVectorSize()) {
2240 case 1: op = EOpConstructFloat16; break;
2241 case 2: op = EOpConstructF16Vec2; break;
2242 case 3: op = EOpConstructF16Vec3; break;
2243 case 4: op = EOpConstructF16Vec4; break;
2244 default: break; // some compilers want this
// 8/16/64-bit integer vectors (no matrix forms)
2249 switch(type.getVectorSize()) {
2250 case 1: op = EOpConstructInt8; break;
2251 case 2: op = EOpConstructI8Vec2; break;
2252 case 3: op = EOpConstructI8Vec3; break;
2253 case 4: op = EOpConstructI8Vec4; break;
2254 default: break; // some compilers want this
2258 switch(type.getVectorSize()) {
2259 case 1: op = EOpConstructUint8; break;
2260 case 2: op = EOpConstructU8Vec2; break;
2261 case 3: op = EOpConstructU8Vec3; break;
2262 case 4: op = EOpConstructU8Vec4; break;
2263 default: break; // some compilers want this
2267 switch(type.getVectorSize()) {
2268 case 1: op = EOpConstructInt16; break;
2269 case 2: op = EOpConstructI16Vec2; break;
2270 case 3: op = EOpConstructI16Vec3; break;
2271 case 4: op = EOpConstructI16Vec4; break;
2272 default: break; // some compilers want this
2276 switch(type.getVectorSize()) {
2277 case 1: op = EOpConstructUint16; break;
2278 case 2: op = EOpConstructU16Vec2; break;
2279 case 3: op = EOpConstructU16Vec3; break;
2280 case 4: op = EOpConstructU16Vec4; break;
2281 default: break; // some compilers want this
2285 switch(type.getVectorSize()) {
2286 case 1: op = EOpConstructInt64; break;
2287 case 2: op = EOpConstructI64Vec2; break;
2288 case 3: op = EOpConstructI64Vec3; break;
2289 case 4: op = EOpConstructI64Vec4; break;
2290 default: break; // some compilers want this
2294 switch(type.getVectorSize()) {
2295 case 1: op = EOpConstructUint64; break;
2296 case 2: op = EOpConstructU64Vec2; break;
2297 case 3: op = EOpConstructU64Vec3; break;
2298 case 4: op = EOpConstructU64Vec4; break;
2299 default: break; // some compilers want this
// reference and acceleration-structure types
2303 op = EOpConstructReference;
2307 op = EOpConstructAccStruct;
2318 // Safe way to combine two nodes into an aggregate. Works with null pointers,
2319 // a node that's not an aggregate yet, etc.
2321 // Returns the resulting aggregate, unless nullptr was passed in for
2322 // both existing nodes.
// Combine 'left' and 'right' into one aggregate. Either may be null; if 'left'
// is already an operator-less aggregate it is extended, otherwise a new
// aggregate is created holding both. (Final return elided in this excerpt.)
2324 TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right)
2326 if (left == nullptr && right == nullptr)
2329 TIntermAggregate* aggNode = nullptr;
2330 if (left != nullptr)
2331 aggNode = left->getAsAggregate();
// Reuse 'left' only when it is a plain (EOpNull) aggregate; otherwise wrap it.
2332 if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
2333 aggNode = new TIntermAggregate;
2334 if (left != nullptr)
2335 aggNode->getSequence().push_back(left);
2338 if (right != nullptr)
2339 aggNode->getSequence().push_back(right);
// Overload: grow the aggregate and stamp it with source location 'loc'.
// (Return statement elided in this excerpt.)
2344 TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc& loc)
2346 TIntermAggregate* aggNode = growAggregate(left, right);
2348 aggNode->setLoc(loc);
2354 // Turn an existing node into an aggregate.
2356 // Returns an aggregate, unless nullptr was passed in for the existing node.
// Wrap a single node in a new aggregate, inheriting the node's location.
// Null in yields null out (returns elided in this excerpt).
2358 TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node)
2360 if (node == nullptr)
2363 TIntermAggregate* aggNode = new TIntermAggregate;
2364 aggNode->getSequence().push_back(node);
2365 aggNode->setLoc(node->getLoc());
// Overload: wrap a single node in a new aggregate with an explicit location.
// (Returns elided in this excerpt.)
2370 TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node, const TSourceLoc& loc)
2372 if (node == nullptr)
2375 TIntermAggregate* aggNode = new TIntermAggregate;
2376 aggNode->getSequence().push_back(node);
2377 aggNode->setLoc(loc);
2383 // Make an aggregate with an empty sequence.
// Overload: create an empty aggregate carrying only a source location.
// (Return statement elided in this excerpt.)
2385 TIntermAggregate* TIntermediate::makeAggregate(const TSourceLoc& loc)
2387 TIntermAggregate* aggNode = new TIntermAggregate;
2388 aggNode->setLoc(loc);
2394 // For "if" test nodes. There are three children; a condition,
2395 // a true path, and a false path. The two paths are in the
2398 // Returns the selection node created.
// Build an if/then/else selection node from a condition and a true/false node
// pair. (Loc stamping and return elided in this excerpt.)
2400 TIntermSelection* TIntermediate::addSelection(TIntermTyped* cond, TIntermNodePair nodePair, const TSourceLoc& loc)
2403 // Don't prune the false path for compile-time constants; it's needed
2404 // for static access analysis.
2407 TIntermSelection* node = new TIntermSelection(cond, nodePair.node1, nodePair.node2);
// Build a comma-sequence node: grows (left, right) into an EOpComma aggregate
// whose type/qualifier come from the right operand, made temporary.
// (Return of 'commaAggregate' is visible; surrounding lines elided.)
2413 TIntermTyped* TIntermediate::addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc)
2415 // However, the lowest precedence operators of the sequence operator ( , ) and the assignment operators
2416 // ... are not included in the operators that can create a constant expression.
2418 // if (left->getType().getQualifier().storage == EvqConst &&
2419 // right->getType().getQualifier().storage == EvqConst) {
2424 TIntermTyped *commaAggregate = growAggregate(left, right, loc);
2425 commaAggregate->getAsAggregate()->setOperator(EOpComma);
2426 commaAggregate->setType(right->getType());
2427 commaAggregate->getWritableType().getQualifier().makeTemporary();
2429 return commaAggregate;
// Build a method node (e.g. 'object.length') named '*name' on 'object'.
// (Return statement elided in this excerpt.)
2432 TIntermTyped* TIntermediate::addMethod(TIntermTyped* object, const TType& type, const TString* name, const TSourceLoc& loc)
2434 TIntermMethod* method = new TIntermMethod(object, type, *name);
2435 method->setLoc(loc);
2441 // For "?:" test nodes. There are three children; a condition,
2442 // a true path, and a false path. The two paths are specified
2443 // as separate parameters. For vector 'cond', the true and false
2444 // are not paths, but vectors to mix.
2446 // Specialization constant operations include
2447 // - The ternary operator ( ? : )
2449 // Returns the selection node created, or nullptr if one could not be.
// Build a '?:' selection. Void branches become an if/then/else; a vector
// condition becomes a mix; all-constant scalar selections are folded; spec-
// constant qualification propagates when the operands allow it.
// NOTE(review): excerpt is decimated — several returns/braces between the
// visible lines are elided.
2451 TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock,
2452 const TSourceLoc& loc)
2454 // If it's void, go to the if-then-else selection()
2455 if (trueBlock->getBasicType() == EbtVoid && falseBlock->getBasicType() == EbtVoid) {
2456 TIntermNodePair pair = { trueBlock, falseBlock };
2457 TIntermSelection* selection = addSelection(cond, pair, loc);
2458 if (getSource() == EShSourceHlsl)
2459 selection->setNoShortCircuit();
2465 // Get compatible types.
2467 auto children = addPairConversion(EOpSequence, trueBlock, falseBlock);
2468 trueBlock = std::get<0>(children);
2469 falseBlock = std::get<1>(children);
2471 if (trueBlock == nullptr || falseBlock == nullptr)
2474 // Handle a vector condition as a mix
2475 if (!cond->getType().isScalarOrVec1()) {
2476 TType targetVectorType(trueBlock->getType().getBasicType(), EvqTemporary,
2477 cond->getType().getVectorSize());
2478 // smear true/false operands as needed
2479 trueBlock = addUniShapeConversion(EOpMix, targetVectorType, trueBlock);
2480 falseBlock = addUniShapeConversion(EOpMix, targetVectorType, falseBlock);
2482 // After conversion, types have to match.
2483 if (falseBlock->getType() != trueBlock->getType())
2486 // make the mix operation
2487 TIntermAggregate* mix = makeAggregate(loc);
2488 mix = growAggregate(mix, falseBlock);
2489 mix = growAggregate(mix, trueBlock);
2490 mix = growAggregate(mix, cond);
2491 mix->setType(targetVectorType);
2497 // Now have a scalar condition...
2499 // Convert true and false expressions to matching types
2500 addBiShapeConversion(EOpMix, trueBlock, falseBlock);
2502 // After conversion, types have to match.
2503 if (falseBlock->getType() != trueBlock->getType())
2506 // Eliminate the selection when the condition is a scalar and all operands are constant.
2507 if (cond->getAsConstantUnion() && trueBlock->getAsConstantUnion() && falseBlock->getAsConstantUnion()) {
2508 if (cond->getAsConstantUnion()->getConstArray()[0].getBConst())
2515 // Make a selection node.
2517 TIntermSelection* node = new TIntermSelection(cond, trueBlock, falseBlock, trueBlock->getType());
// Result precision is the higher of the two branch precisions.
2519 node->getQualifier().precision = std::max(trueBlock->getQualifier().precision, falseBlock->getQualifier().precision);
2521 if ((cond->getQualifier().isConstant() && specConstantPropagates(*trueBlock, *falseBlock)) ||
2522 (cond->getQualifier().isSpecConstant() && trueBlock->getQualifier().isConstant() &&
2523 falseBlock->getQualifier().isConstant()))
2524 node->getQualifier().makeSpecConstant();
2526 node->getQualifier().makeTemporary();
2528 if (getSource() == EShSourceHlsl)
2529 node->setNoShortCircuit();
// Constant terminal nodes.  Has a union that contains bool, float or int constants
//
// Returns the constant union node created.
TIntermConstantUnion* TIntermediate::addConstantUnion(const TConstUnionArray& unionArray, const TType& t, const TSourceLoc& loc, bool literal) const
TIntermConstantUnion* node = new TIntermConstantUnion(unionArray, t);
// Constants always carry EvqConst storage, regardless of t's qualifier.
node->getQualifier().storage = EvqConst;
// Build a single-element int8_t constant (EbtInt8, EvqConst).
TIntermConstantUnion* TIntermediate::addConstantUnion(signed char i8, const TSourceLoc& loc, bool literal) const
TConstUnionArray unionArray(1);
unionArray[0].setI8Const(i8);

return addConstantUnion(unionArray, TType(EbtInt8, EvqConst), loc, literal);
2558 TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned char u8, const TSourceLoc& loc, bool literal) const
2560 TConstUnionArray unionArray(1);
2561 unionArray[0].setUConst(u8);
2563 return addConstantUnion(unionArray, TType(EbtUint8, EvqConst), loc, literal);
// Build a single-element int16_t constant (EbtInt16, EvqConst).
TIntermConstantUnion* TIntermediate::addConstantUnion(signed short i16, const TSourceLoc& loc, bool literal) const
TConstUnionArray unionArray(1);
unionArray[0].setI16Const(i16);

return addConstantUnion(unionArray, TType(EbtInt16, EvqConst), loc, literal);
// Build a single-element uint16_t constant (EbtUint16, EvqConst).
TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned short u16, const TSourceLoc& loc, bool literal) const
TConstUnionArray unionArray(1);
unionArray[0].setU16Const(u16);

return addConstantUnion(unionArray, TType(EbtUint16, EvqConst), loc, literal);
// Build a single-element int constant (EbtInt, EvqConst).
TIntermConstantUnion* TIntermediate::addConstantUnion(int i, const TSourceLoc& loc, bool literal) const
TConstUnionArray unionArray(1);
unionArray[0].setIConst(i);

return addConstantUnion(unionArray, TType(EbtInt, EvqConst), loc, literal);
// Build a single-element uint constant (EbtUint, EvqConst).
TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned int u, const TSourceLoc& loc, bool literal) const
TConstUnionArray unionArray(1);
unionArray[0].setUConst(u);

return addConstantUnion(unionArray, TType(EbtUint, EvqConst), loc, literal);
// Build a single-element int64_t constant (EbtInt64, EvqConst).
TIntermConstantUnion* TIntermediate::addConstantUnion(long long i64, const TSourceLoc& loc, bool literal) const
TConstUnionArray unionArray(1);
unionArray[0].setI64Const(i64);

return addConstantUnion(unionArray, TType(EbtInt64, EvqConst), loc, literal);
// Build a single-element uint64_t constant (EbtUint64, EvqConst).
TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned long long u64, const TSourceLoc& loc, bool literal) const
TConstUnionArray unionArray(1);
unionArray[0].setU64Const(u64);

return addConstantUnion(unionArray, TType(EbtUint64, EvqConst), loc, literal);
// Build a single-element bool constant (EbtBool, EvqConst).
TIntermConstantUnion* TIntermediate::addConstantUnion(bool b, const TSourceLoc& loc, bool literal) const
TConstUnionArray unionArray(1);
unionArray[0].setBConst(b);

return addConstantUnion(unionArray, TType(EbtBool, EvqConst), loc, literal);
// Build a single-element floating-point constant.  'baseType' selects the
// precision flavor (float, double, or float16); the value is always stored
// as a double in the union.
TIntermConstantUnion* TIntermediate::addConstantUnion(double d, TBasicType baseType, const TSourceLoc& loc, bool literal) const
assert(baseType == EbtFloat || baseType == EbtDouble || baseType == EbtFloat16);

TConstUnionArray unionArray(1);
unionArray[0].setDConst(d);

return addConstantUnion(unionArray, TType(baseType, EvqConst), loc, literal);
// Build a single-element string constant (EbtString, EvqConst).
// Note: the union stores the TString pointer, not a copy.
TIntermConstantUnion* TIntermediate::addConstantUnion(const TString* s, const TSourceLoc& loc, bool literal) const
TConstUnionArray unionArray(1);
unionArray[0].setSConst(s);

return addConstantUnion(unionArray, TType(EbtString, EvqConst), loc, literal);
// Put vector swizzle selectors onto the given sequence.
// Each selector becomes one integer constant-union node.
void TIntermediate::pushSelector(TIntermSequence& sequence, const TVectorSelector& selector, const TSourceLoc& loc)
TIntermConstantUnion* constIntNode = addConstantUnion(selector, loc);
sequence.push_back(constIntNode);
// Put matrix swizzle selectors onto the given sequence.
// A matrix selector carries two coordinates; each is pushed as its own
// integer constant-union node (coord1 first, then coord2).
void TIntermediate::pushSelector(TIntermSequence& sequence, const TMatrixSelector& selector, const TSourceLoc& loc)
TIntermConstantUnion* constIntNode = addConstantUnion(selector.coord1, loc);
sequence.push_back(constIntNode);
constIntNode = addConstantUnion(selector.coord2, loc);
sequence.push_back(constIntNode);
// Make an aggregate node that has a sequence of all selectors.
// Explicit instantiations for the two selector kinds used by callers:
template TIntermTyped* TIntermediate::addSwizzle<TVectorSelector>(TSwizzleSelectors<TVectorSelector>& selector, const TSourceLoc& loc);
template TIntermTyped* TIntermediate::addSwizzle<TMatrixSelector>(TSwizzleSelectors<TMatrixSelector>& selector, const TSourceLoc& loc);
template<typename selectorType>
TIntermTyped* TIntermediate::addSwizzle(TSwizzleSelectors<selectorType>& selector, const TSourceLoc& loc)
TIntermAggregate* node = new TIntermAggregate(EOpSequence);

TIntermSequence &sequenceVector = node->getSequence();
// Flatten every selector into the aggregate's child sequence.
for (int i = 0; i < selector.size(); i++)
    pushSelector(sequenceVector, selector[i], loc);
// Follow the left branches down to the root of an l-value
// expression (just "." and []).
//
// Return the base of the l-value (where following indexing quits working).
// Return nullptr if a chain following dereferences cannot be followed.
//
// 'swizzleOkay' says whether or not it is okay to consider a swizzle
// a valid part of the dereference chain.
//
// 'bufferReferenceOk' says that, if the type is a buffer_reference, the
// routine stops there instead of continuing to the left-most node.
const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay , bool bufferReferenceOk)
// Only binary nodes (indexing/member/swizzle) continue the chain.
const TIntermBinary* binary = node->getAsBinaryNode();
if (binary == nullptr)
TOperator op = binary->getOp();
if (op != EOpIndexDirect && op != EOpIndexIndirect && op != EOpIndexDirectStruct && op != EOpVectorSwizzle && op != EOpMatrixSwizzle)
if (! swizzleOkay) {
    if (op == EOpVectorSwizzle || op == EOpMatrixSwizzle)
    // Indexing a scalar/vector (not an array) is component selection,
    // which counts as a swizzle for this purpose.
    if ((op == EOpIndexDirect || op == EOpIndexIndirect) &&
        (binary->getLeft()->getType().isVector() || binary->getLeft()->getType().isScalar()) &&
        ! binary->getLeft()->getType().isArray())
// Step left and keep walking.
node = node->getAsBinaryNode()->getLeft();
if (bufferReferenceOk && node->isReference())
// Create while and do-while loop nodes.
// 'testFirst' selects while (true) vs. do-while (false) semantics.
TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst,
    const TSourceLoc& loc)
TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst);
// Create a for-loop sequence: a sequence/scope holding the initializer
// followed by the loop node itself.  The created loop node is also
// returned through the 'node' out-parameter.
TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test,
    TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TIntermLoop*& node)
node = new TIntermLoop(body, test, terminal, testFirst);

// make a sequence of the initializer and statement, but try to reuse the
// aggregate already created for whatever is in the initializer, if there is one
TIntermAggregate* loopSequence = (initializer == nullptr ||
                                  initializer->getAsAggregate() == nullptr) ? makeAggregate(initializer, loc)
                                                                            : initializer->getAsAggregate();
// Temporarily clear the op so growAggregate() appends instead of nesting.
if (loopSequence != nullptr && (loopSequence->getOp() == EOpSequence || loopSequence->getOp() == EOpScope))
    loopSequence->setOp(EOpNull);
loopSequence = growAggregate(loopSequence, node);
// Use a scope (rather than plain sequence) when emitting debug info.
loopSequence->setOperator(getDebugInfo() ? EOpScope : EOpSequence);

return loopSequence;
2747 TIntermBranch* TIntermediate::addBranch(TOperator branchOp, const TSourceLoc& loc)
2749 return addBranch(branchOp, nullptr, loc);
// Create a branch node carrying an expression (e.g., 'return expr').
TIntermBranch* TIntermediate::addBranch(TOperator branchOp, TIntermTyped* expression, const TSourceLoc& loc)
TIntermBranch* node = new TIntermBranch(branchOp, expression);
// Propagate precision from formal function return type to actual return type,
// and on to its subtree.
void TIntermBranch::updatePrecision(TPrecisionQualifier parentPrecision)
TIntermTyped* exp = getExpression();
// Only precision-qualified basic types participate (int/uint/float).
if (exp->getBasicType() == EbtInt || exp->getBasicType() == EbtUint ||
    exp->getBasicType() == EbtFloat) {
    // Only fill in precision that the expression doesn't already have.
    if (parentPrecision != EpqNone && exp->getQualifier().precision == EpqNone) {
        exp->propagatePrecision(parentPrecision);
2780 bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/)
2782 if (root == nullptr)
2785 // Finish off the top-level sequence
2786 TIntermAggregate* aggRoot = root->getAsAggregate();
2787 if (aggRoot && aggRoot->getOp() == EOpNull)
2788 aggRoot->setOperator(EOpSequence);
2791 // Propagate 'noContraction' label in backward from 'precise' variables.
2792 glslang::PropagateNoContraction(*this);
2794 switch (textureSamplerTransformMode) {
2795 case EShTexSampTransKeep:
2797 case EShTexSampTransUpgradeTextureRemoveSampler:
2798 performTextureUpgradeAndSamplerRemovalTransformation(root);
2800 case EShTexSampTransCount:
// Add top-level linker-object nodes and attach them to the tree root.
void TIntermediate::addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage language, TSymbolTable& symbolTable)
// Add top-level nodes for declarations that must be checked cross
// compilation unit by a linker, yet might not have been referenced
//
// Almost entirely, translation of symbols is driven by what's present
// in the AST traversal, not by translating the symbol table.
//
// However, there are some special cases:
//  - From the specification: "Special built-in inputs gl_VertexID and
//    gl_InstanceID are also considered active vertex attributes."
//  - Linker-based type mismatch error reporting needs to see all
//    uniforms/ins/outs variables and blocks.
//  - ftransform() can make gl_Vertex and gl_ModelViewProjectionMatrix active.

//if (ftransformUsed) {
    // TODO: 1.1 lowering functionality: track ftransform() usage
//    addSymbolLinkageNode(root, symbolTable, "gl_Vertex");
//    addSymbolLinkageNode(root, symbolTable, "gl_ModelViewProjectionMatrix");

if (language == EShLangVertex) {
    // the names won't be found in the symbol table unless the versions are right,
    // so version logic does not need to be repeated here
    addSymbolLinkageNode(linkage, symbolTable, "gl_VertexID");
    addSymbolLinkageNode(linkage, symbolTable, "gl_InstanceID");

// Add a child to the root node for the linker objects
linkage->setOperator(EOpLinkerObjects);
treeRoot = growAggregate(treeRoot, linkage);
// Add the given name or symbol to the list of nodes at the end of the tree used
// for link-time checking and external linkage.
void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable& symbolTable, const TString& name)
// Built-ins only exist in the table when version/profile allow them.
TSymbol* symbol = symbolTable.find(name);
// NOTE(review): 'symbol' is dereferenced unconditionally here; a null guard
// appears to have been elided from this listing — confirm against upstream.
addSymbolLinkageNode(linkage, *symbol->getAsVariable());
// Append one symbol to the linkage aggregate.  Anonymous-block members are
// promoted to their containing block so the whole block is linked.
void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol& symbol)
const TVariable* variable = symbol.getAsVariable();
// This must be a member of an anonymous block, and we need to add the whole block
const TAnonMember* anon = symbol.getAsAnonMember();
variable = &anon->getAnonContainer();
TIntermSymbol* node = addSymbol(*variable);
linkage = growAggregate(linkage, node);
// Add a caller->callee relationship to the call graph.
// Assumes the strings are unique per signature.
void TIntermediate::addToCallGraph(TInfoSink& /*infoSink*/, const TString& caller, const TString& callee)
// Duplicates are okay, but faster to not keep them, and they come grouped by caller,
// as long as new ones are pushed on the same end we check on for duplicates
for (TGraph::const_iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
    if (call->caller != caller)
    if (call->callee == callee)

callGraph.emplace_front(caller, callee);
// This deletes the tree.  Frees every node reachable from treeRoot.
void TIntermediate::removeTree()
RemoveAllTreeNodes(treeRoot);
2896 // Implement the part of KHR_vulkan_glsl that lists the set of operations
2897 // that can result in a specialization constant operation.
2899 // "5.x Specialization Constant Operations"
2901 // Only some operations discussed in this section may be applied to a
2902 // specialization constant and still yield a result that is as
2903 // specialization constant. The operations allowed are listed below.
2904 // When a specialization constant is operated on with one of these
2905 // operators and with another constant or specialization constant, the
2906 // result is implicitly a specialization constant.
2908 // - int(), uint(), and bool() constructors for type conversions
2909 // from any of the following types to any of the following types:
2913 // - vector versions of the above conversion constructors
2914 // - allowed implicit conversions of the above
2915 // - swizzles (e.g., foo.yx)
2916 // - The following when applied to integer or unsigned integer types:
2917 // * unary negative ( - )
2918 // * binary operations ( + , - , * , / , % )
2919 // * shift ( <<, >> )
2920 // * bitwise operations ( & , | , ^ )
2921 // - The following when applied to integer or unsigned integer scalar types:
2922 // * comparison ( == , != , > , >= , < , <= )
2923 // - The following when applied to the Boolean scalar type:
2925 // * logical operations ( && , || , ^^ )
2926 // * comparison ( == , != )"
2928 // This function just handles binary and unary nodes. Construction
2929 // rules are handled in construction paths that are not covered by the unary
2930 // and binary paths, while required conversions will still show up here
// as unary converters on the path from a construction operator.
// Decide whether the given operator node is one of the operations allowed
// on specialization constants (see the rules quoted above).
bool TIntermediate::isSpecializationOperation(const TIntermOperator& node) const
// The operations resulting in floating point are quite limited
// (However, some floating-point operations result in bool, like ">",
// so are handled later.)
if (node.getType().isFloatingDomain()) {
    switch (node.getOp()) {
    // dereference/swizzle keep spec-constness even for floats
    case EOpIndexDirect:
    case EOpIndexIndirect:
    case EOpIndexDirectStruct:
    case EOpVectorSwizzle:
    // float <-> double <-> float16 conversions
    case EOpConvFloatToDouble:
    case EOpConvDoubleToFloat:
    case EOpConvFloat16ToFloat:
    case EOpConvFloatToFloat16:
    case EOpConvFloat16ToDouble:
    case EOpConvDoubleToFloat16:

// Check for floating-point arguments
if (const TIntermBinary* bin = node.getAsBinaryNode())
    if (bin->getLeft() ->getType().isFloatingDomain() ||
        bin->getRight()->getType().isFloatingDomain())

// So, for now, we can assume everything left is non-floating-point...

// Now check for integer/bool-based operations
switch (node.getOp()) {

// dereference/swizzle
case EOpIndexDirect:
case EOpIndexIndirect:
case EOpIndexDirectStruct:
case EOpVectorSwizzle:

// (u)int* -> bool
case EOpConvInt8ToBool:
case EOpConvInt16ToBool:
case EOpConvIntToBool:
case EOpConvInt64ToBool:
case EOpConvUint8ToBool:
case EOpConvUint16ToBool:
case EOpConvUintToBool:
case EOpConvUint64ToBool:

// bool -> (u)int*
case EOpConvBoolToInt8:
case EOpConvBoolToInt16:
case EOpConvBoolToInt:
case EOpConvBoolToInt64:
case EOpConvBoolToUint8:
case EOpConvBoolToUint16:
case EOpConvBoolToUint:
case EOpConvBoolToUint64:

// int8_t -> (u)int*
case EOpConvInt8ToInt16:
case EOpConvInt8ToInt:
case EOpConvInt8ToInt64:
case EOpConvInt8ToUint8:
case EOpConvInt8ToUint16:
case EOpConvInt8ToUint:
case EOpConvInt8ToUint64:

// int16_t -> (u)int*
case EOpConvInt16ToInt8:
case EOpConvInt16ToInt:
case EOpConvInt16ToInt64:
case EOpConvInt16ToUint8:
case EOpConvInt16ToUint16:
case EOpConvInt16ToUint:
case EOpConvInt16ToUint64:

// int32_t -> (u)int*
case EOpConvIntToInt8:
case EOpConvIntToInt16:
case EOpConvIntToInt64:
case EOpConvIntToUint8:
case EOpConvIntToUint16:
case EOpConvIntToUint:
case EOpConvIntToUint64:

// int64_t -> (u)int*
case EOpConvInt64ToInt8:
case EOpConvInt64ToInt16:
case EOpConvInt64ToInt:
case EOpConvInt64ToUint8:
case EOpConvInt64ToUint16:
case EOpConvInt64ToUint:
case EOpConvInt64ToUint64:

// uint8_t -> (u)int*
case EOpConvUint8ToInt8:
case EOpConvUint8ToInt16:
case EOpConvUint8ToInt:
case EOpConvUint8ToInt64:
case EOpConvUint8ToUint16:
case EOpConvUint8ToUint:
case EOpConvUint8ToUint64:

// uint16_t -> (u)int*
case EOpConvUint16ToInt8:
case EOpConvUint16ToInt16:
case EOpConvUint16ToInt:
case EOpConvUint16ToInt64:
case EOpConvUint16ToUint8:
case EOpConvUint16ToUint:
case EOpConvUint16ToUint64:

// uint32_t -> (u)int*
case EOpConvUintToInt8:
case EOpConvUintToInt16:
case EOpConvUintToInt:
case EOpConvUintToInt64:
case EOpConvUintToUint8:
case EOpConvUintToUint16:
case EOpConvUintToUint64:

// uint64_t -> (u)int*
case EOpConvUint64ToInt8:
case EOpConvUint64ToInt16:
case EOpConvUint64ToInt:
case EOpConvUint64ToInt64:
case EOpConvUint64ToUint8:
case EOpConvUint64ToUint16:
case EOpConvUint64ToUint:

// binary operations
case EOpVectorTimesScalar:
case EOpInclusiveOr:
case EOpExclusiveOr:
case EOpGreaterThan:
case EOpLessThanEqual:
case EOpGreaterThanEqual:
// Is the operation one that must propagate nonuniform?
bool TIntermediate::isNonuniformPropagating(TOperator op) const
// "* All Operators in Section 5.1 (Operators), except for assignment,
//    arithmetic assignment, and sequence
//  * Component selection in Section 5.5
//  * Matrix components in Section 5.6
//  * Structure and Array Operations in Section 5.7, except for the length
// increment/decrement
case EOpPostIncrement:
case EOpPostDecrement:
case EOpPreIncrement:
case EOpPreDecrement:

case EOpVectorLogicalNot:

// bitwise
case EOpInclusiveOr:
case EOpExclusiveOr:
// relational
case EOpGreaterThan:
case EOpLessThanEqual:
case EOpGreaterThanEqual:
// vector/matrix arithmetic
case EOpVectorTimesScalar:
case EOpVectorTimesMatrix:
case EOpMatrixTimesVector:
case EOpMatrixTimesScalar:

// dereference/swizzle
case EOpIndexDirect:
case EOpIndexIndirect:
case EOpIndexDirectStruct:
case EOpVectorSwizzle:
3155 ////////////////////////////////////////////////////////////////
3157 // Member functions of the nodes used for building the tree.
3159 ////////////////////////////////////////////////////////////////
// Say whether or not an operation node changes the value of a variable.
//
// Returns true if state is modified.
bool TIntermOperator::modifiesState() const
// increment/decrement mutate their operand
case EOpPostIncrement:
case EOpPostDecrement:
case EOpPreIncrement:
case EOpPreDecrement:
// compound assignments
case EOpVectorTimesMatrixAssign:
case EOpVectorTimesScalarAssign:
case EOpMatrixTimesScalarAssign:
case EOpMatrixTimesMatrixAssign:
case EOpInclusiveOrAssign:
case EOpExclusiveOrAssign:
case EOpLeftShiftAssign:
case EOpRightShiftAssign:
3195 // returns true if the operator is for one of the constructors
3197 bool TIntermOperator::isConstructor() const
3199 return op > EOpConstructGuardStart && op < EOpConstructGuardEnd;
3203 // Make sure the type of an operator is appropriate for its
3204 // combination of operation and operand type. This will invoke
3205 // promoteUnary, promoteBinary, etc as needed.
3207 // Returns false if nothing makes sense.
3209 bool TIntermediate::promote(TIntermOperator* node)
3211 if (node == nullptr)
3214 if (node->getAsUnaryNode())
3215 return promoteUnary(*node->getAsUnaryNode());
3217 if (node->getAsBinaryNode())
3218 return promoteBinary(*node->getAsBinaryNode());
3220 if (node->getAsAggregate())
3221 return promoteAggregate(*node->getAsAggregate());
// See TIntermediate::promote
// Validate/adjust a unary node's operand and set the node's result type.
bool TIntermediate::promoteUnary(TIntermUnary& node)
const TOperator op       = node.getOp();
TIntermTyped* operand = node.getOperand();

// Convert operand to a boolean type
if (operand->getBasicType() != EbtBool) {
    // Add constructor to boolean type. If that fails, we can't do it, so return false.
    TIntermTyped* converted = addConversion(op, TType(EbtBool), operand);
    if (converted == nullptr)

    // Use the result of converting the node to a bool.
    node.setOperand(operand = converted); // also updates stack variable
// Bitwise ops require an integer operand.
if (!isTypeInt(operand->getBasicType()))
case EOpPostIncrement:
case EOpPostDecrement:
case EOpPreIncrement:
case EOpPreDecrement:
    // inc/dec work on any integer or floating-point scalar domain
    if (!isTypeInt(operand->getBasicType()) &&
        operand->getBasicType() != EbtFloat &&
        operand->getBasicType() != EbtFloat16 &&
        operand->getBasicType() != EbtDouble)

    // HLSL uses this path for initial function signature finding for built-ins
    // taking a single argument, which generally don't participate in
    // operator-based type promotion (type conversion will occur later).
    // For now, scalar argument cases are relying on the setType() call below.
    if (getSource() == EShSourceHlsl)

    // GLSL only allows integer arguments for the cases identified above in the
    if (operand->getBasicType() != EbtFloat)

// Result has the operand's type, but as a temporary.
node.setType(operand->getType());
node.getWritableType().getQualifier().makeTemporary();
// Propagate precision qualifiers *up* from children to parent.
void TIntermUnary::updatePrecision()
// Only precision-qualified basic types participate (int/uint/float).
if (getBasicType() == EbtInt || getBasicType() == EbtUint ||
    getBasicType() == EbtFloat) {
    // Adopt the operand's precision when it is stronger than ours.
    if (operand->getQualifier().precision > getQualifier().precision)
        getQualifier().precision = operand->getQualifier().precision;
// See TIntermediate::promote
// Validate a binary node's operands, possibly rewrite its operator (e.g.,
// the '*' family into the specific vector/matrix ops), and set its result
// type.  Returns false when the combination is not legal.
bool TIntermediate::promoteBinary(TIntermBinary& node)
TOperator     op    = node.getOp();
TIntermTyped* left  = node.getLeft();
TIntermTyped* right = node.getRight();

// Arrays and structures have to be exact matches.
if ((left->isArray() || right->isArray() || left->getBasicType() == EbtStruct || right->getBasicType() == EbtStruct)
    && left->getType() != right->getType())

// Base assumption:  just make the type the same as the left
// operand.  Only deviations from this will be coded.
node.setType(left->getType());
node.getWritableType().getQualifier().clear();

// Composite and opaque types don't having pending operator changes, e.g.,
// array, structure, and samplers.  Just establish final type and correctness.
if (left->isArray() || left->getBasicType() == EbtStruct || left->getBasicType() == EbtSampler) {
    if (left->getBasicType() == EbtSampler) {
        // can't compare samplers
    // Promote to conditional
    node.setType(TType(EbtBool));

    // Keep type from above

// We now have only scalars, vectors, and matrices to worry about.

// HLSL implicitly promotes bool -> int for numeric operations.
// (Implicit conversions to make the operands match each other's types were already done.)
if (getSource() == EShSourceHlsl &&
    (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)) {
    case EOpGreaterThan:
    case EOpLessThanEqual:
    case EOpGreaterThanEqual:

    case EOpInclusiveOr:
    case EOpExclusiveOr:

        if (left->getBasicType() == EbtBool)
            left = createConversion(EbtInt, left);
        if (right->getBasicType() == EbtBool)
            right = createConversion(EbtInt, right);
        if (left == nullptr || right == nullptr)
        node.setRight(right);

        // Update the original base assumption on result type..
        node.setType(left->getType());
        node.getWritableType().getQualifier().clear();

// Do general type checks against individual operands (comparing left and right is coming up, checking mixed shapes after that)
case EOpGreaterThan:
case EOpLessThanEqual:
case EOpGreaterThanEqual:
    // Relational comparisons need numeric types and will promote to scalar Boolean.
    if (left->getBasicType() == EbtBool)

    node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));

    if (getSource() == EShSourceHlsl) {
        const int resultWidth = std::max(left->getVectorSize(), right->getVectorSize());

        // In HLSL, == or != on vectors means component-wise comparison.
        if (resultWidth > 1) {
            op = (op == EOpEqual) ? EOpVectorEqual : EOpVectorNotEqual;

        node.setType(TType(EbtBool, EvqTemporary, resultWidth));
        // All the above comparisons result in a bool (but not the vector compares)
        node.setType(TType(EbtBool));

    // logical ops operate only on Booleans or vectors of Booleans.
    if (left->getBasicType() != EbtBool || left->isMatrix())

    if (getSource() == EShSourceGlsl) {
        // logical ops operate only on scalar Booleans and will promote to scalar Boolean.
        if (left->isVector())

    node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));

case EOpRightShiftAssign:
case EOpLeftShiftAssign:
case EOpInclusiveOr:
case EOpExclusiveOr:
case EOpInclusiveOrAssign:
case EOpExclusiveOrAssign:
    if (getSource() == EShSourceHlsl)

    // Check for integer-only operands.
    if (!isTypeInt(left->getBasicType()) && !isTypeInt(right->getBasicType()))
    if (left->isMatrix() || right->isMatrix())

    // check for non-Boolean operands
    if (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)

// Compare left and right, and finish with the cases where the operand types must match
case EOpGreaterThan:
case EOpLessThanEqual:
case EOpGreaterThanEqual:

case EOpVectorEqual:
case EOpVectorNotEqual:

    return left->getType() == right->getType();

case EOpInclusiveOr:
case EOpExclusiveOr:

case EOpInclusiveOrAssign:
case EOpExclusiveOrAssign:

    // Quick out in case the types do match
    if (left->getType() == right->getType())

    // At least the basic type has to match
    if (left->getBasicType() != right->getBasicType())

// Cooperative-matrix operands: types must agree, and matrix*scalar is the
// only mixed form, rewritten to the MatrixTimesScalar ops.
if (left->getType().isCoopMat() || right->getType().isCoopMat()) {
    if (left->getType().isCoopMat() && right->getType().isCoopMat() &&
        *left->getType().getTypeParameters() != *right->getType().getTypeParameters()) {
        if (left->getType().isCoopMat() && right->getType().isCoopMat()) {
        if (op == EOpMulAssign && right->getType().isCoopMat()) {
        node.setOp(op == EOpMulAssign ? EOpMatrixTimesScalarAssign : EOpMatrixTimesScalar);
        if (right->getType().isCoopMat()) {
            node.setType(right->getType());
        // These require both to be cooperative matrices
        if (!left->getType().isCoopMat() || !right->getType().isCoopMat()) {

// Finish handling the case, for all ops, where both operands are scalars.
if (left->isScalar() && right->isScalar())

// Finish handling the case, for all ops, where there are two vectors of different sizes
if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize() && right->getVectorSize() > 1)

// We now have a mix of scalars, vectors, or matrices, for non-relational operations.

// Can these two operands be combined, what is the resulting type?
TBasicType basicType = left->getBasicType();
// '*' family: pick the specific vector/matrix opcode and result shape.
if (!left->isMatrix() && right->isMatrix()) {
    if (left->isVector()) {
        if (left->getVectorSize() != right->getMatrixRows())
        node.setOp(op = EOpVectorTimesMatrix);
        node.setType(TType(basicType, EvqTemporary, right->getMatrixCols()));
        node.setOp(op = EOpMatrixTimesScalar);
        node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), right->getMatrixRows()));
} else if (left->isMatrix() && !right->isMatrix()) {
    if (right->isVector()) {
        if (left->getMatrixCols() != right->getVectorSize())
        node.setOp(op = EOpMatrixTimesVector);
        node.setType(TType(basicType, EvqTemporary, left->getMatrixRows()));
        node.setOp(op = EOpMatrixTimesScalar);
} else if (left->isMatrix() && right->isMatrix()) {
    if (left->getMatrixCols() != right->getMatrixRows())
    node.setOp(op = EOpMatrixTimesMatrix);
    node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), left->getMatrixRows()));
} else if (! left->isMatrix() && ! right->isMatrix()) {
    if (left->isVector() && right->isVector()) {
        ; // leave as component product
    } else if (left->isVector() || right->isVector()) {
        node.setOp(op = EOpVectorTimesScalar);
        if (right->isVector())
            node.setType(TType(basicType, EvqTemporary, right->getVectorSize()));

// '*=' family: same analysis, but the result must stay the left operand's shape.
if (! left->isMatrix() && right->isMatrix()) {
    if (left->isVector()) {
        if (left->getVectorSize() != right->getMatrixRows() || left->getVectorSize() != right->getMatrixCols())
        node.setOp(op = EOpVectorTimesMatrixAssign);
} else if (left->isMatrix() && !right->isMatrix()) {
    if (right->isVector()) {
        node.setOp(op = EOpMatrixTimesScalarAssign);
} else if (left->isMatrix() && right->isMatrix()) {
    if (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixCols() != right->getMatrixRows())
    node.setOp(op = EOpMatrixTimesMatrixAssign);
} else if (!left->isMatrix() && !right->isMatrix()) {
    if (left->isVector() && right->isVector()) {
        // leave as component product
    } else if (left->isVector() || right->isVector()) {
        if (! left->isVector())
        node.setOp(op = EOpVectorTimesScalarAssign);

case EOpRightShiftAssign:
case EOpLeftShiftAssign:
    // Shift by a vector requires a matching-width vector on the left.
    if (right->isVector() && (! left->isVector() || right->getVectorSize() != left->getVectorSize()))

    // Component-wise ops need identical shapes.
    if (left->getVectorSize() != right->getVectorSize() || left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows())

case EOpInclusiveOr:
case EOpExclusiveOr:

case EOpInclusiveOrAssign:
case EOpExclusiveOrAssign:

    // No mixed matrix/vector operands, and basic types must agree.
    if ((left->isMatrix() && right->isVector()) ||
        (left->isVector() && right->isMatrix()) ||
        left->getBasicType() != right->getBasicType())
    if (left->isMatrix() && right->isMatrix() && (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows()))
    if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize())
    if (right->isVector() || right->isMatrix()) {
        node.getWritableType().shallowCopy(right->getType());
        node.getWritableType().getQualifier().makeTemporary();

// One more check for assignment.

    // The resulting type has to match the left operand.
    case EOpInclusiveOrAssign:
    case EOpExclusiveOrAssign:
    case EOpLeftShiftAssign:
    case EOpRightShiftAssign:
        if (node.getType() != left->getType())
3714 // See TIntermediate::promote
// Promote the argument types of an intrinsic call so all arguments share one type.
// Returns whether promotion succeeded (per the elided return paths).
// NOTE(review): this listing elides several original lines (opening brace, the
// switch over promotable opcodes, early returns, closing braces) — gaps in the
// embedded line numbers mark where they belong.
3716 bool TIntermediate::promoteAggregate(TIntermAggregate& node)
3718 TOperator op = node.getOp();
3719 TIntermSequence& args = node.getSequence();
3720 const int numArgs = static_cast<int>(args.size());
3722 // Presently, only hlsl does intrinsic promotions.
3723 if (getSource() != EShSourceHlsl)
3726 // set of opcodes that can be promoted in this manner.
3734 case EOpFaceForward:
3735 // case EOpFindMSB: TODO:
3736 // case EOpFindLSB: TODO:
3746 // case EOpGenMul: TODO:
3750 // case EOpSinCos: TODO:
3758 // TODO: array and struct behavior
3760 // Try converting all nodes to the given node's type
3761 TIntermSequence convertedArgs(numArgs, nullptr);
3763 // Try to convert all types to the nonConvArg type.
3764 for (int nonConvArg = 0; nonConvArg < numArgs; ++nonConvArg) {
3765 // Try converting all args to this arg's type
3766 for (int convArg = 0; convArg < numArgs; ++convArg) {
// addConversion returns nullptr when a conversion is not possible; that
// nullptr is what the all_of check below tests for.
3767 convertedArgs[convArg] = addConversion(op, args[nonConvArg]->getAsTyped()->getType(),
3768 args[convArg]->getAsTyped());
3771 // If we successfully converted all the args, use the result.
3772 if (std::all_of(convertedArgs.begin(), convertedArgs.end(),
3773 [](const TIntermNode* node) { return node != nullptr; })) {
// Install the converted argument list into the call node.
3775 std::swap(args, convertedArgs);
3783 // Propagate precision qualifiers *up* from children to parent, and then
3784 // back *down* again to the children's subtrees.
// Take the max precision of the operands as this aggregate's precision, then
// push that precision back down into the operand subtrees.
// Only applies to int/uint/float-typed nodes (the check below).
// NOTE(review): the listing elides the null-checks on typedNode and the
// closing braces (gaps in the embedded numbering).
3785 void TIntermAggregate::updatePrecision()
3787 if (getBasicType() == EbtInt || getBasicType() == EbtUint ||
3788 getBasicType() == EbtFloat) {
3789 TPrecisionQualifier maxPrecision = EpqNone;
3790 TIntermSequence operands = getSequence();
// First pass: compute the max precision over all typed operands.
3791 for (unsigned int i = 0; i < operands.size(); ++i) {
3792 TIntermTyped* typedNode = operands[i]->getAsTyped();
3794 maxPrecision = std::max(maxPrecision, typedNode->getQualifier().precision);
3796 getQualifier().precision = maxPrecision;
// Second pass: propagate the computed precision down each operand subtree.
3797 for (unsigned int i = 0; i < operands.size(); ++i) {
3798 TIntermTyped* typedNode = operands[i]->getAsTyped();
3800 typedNode->propagatePrecision(maxPrecision);
3805 // Propagate precision qualifiers *up* from children to parent, and then
3806 // back *down* again to the children's subtrees.
// Derive this binary node's precision from its operands, then propagate it
// back down to both children. Shifts are special-cased: precision comes from
// the left operand only.
// NOTE(review): an `else` line and the closing braces are elided in this
// listing (gap between embedded lines 3813 and 3815).
3807 void TIntermBinary::updatePrecision()
3809 if (getBasicType() == EbtInt || getBasicType() == EbtUint ||
3810 getBasicType() == EbtFloat) {
3811 if (op == EOpRightShift || op == EOpLeftShift) {
3812 // For shifts get precision from left side only and thus no need to propagate
3813 getQualifier().precision = left->getQualifier().precision;
// Non-shift case: take the max of both operand precisions and push it down.
3815 getQualifier().precision = std::max(right->getQualifier().precision, left->getQualifier().precision);
3816 if (getQualifier().precision != EpqNone) {
3817 left->propagatePrecision(getQualifier().precision);
3818 right->propagatePrecision(getQualifier().precision);
3824 // Recursively propagate precision qualifiers *down* the subtree of the current node,
3825 // until reaching a node that already has a precision qualifier or otherwise does
3826 // not participate in precision propagation.
// Recursively push newPrecision down this subtree, stopping at nodes that
// already carry a precision or whose basic type does not participate
// (only int/uint/float/float16 participate, per the guard below).
// NOTE(review): the elided lines (numbering gaps) include the early `return`,
// null checks on the casts, and closing braces.
3827 void TIntermTyped::propagatePrecision(TPrecisionQualifier newPrecision)
3829 if (getQualifier().precision != EpqNone ||
3830 (getBasicType() != EbtInt && getBasicType() != EbtUint &&
3831 getBasicType() != EbtFloat && getBasicType() != EbtFloat16))
3834 getQualifier().precision = newPrecision;
// Binary node: recurse into both operands.
3836 TIntermBinary* binaryNode = getAsBinaryNode();
3838 binaryNode->getLeft()->propagatePrecision(newPrecision);
3839 binaryNode->getRight()->propagatePrecision(newPrecision);
// Unary node: recurse into the single operand.
3844 TIntermUnary* unaryNode = getAsUnaryNode();
3846 unaryNode->getOperand()->propagatePrecision(newPrecision);
// Aggregate node: recurse into every typed operand in the sequence.
3851 TIntermAggregate* aggregateNode = getAsAggregate();
3852 if (aggregateNode) {
3853 TIntermSequence operands = aggregateNode->getSequence();
3854 for (unsigned int i = 0; i < operands.size(); ++i) {
3855 TIntermTyped* typedNode = operands[i]->getAsTyped();
3858 typedNode->propagatePrecision(newPrecision);
// Selection node: recurse into both the true and false blocks.
3864 TIntermSelection* selectionNode = getAsSelectionNode();
3865 if (selectionNode) {
3866 TIntermTyped* typedNode = selectionNode->getTrueBlock()->getAsTyped();
3868 typedNode->propagatePrecision(newPrecision);
3869 typedNode = selectionNode->getFalseBlock()->getAsTyped();
3871 typedNode->propagatePrecision(newPrecision);
// Fold a constant-union node to a new basic type, converting every component.
// Returns a fresh constant node of type promoteTo (same vector/matrix shape,
// per the TType construction at the end), or the original node unchanged when
// either the source or target type is not handled (the `default: return node`
// arms).
3878 TIntermTyped* TIntermediate::promoteConstantUnion(TBasicType promoteTo, TIntermConstantUnion* node) const
3880 const TConstUnionArray& rightUnionArray = node->getConstArray();
3881 int size = node->getType().computeNumComponents();
3883 TConstUnionArray leftUnionArray(size);
// Convert component-by-component.
3885 for (int i=0; i < size; i++) {
// PROMOTE: store component i of the source into the destination via a
// static_cast to the destination's C type. PROMOTE_TO_BOOL: nonzero -> true.
3887 #define PROMOTE(Set, CType, Get) leftUnionArray[i].Set(static_cast<CType>(rightUnionArray[i].Get()))
3888 #define PROMOTE_TO_BOOL(Get) leftUnionArray[i].setBConst(rightUnionArray[i].Get() != 0)
// NOTE(review): TO_ALL is defined twice below; in the original file the two
// definitions sit on opposite sides of a preprocessor conditional (a reduced
// type set vs. the full type set) that is elided from this listing — confirm
// against the unabridged file.
3891 #define TO_ALL(Get) \
3892 switch (promoteTo) { \
3893 case EbtFloat: PROMOTE(setDConst, double, Get); break; \
3894 case EbtInt: PROMOTE(setIConst, int, Get); break; \
3895 case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \
3896 case EbtBool: PROMOTE_TO_BOOL(Get); break; \
3897 default: return node; \
3900 #define TO_ALL(Get) \
3901 switch (promoteTo) { \
3902 case EbtFloat16: PROMOTE(setDConst, double, Get); break; \
3903 case EbtFloat: PROMOTE(setDConst, double, Get); break; \
3904 case EbtDouble: PROMOTE(setDConst, double, Get); break; \
3905 case EbtInt8: PROMOTE(setI8Const, signed char, Get); break; \
3906 case EbtInt16: PROMOTE(setI16Const, short, Get); break; \
3907 case EbtInt: PROMOTE(setIConst, int, Get); break; \
3908 case EbtInt64: PROMOTE(setI64Const, long long, Get); break; \
3909 case EbtUint8: PROMOTE(setU8Const, unsigned char, Get); break; \
3910 case EbtUint16: PROMOTE(setU16Const, unsigned short, Get); break; \
3911 case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \
3912 case EbtUint64: PROMOTE(setU64Const, unsigned long long, Get); break; \
3913 case EbtBool: PROMOTE_TO_BOOL(Get); break; \
3914 default: return node; \
// Dispatch on the SOURCE basic type; TO_ALL then switches on the target type.
3918 switch (node->getType().getBasicType()) {
3919 case EbtFloat: TO_ALL(getDConst); break;
3920 case EbtInt: TO_ALL(getIConst); break;
3921 case EbtUint: TO_ALL(getUConst); break;
3922 case EbtBool: TO_ALL(getBConst); break;
3924 case EbtFloat16: TO_ALL(getDConst); break;
3925 case EbtDouble: TO_ALL(getDConst); break;
3926 case EbtInt8: TO_ALL(getI8Const); break;
3927 case EbtInt16: TO_ALL(getI16Const); break;
3928 case EbtInt64: TO_ALL(getI64Const); break;
3929 case EbtUint8: TO_ALL(getU8Const); break;
3930 case EbtUint16: TO_ALL(getU16Const); break;
3931 case EbtUint64: TO_ALL(getU64Const); break;
3933 default: return node;
// Build the result node: same storage qualifier and shape, new basic type.
3937 const TType& t = node->getType();
3939 return addConstantUnion(leftUnionArray, TType(promoteTo, t.getQualifier().storage, t.getVectorSize(), t.getMatrixCols(), t.getMatrixRows()),
// Install a copy of the given pragma table on this aggregate.
// May be called only once per node: the assert fires if a table is already set.
3943 void TIntermAggregate::setPragmaTable(const TPragmaTable& pTable)
3945 assert(pragmaTable == nullptr);
// Deep-copy: the node owns its own table, independent of the caller's.
3946 pragmaTable = new TPragmaTable;
3947 *pragmaTable = pTable;
3950 // If either node is a specialization constant, while the other is
3951 // a constant (or specialization constant), the result is still
3952 // a specialization constant.
// True when combining node1 and node2 should yield a specialization constant:
// one operand is a spec-constant and the other is a (spec-)constant.
3953 bool TIntermediate::specConstantPropagates(const TIntermTyped& node1, const TIntermTyped& node2)
3955 return (node1.getType().getQualifier().isSpecConstant() && node2.getType().getQualifier().isConstant()) ||
3956 (node2.getType().getQualifier().isSpecConstant() && node1.getType().getQualifier().isConstant());
// Traverser that (a) upgrades texture symbols to combined texture-samplers and
// (b) removes pure-sampler variables and unwraps EOpConstructTextureSampler
// constructors inside aggregate sequences.
// NOTE(review): the tail of visitAggregate (advancing `write`, resizing
// seq/qual, and the return) is elided from this listing — gaps in the
// embedded numbering after 3993.
3959 struct TextureUpgradeAndSamplerRemovalTransform : public TIntermTraverser {
// Mark every texture-typed sampler symbol as a combined texture-sampler.
3960 void visitSymbol(TIntermSymbol* symbol) override {
3961 if (symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isTexture()) {
3962 symbol->getWritableType().getSampler().setCombined(true);
// Compact the aggregate's sequence in place, dropping pure samplers and
// replacing texture-sampler constructors with their first argument.
3965 bool visitAggregate(TVisit, TIntermAggregate* ag) override {
3966 using namespace std;
3967 TIntermSequence& seq = ag->getSequence();
3968 TQualifierList& qual = ag->getQualifierList();
3970 // qual and seq are indexed using the same indices, so we have to modify both in lock-step
3971 assert(seq.size() == qual.size() || qual.empty());
// In-place filter: `write` trails `i` (read index); kept entries are copied down.
3974 for (size_t i = 0; i < seq.size(); ++i) {
3975 TIntermSymbol* symbol = seq[i]->getAsSymbolNode();
3976 if (symbol && symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isPureSampler()) {
3977 // remove pure sampler variables
3981 TIntermNode* result = seq[i];
3983 // replace constructors with sampler/textures
3984 TIntermAggregate *constructor = seq[i]->getAsAggregate();
3985 if (constructor && constructor->getOp() == EOpConstructTextureSampler) {
3986 if (!constructor->getSequence().empty())
3987 result = constructor->getSequence()[0];
3990 // write new node & qualifier
3991 seq[write] = result;
// Only mirror into qual when it is non-empty (see the assert above).
3993 qual[write] = qual[i];
// Run the texture-upgrade / pure-sampler-removal transform over the whole
// tree rooted at `root` (see TextureUpgradeAndSamplerRemovalTransform above... 
// NOTE(review): intentionally self-contained: the traverser is defined in
// this same translation unit).
4005 void TIntermediate::performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root)
4007 TextureUpgradeAndSamplerRemovalTransform transform;
4008 root->traverse(&transform);
// Map a TResourceType to the name of its shift-binding option string.
// Falls through to assert(0) for values outside the handled set.
// NOTE(review): the `switch (res) {` line and function braces are elided from
// this listing (gap between embedded lines 4011 and 4014).
4011 const char* TIntermediate::getResourceName(TResourceType res)
4014 case EResSampler: return "shift-sampler-binding";
4015 case EResTexture: return "shift-texture-binding";
4016 case EResImage: return "shift-image-binding";
4017 case EResUbo: return "shift-UBO-binding";
4018 case EResSsbo: return "shift-ssbo-binding";
4019 case EResUav: return "shift-uav-binding";
4021 assert(0); // internal error: should only be called with valid resource types.
4027 } // end namespace glslang