2 // Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
3 // Copyright (C) 2012-2015 LunarG, Inc.
4 // Copyright (C) 2015-2020 Google, Inc.
5 // Copyright (C) 2017 ARM Limited.
7 // All rights reserved.
9 // Redistribution and use in source and binary forms, with or without
10 // modification, are permitted provided that the following conditions
13 // Redistributions of source code must retain the above copyright
14 // notice, this list of conditions and the following disclaimer.
16 // Redistributions in binary form must reproduce the above
17 // copyright notice, this list of conditions and the following
18 // disclaimer in the documentation and/or other materials provided
19 // with the distribution.
21 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
22 // contributors may be used to endorse or promote products derived
23 // from this software without specific prior written permission.
25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
33 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
35 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 // POSSIBILITY OF SUCH DAMAGE.
40 // Build the intermediate representation.
43 #include "localintermediate.h"
44 #include "RemoveTree.h"
45 #include "SymbolTable.h"
46 #include "propagateNoContraction.h"
54 ////////////////////////////////////////////////////////////////////////////
56 // First set of functions are to help build the intermediate representation.
57 // These functions are not member functions of the nodes.
58 // They are called from parser productions.
60 /////////////////////////////////////////////////////////////////////////////
63 // Add a terminal node for an identifier in an expression.
65 // Returns the added node.
// Build a leaf symbol node for an identifier, carrying its id/name/type and any
// attached compile-time constant data (constArray) or constant subtree.
// NOTE(review): this dump omits some original lines here (likely the opening
// brace, a setLoc(loc) call, and the final `return node;`) — confirm against
// the complete file.
68 TIntermSymbol* TIntermediate::addSymbol(long long id, const TString& name, const TType& type, const TConstUnionArray& constArray,
69                                         TIntermTyped* constSubtree, const TSourceLoc& loc)
71     TIntermSymbol* node = new TIntermSymbol(id, name, type);
73     node->setConstArray(constArray);
74     node->setConstSubtree(constSubtree);
// Clone-style overload: build a fresh symbol node that mirrors an existing
// TIntermSymbol (same id, name, type, constant data, and source location).
79 TIntermSymbol* TIntermediate::addSymbol(const TIntermSymbol& intermSymbol)
81     return addSymbol(intermSymbol.getId(),
82                      intermSymbol.getName(),
83                      intermSymbol.getType(),
84                      intermSymbol.getConstArray(),
85                      intermSymbol.getConstSubtree(),
86                      intermSymbol.getLoc());
// Overload for a TVariable when no source location is available: forwards to
// the (variable, loc) overload with a default-constructed (null) location.
89 TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable)
91     glslang::TSourceLoc loc; // just a null location
94     return addSymbol(variable, loc);
// Overload for a TVariable with an explicit source location; unpacks the
// variable's unique id, name, type, and constant data for the core overload.
97 TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable, const TSourceLoc& loc)
99     return addSymbol(variable.getUniqueId(), variable.getName(), variable.getType(), variable.getConstArray(), variable.getConstSubtree(), loc);
// Overload for a bare type: builds an anonymous symbol (id 0, empty name) with
// no constant data attached.
102 TIntermSymbol* TIntermediate::addSymbol(const TType& type, const TSourceLoc& loc)
104     TConstUnionArray unionArray; // just a null constant
106     return addSymbol(0, "", type, unionArray, nullptr, loc);
110 // Connect two nodes with a new parent that does a binary operation on the nodes.
112 // Returns the added node.
114 // Returns nullptr if the working conversions and promotions could not be found.
// Join two expression nodes under a new binary-operator parent, performing the
// type/shape conversions, constant folding, and qualifier propagation required
// by the operation. Returns nullptr when no valid conversion/promotion exists.
// NOTE(review): this dump omits several original lines (early `return nullptr;`
// statements, braces, and the final `return node;`) — confirm against the
// complete file.
116 TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc)
118     // No operations work on blocks
119     if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
122     // Convert "reference +/- int" and "reference - reference" to integer math
123     if (op == EOpAdd || op == EOpSub) {
125         // No addressing math on struct with unsized array.
126         if ((left->isReference() && left->getType().getReferentType()->containsUnsizedArray()) ||
127             (right->isReference() && right->getType().getReferentType()->containsUnsizedArray())) {
// Case: reference + int or reference - int. Scale the integer by the
// referent's size, do the math in 64-bit, then cast back to the reference type.
131         if (left->isReference() && isTypeInt(right->getBasicType())) {
132             const TType& referenceType = left->getType();
133             TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
134             left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
136             right = createConversion(EbtInt64, right);
137             right = addBinaryMath(EOpMul, right, size, loc);
139             TIntermTyped *node = addBinaryMath(op, left, right, loc);
140             node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
// Mirror case: int + reference (addition only, since int - reference is
// meaningless); same scale-then-convert-back scheme as above.
145         if (op == EOpAdd && right->isReference() && isTypeInt(left->getBasicType())) {
146             const TType& referenceType = right->getType();
147             TIntermConstantUnion* size =
148                 addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true);
149             right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
151             left = createConversion(EbtInt64, left);
152             left = addBinaryMath(EOpMul, left, size, loc);
154             TIntermTyped *node = addBinaryMath(op, left, right, loc);
155             node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
// Case: reference - reference. Compute the signed byte difference, then
// divide by the referent size to yield an element count.
159         if (op == EOpSub && left->isReference() && right->isReference()) {
160             TIntermConstantUnion* size =
161                 addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
163             left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
164             right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
166             left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64));
167             right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64));
169             left = addBinaryMath(EOpSub, left, right, loc);
171             TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc);
175     // No other math operators supported on references
176     if (left->isReference() || right->isReference())
179     // Try converting the children's base types to compatible types.
180     auto children = addPairConversion(op, left, right);
181     left = std::get<0>(children);
182     right = std::get<1>(children);
184     if (left == nullptr || right == nullptr)
187     // Convert the children's type shape to be compatible.
// NOTE(review): addBiShapeConversion appears to take the operands by
// reference (left/right are re-checked for nullptr right after) — confirm.
188     addBiShapeConversion(op, left, right);
189     if (left == nullptr || right == nullptr)
193     // Need a new node holding things together. Make
194     // one and promote it to the right type.
196     TIntermBinary* node = addBinaryNode(op, left, right, loc);
200     node->updatePrecision();
203     // If they are both (non-specialization) constants, they must be folded.
204     // (Unless it's the sequence (comma) operator, but that's handled in addComma().)
206     TIntermConstantUnion *leftTempConstant = node->getLeft()->getAsConstantUnion();
207     TIntermConstantUnion *rightTempConstant = node->getRight()->getAsConstantUnion();
208     if (leftTempConstant && rightTempConstant) {
209         TIntermTyped* folded = leftTempConstant->fold(node->getOp(), rightTempConstant);
214     // If can propagate spec-constantness and if the operation is an allowed
215     // specialization-constant operation, make a spec-constant.
216     if (specConstantPropagates(*node->getLeft(), *node->getRight()) && isSpecializationOperation(*node))
217         node->getWritableType().getQualifier().makeSpecConstant();
219     // If must propagate nonuniform, make a nonuniform.
220     if ((node->getLeft()->getQualifier().isNonUniform() || node->getRight()->getQualifier().isNonUniform()) &&
221         isNonuniformPropagating(node->getOp()))
222         node->getWritableType().getQualifier().nonUniform = true;
228 // Low level: add binary node (no promotions or other argument modifications)
// Low-level constructor of a binary node: no promotion, conversion, or
// folding. Falls back to the left child's location when loc is null (line 0).
// NOTE(review): the dump omits lines between setLoc and setRight — a
// setLeft(left) call and the `return node;` are presumably among them; confirm.
230 TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right,
231                                             const TSourceLoc& loc) const
234     TIntermBinary* node = new TIntermBinary(op);
235     node->setLoc(loc.line != 0 ? loc : left->getLoc());
237     node->setRight(right);
243 // like non-type form, but sets node's type.
// Typed variant of the low-level binary-node builder: delegates to the
// untyped form, then (per the comment above) sets the node's result type.
245 TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right,
246                                             const TSourceLoc& loc, const TType& type) const
248     TIntermBinary* node = addBinaryNode(op, left, right, loc);
254 // Low level: add unary node (no promotions or other argument modifications)
// Low-level constructor of a unary node: no promotion, conversion, or
// folding. Falls back to the child's location when loc is null (line 0).
256 TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc& loc) const
258     TIntermUnary* node = new TIntermUnary(op);
259     node->setLoc(loc.line != 0 ? loc : child->getLoc());
260     node->setOperand(child);
266 // like non-type form, but sets node's type.
// Typed variant of the low-level unary-node builder: delegates to the untyped
// form, then (per the comment above) sets the node's result type.
268 TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc& loc, const TType& type)
271     TIntermUnary* node = addUnaryNode(op, child, loc);
277 // Connect two nodes through an assignment.
279 // Returns the added node.
281 // Returns nullptr if the 'right' type could not be converted to match the 'left' type,
282 // or the resulting operation cannot be properly promoted.
// Join left and right under an assignment operator. Conversions flow only from
// right to left (the left type is fixed). Returns nullptr when 'right' cannot
// be converted to the left type or the promotion fails.
// NOTE(review): this dump omits lines (early returns, braces, final return) —
// confirm against the complete file.
284 TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right,
285                                        const TSourceLoc& loc)
287     // No block assignment
288     if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
291     // Convert "reference += int" to "reference = reference + int". We need this because the
292     // "reference + int" calculation involves a cast back to the original type, which makes it
294     if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference()) {
// Only scalar integer right-hand sides are eligible for reference arithmetic.
295         if (!(right->getType().isScalar() && right->getType().isIntegerDomain()))
298         TIntermTyped* node = addBinaryMath(op == EOpAddAssign ? EOpAdd : EOpSub, left, right, loc);
// Re-create the left symbol so the assignment target is a fresh node rather
// than the same node used inside the computed right-hand side.
302         TIntermSymbol* symbol = left->getAsSymbolNode();
303         left = addSymbol(*symbol);
305         node = addAssign(EOpAssign, left, node, loc);
310     // Like adding binary math, except the conversion can only go
311     // from right to left.
314     // convert base types, nullptr return means not possible
315     right = addConversion(op, left->getType(), right);
316     if (right == nullptr)
// Match the right operand's shape (scalar/vector/matrix) to the left's.
320     right = addUniShapeConversion(op, left->getType(), right);
323     TIntermBinary* node = addBinaryNode(op, left, right, loc);
328     node->updatePrecision();
334 // Connect two nodes through an index operator, where the left node is the base
335 // of an array or struct, and the right node is a direct or indirect offset.
337 // Returns the added node.
338 // The caller should set the type of the returned node.
// Join a base (array/struct) and an index (direct or indirect offset) under an
// index operator. Deliberately leaves the result type unset — the caller must
// set it (see comment below).
340 TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index,
341                                       const TSourceLoc& loc)
343     // caller should set the type
344     return addBinaryNode(op, base, index, loc);
348 // Add one node as the parent of another that it operates on.
350 // Returns the added node.
// Wrap 'child' in a new unary-operator parent, validating the operand,
// performing any needed promotion/conversion, folding constants, and
// propagating spec-constant and nonuniform qualifiers.
// NOTE(review): the dump omits lines here — the switch headers, early
// `return nullptr;` statements, and some braces are missing; confirm against
// the complete file.
352 TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child,
353                                           const TSourceLoc& loc)
355     if (child == nullptr)
// Unary operations are never valid on blocks.
358     if (child->getType().getBasicType() == EbtBlock)
363         if (getSource() == EShSourceHlsl) {
364             break; // HLSL can promote logical not
// GLSL logical-not requires a scalar bool operand.
367         if (child->getType().getBasicType() != EbtBool || child->getType().isMatrix() || child->getType().isArray() || child->getType().isVector()) {
372     case EOpPostIncrement:
373     case EOpPreIncrement:
374     case EOpPostDecrement:
375     case EOpPreDecrement:
// ++/-- are not defined on structs or arrays.
377         if (child->getType().getBasicType() == EbtStruct || child->getType().isArray())
379     default: break; // some compilers want this
383     // Do we need to promote the operand?
// Map each scalar constructor op to the basic type it converts to; EbtVoid
// means "no promotion needed".
385     TBasicType newType = EbtVoid;
387     case EOpConstructBool:    newType = EbtBool;    break;
388     case EOpConstructFloat:   newType = EbtFloat;   break;
389     case EOpConstructInt:     newType = EbtInt;     break;
390     case EOpConstructUint:    newType = EbtUint;    break;
392     case EOpConstructInt8:    newType = EbtInt8;    break;
393     case EOpConstructUint8:   newType = EbtUint8;   break;
394     case EOpConstructInt16:   newType = EbtInt16;   break;
395     case EOpConstructUint16:  newType = EbtUint16;  break;
396     case EOpConstructInt64:   newType = EbtInt64;   break;
397     case EOpConstructUint64:  newType = EbtUint64;  break;
398     case EOpConstructDouble:  newType = EbtDouble;  break;
399     case EOpConstructFloat16: newType = EbtFloat16; break;
401     default: break; // some compilers want this
// Perform the promotion, preserving the operand's shape (vector/matrix dims).
404     if (newType != EbtVoid) {
405         child = addConversion(op, TType(newType, EvqTemporary, child->getVectorSize(),
406                                                                child->getMatrixCols(),
407                                                                child->getMatrixRows(),
410         if (child == nullptr)
415     // For constructors, we are now done, it was all in the conversion.
416     // TODO: but, did this bypass constant folding?
419     case EOpConstructInt8:
420     case EOpConstructUint8:
421     case EOpConstructInt16:
422     case EOpConstructUint16:
423     case EOpConstructInt:
424     case EOpConstructUint:
425     case EOpConstructInt64:
426     case EOpConstructUint64:
427     case EOpConstructBool:
428     case EOpConstructFloat:
429     case EOpConstructDouble:
430     case EOpConstructFloat16: {
// The conversion may itself have produced a unary node; refresh its precision.
431         TIntermUnary* unary_node = child->getAsUnaryNode();
432         if (unary_node != nullptr)
433             unary_node->updatePrecision();
436     default: break; // some compilers want this
440     // Make a new node for the operator.
442     TIntermUnary* node = addUnaryNode(op, child, loc);
447     node->updatePrecision();
449     // If it's a (non-specialization) constant, it must be folded.
450     if (node->getOperand()->getAsConstantUnion())
451         return node->getOperand()->getAsConstantUnion()->fold(op, node->getType());
453     // If it's a specialization constant, the result is too,
454     // if the operation is allowed for specialization constants.
455     if (node->getOperand()->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node))
456         node->getWritableType().getQualifier().makeSpecConstant();
458     // If must propagate nonuniform, make a nonuniform.
459     if (node->getOperand()->getQualifier().isNonUniform() && isNonuniformPropagating(node->getOp()))
460         node->getWritableType().getQualifier().nonUniform = true;
// Build a call to a built-in as an IR node. A unary built-in becomes a
// TIntermUnary (folded immediately if the child is a constant); otherwise the
// child goes through setAggregateOperator(), which handles folding itself.
// NOTE(review): the dump omits lines (e.g. the `if (unary)` branch header and
// returns) — confirm against the complete file.
465 TIntermTyped* TIntermediate::addBuiltInFunctionCall(const TSourceLoc& loc, TOperator op, bool unary,
466                                                     TIntermNode* childNode, const TType& returnType)
470         // Treat it like a unary operator.
471         // addUnaryMath() should get the type correct on its own;
472         // including constness (which would differ from the prototype).
474         TIntermTyped* child = childNode->getAsTyped();
475         if (child == nullptr)
// Constant operand: fold now instead of emitting a runtime conversion node.
478         if (child->getAsConstantUnion()) {
479             TIntermTyped* folded = child->getAsConstantUnion()->fold(op, returnType);
484         return addUnaryNode(op, child, child->getLoc(), returnType);
486         // setAggregateOperater() calls fold() for constant folding
487         TIntermTyped* node = setAggregateOperator(childNode, op, returnType, loc);
494 // This is the safe way to change the operator on an aggregate, as it
495 // does lots of error checking and fixing. Especially for establishing
496 // a function call's operation on its set of parameters. Sequences
497 // of instructions are also aggregates, but they just directly set
498 // their operator to EOpSequence.
500 // Returns an aggregate node, which could be the one passed in if
501 // it was already an aggregate.
// Set the operator on (or wrap into) an aggregate node. Accepts nullptr,
// a plain node, or an existing aggregate; always returns an aggregate with
// 'op'/'type' set, passed through fold() for constant folding.
503 TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type,
504                                                   const TSourceLoc& loc)
506     TIntermAggregate* aggNode;
509     // Make sure we have an aggregate. If not turn it into one.
511     if (node != nullptr) {
512         aggNode = node->getAsAggregate();
// Reuse the node only if it is an operator-less aggregate; an aggregate that
// already has an operator must be nested inside a new one.
513         if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
515             // Make an aggregate containing this node.
517             aggNode = new TIntermAggregate();
518             aggNode->getSequence().push_back(node);
// node == nullptr: start from an empty aggregate.
521         aggNode = new TIntermAggregate();
526     aggNode->setOperator(op);
// Prefer the explicit location; fall back to the wrapped node's location.
527     if (loc.line != 0 || node != nullptr)
528         aggNode->setLoc(loc.line != 0 ? loc : node->getLoc());
530     aggNode->setType(type);
532     return fold(aggNode);
// Policy check: may 'node' participate in a conversion for operation 'op' at
// all? Opaque types (samplers, etc.) are mostly excluded, with the specific
// exceptions listed below.
// NOTE(review): the dump omits the case labels and return statements of this
// switch — confirm against the complete file.
535 bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const
538     // Does the base type even allow the operation?
540     switch (node->getBasicType()) {
546         // opaque types can be passed to functions
547         if (op == EOpFunction)
550         // HLSL can assign samplers directly (no constructor)
551         if (getSource() == EShSourceHlsl && node->getBasicType() == EbtSampler)
554         // samplers can get assigned via a sampler constructor
555         // (well, not yet, but code in the rest of this function is ready for it)
556         if (node->getBasicType() == EbtSampler && op == EOpAssign &&
557             node->getAsOperator() != nullptr && node->getAsOperator()->getOp() == EOpConstructTextureSampler)
560         // otherwise, opaque types can't even be operated on, let alone converted
// Select the conversion operator for src -> dst basic types, writing it to
// 'newOp'. Pure table lookup: one outer section per destination type, one case
// per source type.
// NOTE(review): the dump omits the outer switch/case scaffolding (the
// `switch (dst)` / `case Ebt...:` headers, inner `switch (src)` lines, default
// branches, and the function's return statements) — confirm against the
// complete file.
569 bool TIntermediate::buildConvertOp(TBasicType dst, TBasicType src, TOperator& newOp) const
// Destination: double.
575         case EbtUint:    newOp = EOpConvUintToDouble;    break;
576         case EbtBool:    newOp = EOpConvBoolToDouble;    break;
577         case EbtFloat:   newOp = EOpConvFloatToDouble;   break;
578         case EbtInt:     newOp = EOpConvIntToDouble;     break;
579         case EbtInt8:    newOp = EOpConvInt8ToDouble;    break;
580         case EbtUint8:   newOp = EOpConvUint8ToDouble;   break;
581         case EbtInt16:   newOp = EOpConvInt16ToDouble;   break;
582         case EbtUint16:  newOp = EOpConvUint16ToDouble;  break;
583         case EbtFloat16: newOp = EOpConvFloat16ToDouble; break;
584         case EbtInt64:   newOp = EOpConvInt64ToDouble;   break;
585         case EbtUint64:  newOp = EOpConvUint64ToDouble;  break;
// Destination: float.
593         case EbtInt:     newOp = EOpConvIntToFloat;     break;
594         case EbtUint:    newOp = EOpConvUintToFloat;    break;
595         case EbtBool:    newOp = EOpConvBoolToFloat;    break;
597         case EbtDouble:  newOp = EOpConvDoubleToFloat;  break;
598         case EbtInt8:    newOp = EOpConvInt8ToFloat;    break;
599         case EbtUint8:   newOp = EOpConvUint8ToFloat;   break;
600         case EbtInt16:   newOp = EOpConvInt16ToFloat;   break;
601         case EbtUint16:  newOp = EOpConvUint16ToFloat;  break;
602         case EbtFloat16: newOp = EOpConvFloat16ToFloat; break;
603         case EbtInt64:   newOp = EOpConvInt64ToFloat;   break;
604         case EbtUint64:  newOp = EOpConvUint64ToFloat;  break;
// Destination: float16.
613         case EbtInt8:    newOp = EOpConvInt8ToFloat16;   break;
614         case EbtUint8:   newOp = EOpConvUint8ToFloat16;  break;
615         case EbtInt16:   newOp = EOpConvInt16ToFloat16;  break;
616         case EbtUint16:  newOp = EOpConvUint16ToFloat16; break;
617         case EbtInt:     newOp = EOpConvIntToFloat16;    break;
618         case EbtUint:    newOp = EOpConvUintToFloat16;   break;
619         case EbtBool:    newOp = EOpConvBoolToFloat16;   break;
620         case EbtFloat:   newOp = EOpConvFloatToFloat16;  break;
621         case EbtDouble:  newOp = EOpConvDoubleToFloat16; break;
622         case EbtInt64:   newOp = EOpConvInt64ToFloat16;  break;
623         case EbtUint64:  newOp = EOpConvUint64ToFloat16; break;
// Destination: bool.
631         case EbtInt:     newOp = EOpConvIntToBool;     break;
632         case EbtUint:    newOp = EOpConvUintToBool;    break;
633         case EbtFloat:   newOp = EOpConvFloatToBool;   break;
635         case EbtDouble:  newOp = EOpConvDoubleToBool;  break;
636         case EbtInt8:    newOp = EOpConvInt8ToBool;    break;
637         case EbtUint8:   newOp = EOpConvUint8ToBool;   break;
638         case EbtInt16:   newOp = EOpConvInt16ToBool;   break;
639         case EbtUint16:  newOp = EOpConvUint16ToBool;  break;
640         case EbtFloat16: newOp = EOpConvFloat16ToBool; break;
641         case EbtInt64:   newOp = EOpConvInt64ToBool;   break;
642         case EbtUint64:  newOp = EOpConvUint64ToBool;  break;
// Destination: int8.
651         case EbtUint8:   newOp = EOpConvUint8ToInt8;   break;
652         case EbtInt16:   newOp = EOpConvInt16ToInt8;   break;
653         case EbtUint16:  newOp = EOpConvUint16ToInt8;  break;
654         case EbtInt:     newOp = EOpConvIntToInt8;     break;
655         case EbtUint:    newOp = EOpConvUintToInt8;    break;
656         case EbtInt64:   newOp = EOpConvInt64ToInt8;   break;
657         case EbtUint64:  newOp = EOpConvUint64ToInt8;  break;
658         case EbtBool:    newOp = EOpConvBoolToInt8;    break;
659         case EbtFloat:   newOp = EOpConvFloatToInt8;   break;
660         case EbtDouble:  newOp = EOpConvDoubleToInt8;  break;
661         case EbtFloat16: newOp = EOpConvFloat16ToInt8; break;
// Destination: uint8.
668         case EbtInt8:    newOp = EOpConvInt8ToUint8;    break;
669         case EbtInt16:   newOp = EOpConvInt16ToUint8;   break;
670         case EbtUint16:  newOp = EOpConvUint16ToUint8;  break;
671         case EbtInt:     newOp = EOpConvIntToUint8;     break;
672         case EbtUint:    newOp = EOpConvUintToUint8;    break;
673         case EbtInt64:   newOp = EOpConvInt64ToUint8;   break;
674         case EbtUint64:  newOp = EOpConvUint64ToUint8;  break;
675         case EbtBool:    newOp = EOpConvBoolToUint8;    break;
676         case EbtFloat:   newOp = EOpConvFloatToUint8;   break;
677         case EbtDouble:  newOp = EOpConvDoubleToUint8;  break;
678         case EbtFloat16: newOp = EOpConvFloat16ToUint8; break;
// Destination: int16.
686         case EbtUint8:   newOp = EOpConvUint8ToInt16;   break;
687         case EbtInt8:    newOp = EOpConvInt8ToInt16;    break;
688         case EbtUint16:  newOp = EOpConvUint16ToInt16;  break;
689         case EbtInt:     newOp = EOpConvIntToInt16;     break;
690         case EbtUint:    newOp = EOpConvUintToInt16;    break;
691         case EbtInt64:   newOp = EOpConvInt64ToInt16;   break;
692         case EbtUint64:  newOp = EOpConvUint64ToInt16;  break;
693         case EbtBool:    newOp = EOpConvBoolToInt16;    break;
694         case EbtFloat:   newOp = EOpConvFloatToInt16;   break;
695         case EbtDouble:  newOp = EOpConvDoubleToInt16;  break;
696         case EbtFloat16: newOp = EOpConvFloat16ToInt16; break;
// Destination: uint16.
703         case EbtInt8:    newOp = EOpConvInt8ToUint16;    break;
704         case EbtUint8:   newOp = EOpConvUint8ToUint16;   break;
705         case EbtInt16:   newOp = EOpConvInt16ToUint16;   break;
706         case EbtInt:     newOp = EOpConvIntToUint16;     break;
707         case EbtUint:    newOp = EOpConvUintToUint16;    break;
708         case EbtInt64:   newOp = EOpConvInt64ToUint16;   break;
709         case EbtUint64:  newOp = EOpConvUint64ToUint16;  break;
710         case EbtBool:    newOp = EOpConvBoolToUint16;    break;
711         case EbtFloat:   newOp = EOpConvFloatToUint16;   break;
712         case EbtDouble:  newOp = EOpConvDoubleToUint16;  break;
713         case EbtFloat16: newOp = EOpConvFloat16ToUint16; break;
// Destination: int.
722         case EbtUint:    newOp = EOpConvUintToInt;    break;
723         case EbtBool:    newOp = EOpConvBoolToInt;    break;
724         case EbtFloat:   newOp = EOpConvFloatToInt;   break;
726         case EbtInt8:    newOp = EOpConvInt8ToInt;    break;
727         case EbtUint8:   newOp = EOpConvUint8ToInt;   break;
728         case EbtInt16:   newOp = EOpConvInt16ToInt;   break;
729         case EbtUint16:  newOp = EOpConvUint16ToInt;  break;
730         case EbtDouble:  newOp = EOpConvDoubleToInt;  break;
731         case EbtFloat16: newOp = EOpConvFloat16ToInt; break;
732         case EbtInt64:   newOp = EOpConvInt64ToInt;   break;
733         case EbtUint64:  newOp = EOpConvUint64ToInt;  break;
// Destination: uint.
741         case EbtInt:     newOp = EOpConvIntToUint;     break;
742         case EbtBool:    newOp = EOpConvBoolToUint;    break;
743         case EbtFloat:   newOp = EOpConvFloatToUint;   break;
745         case EbtInt8:    newOp = EOpConvInt8ToUint;    break;
746         case EbtUint8:   newOp = EOpConvUint8ToUint;   break;
747         case EbtInt16:   newOp = EOpConvInt16ToUint;   break;
748         case EbtUint16:  newOp = EOpConvUint16ToUint;  break;
749         case EbtDouble:  newOp = EOpConvDoubleToUint;  break;
750         case EbtFloat16: newOp = EOpConvFloat16ToUint; break;
751         case EbtInt64:   newOp = EOpConvInt64ToUint;   break;
752         case EbtUint64:  newOp = EOpConvUint64ToUint;  break;
754         // For bindless texture type conversion, add a dummy convert op, just
755         // to generate a new TIntermTyped
756         // uvec2(any sampler type)
757         // uvec2(any image type)
758         case EbtSampler: newOp = EOpConvIntToUint; break;
// Destination: int64.
766         case EbtInt8:    newOp = EOpConvInt8ToInt64;    break;
767         case EbtUint8:   newOp = EOpConvUint8ToInt64;   break;
768         case EbtInt16:   newOp = EOpConvInt16ToInt64;   break;
769         case EbtUint16:  newOp = EOpConvUint16ToInt64;  break;
770         case EbtInt:     newOp = EOpConvIntToInt64;     break;
771         case EbtUint:    newOp = EOpConvUintToInt64;    break;
772         case EbtBool:    newOp = EOpConvBoolToInt64;    break;
773         case EbtFloat:   newOp = EOpConvFloatToInt64;   break;
774         case EbtDouble:  newOp = EOpConvDoubleToInt64;  break;
775         case EbtFloat16: newOp = EOpConvFloat16ToInt64; break;
776         case EbtUint64:  newOp = EOpConvUint64ToInt64;  break;
// Destination: uint64.
783         case EbtInt8:    newOp = EOpConvInt8ToUint64;    break;
784         case EbtUint8:   newOp = EOpConvUint8ToUint64;   break;
785         case EbtInt16:   newOp = EOpConvInt16ToUint64;   break;
786         case EbtUint16:  newOp = EOpConvUint16ToUint64;  break;
787         case EbtInt:     newOp = EOpConvIntToUint64;     break;
788         case EbtUint:    newOp = EOpConvUintToUint64;    break;
789         case EbtBool:    newOp = EOpConvBoolToUint64;    break;
790         case EbtFloat:   newOp = EOpConvFloatToUint64;   break;
791         case EbtDouble:  newOp = EOpConvDoubleToUint64;  break;
792         case EbtFloat16: newOp = EOpConvFloat16ToUint64; break;
793         case EbtInt64:   newOp = EOpConvInt64ToUint64;   break;
805 // This is 'mechanism' here, it does any conversion told.
806 // It is about basic type, not about shape.
807 // The policy comes from the shader or the calling code.
// 'Mechanism' for a basic-type conversion (not shape): wrap 'node' in a unary
// conversion node targeting 'convertTo', folding constants where allowed.
// Conversions touching 8/16-bit arithmetic types are rejected unless the
// corresponding arithmetic extension is enabled.
// NOTE(review): the dump omits some lines (early `return nullptr;` statements
// inside the extension checks and the final return) — confirm against the
// complete file.
808 TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped* node) const
811     // Add a new newNode for the conversion.
// Classify both endpoints of the conversion so the extension checks below can
// detect conversions crossing the int/float family boundary.
815     bool convertToIntTypes = (convertTo == EbtInt8  || convertTo == EbtUint8  ||
816                               convertTo == EbtInt16 || convertTo == EbtUint16 ||
817                               convertTo == EbtInt   || convertTo == EbtUint   ||
818                               convertTo == EbtInt64 || convertTo == EbtUint64);
820     bool convertFromIntTypes = (node->getBasicType() == EbtInt8  || node->getBasicType() == EbtUint8  ||
821                                 node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16 ||
822                                 node->getBasicType() == EbtInt   || node->getBasicType() == EbtUint   ||
823                                 node->getBasicType() == EbtInt64 || node->getBasicType() == EbtUint64);
825     bool convertToFloatTypes = (convertTo == EbtFloat16 || convertTo == EbtFloat || convertTo == EbtDouble);
827     bool convertFromFloatTypes = (node->getBasicType() == EbtFloat16 ||
828                                   node->getBasicType() == EbtFloat ||
829                                   node->getBasicType() == EbtDouble);
// 8-bit int endpoints require the int8 arithmetic extension unless the other
// side is also an integer type.
831     if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) ||
832         ((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes)) {
833         if (! getArithemeticInt8Enabled()) {
// Same gate for 16-bit integer endpoints.
838     if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! convertFromIntTypes) ||
839         ((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! convertToIntTypes)) {
840         if (! getArithemeticInt16Enabled()) {
// And for float16 endpoints crossing out of the float family.
845     if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) ||
846         (node->getBasicType() == EbtFloat16 && ! convertToFloatTypes)) {
847         if (! getArithemeticFloat16Enabled()) {
853     TIntermUnary* newNode = nullptr;
854     TOperator newOp = EOpNull;
// Look up the conversion op; failure means there is no src->dst conversion.
855     if (!buildConvertOp(convertTo, node->getBasicType(), newOp)) {
859     TType newType(convertTo, EvqTemporary, node->getVectorSize(), node->getMatrixCols(), node->getMatrixRows());
860     newNode = addUnaryNode(newOp, node, node->getLoc(), newType);
862     if (node->getAsConstantUnion()) {
864         // 8/16-bit storage extensions don't support 8/16-bit constants, so don't fold conversions
866         if ((getArithemeticInt8Enabled() || !(convertTo == EbtInt8 || convertTo == EbtUint8)) &&
867             (getArithemeticInt16Enabled() || !(convertTo == EbtInt16 || convertTo == EbtUint16)) &&
868             (getArithemeticFloat16Enabled() || !(convertTo == EbtFloat16)))
871             TIntermTyped* folded = node->getAsConstantUnion()->fold(newOp, newType);
877     // Propagate specialization-constant-ness, if allowed
878     if (node->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*newNode))
879         newNode->getWritableType().getQualifier().makeSpecConstant();
// Thin public wrapper: a direct basic-type conversion with no operation
// context, delegating straight to the conversion mechanism.
884 TIntermTyped* TIntermediate::addConversion(TBasicType convertTo, TIntermTyped* node) const
886     return createConversion(convertTo, node);
889 // For converting a pair of operands to a binary operation to compatible
890 // types with each other, relative to the operation in 'op'.
891 // This does not cover assignment operations, which is asymmetric in that the
892 // left type is not changeable.
893 // See addConversion(op, type, node) for assignments and unary operation
896 // Generally, this is focused on basic type conversion, not shape conversion.
897 // See addShapeConversion() for shape conversions.
899 // Returns the converted pair of nodes.
900 // Returns <nullptr, nullptr> when there is no conversion.
// Convert two binary-operation operands to mutually compatible basic types
// (the symmetric 'policy'; assignment is handled by addConversion(op, type,
// node)). Returns the converted pair, or <nullptr, nullptr> on failure.
// NOTE(review): the dump omits lines here (switch headers, several case
// labels, and some else branches) — confirm against the complete file.
901 std::tuple<TIntermTyped*, TIntermTyped*>
902 TIntermediate::addPairConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1)
904     if (!isConversionAllowed(op, node0) || !isConversionAllowed(op, node1))
905         return std::make_tuple(nullptr, nullptr);
907     if (node0->getType() != node1->getType()) {
908         // If differing structure, then no conversions.
909         if (node0->isStruct() || node1->isStruct())
910             return std::make_tuple(nullptr, nullptr);
912         // If differing arrays, then no conversions.
913         if (node0->getType().isArray() || node1->getType().isArray())
914             return std::make_tuple(nullptr, nullptr);
916         // No implicit conversions for operations involving cooperative matrices
917         if (node0->getType().isCoopMat() || node1->getType().isCoopMat())
918             return std::make_tuple(node0, node1);
// EbtNumTypes acts as the "no destination chosen" sentinel.
921     auto promoteTo = std::make_tuple(EbtNumTypes, EbtNumTypes);
925     // List all the binary ops that can implicitly convert one operand to the other's type;
926     // This implements the 'policy' for implicit type conversion.
930     case EOpLessThanEqual:
931     case EOpGreaterThanEqual:
941     case EOpVectorTimesScalar:
942     case EOpVectorTimesMatrix:
943     case EOpMatrixTimesVector:
944     case EOpMatrixTimesScalar:
950     case EOpSequence:          // used by ?:
// Identical basic types need no promotion; otherwise ask the standard rules
// which common destination the pair should promote to.
952         if (node0->getBasicType() == node1->getBasicType())
953             return std::make_tuple(node0, node1);
955         promoteTo = getConversionDestinationType(node0->getBasicType(), node1->getBasicType(), op);
956         if (std::get<0>(promoteTo) == EbtNumTypes || std::get<1>(promoteTo) == EbtNumTypes)
957             return std::make_tuple(nullptr, nullptr);
// Logical ops: HLSL promotes both operands to bool; GLSL requires no change.
964         if (getSource() == EShSourceHlsl)
965             promoteTo = std::make_tuple(EbtBool, EbtBool);
967             return std::make_tuple(node0, node1);
970         // There are no conversions needed for GLSL; the shift amount just needs to be an
971         // integer type, as does the base.
972         // HLSL can promote bools to ints to make this work.
975         if (getSource() == EShSourceHlsl) {
976             TBasicType node0BasicType = node0->getBasicType();
977             if (node0BasicType == EbtBool)
978                 node0BasicType = EbtInt;
979             if (node1->getBasicType() == EbtBool)
980                 promoteTo = std::make_tuple(node0BasicType, EbtInt);
982                 promoteTo = std::make_tuple(node0BasicType, node1->getBasicType());
// GLSL shifts: both sides must already be integer types.
984             if (isTypeInt(node0->getBasicType()) && isTypeInt(node1->getBasicType()))
985                 return std::make_tuple(node0, node1);
987                 return std::make_tuple(nullptr, nullptr);
// Default policy: exact type match required, no implicit conversion.
992         if (node0->getType() == node1->getType())
993             return std::make_tuple(node0, node1);
995         return std::make_tuple(nullptr, nullptr);
998     TIntermTyped* newNode0;
999     TIntermTyped* newNode1;
// Apply the chosen promotions; constants are promoted by folding, other
// nodes by inserting a runtime conversion node.
1001     if (std::get<0>(promoteTo) != node0->getType().getBasicType()) {
1002         if (node0->getAsConstantUnion())
1003             newNode0 = promoteConstantUnion(std::get<0>(promoteTo), node0->getAsConstantUnion());
1005             newNode0 = createConversion(std::get<0>(promoteTo), node0);
1009     if (std::get<1>(promoteTo) != node1->getType().getBasicType()) {
1010         if (node1->getAsConstantUnion())
1011             newNode1 = promoteConstantUnion(std::get<1>(promoteTo), node1->getAsConstantUnion());
1013             newNode1 = createConversion(std::get<1>(promoteTo), node1);
1017     return std::make_tuple(newNode0, newNode1);
1021 // Convert the node's type to the given type, as allowed by the operation involved: 'op'.
1022 // For implicit conversions, 'op' is not the requested conversion, it is the explicit
1023 // operation requiring the implicit conversion.
1025 // Binary operation conversions should be handled by addConversion(op, node, node), not here.
1027 // Returns a node representing the conversion, which could be the same
1028 // node passed in if no conversion was needed.
1030 // Generally, this is focused on basic type conversion, not shape conversion.
1031 // See addShapeConversion() for shape conversions.
1033 // Return nullptr if a conversion can't be done.
// Convert 'node' to 'type' for operation 'op' (explicit constructor ops or the
// implicit conversion required by 'op').  Returns the converted node, the input
// node when no conversion is needed, or (per the header comment) nullptr when
// the conversion is disallowed.
// NOTE(review): this extract is missing interleaved source lines (the embedded
// original line numbering jumps); visible code is kept byte-identical and only
// comments were added/edited.
1035 TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TIntermTyped* node)
1037 if (!isConversionAllowed(op, node))
1040 // Otherwise, if types are identical, no problem
1041 if (type == node->getType())
1044 // If one's a structure, then no conversions.
1045 if (type.isStruct() || node->isStruct())
1048 // If one's an array, then no conversions.
1049 if (type.isArray() || node->getType().isArray())
1052 // Note: callers are responsible for other aspects of shape,
1053 // like vector and matrix sizes.
1057 // Explicit conversions (unary operations)
1059 case EOpConstructBool:
1060 case EOpConstructFloat:
1061 case EOpConstructInt:
1062 case EOpConstructUint:
1064 case EOpConstructDouble:
1065 case EOpConstructFloat16:
1066 case EOpConstructInt8:
1067 case EOpConstructUint8:
1068 case EOpConstructInt16:
1069 case EOpConstructUint16:
1070 case EOpConstructInt64:
1071 case EOpConstructUint64:
1077 // Implicit conversions
1081 case EOpFunctionCall:
1088 case EOpVectorTimesScalarAssign:
1089 case EOpMatrixTimesScalarAssign:
1093 case EOpInclusiveOrAssign:
1094 case EOpExclusiveOrAssign:
1102 case EOpFaceForward:
1119 case EOpConstructStruct:
1120 case EOpConstructCooperativeMatrix:
1122 if (type.isReference() || node->getType().isReference()) {
1123 // types must match exactly to assign a reference; no implicit promotion
1124 if (type == node->getType())
1130 if (type.getBasicType() == node->getType().getBasicType())
1133 if (! canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op))
1137 // For GLSL, there are no conversions needed; the shift amount just needs to be an
1138 // integer type, as do the base/result.
1139 // HLSL can convert the shift from a bool to an int.
1140 case EOpLeftShiftAssign:
1141 case EOpRightShiftAssign:
1143 if (!(getSource() == EShSourceHlsl && node->getType().getBasicType() == EbtBool)) {
1144 if (isTypeInt(type.getBasicType()) && isTypeInt(node->getBasicType()))
1153 // default is to require a match; all exceptions should have case statements above
1155 if (type.getBasicType() == node->getType().getBasicType())
1161 bool canPromoteConstant = true;
1163 // GL_EXT_shader_16bit_storage can't do OpConstantComposite with
1164 // 16-bit types, so disable promotion for those types.
1165 // Many issues with this, from JohnK:
1166 // - this isn't really right to discuss SPIR-V here
1167 // - this could easily be entirely about scalars, so is overstepping
1168 // - we should be looking at what the shader asked for, and saying whether or
1169 // not it can be done, in the parser, by calling requireExtensions(), not
1170 // changing language semantics on the fly by asking what extensions are in use
1171 // - at the time of this writing (14-Aug-2020), no test results are changed by this.
1173 case EOpConstructFloat16:
1174 canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
1175 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float16);
1177 case EOpConstructInt8:
1178 case EOpConstructUint8:
1179 canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
1180 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int8);
1182 case EOpConstructInt16:
1183 case EOpConstructUint16:
1184 canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
1185 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int16);
// Constant operands are folded directly to a new constant of the target type.
1192 if (canPromoteConstant && node->getAsConstantUnion())
1193 return promoteConstantUnion(type.getBasicType(), node->getAsConstantUnion());
1196 // Add a new newNode for the conversion.
1198 TIntermTyped* newNode = createConversion(type.getBasicType(), node);
1203 // Convert the node's shape of type for the given type, as allowed by the
1204 // operation involved: 'op'. This is for situations where there is only one
1205 // direction to consider doing the shape conversion.
1207 // This implements policy; it calls addShapeConversion() for the mechanism.
1209 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
1210 // for GLSL. Bad shapes are caught in conversion or promotion.
1212 // Return 'node' if no conversion was done. Promotion handles final shape
// One-directional shape conversion: smear/truncate 'node' to the shape of
// 'type' where the operation 'op' and source language permit it.
// NOTE(review): interleaved source lines are missing from this extract;
// visible code kept byte-identical, comments only.
1215 TIntermTyped* TIntermediate::addUniShapeConversion(TOperator op, const TType& type, TIntermTyped* node)
1217 // some source languages don't do this
1218 switch (getSource()) {
1226 // some operations don't do this
1228 case EOpFunctionCall:
1233 // want to support vector *= scalar native ops in AST and lower, not smear, similarly for
1234 // matrix *= scalar, etc.
1240 case EOpInclusiveOrAssign:
1241 case EOpExclusiveOrAssign:
1242 case EOpRightShiftAssign:
1243 case EOpLeftShiftAssign:
1244 if (node->getVectorSize() == 1)
// Fall through to the mechanism for the general case.
1258 return addShapeConversion(type, node);
1261 // Convert the nodes' shapes to be compatible for the operation 'op'.
1263 // This implements policy; it calls addShapeConversion() for the mechanism.
1265 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
1266 // for GLSL. Bad shapes are caught in conversion or promotion.
// Two-directional shape conversion: make lhsNode/rhsNode shape-compatible for
// 'op', modifying the nodes in place through the reference parameters.
// NOTE(review): interleaved source lines are missing from this extract;
// visible code kept byte-identical, comments only.
1268 void TIntermediate::addBiShapeConversion(TOperator op, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode)
1270 // some source languages don't do this
1271 switch (getSource()) {
1279 // some operations don't do this
1280 // 'break' will mean attempt bidirectional conversion
1288 case EOpInclusiveOrAssign:
1289 case EOpExclusiveOrAssign:
1290 case EOpRightShiftAssign:
1291 case EOpLeftShiftAssign:
1292 // switch to unidirectional conversion (the lhs can't change)
1293 rhsNode = addUniShapeConversion(op, lhsNode->getType(), rhsNode);
1297 // matrix multiply does not change shapes
1298 if (lhsNode->isMatrix() && rhsNode->isMatrix())
1303 // want to support vector * scalar native ops in AST and lower, not smear, similarly for
1304 // matrix * vector, etc.
1305 if (lhsNode->getVectorSize() == 1 || rhsNode->getVectorSize() == 1)
1311 // can natively support the right operand being a scalar and the left a vector,
1312 // but not the reverse
1313 if (rhsNode->getVectorSize() == 1)
1318 case EOpGreaterThan:
1319 case EOpLessThanEqual:
1320 case EOpGreaterThanEqual:
1330 case EOpInclusiveOr:
1331 case EOpExclusiveOr:
1340 // Do bidirectional conversions
1341 if (lhsNode->getType().isScalarOrVec1() || rhsNode->getType().isScalarOrVec1()) {
1342 if (lhsNode->getType().isScalarOrVec1())
1343 lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
1345 rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
1347 lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
1348 rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
1351 // Convert the node's shape of type for the given type, as allowed by the
1352 // operation involved: 'op'.
1354 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
1355 // for GLSL. Bad shapes are caught in conversion or promotion.
1357 // Return 'node' if no conversion was done. Promotion handles final shape
// The shape-conversion mechanism: wrap 'node' in a constructor aggregate that
// produces the shape of 'type' (smear, truncate, or reinterpret per the HLSL
// rules below).  Returns 'node' unchanged when no conversion applies.
// NOTE(review): interleaved source lines are missing from this extract;
// visible code kept byte-identical, comments only.
1360 TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped* node)
1362 // no conversion needed
1363 if (node->getType() == type)
1366 // structures and arrays don't change shape, either to or from
1367 if (node->getType().isStruct() || node->getType().isArray() ||
1368 type.isStruct() || type.isArray())
1371 // The new node that handles the conversion
1372 TOperator constructorOp = mapTypeToConstructorOp(type);
1374 if (getSource() == EShSourceHlsl) {
1375 // HLSL rules for scalar, vector and matrix conversions:
1376 // 1) scalar can become anything, initializing every component with its value
1377 // 2) vector and matrix can become scalar, first element is used (warning: truncation)
1378 // 3) matrix can become matrix with less rows and/or columns (warning: truncation)
1379 // 4) vector can become vector with less rows size (warning: truncation)
1380 // 5a) vector 4 can become 2x2 matrix (special case) (same packing layout, it's a reinterpret)
1381 // 5b) 2x2 matrix can become vector 4 (special case) (same packing layout, it's a reinterpret)
1383 const TType &sourceType = node->getType();
1385 // rule 1 for scalar to matrix is special
1386 if (sourceType.isScalarOrVec1() && type.isMatrix()) {
1388 // HLSL semantics: the scalar (or vec1) is replicated to every component of the matrix. Left to its
1389 // own devices, the constructor from a scalar would populate the diagonal. This forces replication
1390 // to every matrix element.
1392 // Note that if the node is complex (e.g, a function call), we don't want to duplicate it here
1393 // repeatedly, so we copy it to a temp, then use the temp.
1394 const int matSize = type.computeNumComponents();
1395 TIntermAggregate* rhsAggregate = new TIntermAggregate();
1397 const bool isSimple = (node->getAsSymbolNode() != nullptr) || (node->getAsConstantUnion() != nullptr);
1400 assert(0); // TODO: use node replicator service when available.
1403 for (int x = 0; x < matSize; ++x)
1404 rhsAggregate->getSequence().push_back(node);
1406 return setAggregateOperator(rhsAggregate, constructorOp, type, node->getLoc());
// Rule 1 (scalar -> non-scalar) and rule 2 (non-scalar -> scalar).
1410 if ((sourceType.isScalar() && !type.isScalar()) || (!sourceType.isScalar() && type.isScalar()))
1411 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1414 if (sourceType.isMatrix()) {
1416 if (type.isMatrix()) {
// Rule 3: truncating matrix -> smaller matrix.
1417 if ((sourceType.getMatrixCols() != type.getMatrixCols() || sourceType.getMatrixRows() != type.getMatrixRows()) &&
1418 sourceType.getMatrixCols() >= type.getMatrixCols() && sourceType.getMatrixRows() >= type.getMatrixRows())
1419 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1421 } else if (type.isVector()) {
// Rule 5b: 2x2 matrix -> vec4 reinterpret.
1422 if (type.getVectorSize() == 4 && sourceType.getMatrixCols() == 2 && sourceType.getMatrixRows() == 2)
1423 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1428 if (sourceType.isVector()) {
1430 if (type.isVector())
// Rule 4: truncating vector -> smaller vector.
1432 if (sourceType.getVectorSize() > type.getVectorSize())
1433 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1435 } else if (type.isMatrix()) {
// Rule 5a: vec4 -> 2x2 matrix reinterpret.
1436 if (sourceType.getVectorSize() == 4 && type.getMatrixCols() == 2 && type.getMatrixRows() == 2)
1437 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1442 // scalar -> vector or vec1 -> vector or
1443 // vector -> scalar or
1444 // bigger vector -> smaller vector
1445 if ((node->getType().isScalarOrVec1() && type.isVector()) ||
1446 (node->getType().isVector() && type.isScalar()) ||
1447 (node->isVector() && type.isVector() && node->getVectorSize() > type.getVectorSize()))
1448 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
// True when 'from' -> 'to' is an integral promotion (C++-style widening of a
// smaller integer type).
// NOTE(review): the body of this function is not visible in this extract;
// only the signature and leading comment are shown.
1453 bool TIntermediate::isIntegralPromotion(TBasicType from, TBasicType to) const
1455 // integral promotions
// True when 'from' -> 'to' is a floating-point promotion (visible code only
// considers promotion to double; remainder of body missing from this extract).
1470 bool TIntermediate::isFPPromotion(TBasicType from, TBasicType to) const
1472 // floating-point promotions
1473 if (to == EbtDouble) {
// True when 'from' -> 'to' is an integral conversion (not a promotion).
// NOTE(review): most of the body is missing from this extract; the visible
// fragments gate some conversions on version >= 400 or HLSL source.
1485 bool TIntermediate::isIntegralConversion(TBasicType from, TBasicType to) const
1495 return version >= 400 || getSource() == EShSourceHlsl;
1559 if (to == EbtUint64) {
// True when 'from' -> 'to' is a floating-point conversion (not a promotion);
// the visible fragment handles float16 -> float.  Body mostly missing here.
1569 bool TIntermediate::isFPConversion(TBasicType from, TBasicType to) const
1575 if (to == EbtFloat && from == EbtFloat16) {
// True when 'from' (integral) -> 'to' (floating-point) is an allowed
// integral-to-FP conversion.  Body mostly missing from this extract.
1582 bool TIntermediate::isFPIntegralConversion(TBasicType from, TBasicType to) const
1611 if (to == EbtDouble) {
1623 // See if the 'from' type is allowed to be implicitly converted to the
1624 // 'to' type. This is not about vector/array/struct, only about basic type.
// See the header comment above: decides whether basic type 'from' may be
// implicitly converted to 'to' under operation 'op', factoring in profile,
// version, source language (HLSL is more permissive), and enabled numeric
// feature extensions.
// NOTE(review): interleaved source lines are missing from this extract;
// visible code kept byte-identical, comments only.
1626 bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op) const
1628 if ((isEsProfile() && version < 310 ) || version == 110)
1634 // TODO: Move more policies into language-specific handlers.
1635 // Some languages allow more general (or potentially, more specific) conversions under some conditions.
1636 if (getSource() == EShSourceHlsl) {
1637 const bool fromConvertable = (from == EbtFloat || from == EbtDouble || from == EbtInt || from == EbtUint || from == EbtBool);
1638 const bool toConvertable = (to == EbtFloat || to == EbtDouble || to == EbtInt || to == EbtUint || to == EbtBool);
1640 if (fromConvertable && toConvertable) {
1642 case EOpAndAssign: // assignments can perform arbitrary conversions
1643 case EOpInclusiveOrAssign: // ...
1644 case EOpExclusiveOrAssign: // ...
1645 case EOpAssign: // ...
1646 case EOpAddAssign: // ...
1647 case EOpSubAssign: // ...
1648 case EOpMulAssign: // ...
1649 case EOpVectorTimesScalarAssign: // ...
1650 case EOpMatrixTimesScalarAssign: // ...
1651 case EOpDivAssign: // ...
1652 case EOpModAssign: // ...
1653 case EOpReturn: // function returns can also perform arbitrary conversions
1654 case EOpFunctionCall: // conversion of a calling parameter
1659 case EOpConstructStruct:
1667 if (getSource() == EShSourceHlsl) {
// HLSL allows bool to widen to the basic numeric types.
1669 if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat))
1673 if (isIntegralPromotion(from, to) ||
1674 isFPPromotion(from, to) ||
1675 isIntegralConversion(from, to) ||
1676 isFPConversion(from, to) ||
1677 isFPIntegralConversion(from, to)) {
1679 if (numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
1680 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int8) ||
1681 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int16) ||
1682 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int32) ||
1683 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int64) ||
1684 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float16) ||
1685 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float32) ||
1686 numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float64)) {
1692 if (isEsProfile()) {
1698 return numericFeatures.contains(TNumericFeatures::shader_implicit_conversions);
1705 return numericFeatures.contains(TNumericFeatures::shader_implicit_conversions);
1721 return version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64);
1724 return (version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64)) &&
1725 numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1727 return (version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64)) &&
1728 numericFeatures.contains(TNumericFeatures::gpu_shader_half_float);
1738 return getSource() == EShSourceHlsl;
1741 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1743 return numericFeatures.contains(TNumericFeatures::gpu_shader_half_float) ||
1744 getSource() == EShSourceHlsl;
1751 return version >= 400 || getSource() == EShSourceHlsl || IsRequestedExtension(E_GL_ARB_gpu_shader5);
1753 return getSource() == EShSourceHlsl;
1756 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1763 return getSource() == EShSourceHlsl;
1765 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1777 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1786 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1794 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
1802 return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
// True when every value of unsigned type 'uintType' is representable in signed
// type 'sintType' (used by the usual-arithmetic-conversion rules below).
// NOTE(review): the body is not visible in this extract.
1815 static bool canSignedIntTypeRepresentAllUnsignedValues(TBasicType sintType, TBasicType uintType)
// Map a signed integer basic type to its same-width unsigned counterpart.
// NOTE(review): body mostly missing from this extract; the visible assert
// suggests this build path only expects EbtInt — confirm against full source.
1879 static TBasicType getCorrespondingUnsignedType(TBasicType type)
1882 assert(type == EbtInt);
1901 // Implements the following rules
1902 // - If either operand has type float64_t or derived from float64_t,
1903 // the other shall be converted to float64_t or derived type.
1904 // - Otherwise, if either operand has type float32_t or derived from
1905 // float32_t, the other shall be converted to float32_t or derived type.
1906 // - Otherwise, if either operand has type float16_t or derived from
1907 // float16_t, the other shall be converted to float16_t or derived type.
1908 // - Otherwise, if both operands have integer types the following rules
1909 // shall be applied to the operands:
1910 // - If both operands have the same type, no further conversion
1912 // - Otherwise, if both operands have signed integer types or both
1913 // have unsigned integer types, the operand with the type of lesser
1914 // integer conversion rank shall be converted to the type of the
1915 // operand with greater rank.
1916 // - Otherwise, if the operand that has unsigned integer type has rank
1917 // greater than or equal to the rank of the type of the other
1918 // operand, the operand with signed integer type shall be converted
1919 // to the type of the operand with unsigned integer type.
1920 // - Otherwise, if the type of the operand with signed integer type can
1921 // represent all of the values of the type of the operand with
1922 // unsigned integer type, the operand with unsigned integer type
1923 // shall be converted to the type of the operand with signed
1925 // - Otherwise, both operands shall be converted to the unsigned
1926 // integer type corresponding to the type of the operand with signed
// Implements the usual-arithmetic-conversion rules documented in the comment
// block above: returns the (type0-destination, type1-destination) pair, or
// (EbtNumTypes, EbtNumTypes) when no conversion is possible.
// NOTE(review): interleaved source lines are missing from this extract;
// visible code kept byte-identical, comments only.
1929 std::tuple<TBasicType, TBasicType> TIntermediate::getConversionDestinationType(TBasicType type0, TBasicType type1, TOperator op) const
1931 TBasicType res0 = EbtNumTypes;
1932 TBasicType res1 = EbtNumTypes;
1934 if ((isEsProfile() &&
1935 (version < 310 || !numericFeatures.contains(TNumericFeatures::shader_implicit_conversions))) ||
1937 return std::make_tuple(res0, res1);
1939 if (getSource() == EShSourceHlsl) {
1940 if (canImplicitlyPromote(type1, type0, op)) {
1943 } else if (canImplicitlyPromote(type0, type1, op)) {
1947 return std::make_tuple(res0, res1);
// Float rules: widest floating-point type present wins (double > float > float16).
1950 if ((type0 == EbtDouble && canImplicitlyPromote(type1, EbtDouble, op)) ||
1951 (type1 == EbtDouble && canImplicitlyPromote(type0, EbtDouble, op)) ) {
1954 } else if ((type0 == EbtFloat && canImplicitlyPromote(type1, EbtFloat, op)) ||
1955 (type1 == EbtFloat && canImplicitlyPromote(type0, EbtFloat, op)) ) {
1958 } else if ((type0 == EbtFloat16 && canImplicitlyPromote(type1, EbtFloat16, op)) ||
1959 (type1 == EbtFloat16 && canImplicitlyPromote(type0, EbtFloat16, op)) ) {
// Integer rules: see the rank/signedness rules in the comment block above.
1962 } else if (isTypeInt(type0) && isTypeInt(type1) &&
1963 (canImplicitlyPromote(type0, type1, op) || canImplicitlyPromote(type1, type0, op))) {
1964 if ((isTypeSignedInt(type0) && isTypeSignedInt(type1)) ||
1965 (isTypeUnsignedInt(type0) && isTypeUnsignedInt(type1))) {
1966 if (getTypeRank(type0) < getTypeRank(type1)) {
1973 } else if (isTypeUnsignedInt(type0) && (getTypeRank(type0) > getTypeRank(type1))) {
1976 } else if (isTypeUnsignedInt(type1) && (getTypeRank(type1) > getTypeRank(type0))) {
1979 } else if (isTypeSignedInt(type0)) {
1980 if (canSignedIntTypeRepresentAllUnsignedValues(type0, type1)) {
1984 res0 = getCorrespondingUnsignedType(type0);
1985 res1 = getCorrespondingUnsignedType(type0);
1987 } else if (isTypeSignedInt(type1)) {
1988 if (canSignedIntTypeRepresentAllUnsignedValues(type1, type0)) {
1992 res0 = getCorrespondingUnsignedType(type1);
1993 res1 = getCorrespondingUnsignedType(type1);
1998 return std::make_tuple(res0, res1);
2002 // Given a type, find what operation would fully construct it.
// Given a type, return the constructor operator that would fully build it
// (per the header comment above): selected by basic type, then matrix
// cols/rows or vector size.  Returns EOpNull when no constructor applies.
// NOTE(review): interleaved source lines (case labels, breaks, closing
// braces) are missing from this extract; visible code kept byte-identical,
// comments only.
2004 TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const
2006 TOperator op = EOpNull;
2008 if (type.getQualifier().isNonUniform())
2009 return EOpConstructNonuniform;
2011 if (type.isCoopMat())
2012 return EOpConstructCooperativeMatrix;
2014 switch (type.getBasicType()) {
2016 op = EOpConstructStruct;
2019 if (type.getSampler().isCombined())
2020 op = EOpConstructTextureSampler;
// float: matrices by cols/rows, otherwise vectors by size.
2023 if (type.isMatrix()) {
2024 switch (type.getMatrixCols()) {
2026 switch (type.getMatrixRows()) {
2027 case 2: op = EOpConstructMat2x2; break;
2028 case 3: op = EOpConstructMat2x3; break;
2029 case 4: op = EOpConstructMat2x4; break;
2030 default: break; // some compilers want this
2034 switch (type.getMatrixRows()) {
2035 case 2: op = EOpConstructMat3x2; break;
2036 case 3: op = EOpConstructMat3x3; break;
2037 case 4: op = EOpConstructMat3x4; break;
2038 default: break; // some compilers want this
2042 switch (type.getMatrixRows()) {
2043 case 2: op = EOpConstructMat4x2; break;
2044 case 3: op = EOpConstructMat4x3; break;
2045 case 4: op = EOpConstructMat4x4; break;
2046 default: break; // some compilers want this
2049 default: break; // some compilers want this
2052 switch(type.getVectorSize()) {
2053 case 1: op = EOpConstructFloat; break;
2054 case 2: op = EOpConstructVec2; break;
2055 case 3: op = EOpConstructVec3; break;
2056 case 4: op = EOpConstructVec4; break;
2057 default: break; // some compilers want this
// int
2062 if (type.getMatrixCols()) {
2063 switch (type.getMatrixCols()) {
2065 switch (type.getMatrixRows()) {
2066 case 2: op = EOpConstructIMat2x2; break;
2067 case 3: op = EOpConstructIMat2x3; break;
2068 case 4: op = EOpConstructIMat2x4; break;
2069 default: break; // some compilers want this
2073 switch (type.getMatrixRows()) {
2074 case 2: op = EOpConstructIMat3x2; break;
2075 case 3: op = EOpConstructIMat3x3; break;
2076 case 4: op = EOpConstructIMat3x4; break;
2077 default: break; // some compilers want this
2081 switch (type.getMatrixRows()) {
2082 case 2: op = EOpConstructIMat4x2; break;
2083 case 3: op = EOpConstructIMat4x3; break;
2084 case 4: op = EOpConstructIMat4x4; break;
2085 default: break; // some compilers want this
2090 switch(type.getVectorSize()) {
2091 case 1: op = EOpConstructInt; break;
2092 case 2: op = EOpConstructIVec2; break;
2093 case 3: op = EOpConstructIVec3; break;
2094 case 4: op = EOpConstructIVec4; break;
2095 default: break; // some compilers want this
// uint
2100 if (type.getMatrixCols()) {
2101 switch (type.getMatrixCols()) {
2103 switch (type.getMatrixRows()) {
2104 case 2: op = EOpConstructUMat2x2; break;
2105 case 3: op = EOpConstructUMat2x3; break;
2106 case 4: op = EOpConstructUMat2x4; break;
2107 default: break; // some compilers want this
2111 switch (type.getMatrixRows()) {
2112 case 2: op = EOpConstructUMat3x2; break;
2113 case 3: op = EOpConstructUMat3x3; break;
2114 case 4: op = EOpConstructUMat3x4; break;
2115 default: break; // some compilers want this
2119 switch (type.getMatrixRows()) {
2120 case 2: op = EOpConstructUMat4x2; break;
2121 case 3: op = EOpConstructUMat4x3; break;
2122 case 4: op = EOpConstructUMat4x4; break;
2123 default: break; // some compilers want this
2128 switch(type.getVectorSize()) {
2129 case 1: op = EOpConstructUint; break;
2130 case 2: op = EOpConstructUVec2; break;
2131 case 3: op = EOpConstructUVec3; break;
2132 case 4: op = EOpConstructUVec4; break;
2133 default: break; // some compilers want this
// bool
2138 if (type.getMatrixCols()) {
2139 switch (type.getMatrixCols()) {
2141 switch (type.getMatrixRows()) {
2142 case 2: op = EOpConstructBMat2x2; break;
2143 case 3: op = EOpConstructBMat2x3; break;
2144 case 4: op = EOpConstructBMat2x4; break;
2145 default: break; // some compilers want this
2149 switch (type.getMatrixRows()) {
2150 case 2: op = EOpConstructBMat3x2; break;
2151 case 3: op = EOpConstructBMat3x3; break;
2152 case 4: op = EOpConstructBMat3x4; break;
2153 default: break; // some compilers want this
2157 switch (type.getMatrixRows()) {
2158 case 2: op = EOpConstructBMat4x2; break;
2159 case 3: op = EOpConstructBMat4x3; break;
2160 case 4: op = EOpConstructBMat4x4; break;
2161 default: break; // some compilers want this
2166 switch(type.getVectorSize()) {
2167 case 1: op = EOpConstructBool; break;
2168 case 2: op = EOpConstructBVec2; break;
2169 case 3: op = EOpConstructBVec3; break;
2170 case 4: op = EOpConstructBVec4; break;
2171 default: break; // some compilers want this
// double
2177 if (type.getMatrixCols()) {
2178 switch (type.getMatrixCols()) {
2180 switch (type.getMatrixRows()) {
2181 case 2: op = EOpConstructDMat2x2; break;
2182 case 3: op = EOpConstructDMat2x3; break;
2183 case 4: op = EOpConstructDMat2x4; break;
2184 default: break; // some compilers want this
2188 switch (type.getMatrixRows()) {
2189 case 2: op = EOpConstructDMat3x2; break;
2190 case 3: op = EOpConstructDMat3x3; break;
2191 case 4: op = EOpConstructDMat3x4; break;
2192 default: break; // some compilers want this
2196 switch (type.getMatrixRows()) {
2197 case 2: op = EOpConstructDMat4x2; break;
2198 case 3: op = EOpConstructDMat4x3; break;
2199 case 4: op = EOpConstructDMat4x4; break;
2200 default: break; // some compilers want this
2205 switch(type.getVectorSize()) {
2206 case 1: op = EOpConstructDouble; break;
2207 case 2: op = EOpConstructDVec2; break;
2208 case 3: op = EOpConstructDVec3; break;
2209 case 4: op = EOpConstructDVec4; break;
2210 default: break; // some compilers want this
// float16
2215 if (type.getMatrixCols()) {
2216 switch (type.getMatrixCols()) {
2218 switch (type.getMatrixRows()) {
2219 case 2: op = EOpConstructF16Mat2x2; break;
2220 case 3: op = EOpConstructF16Mat2x3; break;
2221 case 4: op = EOpConstructF16Mat2x4; break;
2222 default: break; // some compilers want this
2226 switch (type.getMatrixRows()) {
2227 case 2: op = EOpConstructF16Mat3x2; break;
2228 case 3: op = EOpConstructF16Mat3x3; break;
2229 case 4: op = EOpConstructF16Mat3x4; break;
2230 default: break; // some compilers want this
2234 switch (type.getMatrixRows()) {
2235 case 2: op = EOpConstructF16Mat4x2; break;
2236 case 3: op = EOpConstructF16Mat4x3; break;
2237 case 4: op = EOpConstructF16Mat4x4; break;
2238 default: break; // some compilers want this
2244 switch (type.getVectorSize()) {
2245 case 1: op = EOpConstructFloat16; break;
2246 case 2: op = EOpConstructF16Vec2; break;
2247 case 3: op = EOpConstructF16Vec3; break;
2248 case 4: op = EOpConstructF16Vec4; break;
2249 default: break; // some compilers want this
// int8 / uint8 / int16 / uint16 / int64 / uint64: vector sizes only.
2254 switch(type.getVectorSize()) {
2255 case 1: op = EOpConstructInt8; break;
2256 case 2: op = EOpConstructI8Vec2; break;
2257 case 3: op = EOpConstructI8Vec3; break;
2258 case 4: op = EOpConstructI8Vec4; break;
2259 default: break; // some compilers want this
2263 switch(type.getVectorSize()) {
2264 case 1: op = EOpConstructUint8; break;
2265 case 2: op = EOpConstructU8Vec2; break;
2266 case 3: op = EOpConstructU8Vec3; break;
2267 case 4: op = EOpConstructU8Vec4; break;
2268 default: break; // some compilers want this
2272 switch(type.getVectorSize()) {
2273 case 1: op = EOpConstructInt16; break;
2274 case 2: op = EOpConstructI16Vec2; break;
2275 case 3: op = EOpConstructI16Vec3; break;
2276 case 4: op = EOpConstructI16Vec4; break;
2277 default: break; // some compilers want this
2281 switch(type.getVectorSize()) {
2282 case 1: op = EOpConstructUint16; break;
2283 case 2: op = EOpConstructU16Vec2; break;
2284 case 3: op = EOpConstructU16Vec3; break;
2285 case 4: op = EOpConstructU16Vec4; break;
2286 default: break; // some compilers want this
2290 switch(type.getVectorSize()) {
2291 case 1: op = EOpConstructInt64; break;
2292 case 2: op = EOpConstructI64Vec2; break;
2293 case 3: op = EOpConstructI64Vec3; break;
2294 case 4: op = EOpConstructI64Vec4; break;
2295 default: break; // some compilers want this
2299 switch(type.getVectorSize()) {
2300 case 1: op = EOpConstructUint64; break;
2301 case 2: op = EOpConstructU64Vec2; break;
2302 case 3: op = EOpConstructU64Vec3; break;
2303 case 4: op = EOpConstructU64Vec4; break;
2304 default: break; // some compilers want this
2308 op = EOpConstructReference;
2312 op = EOpConstructAccStruct;
2323 // Safe way to combine two nodes into an aggregate. Works with null pointers,
2324 // a node that's not a aggregate yet, etc.
2326 // Returns the resulting aggregate, unless nullptr was passed in for
2327 // both existing nodes.
2329 TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right)
2331 if (left == nullptr && right == nullptr)
2334 TIntermAggregate* aggNode = nullptr;
2335 if (left != nullptr)
2336 aggNode = left->getAsAggregate();
2337 if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
2338 aggNode = new TIntermAggregate;
2339 if (left != nullptr)
2340 aggNode->getSequence().push_back(left);
2343 if (right != nullptr)
2344 aggNode->getSequence().push_back(right);
2349 TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc& loc)
2351 TIntermAggregate* aggNode = growAggregate(left, right);
2353 aggNode->setLoc(loc);
2359 // Turn an existing node into an aggregate.
2361 // Returns an aggregate, unless nullptr was passed in for the existing node.
2363 TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node)
2365 if (node == nullptr)
2368 TIntermAggregate* aggNode = new TIntermAggregate;
2369 aggNode->getSequence().push_back(node);
2370 aggNode->setLoc(node->getLoc());
2375 TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node, const TSourceLoc& loc)
2377 if (node == nullptr)
2380 TIntermAggregate* aggNode = new TIntermAggregate;
2381 aggNode->getSequence().push_back(node);
2382 aggNode->setLoc(loc);
2388 // Make an aggregate with an empty sequence.
2390 TIntermAggregate* TIntermediate::makeAggregate(const TSourceLoc& loc)
2392 TIntermAggregate* aggNode = new TIntermAggregate;
2393 aggNode->setLoc(loc);
2399 // For "if" test nodes. There are three children; a condition,
2400 // a true path, and a false path. The two paths are in the
2403 // Returns the selection node created.
2405 TIntermSelection* TIntermediate::addSelection(TIntermTyped* cond, TIntermNodePair nodePair, const TSourceLoc& loc)
2408 // Don't prune the false path for compile-time constants; it's needed
2409 // for static access analysis.
2412 TIntermSelection* node = new TIntermSelection(cond, nodePair.node1, nodePair.node2);
2418 TIntermTyped* TIntermediate::addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc)
2420 // However, the lowest precedence operators of the sequence operator ( , ) and the assignment operators
2421 // ... are not included in the operators that can create a constant expression.
2423 // if (left->getType().getQualifier().storage == EvqConst &&
2424 // right->getType().getQualifier().storage == EvqConst) {
2429 TIntermTyped *commaAggregate = growAggregate(left, right, loc);
2430 commaAggregate->getAsAggregate()->setOperator(EOpComma);
2431 commaAggregate->setType(right->getType());
2432 commaAggregate->getWritableType().getQualifier().makeTemporary();
2434 return commaAggregate;
2437 TIntermTyped* TIntermediate::addMethod(TIntermTyped* object, const TType& type, const TString* name, const TSourceLoc& loc)
2439 TIntermMethod* method = new TIntermMethod(object, type, *name);
2440 method->setLoc(loc);
2446 // For "?:" test nodes. There are three children; a condition,
2447 // a true path, and a false path. The two paths are specified
2448 // as separate parameters. For vector 'cond', the true and false
2449 // are not paths, but vectors to mix.
2451 // Specialization constant operations include
2452 // - The ternary operator ( ? : )
2454 // Returns the selection node created, or nullptr if one could not be.
// Ternary "?:" handling per the header comment above: void branches become an
// if-then-else, vector conditions become a mix, constant scalar conditions
// fold to the chosen branch, otherwise a selection node is built.
// NOTE(review): interleaved source lines are missing from this extract;
// visible code kept byte-identical, comments only.
2456 TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock,
2457 const TSourceLoc& loc)
2459 // If it's void, go to the if-then-else selection()
2460 if (trueBlock->getBasicType() == EbtVoid && falseBlock->getBasicType() == EbtVoid) {
2461 TIntermNodePair pair = { trueBlock, falseBlock };
2462 TIntermSelection* selection = addSelection(cond, pair, loc);
2463 if (getSource() == EShSourceHlsl)
2464 selection->setNoShortCircuit();
2470 // Get compatible types.
2472 auto children = addPairConversion(EOpSequence, trueBlock, falseBlock);
2473 trueBlock = std::get<0>(children);
2474 falseBlock = std::get<1>(children);
2476 if (trueBlock == nullptr || falseBlock == nullptr)
2479 // Handle a vector condition as a mix
2480 if (!cond->getType().isScalarOrVec1()) {
2481 TType targetVectorType(trueBlock->getType().getBasicType(), EvqTemporary,
2482 cond->getType().getVectorSize());
2483 // smear true/false operands as needed
2484 trueBlock = addUniShapeConversion(EOpMix, targetVectorType, trueBlock);
2485 falseBlock = addUniShapeConversion(EOpMix, targetVectorType, falseBlock);
2487 // After conversion, types have to match.
2488 if (falseBlock->getType() != trueBlock->getType())
2491 // make the mix operation
2492 TIntermAggregate* mix = makeAggregate(loc);
2493 mix = growAggregate(mix, falseBlock);
2494 mix = growAggregate(mix, trueBlock);
2495 mix = growAggregate(mix, cond);
2496 mix->setType(targetVectorType);
2502 // Now have a scalar condition...
2504 // Convert true and false expressions to matching types
2505 addBiShapeConversion(EOpMix, trueBlock, falseBlock);
2507 // After conversion, types have to match.
2508 if (falseBlock->getType() != trueBlock->getType())
2511 // Eliminate the selection when the condition is a scalar and all operands are constant.
2512 if (cond->getAsConstantUnion() && trueBlock->getAsConstantUnion() && falseBlock->getAsConstantUnion()) {
2513 if (cond->getAsConstantUnion()->getConstArray()[0].getBConst())
2520 // Make a selection node.
2522 TIntermSelection* node = new TIntermSelection(cond, trueBlock, falseBlock, trueBlock->getType());
2524 node->getQualifier().precision = std::max(trueBlock->getQualifier().precision, falseBlock->getQualifier().precision);
// Spec-constant-ness propagates from the operands per the rules below.
2526 if ((cond->getQualifier().isConstant() && specConstantPropagates(*trueBlock, *falseBlock)) ||
2527 (cond->getQualifier().isSpecConstant() && trueBlock->getQualifier().isConstant() &&
2528 falseBlock->getQualifier().isConstant()))
2529 node->getQualifier().makeSpecConstant();
2531 node->getQualifier().makeTemporary();
2533 if (getSource() == EShSourceHlsl)
2534 node->setNoShortCircuit();
2540 // Constant terminal nodes. Has a union that contains bool, float or int constants
2542 // Returns the constant union node created.
2545 TIntermConstantUnion* TIntermediate::addConstantUnion(const TConstUnionArray& unionArray, const TType& t, const TSourceLoc& loc, bool literal) const
2547 TIntermConstantUnion* node = new TIntermConstantUnion(unionArray, t);
2548 node->getQualifier().storage = EvqConst;
2555 TIntermConstantUnion* TIntermediate::addConstantUnion(signed char i8, const TSourceLoc& loc, bool literal) const
2557 TConstUnionArray unionArray(1);
2558 unionArray[0].setI8Const(i8);
2560 return addConstantUnion(unionArray, TType(EbtInt8, EvqConst), loc, literal);
2563 TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned char u8, const TSourceLoc& loc, bool literal) const
2565 TConstUnionArray unionArray(1);
2566 unionArray[0].setUConst(u8);
2568 return addConstantUnion(unionArray, TType(EbtUint8, EvqConst), loc, literal);
2571 TIntermConstantUnion* TIntermediate::addConstantUnion(signed short i16, const TSourceLoc& loc, bool literal) const
2573 TConstUnionArray unionArray(1);
2574 unionArray[0].setI16Const(i16);
2576 return addConstantUnion(unionArray, TType(EbtInt16, EvqConst), loc, literal);
2579 TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned short u16, const TSourceLoc& loc, bool literal) const
2581 TConstUnionArray unionArray(1);
2582 unionArray[0].setU16Const(u16);
2584 return addConstantUnion(unionArray, TType(EbtUint16, EvqConst), loc, literal);
2587 TIntermConstantUnion* TIntermediate::addConstantUnion(int i, const TSourceLoc& loc, bool literal) const
2589 TConstUnionArray unionArray(1);
2590 unionArray[0].setIConst(i);
2592 return addConstantUnion(unionArray, TType(EbtInt, EvqConst), loc, literal);
2595 TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned int u, const TSourceLoc& loc, bool literal) const
2597 TConstUnionArray unionArray(1);
2598 unionArray[0].setUConst(u);
2600 return addConstantUnion(unionArray, TType(EbtUint, EvqConst), loc, literal);
2603 TIntermConstantUnion* TIntermediate::addConstantUnion(long long i64, const TSourceLoc& loc, bool literal) const
2605 TConstUnionArray unionArray(1);
2606 unionArray[0].setI64Const(i64);
2608 return addConstantUnion(unionArray, TType(EbtInt64, EvqConst), loc, literal);
2611 TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned long long u64, const TSourceLoc& loc, bool literal) const
2613 TConstUnionArray unionArray(1);
2614 unionArray[0].setU64Const(u64);
2616 return addConstantUnion(unionArray, TType(EbtUint64, EvqConst), loc, literal);
2619 TIntermConstantUnion* TIntermediate::addConstantUnion(bool b, const TSourceLoc& loc, bool literal) const
2621 TConstUnionArray unionArray(1);
2622 unionArray[0].setBConst(b);
2624 return addConstantUnion(unionArray, TType(EbtBool, EvqConst), loc, literal);
2627 TIntermConstantUnion* TIntermediate::addConstantUnion(double d, TBasicType baseType, const TSourceLoc& loc, bool literal) const
2629 assert(baseType == EbtFloat || baseType == EbtDouble || baseType == EbtFloat16);
2631 TConstUnionArray unionArray(1);
2632 unionArray[0].setDConst(d);
2634 return addConstantUnion(unionArray, TType(baseType, EvqConst), loc, literal);
2637 TIntermConstantUnion* TIntermediate::addConstantUnion(const TString* s, const TSourceLoc& loc, bool literal) const
2639 TConstUnionArray unionArray(1);
2640 unionArray[0].setSConst(s);
2642 return addConstantUnion(unionArray, TType(EbtString, EvqConst), loc, literal);
2645 // Put vector swizzle selectors onto the given sequence
2646 void TIntermediate::pushSelector(TIntermSequence& sequence, const TVectorSelector& selector, const TSourceLoc& loc)
2648 TIntermConstantUnion* constIntNode = addConstantUnion(selector, loc);
2649 sequence.push_back(constIntNode);
2652 // Put matrix swizzle selectors onto the given sequence
2653 void TIntermediate::pushSelector(TIntermSequence& sequence, const TMatrixSelector& selector, const TSourceLoc& loc)
2655 TIntermConstantUnion* constIntNode = addConstantUnion(selector.coord1, loc);
2656 sequence.push_back(constIntNode);
2657 constIntNode = addConstantUnion(selector.coord2, loc);
2658 sequence.push_back(constIntNode);
// Make an aggregate node that has a sequence of all selectors.
// Explicit instantiations for the two selector kinds used by the grammar.
template TIntermTyped* TIntermediate::addSwizzle<TVectorSelector>(TSwizzleSelectors<TVectorSelector>& selector, const TSourceLoc& loc);
template TIntermTyped* TIntermediate::addSwizzle<TMatrixSelector>(TSwizzleSelectors<TMatrixSelector>& selector, const TSourceLoc& loc);
template<typename selectorType>
TIntermTyped* TIntermediate::addSwizzle(TSwizzleSelectors<selectorType>& selector, const TSourceLoc& loc)
    TIntermAggregate* node = new TIntermAggregate(EOpSequence);

    TIntermSequence &sequenceVector = node->getSequence();

    // One constant child per selector component.
    for (int i = 0; i < selector.size(); i++)
        pushSelector(sequenceVector, selector[i], loc);
2679 // Follow the left branches down to the root of an l-value
2680 // expression (just "." and []).
2682 // Return the base of the l-value (where following indexing quits working).
2683 // Return nullptr if a chain following dereferences cannot be followed.
2685 // 'swizzleOkay' says whether or not it is okay to consider a swizzle
2686 // a valid part of the dereference chain.
2688 // 'BufferReferenceOk' says if type is buffer_reference, the routine stop to find the most left node.
const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay , bool bufferReferenceOk)
    // Only binary dereference/index nodes can be walked through.
    const TIntermBinary* binary = node->getAsBinaryNode();
    if (binary == nullptr)
    TOperator op = binary->getOp();
    // Anything that is not "." / "[]" / a swizzle terminates the chain.
    if (op != EOpIndexDirect && op != EOpIndexIndirect && op != EOpIndexDirectStruct && op != EOpVectorSwizzle && op != EOpMatrixSwizzle)
    if (! swizzleOkay) {
        if (op == EOpVectorSwizzle || op == EOpMatrixSwizzle)
        // Indexing a non-array vector/scalar is component selection, which
        // also counts as a swizzle for this purpose.
        if ((op == EOpIndexDirect || op == EOpIndexIndirect) &&
            (binary->getLeft()->getType().isVector() || binary->getLeft()->getType().isScalar()) &&
            ! binary->getLeft()->getType().isArray())
    // Step down to the left child and keep following.
    node = node->getAsBinaryNode()->getLeft();
    // Optionally stop at a buffer_reference rather than following through it.
    if (bufferReferenceOk && node->isReference())
2716 // Create while and do-while loop nodes.
2718 TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst,
2719 const TSourceLoc& loc)
2721 TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst);
// Create a for-loop sequence.
// 'node' receives the raw loop node; the returned aggregate holds the
// initializer followed by the loop.
TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test,
    TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TIntermLoop*& node)
    node = new TIntermLoop(body, test, terminal, testFirst);

    // make a sequence of the initializer and statement, but try to reuse the
    // aggregate already created for whatever is in the initializer, if there is one
    TIntermAggregate* loopSequence = (initializer == nullptr ||
                                      initializer->getAsAggregate() == nullptr) ? makeAggregate(initializer, loc)
                                                                                : initializer->getAsAggregate();
    // Neutralize an existing sequence/scope op so the setOperator below decides.
    if (loopSequence != nullptr && (loopSequence->getOp() == EOpSequence || loopSequence->getOp() == EOpScope))
        loopSequence->setOp(EOpNull);
    loopSequence = growAggregate(loopSequence, node);
    // Use a scope when debug info is requested, a plain sequence otherwise.
    loopSequence->setOperator(getDebugInfo() ? EOpScope : EOpSequence);

    return loopSequence;
2752 TIntermBranch* TIntermediate::addBranch(TOperator branchOp, const TSourceLoc& loc)
2754 return addBranch(branchOp, nullptr, loc);
2757 TIntermBranch* TIntermediate::addBranch(TOperator branchOp, TIntermTyped* expression, const TSourceLoc& loc)
2759 TIntermBranch* node = new TIntermBranch(branchOp, expression);
2765 // Propagate precision from formal function return type to actual return type,
2766 // and on to its subtree.
2767 void TIntermBranch::updatePrecision(TPrecisionQualifier parentPrecision)
2769 TIntermTyped* exp = getExpression();
2773 if (exp->getBasicType() == EbtInt || exp->getBasicType() == EbtUint ||
2774 exp->getBasicType() == EbtFloat) {
2775 if (parentPrecision != EpqNone && exp->getQualifier().precision == EpqNone) {
2776 exp->propagatePrecision(parentPrecision);
// This is to be executed after the final root is put on top by the parsing
// process.
bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/)
    if (root == nullptr)

    // Finish off the top-level sequence
    TIntermAggregate* aggRoot = root->getAsAggregate();
    if (aggRoot && aggRoot->getOp() == EOpNull)
        aggRoot->setOperator(EOpSequence);

    // Propagate 'noContraction' label in backward from 'precise' variables.
    glslang::PropagateNoContraction(*this);

    // Optionally rewrite combined texture/samplers per the requested mode.
    switch (textureSamplerTransformMode) {
    case EShTexSampTransKeep:
    case EShTexSampTransUpgradeTextureRemoveSampler:
        performTextureUpgradeAndSamplerRemovalTransformation(root);
    case EShTexSampTransCount:
void TIntermediate::addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage language, TSymbolTable& symbolTable)
    // Add top-level nodes for declarations that must be checked cross
    // compilation unit by a linker, yet might not have been referenced
    // in this compilation unit's AST.
    //
    // Almost entirely, translation of symbols is driven by what's present
    // in the AST traversal, not by translating the symbol table.
    //
    // However, there are some special cases:
    //  - From the specification: "Special built-in inputs gl_VertexID and
    //    gl_InstanceID are also considered active vertex attributes."
    //  - Linker-based type mismatch error reporting needs to see all
    //    uniforms/ins/outs variables and blocks.
    //  - ftransform() can make gl_Vertex and gl_ModelViewProjectionMatrix active.

    // if (ftransformUsed) {
        // TODO: 1.1 lowering functionality: track ftransform() usage
    //    addSymbolLinkageNode(root, symbolTable, "gl_Vertex");
    //    addSymbolLinkageNode(root, symbolTable, "gl_ModelViewProjectionMatrix");

    if (language == EShLangVertex) {
        // the names won't be found in the symbol table unless the versions are right,
        // so version logic does not need to be repeated here
        addSymbolLinkageNode(linkage, symbolTable, "gl_VertexID");
        addSymbolLinkageNode(linkage, symbolTable, "gl_InstanceID");

    // Add a child to the root node for the linker objects
    linkage->setOperator(EOpLinkerObjects);
    treeRoot = growAggregate(treeRoot, linkage);
2850 // Add the given name or symbol to the list of nodes at the end of the tree used
2851 // for link-time checking and external linkage.
2854 void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable& symbolTable, const TString& name)
2856 TSymbol* symbol = symbolTable.find(name);
2858 addSymbolLinkageNode(linkage, *symbol->getAsVariable());
2861 void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol& symbol)
2863 const TVariable* variable = symbol.getAsVariable();
2865 // This must be a member of an anonymous block, and we need to add the whole block
2866 const TAnonMember* anon = symbol.getAsAnonMember();
2867 variable = &anon->getAnonContainer();
2869 TIntermSymbol* node = addSymbol(*variable);
2870 linkage = growAggregate(linkage, node);
2874 // Add a caller->callee relationship to the call graph.
2875 // Assumes the strings are unique per signature.
2877 void TIntermediate::addToCallGraph(TInfoSink& /*infoSink*/, const TString& caller, const TString& callee)
2879 // Duplicates are okay, but faster to not keep them, and they come grouped by caller,
2880 // as long as new ones are push on the same end we check on for duplicates
2881 for (TGraph::const_iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
2882 if (call->caller != caller)
2884 if (call->callee == callee)
2888 callGraph.emplace_front(caller, callee);
2892 // This deletes the tree.
2894 void TIntermediate::removeTree()
2897 RemoveAllTreeNodes(treeRoot);
2901 // Implement the part of KHR_vulkan_glsl that lists the set of operations
2902 // that can result in a specialization constant operation.
2904 // "5.x Specialization Constant Operations"
2906 // Only some operations discussed in this section may be applied to a
2907 // specialization constant and still yield a result that is as
2908 // specialization constant. The operations allowed are listed below.
2909 // When a specialization constant is operated on with one of these
2910 // operators and with another constant or specialization constant, the
2911 // result is implicitly a specialization constant.
2913 // - int(), uint(), and bool() constructors for type conversions
2914 // from any of the following types to any of the following types:
2918 // - vector versions of the above conversion constructors
2919 // - allowed implicit conversions of the above
2920 // - swizzles (e.g., foo.yx)
2921 // - The following when applied to integer or unsigned integer types:
2922 // * unary negative ( - )
2923 // * binary operations ( + , - , * , / , % )
2924 // * shift ( <<, >> )
2925 // * bitwise operations ( & , | , ^ )
2926 // - The following when applied to integer or unsigned integer scalar types:
2927 // * comparison ( == , != , > , >= , < , <= )
2928 // - The following when applied to the Boolean scalar type:
2930 // * logical operations ( && , || , ^^ )
2931 // * comparison ( == , != )"
2933 // This function just handles binary and unary nodes. Construction
2934 // rules are handled in construction paths that are not covered by the unary
2935 // and binary paths, while required conversions will still show up here
2936 // as unary converters in the from a construction operator.
bool TIntermediate::isSpecializationOperation(const TIntermOperator& node) const
    // The operations resulting in floating point are quite limited
    // (However, some floating-point operations result in bool, like ">",
    // so are handled later.)
    if (node.getType().isFloatingDomain()) {
        switch (node.getOp()) {
        case EOpIndexDirect:
        case EOpIndexIndirect:
        case EOpIndexDirectStruct:
        case EOpVectorSwizzle:
        case EOpConvFloatToDouble:
        case EOpConvDoubleToFloat:
        case EOpConvFloat16ToFloat:
        case EOpConvFloatToFloat16:
        case EOpConvFloat16ToDouble:
        case EOpConvDoubleToFloat16:

    // Check for floating-point arguments
    if (const TIntermBinary* bin = node.getAsBinaryNode())
        if (bin->getLeft() ->getType().isFloatingDomain() ||
            bin->getRight()->getType().isFloatingDomain())

    // So, for now, we can assume everything left is non-floating-point...

    // Now check for integer/bool-based operations
    switch (node.getOp()) {

    // dereference/swizzle
    case EOpIndexDirect:
    case EOpIndexIndirect:
    case EOpIndexDirectStruct:
    case EOpVectorSwizzle:

    // (u)int* -> bool
    case EOpConvInt8ToBool:
    case EOpConvInt16ToBool:
    case EOpConvIntToBool:
    case EOpConvInt64ToBool:
    case EOpConvUint8ToBool:
    case EOpConvUint16ToBool:
    case EOpConvUintToBool:
    case EOpConvUint64ToBool:

    // bool -> (u)int*
    case EOpConvBoolToInt8:
    case EOpConvBoolToInt16:
    case EOpConvBoolToInt:
    case EOpConvBoolToInt64:
    case EOpConvBoolToUint8:
    case EOpConvBoolToUint16:
    case EOpConvBoolToUint:
    case EOpConvBoolToUint64:

    // int8_t -> (u)int*
    case EOpConvInt8ToInt16:
    case EOpConvInt8ToInt:
    case EOpConvInt8ToInt64:
    case EOpConvInt8ToUint8:
    case EOpConvInt8ToUint16:
    case EOpConvInt8ToUint:
    case EOpConvInt8ToUint64:

    // int16_t -> (u)int*
    case EOpConvInt16ToInt8:
    case EOpConvInt16ToInt:
    case EOpConvInt16ToInt64:
    case EOpConvInt16ToUint8:
    case EOpConvInt16ToUint16:
    case EOpConvInt16ToUint:
    case EOpConvInt16ToUint64:

    // int32_t -> (u)int*
    case EOpConvIntToInt8:
    case EOpConvIntToInt16:
    case EOpConvIntToInt64:
    case EOpConvIntToUint8:
    case EOpConvIntToUint16:
    case EOpConvIntToUint:
    case EOpConvIntToUint64:

    // int64_t -> (u)int*
    case EOpConvInt64ToInt8:
    case EOpConvInt64ToInt16:
    case EOpConvInt64ToInt:
    case EOpConvInt64ToUint8:
    case EOpConvInt64ToUint16:
    case EOpConvInt64ToUint:
    case EOpConvInt64ToUint64:

    // uint8_t -> (u)int*
    case EOpConvUint8ToInt8:
    case EOpConvUint8ToInt16:
    case EOpConvUint8ToInt:
    case EOpConvUint8ToInt64:
    case EOpConvUint8ToUint16:
    case EOpConvUint8ToUint:
    case EOpConvUint8ToUint64:

    // uint16_t -> (u)int*
    case EOpConvUint16ToInt8:
    case EOpConvUint16ToInt16:
    case EOpConvUint16ToInt:
    case EOpConvUint16ToInt64:
    case EOpConvUint16ToUint8:
    case EOpConvUint16ToUint:
    case EOpConvUint16ToUint64:

    // uint32_t -> (u)int*
    case EOpConvUintToInt8:
    case EOpConvUintToInt16:
    case EOpConvUintToInt:
    case EOpConvUintToInt64:
    case EOpConvUintToUint8:
    case EOpConvUintToUint16:
    case EOpConvUintToUint64:

    // uint64_t -> (u)int*
    case EOpConvUint64ToInt8:
    case EOpConvUint64ToInt16:
    case EOpConvUint64ToInt:
    case EOpConvUint64ToInt64:
    case EOpConvUint64ToUint8:
    case EOpConvUint64ToUint16:
    case EOpConvUint64ToUint:

    // binary operations
    case EOpVectorTimesScalar:
    case EOpInclusiveOr:
    case EOpExclusiveOr:
    case EOpGreaterThan:
    case EOpLessThanEqual:
    case EOpGreaterThanEqual:
// Is the operation one that must propagate nonuniform?
bool TIntermediate::isNonuniformPropagating(TOperator op) const
    // "* All Operators in Section 5.1 (Operators), except for assignment,
    //    arithmetic assignment, and sequence
    //  * Component selection in Section 5.5
    //  * Matrix components in Section 5.6
    //  * Structure and Array Operations in Section 5.7, except for the length
    //    method."

    // unary increment/decrement
    case EOpPostIncrement:
    case EOpPostDecrement:
    case EOpPreIncrement:
    case EOpPreDecrement:

    case EOpVectorLogicalNot:

    // bitwise
    case EOpInclusiveOr:
    case EOpExclusiveOr:

    // relational and linear-algebra products
    case EOpGreaterThan:
    case EOpLessThanEqual:
    case EOpGreaterThanEqual:
    case EOpVectorTimesScalar:
    case EOpVectorTimesMatrix:
    case EOpMatrixTimesVector:
    case EOpMatrixTimesScalar:

    // dereference/swizzle
    case EOpIndexDirect:
    case EOpIndexIndirect:
    case EOpIndexDirectStruct:
    case EOpVectorSwizzle:
3160 ////////////////////////////////////////////////////////////////
3162 // Member functions of the nodes used for building the tree.
3164 ////////////////////////////////////////////////////////////////
// Say whether or not an operation node changes the value of a variable.
//
// Returns true if state is modified.
//
bool TIntermOperator::modifiesState() const
    // increment/decrement mutate their operand
    case EOpPostIncrement:
    case EOpPostDecrement:
    case EOpPreIncrement:
    case EOpPreDecrement:

    // compound assignments
    case EOpVectorTimesMatrixAssign:
    case EOpVectorTimesScalarAssign:
    case EOpMatrixTimesScalarAssign:
    case EOpMatrixTimesMatrixAssign:
    case EOpInclusiveOrAssign:
    case EOpExclusiveOrAssign:
    case EOpLeftShiftAssign:
    case EOpRightShiftAssign:
3200 // returns true if the operator is for one of the constructors
3202 bool TIntermOperator::isConstructor() const
3204 return op > EOpConstructGuardStart && op < EOpConstructGuardEnd;
3208 // Make sure the type of an operator is appropriate for its
3209 // combination of operation and operand type. This will invoke
3210 // promoteUnary, promoteBinary, etc as needed.
3212 // Returns false if nothing makes sense.
3214 bool TIntermediate::promote(TIntermOperator* node)
3216 if (node == nullptr)
3219 if (node->getAsUnaryNode())
3220 return promoteUnary(*node->getAsUnaryNode());
3222 if (node->getAsBinaryNode())
3223 return promoteBinary(*node->getAsBinaryNode());
3225 if (node->getAsAggregate())
3226 return promoteAggregate(*node->getAsAggregate());
3232 // See TIntermediate::promote
bool TIntermediate::promoteUnary(TIntermUnary& node)
    const TOperator op = node.getOp();
    TIntermTyped* operand = node.getOperand();

    // Convert operand to a boolean type
    if (operand->getBasicType() != EbtBool) {
        // Add constructor to boolean type. If that fails, we can't do it, so return false.
        TIntermTyped* converted = addConversion(op, TType(EbtBool), operand);
        if (converted == nullptr)

        // Use the result of converting the node to a bool.
        node.setOperand(operand = converted); // also updates stack variable

    // Bitwise-not path: only integer operands are meaningful.
    if (!isTypeInt(operand->getBasicType()))

    case EOpPostIncrement:
    case EOpPostDecrement:
    case EOpPreIncrement:
    case EOpPreDecrement:
        // ++/-- work on any integer or floating-point scalar domain.
        if (!isTypeInt(operand->getBasicType()) &&
            operand->getBasicType() != EbtFloat &&
            operand->getBasicType() != EbtFloat16 &&
            operand->getBasicType() != EbtDouble)

        // HLSL uses this path for initial function signature finding for built-ins
        // taking a single argument, which generally don't participate in
        // operator-based type promotion (type conversion will occur later).
        // For now, scalar argument cases are relying on the setType() call below.
        if (getSource() == EShSourceHlsl)

        // GLSL only allows integer arguments for the cases identified above in the
        // case statements.
        if (operand->getBasicType() != EbtFloat)

    // Result takes the (promoted) operand's type, as a temporary.
    node.setType(operand->getType());
    node.getWritableType().getQualifier().makeTemporary();
3288 // Propagate precision qualifiers *up* from children to parent.
3289 void TIntermUnary::updatePrecision()
3291 if (getBasicType() == EbtInt || getBasicType() == EbtUint ||
3292 getBasicType() == EbtFloat) {
3293 if (operand->getQualifier().precision > getQualifier().precision)
3294 getQualifier().precision = operand->getQualifier().precision;
3299 // See TIntermediate::promote
bool TIntermediate::promoteBinary(TIntermBinary& node)
    TOperator op = node.getOp();
    TIntermTyped* left = node.getLeft();
    TIntermTyped* right = node.getRight();

    // Arrays and structures have to be exact matches.
    if ((left->isArray() || right->isArray() || left->getBasicType() == EbtStruct || right->getBasicType() == EbtStruct)
        && left->getType() != right->getType())

    // Base assumption: just make the type the same as the left
    // operand. Only deviations from this will be coded.
    node.setType(left->getType());
    node.getWritableType().getQualifier().clear();

    // Composite and opaque types don't having pending operator changes, e.g.,
    // array, structure, and samplers. Just establish final type and correctness.
    if (left->isArray() || left->getBasicType() == EbtStruct || left->getBasicType() == EbtSampler) {
        if (left->getBasicType() == EbtSampler) {
            // can't compare samplers
            // Promote to conditional
            node.setType(TType(EbtBool));

            // Keep type from above

    // We now have only scalars, vectors, and matrices to worry about.

    // HLSL implicitly promotes bool -> int for numeric operations.
    // (Implicit conversions to make the operands match each other's types were already done.)
    if (getSource() == EShSourceHlsl &&
        (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)) {
        case EOpGreaterThan:
        case EOpLessThanEqual:
        case EOpGreaterThanEqual:
        case EOpInclusiveOr:
        case EOpExclusiveOr:
            if (left->getBasicType() == EbtBool)
                left = createConversion(EbtInt, left);
            if (right->getBasicType() == EbtBool)
                right = createConversion(EbtInt, right);
            if (left == nullptr || right == nullptr)
            node.setRight(right);

            // Update the original base assumption on result type..
            node.setType(left->getType());
            node.getWritableType().getQualifier().clear();

    // Do general type checks against individual operands (comparing left and right is coming up, checking mixed shapes after that)
    case EOpGreaterThan:
    case EOpLessThanEqual:
    case EOpGreaterThanEqual:
        // Relational comparisons need numeric types and will promote to scalar Boolean.
        if (left->getBasicType() == EbtBool)

        node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));

        if (getSource() == EShSourceHlsl) {
            const int resultWidth = std::max(left->getVectorSize(), right->getVectorSize());

            // In HLSL, == or != on vectors means component-wise comparison.
            if (resultWidth > 1) {
                op = (op == EOpEqual) ? EOpVectorEqual : EOpVectorNotEqual;
            node.setType(TType(EbtBool, EvqTemporary, resultWidth));
            // All the above comparisons result in a bool (but not the vector compares)
            node.setType(TType(EbtBool));

        // logical ops operate only on Booleans or vectors of Booleans.
        if (left->getBasicType() != EbtBool || left->isMatrix())

        if (getSource() == EShSourceGlsl) {
            // logical ops operate only on scalar Booleans and will promote to scalar Boolean.
            if (left->isVector())
            node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));

    case EOpRightShiftAssign:
    case EOpLeftShiftAssign:
    case EOpInclusiveOr:
    case EOpExclusiveOr:
    case EOpInclusiveOrAssign:
    case EOpExclusiveOrAssign:
        if (getSource() == EShSourceHlsl)

        // Check for integer-only operands.
        if (!isTypeInt(left->getBasicType()) && !isTypeInt(right->getBasicType()))
        if (left->isMatrix() || right->isMatrix())

        // check for non-Boolean operands
        if (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)

    // Compare left and right, and finish with the cases where the operand types must match
    case EOpGreaterThan:
    case EOpLessThanEqual:
    case EOpGreaterThanEqual:
    case EOpVectorEqual:
    case EOpVectorNotEqual:
        return left->getType() == right->getType();
    case EOpInclusiveOr:
    case EOpExclusiveOr:
    case EOpInclusiveOrAssign:
    case EOpExclusiveOrAssign:

    // Quick out in case the types do match
    if (left->getType() == right->getType())

    // At least the basic type has to match
    if (left->getBasicType() != right->getBasicType())

    // Cooperative matrices: type parameters must agree when both sides are coopmat.
    if (left->getType().isCoopMat() || right->getType().isCoopMat()) {
        if (left->getType().isCoopMat() && right->getType().isCoopMat() &&
            *left->getType().getTypeParameters() != *right->getType().getTypeParameters()) {
            if (left->getType().isCoopMat() && right->getType().isCoopMat()) {
            if (op == EOpMulAssign && right->getType().isCoopMat()) {
            node.setOp(op == EOpMulAssign ? EOpMatrixTimesScalarAssign : EOpMatrixTimesScalar);
            if (right->getType().isCoopMat()) {
                node.setType(right->getType());
            // These require both to be cooperative matrices
            if (!left->getType().isCoopMat() || !right->getType().isCoopMat()) {

    // Finish handling the case, for all ops, where both operands are scalars.
    if (left->isScalar() && right->isScalar())

    // Finish handling the case, for all ops, where there are two vectors of different sizes
    if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize() && right->getVectorSize() > 1)

    // We now have a mix of scalars, vectors, or matrices, for non-relational operations.

    // Can these two operands be combined, what is the resulting type?
    TBasicType basicType = left->getBasicType();
        if (!left->isMatrix() && right->isMatrix()) {
            if (left->isVector()) {
                if (left->getVectorSize() != right->getMatrixRows())
                node.setOp(op = EOpVectorTimesMatrix);
                node.setType(TType(basicType, EvqTemporary, right->getMatrixCols()));
                node.setOp(op = EOpMatrixTimesScalar);
                node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), right->getMatrixRows()));
        } else if (left->isMatrix() && !right->isMatrix()) {
            if (right->isVector()) {
                if (left->getMatrixCols() != right->getVectorSize())
                node.setOp(op = EOpMatrixTimesVector);
                node.setType(TType(basicType, EvqTemporary, left->getMatrixRows()));
                node.setOp(op = EOpMatrixTimesScalar);
        } else if (left->isMatrix() && right->isMatrix()) {
            if (left->getMatrixCols() != right->getMatrixRows())
            node.setOp(op = EOpMatrixTimesMatrix);
            node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), left->getMatrixRows()));
        } else if (! left->isMatrix() && ! right->isMatrix()) {
            if (left->isVector() && right->isVector()) {
                ; // leave as component product
            } else if (left->isVector() || right->isVector()) {
                node.setOp(op = EOpVectorTimesScalar);
                if (right->isVector())
                    node.setType(TType(basicType, EvqTemporary, right->getVectorSize()));

        // multiply-assign variants of the same shape analysis
        if (! left->isMatrix() && right->isMatrix()) {
            if (left->isVector()) {
                if (left->getVectorSize() != right->getMatrixRows() || left->getVectorSize() != right->getMatrixCols())
                node.setOp(op = EOpVectorTimesMatrixAssign);
        } else if (left->isMatrix() && !right->isMatrix()) {
            if (right->isVector()) {
                node.setOp(op = EOpMatrixTimesScalarAssign);
        } else if (left->isMatrix() && right->isMatrix()) {
            if (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixCols() != right->getMatrixRows())
            node.setOp(op = EOpMatrixTimesMatrixAssign);
        } else if (!left->isMatrix() && !right->isMatrix()) {
            if (left->isVector() && right->isVector()) {
                // leave as component product
            } else if (left->isVector() || right->isVector()) {
                if (! left->isVector())
                node.setOp(op = EOpVectorTimesScalarAssign);

    case EOpRightShiftAssign:
    case EOpLeftShiftAssign:
        // Shifts: a vector shift amount requires a matching vector left operand.
        if (right->isVector() && (! left->isVector() || right->getVectorSize() != left->getVectorSize()))

        if (left->getVectorSize() != right->getVectorSize() || left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows())

    case EOpInclusiveOr:
    case EOpExclusiveOr:
    case EOpInclusiveOrAssign:
    case EOpExclusiveOrAssign:
        // Mixed shapes: no matrix-vector mixing, and basic types must agree.
        if ((left->isMatrix() && right->isVector()) ||
            (left->isVector() && right->isMatrix()) ||
            left->getBasicType() != right->getBasicType())
        if (left->isMatrix() && right->isMatrix() && (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows()))
        if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize())
        if (right->isVector() || right->isMatrix()) {
            node.getWritableType().shallowCopy(right->getType());
            node.getWritableType().getQualifier().makeTemporary();

    // One more check for assignment.

        // The resulting type has to match the left operand.
        case EOpInclusiveOrAssign:
        case EOpExclusiveOrAssign:
        case EOpLeftShiftAssign:
        case EOpRightShiftAssign:
            if (node.getType() != left->getType())
3719 // See TIntermediate::promote
// Attempt implicit argument-type promotion for an intrinsic call node:
// search for one argument whose type all other arguments can be converted
// to, and if found, replace the argument list with the converted nodes.
// NOTE(review): several lines (braces, returns, most switch cases) are not
// visible in this view; return-path comments below are hedged accordingly.
3721 bool TIntermediate::promoteAggregate(TIntermAggregate& node)
3723 TOperator op = node.getOp();
3724 TIntermSequence& args = node.getSequence();
3725 const int numArgs = static_cast<int>(args.size());
3727 // Presently, only hlsl does intrinsic promotions.
3728 if (getSource() != EShSourceHlsl)
3731 // set of opcodes that can be promoted in this manner.
3739 case EOpFaceForward:
3740 // case EOpFindMSB: TODO:
3741 // case EOpFindLSB: TODO:
3751 // case EOpGenMul: TODO:
3755 // case EOpSinCos: TODO:
3763 // TODO: array and struct behavior
3765 // Try converting all nodes to the given node's type
3766 TIntermSequence convertedArgs(numArgs, nullptr);
3768 // Try to convert all types to the nonConvArg type.
3769 for (int nonConvArg = 0; nonConvArg < numArgs; ++nonConvArg) {
3770 // Try converting all args to this arg's type
// addConversion() yields nullptr for any argument that cannot be
// converted to the candidate type, which disqualifies this candidate.
3771 for (int convArg = 0; convArg < numArgs; ++convArg) {
3772 convertedArgs[convArg] = addConversion(op, args[nonConvArg]->getAsTyped()->getType(),
3773 args[convArg]->getAsTyped());
3776 // If we successfully converted all the args, use the result.
// Every slot non-null means this candidate type worked: adopt the
// converted argument list in place of the original one.
3777 if (std::all_of(convertedArgs.begin(), convertedArgs.end(),
3778 [](const TIntermNode* node) { return node != nullptr; })) {
3780 std::swap(args, convertedArgs)&#59;
3788 // Propagate precision qualifiers *up* from children to parent, and then
3789 // back *down* again to the children's subtrees.
3790 void TIntermAggregate::updatePrecision()
// Only precision-qualified basic types participate (int/uint/float here).
3792 if (getBasicType() == EbtInt || getBasicType() == EbtUint ||
3793 getBasicType() == EbtFloat) {
// Pass 1 (up): the aggregate's precision is the maximum precision
// found among its typed operands.
3794 TPrecisionQualifier maxPrecision = EpqNone;
3795 TIntermSequence operands = getSequence();
3796 for (unsigned int i = 0; i < operands.size(); ++i) {
3797 TIntermTyped* typedNode = operands[i]->getAsTyped();
3799 maxPrecision = std::max(maxPrecision, typedNode->getQualifier().precision);
3801 getQualifier().precision = maxPrecision;
// Pass 2 (down): push the computed precision back into each
// operand's subtree.
3802 for (unsigned int i = 0; i < operands.size(); ++i) {
3803 TIntermTyped* typedNode = operands[i]->getAsTyped();
3805 typedNode->propagatePrecision(maxPrecision);
3810 // Propagate precision qualifiers *up* from children to parent, and then
3811 // back *down* again to the children's subtrees.
3812 void TIntermBinary::updatePrecision()
// Only precision-qualified basic types participate (int/uint/float here).
3814 if (getBasicType() == EbtInt || getBasicType() == EbtUint ||
3815 getBasicType() == EbtFloat) {
3816 if (op == EOpRightShift || op == EOpLeftShift) {
3817 // For shifts get precision from left side only and thus no need to propagate
3818 getQualifier().precision = left->getQualifier().precision;
// Non-shift: result precision is the higher of the two operands,
// then pushed back down into both operand subtrees.
3820 getQualifier().precision = std::max(right->getQualifier().precision, left->getQualifier().precision);
3821 if (getQualifier().precision != EpqNone) {
3822 left->propagatePrecision(getQualifier().precision);
3823 right->propagatePrecision(getQualifier().precision);
3829 // Recursively propagate precision qualifiers *down* the subtree of the current node,
3830 // until reaching a node that already has a precision qualifier or otherwise does
3831 // not participate in precision propagation.
3832 void TIntermTyped::propagatePrecision(TPrecisionQualifier newPrecision)
// Stop conditions: the node already carries an explicit precision, or its
// basic type does not take a precision qualifier.
3834 if (getQualifier().precision != EpqNone ||
3835 (getBasicType() != EbtInt && getBasicType() != EbtUint &&
3836 getBasicType() != EbtFloat && getBasicType() != EbtFloat16))
// Adopt the propagated precision on this node, then recurse into every
// node kind that has typed children: binary, unary, aggregate, selection.
3839 getQualifier().precision = newPrecision;
3841 TIntermBinary* binaryNode = getAsBinaryNode();
3843 binaryNode->getLeft()->propagatePrecision(newPrecision);
3844 binaryNode->getRight()->propagatePrecision(newPrecision);
3849 TIntermUnary* unaryNode = getAsUnaryNode();
3851 unaryNode->getOperand()->propagatePrecision(newPrecision);
3856 TIntermAggregate* aggregateNode = getAsAggregate();
3857 if (aggregateNode) {
3858 TIntermSequence operands = aggregateNode->getSequence();
3859 for (unsigned int i = 0; i < operands.size(); ++i) {
3860 TIntermTyped* typedNode = operands[i]->getAsTyped();
3863 typedNode->propagatePrecision(newPrecision);
// Selection (?:) nodes: propagate into both result branches.
3869 TIntermSelection* selectionNode = getAsSelectionNode();
3870 if (selectionNode) {
3871 TIntermTyped* typedNode = selectionNode->getTrueBlock()->getAsTyped();
3873 typedNode->propagatePrecision(newPrecision);
3874 typedNode = selectionNode->getFalseBlock()->getAsTyped();
3876 typedNode->propagatePrecision(newPrecision);
// Fold a constant-union node into a new constant-union node of basic type
// 'promoteTo', converting every component. Returns the original node
// unchanged when either the source or destination type is not handled.
3883 TIntermTyped* TIntermediate::promoteConstantUnion(TBasicType promoteTo, TIntermConstantUnion* node) const
3885 const TConstUnionArray& rightUnionArray = node->getConstArray();
3886 int size = node->getType().computeNumComponents();
3888 TConstUnionArray leftUnionArray(size);
// Component-wise conversion loop; the macros below do one component each.
3890 for (int i=0; i < size; i++) {
// PROMOTE: numeric cast into the destination union slot.
3892 #define PROMOTE(Set, CType, Get) leftUnionArray[i].Set(static_cast<CType>(rightUnionArray[i].Get()))
// PROMOTE_TO_BOOL: any nonzero source component becomes true.
3893 #define PROMOTE_TO_BOOL(Get) leftUnionArray[i].setBConst(rightUnionArray[i].Get() != 0)
// TO_ALL: dispatch on the destination type. Two definitions appear;
// presumably an elided preprocessor conditional selects the reduced
// (core types only) vs. full (sized int/float types) build — TODO confirm.
3896 #define TO_ALL(Get) \
3897 switch (promoteTo) { \
3898 case EbtFloat: PROMOTE(setDConst, double, Get); break; \
3899 case EbtInt: PROMOTE(setIConst, int, Get); break; \
3900 case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \
3901 case EbtBool: PROMOTE_TO_BOOL(Get); break; \
3902 default: return node; \
3905 #define TO_ALL(Get) \
3906 switch (promoteTo) { \
3907 case EbtFloat16: PROMOTE(setDConst, double, Get); break; \
3908 case EbtFloat: PROMOTE(setDConst, double, Get); break; \
3909 case EbtDouble: PROMOTE(setDConst, double, Get); break; \
3910 case EbtInt8: PROMOTE(setI8Const, signed char, Get); break; \
3911 case EbtInt16: PROMOTE(setI16Const, short, Get); break; \
3912 case EbtInt: PROMOTE(setIConst, int, Get); break; \
3913 case EbtInt64: PROMOTE(setI64Const, long long, Get); break; \
3914 case EbtUint8: PROMOTE(setU8Const, unsigned char, Get); break; \
3915 case EbtUint16: PROMOTE(setU16Const, unsigned short, Get); break; \
3916 case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \
3917 case EbtUint64: PROMOTE(setU64Const, unsigned long long, Get); break; \
3918 case EbtBool: PROMOTE_TO_BOOL(Get); break; \
3919 default: return node; \
// Dispatch on the source type: pick the getter for each component.
3923 switch (node->getType().getBasicType()) {
3924 case EbtFloat: TO_ALL(getDConst); break;
3925 case EbtInt: TO_ALL(getIConst); break;
3926 case EbtUint: TO_ALL(getUConst); break;
3927 case EbtBool: TO_ALL(getBConst); break;
3929 case EbtFloat16: TO_ALL(getDConst); break;
3930 case EbtDouble: TO_ALL(getDConst); break;
3931 case EbtInt8: TO_ALL(getI8Const); break;
3932 case EbtInt16: TO_ALL(getI16Const); break;
3933 case EbtInt64: TO_ALL(getI64Const); break;
3934 case EbtUint8: TO_ALL(getU8Const); break;
3935 case EbtUint16: TO_ALL(getU16Const); break;
3936 case EbtUint64: TO_ALL(getU64Const); break;
3938 default: return node;
// Build the result with the promoted basic type, preserving the original
// storage qualifier, vector size, and matrix dimensions.
3942 const TType& t = node->getType();
3944 return addConstantUnion(leftUnionArray, TType(promoteTo, t.getQualifier().storage, t.getVectorSize(), t.getMatrixCols(), t.getMatrixRows()),
// Store a copy of the given pragma table on this aggregate node.
// May only be called once per node (asserts the table is not already set).
3948 void TIntermAggregate::setPragmaTable(const TPragmaTable& pTable)
3950 assert(pragmaTable == nullptr);
3951 pragmaTable = new TPragmaTable;
3952 *pragmaTable = pTable;
3955 // If either node is a specialization constant, while the other is
3956 // a constant (or specialization constant), the result is still
3957 // a specialization constant.
// Returns true when both operands are constant-qualified and at least one
// of them is a specialization constant.
3958 bool TIntermediate::specConstantPropagates(const TIntermTyped& node1, const TIntermTyped& node2)
3960 return (node1.getType().getQualifier().isSpecConstant() && node2.getType().getQualifier().isConstant()) ||
3961 (node2.getType().getQualifier().isSpecConstant() && node1.getType().getQualifier().isConstant());
// Traverser that (a) upgrades every texture-typed sampler symbol to a
// combined texture+sampler, and (b) removes now-redundant pure-sampler
// symbols and EOpConstructTextureSampler constructors from aggregates.
3964 struct TextureUpgradeAndSamplerRemovalTransform : public TIntermTraverser {
// Mark any texture symbol as a combined texture-sampler in place.
3965 void visitSymbol(TIntermSymbol* symbol) override {
3966 if (symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isTexture()) {
3967 symbol->getWritableType().getSampler().setCombined(true);
// Compact the aggregate's child sequence, dropping pure samplers and
// unwrapping texture-sampler constructors to their first argument.
3970 bool visitAggregate(TVisit, TIntermAggregate* ag) override {
3971 using namespace std;
3972 TIntermSequence& seq = ag->getSequence();
3973 TQualifierList& qual = ag->getQualifierList();
3975 // qual and seq are indexed using the same indices, so we have to modify both in lock-step
3976 assert(seq.size() == qual.size() || qual.empty());
// Classic in-place filter: 'i' reads, 'write' (declared in an elided
// line — TODO confirm) tracks the next slot to keep.
3979 for (size_t i = 0; i < seq.size(); ++i) {
3980 TIntermSymbol* symbol = seq[i]->getAsSymbolNode();
3981 if (symbol && symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isPureSampler()) {
3982 // remove pure sampler variables
3986 TIntermNode* result = seq[i];
3988 // replace constructors with sampler/textures
3989 TIntermAggregate *constructor = seq[i]->getAsAggregate();
3990 if (constructor && constructor->getOp() == EOpConstructTextureSampler) {
3991 if (!constructor->getSequence().empty())
3992 result = constructor->getSequence()[0];
3995 // write new node & qualifier
3996 seq[write] = result;
3998 qual[write] = qual[i];
// Run the combined-texture-sampler upgrade / pure-sampler removal pass
// (see TextureUpgradeAndSamplerRemovalTransform) over the whole tree.
4010 void TIntermediate::performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root)
4012 TextureUpgradeAndSamplerRemovalTransform transform;
4013 root->traverse(&transform);
// Map a resource type to the name of its command-line shift-binding option.
// Asserts on an unknown resource type (caller contract: valid enum only).
4016 const char* TIntermediate::getResourceName(TResourceType res)
4019 case EResSampler: return "shift-sampler-binding";
4020 case EResTexture: return "shift-texture-binding";
4021 case EResImage: return "shift-image-binding";
4022 case EResUbo: return "shift-UBO-binding";
4023 case EResSsbo: return "shift-ssbo-binding";
4024 case EResUav: return "shift-uav-binding";
4026 assert(0); // internal error: should only be called with valid resource types.
4032 } // end namespace glslang