//
// Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
// Copyright (C) 2012-2015 LunarG, Inc.
// Copyright (C) 2015-2020 Google, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

//
// Build the intermediate representation.
//
#include "localintermediate.h"

#include <tuple>

#include "RemoveTree.h"
#include "SymbolTable.h"
#include "propagateNoContraction.h"
////////////////////////////////////////////////////////////////////////////
//
// First set of functions are to help build the intermediate representation.
// These functions are not member functions of the nodes.
// They are called from parser productions.
//
/////////////////////////////////////////////////////////////////////////////
63 // Add a terminal node for an identifier in an expression.
65 // Returns the added node.
68 TIntermSymbol* TIntermediate::addSymbol(int id, const TString& name, const TType& type, const TConstUnionArray& constArray,
69 TIntermTyped* constSubtree, const TSourceLoc& loc)
71 TIntermSymbol* node = new TIntermSymbol(id, name, type);
73 node->setConstArray(constArray);
74 node->setConstSubtree(constSubtree);
79 TIntermSymbol* TIntermediate::addSymbol(const TIntermSymbol& intermSymbol)
81 return addSymbol(intermSymbol.getId(),
82 intermSymbol.getName(),
83 intermSymbol.getType(),
84 intermSymbol.getConstArray(),
85 intermSymbol.getConstSubtree(),
86 intermSymbol.getLoc());
89 TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable)
91 glslang::TSourceLoc loc; // just a null location
94 return addSymbol(variable, loc);
97 TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable, const TSourceLoc& loc)
99 return addSymbol(variable.getUniqueId(), variable.getName(), variable.getType(), variable.getConstArray(), variable.getConstSubtree(), loc);
102 TIntermSymbol* TIntermediate::addSymbol(const TType& type, const TSourceLoc& loc)
104 TConstUnionArray unionArray; // just a null constant
106 return addSymbol(0, "", type, unionArray, nullptr, loc);
110 // Connect two nodes with a new parent that does a binary operation on the nodes.
112 // Returns the added node.
114 // Returns nullptr if the working conversions and promotions could not be found.
116 TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc)
118 // No operations work on blocks
119 if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
122 // Convert "reference +/- int" and "reference - reference" to integer math
123 if ((op == EOpAdd || op == EOpSub) && extensionRequested(E_GL_EXT_buffer_reference2)) {
125 // No addressing math on struct with unsized array.
126 if ((left->isReference() && left->getType().getReferentType()->containsUnsizedArray()) ||
127 (right->isReference() && right->getType().getReferentType()->containsUnsizedArray())) {
131 if (left->isReference() && isTypeInt(right->getBasicType())) {
132 const TType& referenceType = left->getType();
133 TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
134 left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
136 right = createConversion(EbtInt64, right);
137 right = addBinaryMath(EOpMul, right, size, loc);
139 TIntermTyped *node = addBinaryMath(op, left, right, loc);
140 node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
144 if (op == EOpAdd && right->isReference() && isTypeInt(left->getBasicType())) {
145 const TType& referenceType = right->getType();
146 TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true);
147 right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
149 left = createConversion(EbtInt64, left);
150 left = addBinaryMath(EOpMul, left, size, loc);
152 TIntermTyped *node = addBinaryMath(op, left, right, loc);
153 node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
157 if (op == EOpSub && left->isReference() && right->isReference()) {
158 TIntermConstantUnion* size = addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
160 left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
161 right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
163 left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64));
164 right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64));
166 left = addBinaryMath(EOpSub, left, right, loc);
168 TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc);
172 // No other math operators supported on references
173 if (left->isReference() || right->isReference()) {
178 // Try converting the children's base types to compatible types.
179 auto children = addConversion(op, left, right);
180 left = std::get<0>(children);
181 right = std::get<1>(children);
183 if (left == nullptr || right == nullptr)
186 // Convert the children's type shape to be compatible.
187 addBiShapeConversion(op, left, right);
188 if (left == nullptr || right == nullptr)
192 // Need a new node holding things together. Make
193 // one and promote it to the right type.
195 TIntermBinary* node = addBinaryNode(op, left, right, loc);
199 node->updatePrecision();
202 // If they are both (non-specialization) constants, they must be folded.
203 // (Unless it's the sequence (comma) operator, but that's handled in addComma().)
205 TIntermConstantUnion *leftTempConstant = node->getLeft()->getAsConstantUnion();
206 TIntermConstantUnion *rightTempConstant = node->getRight()->getAsConstantUnion();
207 if (leftTempConstant && rightTempConstant) {
208 TIntermTyped* folded = leftTempConstant->fold(node->getOp(), rightTempConstant);
213 // If can propagate spec-constantness and if the operation is an allowed
214 // specialization-constant operation, make a spec-constant.
215 if (specConstantPropagates(*node->getLeft(), *node->getRight()) && isSpecializationOperation(*node))
216 node->getWritableType().getQualifier().makeSpecConstant();
218 // If must propagate nonuniform, make a nonuniform.
219 if ((node->getLeft()->getQualifier().isNonUniform() || node->getRight()->getQualifier().isNonUniform()) &&
220 isNonuniformPropagating(node->getOp()))
221 node->getWritableType().getQualifier().nonUniform = true;
227 // Low level: add binary node (no promotions or other argument modifications)
229 TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) const
232 TIntermBinary* node = new TIntermBinary(op);
234 loc = left->getLoc();
237 node->setRight(right);
243 // like non-type form, but sets node's type.
245 TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc, const TType& type) const
247 TIntermBinary* node = addBinaryNode(op, left, right, loc);
253 // Low level: add unary node (no promotions or other argument modifications)
255 TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc) const
257 TIntermUnary* node = new TIntermUnary(op);
259 loc = child->getLoc();
261 node->setOperand(child);
267 // like non-type form, but sets node's type.
269 TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc, const TType& type) const
271 TIntermUnary* node = addUnaryNode(op, child, loc);
277 // Connect two nodes through an assignment.
279 // Returns the added node.
281 // Returns nullptr if the 'right' type could not be converted to match the 'left' type,
282 // or the resulting operation cannot be properly promoted.
284 TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc)
286 // No block assignment
287 if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
290 // Convert "reference += int" to "reference = reference + int". We need this because the
291 // "reference + int" calculation involves a cast back to the original type, which makes it
293 if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference() &&
294 extensionRequested(E_GL_EXT_buffer_reference2)) {
296 if (!(right->getType().isScalar() && right->getType().isIntegerDomain()))
299 TIntermTyped* node = addBinaryMath(op == EOpAddAssign ? EOpAdd : EOpSub, left, right, loc);
303 TIntermSymbol* symbol = left->getAsSymbolNode();
304 left = addSymbol(*symbol);
306 node = addAssign(EOpAssign, left, node, loc);
311 // Like adding binary math, except the conversion can only go
312 // from right to left.
315 // convert base types, nullptr return means not possible
316 right = addConversion(op, left->getType(), right);
317 if (right == nullptr)
321 right = addUniShapeConversion(op, left->getType(), right);
324 TIntermBinary* node = addBinaryNode(op, left, right, loc);
329 node->updatePrecision();
335 // Connect two nodes through an index operator, where the left node is the base
336 // of an array or struct, and the right node is a direct or indirect offset.
338 // Returns the added node.
339 // The caller should set the type of the returned node.
341 TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc loc)
343 // caller should set the type
344 return addBinaryNode(op, base, index, loc);
348 // Add one node as the parent of another that it operates on.
350 // Returns the added node.
352 TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, TSourceLoc loc)
357 if (child->getType().getBasicType() == EbtBlock)
362 if (getSource() == EShSourceHlsl) {
363 break; // HLSL can promote logical not
366 if (child->getType().getBasicType() != EbtBool || child->getType().isMatrix() || child->getType().isArray() || child->getType().isVector()) {
371 case EOpPostIncrement:
372 case EOpPreIncrement:
373 case EOpPostDecrement:
374 case EOpPreDecrement:
376 if (child->getType().getBasicType() == EbtStruct || child->getType().isArray())
378 default: break; // some compilers want this
382 // Do we need to promote the operand?
384 TBasicType newType = EbtVoid;
386 case EOpConstructBool: newType = EbtBool; break;
387 case EOpConstructFloat: newType = EbtFloat; break;
388 case EOpConstructInt: newType = EbtInt; break;
389 case EOpConstructUint: newType = EbtUint; break;
391 case EOpConstructInt8: newType = EbtInt8; break;
392 case EOpConstructUint8: newType = EbtUint8; break;
393 case EOpConstructInt16: newType = EbtInt16; break;
394 case EOpConstructUint16: newType = EbtUint16; break;
395 case EOpConstructInt64: newType = EbtInt64; break;
396 case EOpConstructUint64: newType = EbtUint64; break;
397 case EOpConstructDouble: newType = EbtDouble; break;
398 case EOpConstructFloat16: newType = EbtFloat16; break;
400 default: break; // some compilers want this
403 if (newType != EbtVoid) {
404 child = addConversion(op, TType(newType, EvqTemporary, child->getVectorSize(),
405 child->getMatrixCols(),
406 child->getMatrixRows(),
409 if (child == nullptr)
414 // For constructors, we are now done, it was all in the conversion.
415 // TODO: but, did this bypass constant folding?
418 case EOpConstructInt8:
419 case EOpConstructUint8:
420 case EOpConstructInt16:
421 case EOpConstructUint16:
422 case EOpConstructInt:
423 case EOpConstructUint:
424 case EOpConstructInt64:
425 case EOpConstructUint64:
426 case EOpConstructBool:
427 case EOpConstructFloat:
428 case EOpConstructDouble:
429 case EOpConstructFloat16:
431 default: break; // some compilers want this
435 // Make a new node for the operator.
437 TIntermUnary* node = addUnaryNode(op, child, loc);
442 node->updatePrecision();
444 // If it's a (non-specialization) constant, it must be folded.
445 if (node->getOperand()->getAsConstantUnion())
446 return node->getOperand()->getAsConstantUnion()->fold(op, node->getType());
448 // If it's a specialization constant, the result is too,
449 // if the operation is allowed for specialization constants.
450 if (node->getOperand()->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node))
451 node->getWritableType().getQualifier().makeSpecConstant();
453 // If must propagate nonuniform, make a nonuniform.
454 if (node->getOperand()->getQualifier().isNonUniform() && isNonuniformPropagating(node->getOp()))
455 node->getWritableType().getQualifier().nonUniform = true;
460 TIntermTyped* TIntermediate::addBuiltInFunctionCall(const TSourceLoc& loc, TOperator op, bool unary,
461 TIntermNode* childNode, const TType& returnType)
465 // Treat it like a unary operator.
466 // addUnaryMath() should get the type correct on its own;
467 // including constness (which would differ from the prototype).
469 TIntermTyped* child = childNode->getAsTyped();
470 if (child == nullptr)
473 if (child->getAsConstantUnion()) {
474 TIntermTyped* folded = child->getAsConstantUnion()->fold(op, returnType);
479 return addUnaryNode(op, child, child->getLoc(), returnType);
481 // setAggregateOperater() calls fold() for constant folding
482 TIntermTyped* node = setAggregateOperator(childNode, op, returnType, loc);
489 // This is the safe way to change the operator on an aggregate, as it
490 // does lots of error checking and fixing. Especially for establishing
491 // a function call's operation on its set of parameters. Sequences
492 // of instructions are also aggregates, but they just directly set
493 // their operator to EOpSequence.
495 // Returns an aggregate node, which could be the one passed in if
496 // it was already an aggregate.
498 TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type, TSourceLoc loc)
500 TIntermAggregate* aggNode;
503 // Make sure we have an aggregate. If not turn it into one.
505 if (node != nullptr) {
506 aggNode = node->getAsAggregate();
507 if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
509 // Make an aggregate containing this node.
511 aggNode = new TIntermAggregate();
512 aggNode->getSequence().push_back(node);
514 loc = node->getLoc();
517 aggNode = new TIntermAggregate();
522 aggNode->setOperator(op);
524 aggNode->setLoc(loc);
526 aggNode->setType(type);
528 return fold(aggNode);
531 bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const
534 // Does the base type even allow the operation?
536 switch (node->getBasicType()) {
542 // opaque types can be passed to functions
543 if (op == EOpFunction)
546 // HLSL can assign samplers directly (no constructor)
547 if (getSource() == EShSourceHlsl && node->getBasicType() == EbtSampler)
550 // samplers can get assigned via a sampler constructor
551 // (well, not yet, but code in the rest of this function is ready for it)
552 if (node->getBasicType() == EbtSampler && op == EOpAssign &&
553 node->getAsOperator() != nullptr && node->getAsOperator()->getOp() == EOpConstructTextureSampler)
556 // otherwise, opaque types can't even be operated on, let alone converted
565 bool TIntermediate::buildConvertOp(TBasicType dst, TBasicType src, TOperator& newOp) const
571 case EbtUint: newOp = EOpConvUintToDouble; break;
572 case EbtBool: newOp = EOpConvBoolToDouble; break;
573 case EbtFloat: newOp = EOpConvFloatToDouble; break;
574 case EbtInt: newOp = EOpConvIntToDouble; break;
575 case EbtInt8: newOp = EOpConvInt8ToDouble; break;
576 case EbtUint8: newOp = EOpConvUint8ToDouble; break;
577 case EbtInt16: newOp = EOpConvInt16ToDouble; break;
578 case EbtUint16: newOp = EOpConvUint16ToDouble; break;
579 case EbtFloat16: newOp = EOpConvFloat16ToDouble; break;
580 case EbtInt64: newOp = EOpConvInt64ToDouble; break;
581 case EbtUint64: newOp = EOpConvUint64ToDouble; break;
589 case EbtInt: newOp = EOpConvIntToFloat; break;
590 case EbtUint: newOp = EOpConvUintToFloat; break;
591 case EbtBool: newOp = EOpConvBoolToFloat; break;
593 case EbtDouble: newOp = EOpConvDoubleToFloat; break;
594 case EbtInt8: newOp = EOpConvInt8ToFloat; break;
595 case EbtUint8: newOp = EOpConvUint8ToFloat; break;
596 case EbtInt16: newOp = EOpConvInt16ToFloat; break;
597 case EbtUint16: newOp = EOpConvUint16ToFloat; break;
598 case EbtFloat16: newOp = EOpConvFloat16ToFloat; break;
599 case EbtInt64: newOp = EOpConvInt64ToFloat; break;
600 case EbtUint64: newOp = EOpConvUint64ToFloat; break;
609 case EbtInt8: newOp = EOpConvInt8ToFloat16; break;
610 case EbtUint8: newOp = EOpConvUint8ToFloat16; break;
611 case EbtInt16: newOp = EOpConvInt16ToFloat16; break;
612 case EbtUint16: newOp = EOpConvUint16ToFloat16; break;
613 case EbtInt: newOp = EOpConvIntToFloat16; break;
614 case EbtUint: newOp = EOpConvUintToFloat16; break;
615 case EbtBool: newOp = EOpConvBoolToFloat16; break;
616 case EbtFloat: newOp = EOpConvFloatToFloat16; break;
617 case EbtDouble: newOp = EOpConvDoubleToFloat16; break;
618 case EbtInt64: newOp = EOpConvInt64ToFloat16; break;
619 case EbtUint64: newOp = EOpConvUint64ToFloat16; break;
627 case EbtInt: newOp = EOpConvIntToBool; break;
628 case EbtUint: newOp = EOpConvUintToBool; break;
629 case EbtFloat: newOp = EOpConvFloatToBool; break;
631 case EbtDouble: newOp = EOpConvDoubleToBool; break;
632 case EbtInt8: newOp = EOpConvInt8ToBool; break;
633 case EbtUint8: newOp = EOpConvUint8ToBool; break;
634 case EbtInt16: newOp = EOpConvInt16ToBool; break;
635 case EbtUint16: newOp = EOpConvUint16ToBool; break;
636 case EbtFloat16: newOp = EOpConvFloat16ToBool; break;
637 case EbtInt64: newOp = EOpConvInt64ToBool; break;
638 case EbtUint64: newOp = EOpConvUint64ToBool; break;
647 case EbtUint8: newOp = EOpConvUint8ToInt8; break;
648 case EbtInt16: newOp = EOpConvInt16ToInt8; break;
649 case EbtUint16: newOp = EOpConvUint16ToInt8; break;
650 case EbtInt: newOp = EOpConvIntToInt8; break;
651 case EbtUint: newOp = EOpConvUintToInt8; break;
652 case EbtInt64: newOp = EOpConvInt64ToInt8; break;
653 case EbtUint64: newOp = EOpConvUint64ToInt8; break;
654 case EbtBool: newOp = EOpConvBoolToInt8; break;
655 case EbtFloat: newOp = EOpConvFloatToInt8; break;
656 case EbtDouble: newOp = EOpConvDoubleToInt8; break;
657 case EbtFloat16: newOp = EOpConvFloat16ToInt8; break;
664 case EbtInt8: newOp = EOpConvInt8ToUint8; break;
665 case EbtInt16: newOp = EOpConvInt16ToUint8; break;
666 case EbtUint16: newOp = EOpConvUint16ToUint8; break;
667 case EbtInt: newOp = EOpConvIntToUint8; break;
668 case EbtUint: newOp = EOpConvUintToUint8; break;
669 case EbtInt64: newOp = EOpConvInt64ToUint8; break;
670 case EbtUint64: newOp = EOpConvUint64ToUint8; break;
671 case EbtBool: newOp = EOpConvBoolToUint8; break;
672 case EbtFloat: newOp = EOpConvFloatToUint8; break;
673 case EbtDouble: newOp = EOpConvDoubleToUint8; break;
674 case EbtFloat16: newOp = EOpConvFloat16ToUint8; break;
682 case EbtUint8: newOp = EOpConvUint8ToInt16; break;
683 case EbtInt8: newOp = EOpConvInt8ToInt16; break;
684 case EbtUint16: newOp = EOpConvUint16ToInt16; break;
685 case EbtInt: newOp = EOpConvIntToInt16; break;
686 case EbtUint: newOp = EOpConvUintToInt16; break;
687 case EbtInt64: newOp = EOpConvInt64ToInt16; break;
688 case EbtUint64: newOp = EOpConvUint64ToInt16; break;
689 case EbtBool: newOp = EOpConvBoolToInt16; break;
690 case EbtFloat: newOp = EOpConvFloatToInt16; break;
691 case EbtDouble: newOp = EOpConvDoubleToInt16; break;
692 case EbtFloat16: newOp = EOpConvFloat16ToInt16; break;
699 case EbtInt8: newOp = EOpConvInt8ToUint16; break;
700 case EbtUint8: newOp = EOpConvUint8ToUint16; break;
701 case EbtInt16: newOp = EOpConvInt16ToUint16; break;
702 case EbtInt: newOp = EOpConvIntToUint16; break;
703 case EbtUint: newOp = EOpConvUintToUint16; break;
704 case EbtInt64: newOp = EOpConvInt64ToUint16; break;
705 case EbtUint64: newOp = EOpConvUint64ToUint16; break;
706 case EbtBool: newOp = EOpConvBoolToUint16; break;
707 case EbtFloat: newOp = EOpConvFloatToUint16; break;
708 case EbtDouble: newOp = EOpConvDoubleToUint16; break;
709 case EbtFloat16: newOp = EOpConvFloat16ToUint16; break;
718 case EbtUint: newOp = EOpConvUintToInt; break;
719 case EbtBool: newOp = EOpConvBoolToInt; break;
720 case EbtFloat: newOp = EOpConvFloatToInt; break;
722 case EbtInt8: newOp = EOpConvInt8ToInt; break;
723 case EbtUint8: newOp = EOpConvUint8ToInt; break;
724 case EbtInt16: newOp = EOpConvInt16ToInt; break;
725 case EbtUint16: newOp = EOpConvUint16ToInt; break;
726 case EbtDouble: newOp = EOpConvDoubleToInt; break;
727 case EbtFloat16: newOp = EOpConvFloat16ToInt; break;
728 case EbtInt64: newOp = EOpConvInt64ToInt; break;
729 case EbtUint64: newOp = EOpConvUint64ToInt; break;
737 case EbtInt: newOp = EOpConvIntToUint; break;
738 case EbtBool: newOp = EOpConvBoolToUint; break;
739 case EbtFloat: newOp = EOpConvFloatToUint; break;
741 case EbtInt8: newOp = EOpConvInt8ToUint; break;
742 case EbtUint8: newOp = EOpConvUint8ToUint; break;
743 case EbtInt16: newOp = EOpConvInt16ToUint; break;
744 case EbtUint16: newOp = EOpConvUint16ToUint; break;
745 case EbtDouble: newOp = EOpConvDoubleToUint; break;
746 case EbtFloat16: newOp = EOpConvFloat16ToUint; break;
747 case EbtInt64: newOp = EOpConvInt64ToUint; break;
748 case EbtUint64: newOp = EOpConvUint64ToUint; break;
757 case EbtInt8: newOp = EOpConvInt8ToInt64; break;
758 case EbtUint8: newOp = EOpConvUint8ToInt64; break;
759 case EbtInt16: newOp = EOpConvInt16ToInt64; break;
760 case EbtUint16: newOp = EOpConvUint16ToInt64; break;
761 case EbtInt: newOp = EOpConvIntToInt64; break;
762 case EbtUint: newOp = EOpConvUintToInt64; break;
763 case EbtBool: newOp = EOpConvBoolToInt64; break;
764 case EbtFloat: newOp = EOpConvFloatToInt64; break;
765 case EbtDouble: newOp = EOpConvDoubleToInt64; break;
766 case EbtFloat16: newOp = EOpConvFloat16ToInt64; break;
767 case EbtUint64: newOp = EOpConvUint64ToInt64; break;
774 case EbtInt8: newOp = EOpConvInt8ToUint64; break;
775 case EbtUint8: newOp = EOpConvUint8ToUint64; break;
776 case EbtInt16: newOp = EOpConvInt16ToUint64; break;
777 case EbtUint16: newOp = EOpConvUint16ToUint64; break;
778 case EbtInt: newOp = EOpConvIntToUint64; break;
779 case EbtUint: newOp = EOpConvUintToUint64; break;
780 case EbtBool: newOp = EOpConvBoolToUint64; break;
781 case EbtFloat: newOp = EOpConvFloatToUint64; break;
782 case EbtDouble: newOp = EOpConvDoubleToUint64; break;
783 case EbtFloat16: newOp = EOpConvFloat16ToUint64; break;
784 case EbtInt64: newOp = EOpConvInt64ToUint64; break;
796 // This is 'mechanism' here, it does any conversion told.
797 // It is about basic type, not about shape.
798 // The policy comes from the shader or the calling code.
799 TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped* node) const
802 // Add a new newNode for the conversion.
806 bool convertToIntTypes = (convertTo == EbtInt8 || convertTo == EbtUint8 ||
807 convertTo == EbtInt16 || convertTo == EbtUint16 ||
808 convertTo == EbtInt || convertTo == EbtUint ||
809 convertTo == EbtInt64 || convertTo == EbtUint64);
811 bool convertFromIntTypes = (node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8 ||
812 node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16 ||
813 node->getBasicType() == EbtInt || node->getBasicType() == EbtUint ||
814 node->getBasicType() == EbtInt64 || node->getBasicType() == EbtUint64);
816 bool convertToFloatTypes = (convertTo == EbtFloat16 || convertTo == EbtFloat || convertTo == EbtDouble);
818 bool convertFromFloatTypes = (node->getBasicType() == EbtFloat16 ||
819 node->getBasicType() == EbtFloat ||
820 node->getBasicType() == EbtDouble);
822 if (! getArithemeticInt8Enabled()) {
823 if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) ||
824 ((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes))
828 if (! getArithemeticInt16Enabled()) {
829 if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! convertFromIntTypes) ||
830 ((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! convertToIntTypes))
834 if (! getArithemeticFloat16Enabled()) {
835 if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) ||
836 (node->getBasicType() == EbtFloat16 && ! convertToFloatTypes))
841 TIntermUnary* newNode = nullptr;
842 TOperator newOp = EOpNull;
843 if (!buildConvertOp(convertTo, node->getBasicType(), newOp)) {
847 TType newType(convertTo, EvqTemporary, node->getVectorSize(), node->getMatrixCols(), node->getMatrixRows());
848 newNode = addUnaryNode(newOp, node, node->getLoc(), newType);
850 if (node->getAsConstantUnion()) {
852 // 8/16-bit storage extensions don't support 8/16-bit constants, so don't fold conversions
854 if ((getArithemeticInt8Enabled() || !(convertTo == EbtInt8 || convertTo == EbtUint8)) &&
855 (getArithemeticInt16Enabled() || !(convertTo == EbtInt16 || convertTo == EbtUint16)) &&
856 (getArithemeticFloat16Enabled() || !(convertTo == EbtFloat16)))
859 TIntermTyped* folded = node->getAsConstantUnion()->fold(newOp, newType);
865 // Propagate specialization-constant-ness, if allowed
866 if (node->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*newNode))
867 newNode->getWritableType().getQualifier().makeSpecConstant();
872 TIntermTyped* TIntermediate::addConversion(TBasicType convertTo, TIntermTyped* node) const
874 return createConversion(convertTo, node);
877 // For converting a pair of operands to a binary operation to compatible
878 // types with each other, relative to the operation in 'op'.
879 // This does not cover assignment operations, which is asymmetric in that the
880 // left type is not changeable.
881 // See addConversion(op, type, node) for assignments and unary operation
884 // Generally, this is focused on basic type conversion, not shape conversion.
885 // See addShapeConversion() for shape conversions.
887 // Returns the converted pair of nodes.
888 // Returns <nullptr, nullptr> when there is no conversion.
889 std::tuple<TIntermTyped*, TIntermTyped*>
890 TIntermediate::addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1)
892 if (!isConversionAllowed(op, node0) || !isConversionAllowed(op, node1))
893 return std::make_tuple(nullptr, nullptr);
895 if (node0->getType() != node1->getType()) {
896 // If differing structure, then no conversions.
897 if (node0->isStruct() || node1->isStruct())
898 return std::make_tuple(nullptr, nullptr);
900 // If differing arrays, then no conversions.
901 if (node0->getType().isArray() || node1->getType().isArray())
902 return std::make_tuple(nullptr, nullptr);
904 // No implicit conversions for operations involving cooperative matrices
905 if (node0->getType().isCoopMat() || node1->getType().isCoopMat())
906 return std::make_tuple(node0, node1);
909 auto promoteTo = std::make_tuple(EbtNumTypes, EbtNumTypes);
913 // List all the binary ops that can implicitly convert one operand to the other's type;
914 // This implements the 'policy' for implicit type conversion.
918 case EOpLessThanEqual:
919 case EOpGreaterThanEqual:
929 case EOpVectorTimesScalar:
930 case EOpVectorTimesMatrix:
931 case EOpMatrixTimesVector:
932 case EOpMatrixTimesScalar:
938 case EOpSequence: // used by ?:
940 if (node0->getBasicType() == node1->getBasicType())
941 return std::make_tuple(node0, node1);
943 promoteTo = getConversionDestinatonType(node0->getBasicType(), node1->getBasicType(), op);
944 if (std::get<0>(promoteTo) == EbtNumTypes || std::get<1>(promoteTo) == EbtNumTypes)
945 return std::make_tuple(nullptr, nullptr);
952 if (getSource() == EShSourceHlsl)
953 promoteTo = std::make_tuple(EbtBool, EbtBool);
955 return std::make_tuple(node0, node1);
958 // There are no conversions needed for GLSL; the shift amount just needs to be an
959 // integer type, as does the base.
960 // HLSL can promote bools to ints to make this work.
963 if (getSource() == EShSourceHlsl) {
964 TBasicType node0BasicType = node0->getBasicType();
965 if (node0BasicType == EbtBool)
966 node0BasicType = EbtInt;
967 if (node1->getBasicType() == EbtBool)
968 promoteTo = std::make_tuple(node0BasicType, EbtInt);
970 promoteTo = std::make_tuple(node0BasicType, node1->getBasicType());
972 if (isTypeInt(node0->getBasicType()) && isTypeInt(node1->getBasicType()))
973 return std::make_tuple(node0, node1);
975 return std::make_tuple(nullptr, nullptr);
980 if (node0->getType() == node1->getType())
981 return std::make_tuple(node0, node1);
983 return std::make_tuple(nullptr, nullptr);
986 TIntermTyped* newNode0;
987 TIntermTyped* newNode1;
989 if (std::get<0>(promoteTo) != node0->getType().getBasicType()) {
990 if (node0->getAsConstantUnion())
991 newNode0 = promoteConstantUnion(std::get<0>(promoteTo), node0->getAsConstantUnion());
993 newNode0 = createConversion(std::get<0>(promoteTo), node0);
997 if (std::get<1>(promoteTo) != node1->getType().getBasicType()) {
998 if (node1->getAsConstantUnion())
999 newNode1 = promoteConstantUnion(std::get<1>(promoteTo), node1->getAsConstantUnion());
1001 newNode1 = createConversion(std::get<1>(promoteTo), node1);
1005 return std::make_tuple(newNode0, newNode1);
1009 // Convert the node's type to the given type, as allowed by the operation involved: 'op'.
1010 // For implicit conversions, 'op' is not the requested conversion, it is the explicit
1011 // operation requiring the implicit conversion.
1013 // Binary operation conversions should be handled by addConversion(op, node, node), not here.
1015 // Returns a node representing the conversion, which could be the same
1016 // node passed in if no conversion was needed.
1018 // Generally, this is focused on basic type conversion, not shape conversion.
1019 // See addShapeConversion() for shape conversions.
1021 // Return nullptr if a conversion can't be done.
1023 TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TIntermTyped* node)
// NOTE(review): this listing is elided — braces, the enclosing switch, 'break'
// and early-'return' lines between the numbered statements are not shown.
1025 if (!isConversionAllowed(op, node))
1028 // Otherwise, if types are identical, no problem
1029 if (type == node->getType())
1032 // If one's a structure, then no conversions.
1033 if (type.isStruct() || node->isStruct())
1036 // If one's an array, then no conversions.
1037 if (type.isArray() || node->getType().isArray())
1040 // Note: callers are responsible for other aspects of shape,
1041 // like vector and matrix sizes.
// Basic type the node will be converted to; chosen by the 'op' cases below.
1043 TBasicType promoteTo;
1044 // GL_EXT_shader_16bit_storage can't do OpConstantComposite with
1045 // 16-bit types, so disable promotion for those types.
1046 bool canPromoteConstant = true;
1050 // Explicit conversions (unary operations)
1052 case EOpConstructBool:
1053 promoteTo = EbtBool;
1055 case EOpConstructFloat:
1056 promoteTo = EbtFloat;
1058 case EOpConstructInt:
1061 case EOpConstructUint:
1062 promoteTo = EbtUint;
1065 case EOpConstructDouble:
1066 promoteTo = EbtDouble;
// For the small-arithmetic-type constructors, constant folding is allowed
// only when the corresponding explicit-arithmetic-types extension is on.
1068 case EOpConstructFloat16:
1069 promoteTo = EbtFloat16;
1070 canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
1071 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
1073 case EOpConstructInt8:
1074 promoteTo = EbtInt8;
1075 canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
1076 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
1078 case EOpConstructUint8:
1079 promoteTo = EbtUint8;
1080 canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
1081 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
1083 case EOpConstructInt16:
1084 promoteTo = EbtInt16;
1085 canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
1086 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
1088 case EOpConstructUint16:
1089 promoteTo = EbtUint16;
1090 canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
1091 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
1093 case EOpConstructInt64:
1094 promoteTo = EbtInt64;
1096 case EOpConstructUint64:
1097 promoteTo = EbtUint64;
// Implicit conversions: ops whose operands may need converting to 'type'.
1103 case EOpFunctionCall:
1110 case EOpVectorTimesScalarAssign:
1111 case EOpMatrixTimesScalarAssign:
1115 case EOpInclusiveOrAssign:
1116 case EOpExclusiveOrAssign:
1124 case EOpFaceForward:
1141 case EOpConstructStruct:
1142 case EOpConstructCooperativeMatrix:
1144 if (type.isReference() || node->getType().isReference()) {
1145 // types must match to assign a reference
1146 if (type == node->getType())
1152 if (type.getBasicType() == node->getType().getBasicType())
1155 if (canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op))
1156 promoteTo = type.getBasicType();
1161 // For GLSL, there are no conversions needed; the shift amount just needs to be an
1162 // integer type, as do the base/result.
1163 // HLSL can convert the shift from a bool to an int.
1164 case EOpLeftShiftAssign:
1165 case EOpRightShiftAssign:
1167 if (getSource() == EShSourceHlsl && node->getType().getBasicType() == EbtBool)
1168 promoteTo = type.getBasicType();
1170 if (isTypeInt(type.getBasicType()) && isTypeInt(node->getBasicType()))
1179 // default is to require a match; all exceptions should have case statements above
1181 if (type.getBasicType() == node->getType().getBasicType())
// Fold the conversion at compile time when the operand is a constant union
// and constant promotion is permitted for the target type.
1187 if (canPromoteConstant && node->getAsConstantUnion())
1188 return promoteConstantUnion(promoteTo, node->getAsConstantUnion());
1191 // Add a new newNode for the conversion.
1193 TIntermTyped* newNode = createConversion(promoteTo, node);
1198 // Convert the node's shape of type for the given type, as allowed by the
1199 // operation involved: 'op'. This is for situations where there is only one
1200 // direction to consider doing the shape conversion.
1202 // This implements policy; it calls addShapeConversion() for the mechanism.
1204 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
1205 // for GLSL. Bad shapes are caught in conversion or promotion.
1207 // Return 'node' if no conversion was done. Promotion handles final shape
1210 TIntermTyped* TIntermediate::addUniShapeConversion(TOperator op, const TType& type, TIntermTyped* node)
// NOTE(review): elided listing — case labels, 'break'/'return' lines and
// braces between the numbered lines are not shown.
1212 // some source languages don't do this
1213 switch (getSource()) {
1221 // some operations don't do this
1223 case EOpFunctionCall:
1228 // want to support vector *= scalar native ops in AST and lower, not smear, similarly for
1229 // matrix *= scalar, etc.
1235 case EOpInclusiveOrAssign:
1236 case EOpExclusiveOrAssign:
1237 case EOpRightShiftAssign:
1238 case EOpLeftShiftAssign:
1239 if (node->getVectorSize() == 1)
// Delegate the actual conversion mechanics to addShapeConversion().
1253 return addShapeConversion(type, node);
1256 // Convert the nodes' shapes to be compatible for the operation 'op'.
1258 // This implements policy; it calls addShapeConversion() for the mechanism.
1260 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
1261 // for GLSL. Bad shapes are caught in conversion or promotion.
1263 void TIntermediate::addBiShapeConversion(TOperator op, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode)
// NOTE(review): elided listing — case labels, 'return' lines and braces
// between the numbered lines are not shown.
1265 // some source languages don't do this
1266 switch (getSource()) {
1274 // some operations don't do this
1275 // 'break' will mean attempt bidirectional conversion
1283 case EOpInclusiveOrAssign:
1284 case EOpExclusiveOrAssign:
1285 case EOpRightShiftAssign:
1286 case EOpLeftShiftAssign:
1287 // switch to unidirectional conversion (the lhs can't change)
1288 rhsNode = addUniShapeConversion(op, lhsNode->getType(), rhsNode);
1292 // matrix multiply does not change shapes
1293 if (lhsNode->isMatrix() && rhsNode->isMatrix())
1298 // want to support vector * scalar native ops in AST and lower, not smear, similarly for
1299 // matrix * vector, etc.
1300 if (lhsNode->getVectorSize() == 1 || rhsNode->getVectorSize() == 1)
1306 // can natively support the right operand being a scalar and the left a vector,
1307 // but not the reverse
1308 if (rhsNode->getVectorSize() == 1)
1313 case EOpGreaterThan:
1314 case EOpLessThanEqual:
1315 case EOpGreaterThanEqual:
1325 case EOpInclusiveOr:
1326 case EOpExclusiveOr:
1335 // Do bidirectional conversions
1336 if (lhsNode->getType().isScalarOrVec1() || rhsNode->getType().isScalarOrVec1()) {
// Only the scalar/vec1 side is converted toward the other operand's type.
1337 if (lhsNode->getType().isScalarOrVec1())
1338 lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
1340 rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
1342 lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
1343 rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
1346 // Convert the node's shape of type for the given type, as allowed by the
1347 // operation involved: 'op'.
1349 // Generally, the AST represents allowed GLSL shapes, so this isn't needed
1350 // for GLSL. Bad shapes are caught in conversion or promotion.
1352 // Return 'node' if no conversion was done. Promotion handles final shape
1355 TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped* node)
// NOTE(review): elided listing — braces and 'return node;' fall-through
// lines between the numbered lines are not shown.
1357 // no conversion needed
1358 if (node->getType() == type)
1361 // structures and arrays don't change shape, either to or from
1362 if (node->getType().isStruct() || node->getType().isArray() ||
1363 type.isStruct() || type.isArray())
1366 // The new node that handles the conversion
1367 TOperator constructorOp = mapTypeToConstructorOp(type);
1369 if (getSource() == EShSourceHlsl) {
1370 // HLSL rules for scalar, vector and matrix conversions:
1371 // 1) scalar can become anything, initializing every component with its value
1372 // 2) vector and matrix can become scalar, first element is used (warning: truncation)
1373 // 3) matrix can become matrix with less rows and/or columns (warning: truncation)
1374 // 4) vector can become vector with less rows size (warning: truncation)
1375 // 5a) vector 4 can become 2x2 matrix (special case) (same packing layout, it's a reinterpret)
1376 // 5b) 2x2 matrix can become vector 4 (special case) (same packing layout, it's a reinterpret)
1378 const TType &sourceType = node->getType();
1380 // rule 1 for scalar to matrix is special
1381 if (sourceType.isScalarOrVec1() && type.isMatrix()) {
1383 // HLSL semantics: the scalar (or vec1) is replicated to every component of the matrix. Left to its
1384 // own devices, the constructor from a scalar would populate the diagonal. This forces replication
1385 // to every matrix element.
1387 // Note that if the node is complex (e.g, a function call), we don't want to duplicate it here
1388 // repeatedly, so we copy it to a temp, then use the temp.
1389 const int matSize = type.computeNumComponents();
1390 TIntermAggregate* rhsAggregate = new TIntermAggregate();
1392 const bool isSimple = (node->getAsSymbolNode() != nullptr) || (node->getAsConstantUnion() != nullptr);
1395 assert(0); // TODO: use node replicator service when available.
// Push the (simple) node once per matrix component to force replication.
1398 for (int x = 0; x < matSize; ++x)
1399 rhsAggregate->getSequence().push_back(node);
1401 return setAggregateOperator(rhsAggregate, constructorOp, type, node->getLoc());
1405 if ((sourceType.isScalar() && !type.isScalar()) || (!sourceType.isScalar() && type.isScalar()))
1406 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc())
1409 if (sourceType.isMatrix()) {
1411 if (type.isMatrix()) {
1412 if ((sourceType.getMatrixCols() != type.getMatrixCols() || sourceType.getMatrixRows() != type.getMatrixRows()) &&
1413 sourceType.getMatrixCols() >= type.getMatrixCols() && sourceType.getMatrixRows() >= type.getMatrixRows())
1414 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1416 } else if (type.isVector()) {
1417 if (type.getVectorSize() == 4 && sourceType.getMatrixCols() == 2 && sourceType.getMatrixRows() == 2)
1418 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1423 if (sourceType.isVector()) {
1425 if (type.isVector())
1427 if (sourceType.getVectorSize() > type.getVectorSize())
1428 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1430 } else if (type.isMatrix()) {
1431 if (sourceType.getVectorSize() == 4 && type.getMatrixCols() == 2 && type.getMatrixRows() == 2)
1432 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
1437 // scalar -> vector or vec1 -> vector or
1438 // vector -> scalar or
1439 // bigger vector -> smaller vector
1440 if ((node->getType().isScalarOrVec1() && type.isVector()) ||
1441 (node->getType().isVector() && type.isScalar()) ||
1442 (node->isVector() && type.isVector() && node->getVectorSize() > type.getVectorSize()))
1443 return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
// Whether converting basic type 'from' to 'to' counts as an integral promotion.
// NOTE(review): the body is elided in this listing.
1448 bool TIntermediate::isIntegralPromotion(TBasicType from, TBasicType to) const
1450 // integral promotions
// Whether converting basic type 'from' to 'to' counts as a floating-point
// promotion. NOTE(review): the remainder of the body is elided in this listing.
1465 bool TIntermediate::isFPPromotion(TBasicType from, TBasicType to) const
1467 // floating-point promotions
1468 if (to == EbtDouble) {
// Whether 'from' -> 'to' is an allowed integral conversion (promotions are
// handled separately). NOTE(review): most of the body is elided in this listing.
1480 bool TIntermediate::isIntegralConversion(TBasicType from, TBasicType to) const
1490 return version >= 400 || getSource() == EShSourceHlsl;
1554 if (to == EbtUint64) {
// Whether 'from' -> 'to' is an allowed floating-point conversion.
// NOTE(review): most of the body is elided in this listing.
1564 bool TIntermediate::isFPConversion(TBasicType from, TBasicType to) const
1570 if (to == EbtFloat && from == EbtFloat16) {
// Whether 'from' -> 'to' is an allowed integer-to-floating-point conversion.
// NOTE(review): most of the body is elided in this listing.
1577 bool TIntermediate::isFPIntegralConversion(TBasicType from, TBasicType to) const
1606 if (to == EbtDouble) {
1618 // See if the 'from' type is allowed to be implicitly converted to the
1619 // 'to' type. This is not about vector/array/struct, only about basic type.
1621 bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op) const
// NOTE(review): elided listing — switch headers, case labels, 'return' and
// brace lines between the numbered lines are not shown.
1623 if (isEsProfile() || version == 110)
1629 // TODO: Move more policies into language-specific handlers.
1630 // Some languages allow more general (or potentially, more specific) conversions under some conditions.
1631 if (getSource() == EShSourceHlsl) {
1632 const bool fromConvertable = (from == EbtFloat || from == EbtDouble || from == EbtInt || from == EbtUint || from == EbtBool);
1633 const bool toConvertable = (to == EbtFloat || to == EbtDouble || to == EbtInt || to == EbtUint || to == EbtBool);
1635 if (fromConvertable && toConvertable) {
1637 case EOpAndAssign: // assignments can perform arbitrary conversions
1638 case EOpInclusiveOrAssign: // ...
1639 case EOpExclusiveOrAssign: // ...
1640 case EOpAssign: // ...
1641 case EOpAddAssign: // ...
1642 case EOpSubAssign: // ...
1643 case EOpMulAssign: // ...
1644 case EOpVectorTimesScalarAssign: // ...
1645 case EOpMatrixTimesScalarAssign: // ...
1646 case EOpDivAssign: // ...
1647 case EOpModAssign: // ...
1648 case EOpReturn: // function returns can also perform arbitrary conversions
1649 case EOpFunctionCall: // conversion of a calling parameter
1654 case EOpConstructStruct:
// Any of the explicit-arithmetic-types extensions enables the C++-style
// promotion/conversion rules implemented by the is*Promotion/Conversion helpers.
1662 bool explicitTypesEnabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
1663 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8) ||
1664 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
1665 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int32) ||
1666 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int64) ||
1667 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16) ||
1668 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float32) ||
1669 extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float64);
1671 if (explicitTypesEnabled) {
1672 // integral promotions
1673 if (isIntegralPromotion(from, to)) {
1677 // floating-point promotions
1678 if (isFPPromotion(from, to)) {
1682 // integral conversions
1683 if (isIntegralConversion(from, to)) {
1687 // floating-point conversions
1688 if (isFPConversion(from, to)) {
1692 // floating-integral conversions
1693 if (isFPIntegralConversion(from, to)) {
1697 // hlsl supported conversions
1698 if (getSource() == EShSourceHlsl) {
1699 if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat))
1715 return extensionRequested(E_GL_AMD_gpu_shader_int16);
1717 return extensionRequested(E_GL_AMD_gpu_shader_half_float);
1728 return getSource() == EShSourceHlsl;
1731 return extensionRequested(E_GL_AMD_gpu_shader_int16);
1734 extensionRequested(E_GL_AMD_gpu_shader_half_float) ||
1735 getSource() == EShSourceHlsl;
1742 return version >= 400 || getSource() == EShSourceHlsl;
1746 return getSource() == EShSourceHlsl;
1749 return extensionRequested(E_GL_AMD_gpu_shader_int16);
1758 return getSource() == EShSourceHlsl;
1760 return extensionRequested(E_GL_AMD_gpu_shader_int16);
1773 return extensionRequested(E_GL_AMD_gpu_shader_int16);
1783 return extensionRequested(E_GL_AMD_gpu_shader_int16);
1791 return extensionRequested(E_GL_AMD_gpu_shader_int16);
1793 return extensionRequested(E_GL_AMD_gpu_shader_half_float);
1802 return extensionRequested(E_GL_AMD_gpu_shader_int16);
// Whether the signed basic type 'sintType' can represent every value of the
// unsigned basic type 'uintType'. NOTE(review): the body is elided in this listing.
1815 static bool canSignedIntTypeRepresentAllUnsignedValues(TBasicType sintType, TBasicType uintType)
// Map a signed integer basic type to its unsigned counterpart.
// NOTE(review): most of the body is elided in this listing; the visible assert
// suggests only signed integer inputs are valid.
1879 static TBasicType getCorrespondingUnsignedType(TBasicType type)
1882 assert(type == EbtInt);
1901 // Implements the following rules
1902 // - If either operand has type float64_t or derived from float64_t,
1903 // the other shall be converted to float64_t or derived type.
1904 // - Otherwise, if either operand has type float32_t or derived from
1905 // float32_t, the other shall be converted to float32_t or derived type.
1906 // - Otherwise, if either operand has type float16_t or derived from
1907 // float16_t, the other shall be converted to float16_t or derived type.
1908 // - Otherwise, if both operands have integer types the following rules
1909 // shall be applied to the operands:
1910 // - If both operands have the same type, no further conversion
1912 // - Otherwise, if both operands have signed integer types or both
1913 // have unsigned integer types, the operand with the type of lesser
1914 // integer conversion rank shall be converted to the type of the
1915 // operand with greater rank.
1916 // - Otherwise, if the operand that has unsigned integer type has rank
1917 // greater than or equal to the rank of the type of the other
1918 // operand, the operand with signed integer type shall be converted
1919 // to the type of the operand with unsigned integer type.
1920 // - Otherwise, if the type of the operand with signed integer type can
1921 // represent all of the values of the type of the operand with
1922 // unsigned integer type, the operand with unsigned integer type
1923 // shall be converted to the type of the operand with signed
1925 // - Otherwise, both operands shall be converted to the unsigned
1926 // integer type corresponding to the type of the operand with signed
// (Function name keeps its historical "Destinaton" spelling — it is a public
// interface; renaming would break callers.)
1929 std::tuple<TBasicType, TBasicType> TIntermediate::getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const
// NOTE(review): elided listing — brace and assignment lines between the
// numbered lines are not shown. EbtNumTypes acts as the "no conversion" sentinel.
1931 TBasicType res0 = EbtNumTypes;
1932 TBasicType res1 = EbtNumTypes;
1934 if (isEsProfile() || version == 110)
1935 return std::make_tuple(res0, res1);
1937 if (getSource() == EShSourceHlsl) {
1938 if (canImplicitlyPromote(type1, type0, op)) {
1941 } else if (canImplicitlyPromote(type0, type1, op)) {
1945 return std::make_tuple(res0, res1);
// Widest floating-point type wins: double, then float, then float16.
1948 if ((type0 == EbtDouble && canImplicitlyPromote(type1, EbtDouble, op)) ||
1949 (type1 == EbtDouble && canImplicitlyPromote(type0, EbtDouble, op)) ) {
1952 } else if ((type0 == EbtFloat && canImplicitlyPromote(type1, EbtFloat, op)) ||
1953 (type1 == EbtFloat && canImplicitlyPromote(type0, EbtFloat, op)) ) {
1956 } else if ((type0 == EbtFloat16 && canImplicitlyPromote(type1, EbtFloat16, op)) ||
1957 (type1 == EbtFloat16 && canImplicitlyPromote(type0, EbtFloat16, op)) ) {
1960 } else if (isTypeInt(type0) && isTypeInt(type1) &&
1961 (canImplicitlyPromote(type0, type1, op) || canImplicitlyPromote(type1, type0, op))) {
// Same signedness: lesser rank converts to greater rank.
1962 if ((isTypeSignedInt(type0) && isTypeSignedInt(type1)) ||
1963 (isTypeUnsignedInt(type0) && isTypeUnsignedInt(type1))) {
1964 if (getTypeRank(type0) < getTypeRank(type1)) {
1971 } else if (isTypeUnsignedInt(type0) && (getTypeRank(type0) > getTypeRank(type1))) {
1974 } else if (isTypeUnsignedInt(type1) && (getTypeRank(type1) > getTypeRank(type0))) {
1977 } else if (isTypeSignedInt(type0)) {
1978 if (canSignedIntTypeRepresentAllUnsignedValues(type0, type1)) {
1982 res0 = getCorrespondingUnsignedType(type0);
1983 res1 = getCorrespondingUnsignedType(type0);
1985 } else if (isTypeSignedInt(type1)) {
1986 if (canSignedIntTypeRepresentAllUnsignedValues(type1, type0)) {
1990 res0 = getCorrespondingUnsignedType(type1);
1991 res1 = getCorrespondingUnsignedType(type1);
1996 return std::make_tuple(res0, res1);
2000 // Given a type, find what operation would fully construct it.
2002 TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const
// NOTE(review): elided listing — the 'case Ebt...:' labels of the basic-type
// switch, 'break' lines and braces between the numbered lines are not shown.
2004 TOperator op = EOpNull;
2006 if (type.getQualifier().isNonUniform())
2007 return EOpConstructNonuniform;
2009 if (type.isCoopMat())
2010 return EOpConstructCooperativeMatrix;
// Dispatch on basic type, then matrix dimensions or vector size.
2012 switch (type.getBasicType()) {
2014 op = EOpConstructStruct;
2017 if (type.getSampler().isCombined())
2018 op = EOpConstructTextureSampler;
2021 if (type.isMatrix()) {
2022 switch (type.getMatrixCols()) {
2024 switch (type.getMatrixRows()) {
2025 case 2: op = EOpConstructMat2x2; break;
2026 case 3: op = EOpConstructMat2x3; break;
2027 case 4: op = EOpConstructMat2x4; break;
2028 default: break; // some compilers want this
2032 switch (type.getMatrixRows()) {
2033 case 2: op = EOpConstructMat3x2; break;
2034 case 3: op = EOpConstructMat3x3; break;
2035 case 4: op = EOpConstructMat3x4; break;
2036 default: break; // some compilers want this
2040 switch (type.getMatrixRows()) {
2041 case 2: op = EOpConstructMat4x2; break;
2042 case 3: op = EOpConstructMat4x3; break;
2043 case 4: op = EOpConstructMat4x4; break;
2044 default: break; // some compilers want this
2047 default: break; // some compilers want this
2050 switch(type.getVectorSize()) {
2051 case 1: op = EOpConstructFloat; break;
2052 case 2: op = EOpConstructVec2; break;
2053 case 3: op = EOpConstructVec3; break;
2054 case 4: op = EOpConstructVec4; break;
2055 default: break; // some compilers want this
2060 if (type.getMatrixCols()) {
2061 switch (type.getMatrixCols()) {
2063 switch (type.getMatrixRows()) {
2064 case 2: op = EOpConstructIMat2x2; break;
2065 case 3: op = EOpConstructIMat2x3; break;
2066 case 4: op = EOpConstructIMat2x4; break;
2067 default: break; // some compilers want this
2071 switch (type.getMatrixRows()) {
2072 case 2: op = EOpConstructIMat3x2; break;
2073 case 3: op = EOpConstructIMat3x3; break;
2074 case 4: op = EOpConstructIMat3x4; break;
2075 default: break; // some compilers want this
2079 switch (type.getMatrixRows()) {
2080 case 2: op = EOpConstructIMat4x2; break;
2081 case 3: op = EOpConstructIMat4x3; break;
2082 case 4: op = EOpConstructIMat4x4; break;
2083 default: break; // some compilers want this
2088 switch(type.getVectorSize()) {
2089 case 1: op = EOpConstructInt; break;
2090 case 2: op = EOpConstructIVec2; break;
2091 case 3: op = EOpConstructIVec3; break;
2092 case 4: op = EOpConstructIVec4; break;
2093 default: break; // some compilers want this
2098 if (type.getMatrixCols()) {
2099 switch (type.getMatrixCols()) {
2101 switch (type.getMatrixRows()) {
2102 case 2: op = EOpConstructUMat2x2; break;
2103 case 3: op = EOpConstructUMat2x3; break;
2104 case 4: op = EOpConstructUMat2x4; break;
2105 default: break; // some compilers want this
2109 switch (type.getMatrixRows()) {
2110 case 2: op = EOpConstructUMat3x2; break;
2111 case 3: op = EOpConstructUMat3x3; break;
2112 case 4: op = EOpConstructUMat3x4; break;
2113 default: break; // some compilers want this
2117 switch (type.getMatrixRows()) {
2118 case 2: op = EOpConstructUMat4x2; break;
2119 case 3: op = EOpConstructUMat4x3; break;
2120 case 4: op = EOpConstructUMat4x4; break;
2121 default: break; // some compilers want this
2126 switch(type.getVectorSize()) {
2127 case 1: op = EOpConstructUint; break;
2128 case 2: op = EOpConstructUVec2; break;
2129 case 3: op = EOpConstructUVec3; break;
2130 case 4: op = EOpConstructUVec4; break;
2131 default: break; // some compilers want this
2136 if (type.getMatrixCols()) {
2137 switch (type.getMatrixCols()) {
2139 switch (type.getMatrixRows()) {
2140 case 2: op = EOpConstructBMat2x2; break;
2141 case 3: op = EOpConstructBMat2x3; break;
2142 case 4: op = EOpConstructBMat2x4; break;
2143 default: break; // some compilers want this
2147 switch (type.getMatrixRows()) {
2148 case 2: op = EOpConstructBMat3x2; break;
2149 case 3: op = EOpConstructBMat3x3; break;
2150 case 4: op = EOpConstructBMat3x4; break;
2151 default: break; // some compilers want this
2155 switch (type.getMatrixRows()) {
2156 case 2: op = EOpConstructBMat4x2; break;
2157 case 3: op = EOpConstructBMat4x3; break;
2158 case 4: op = EOpConstructBMat4x4; break;
2159 default: break; // some compilers want this
2164 switch(type.getVectorSize()) {
2165 case 1: op = EOpConstructBool; break;
2166 case 2: op = EOpConstructBVec2; break;
2167 case 3: op = EOpConstructBVec3; break;
2168 case 4: op = EOpConstructBVec4; break;
2169 default: break; // some compilers want this
2175 if (type.getMatrixCols()) {
2176 switch (type.getMatrixCols()) {
2178 switch (type.getMatrixRows()) {
2179 case 2: op = EOpConstructDMat2x2; break;
2180 case 3: op = EOpConstructDMat2x3; break;
2181 case 4: op = EOpConstructDMat2x4; break;
2182 default: break; // some compilers want this
2186 switch (type.getMatrixRows()) {
2187 case 2: op = EOpConstructDMat3x2; break;
2188 case 3: op = EOpConstructDMat3x3; break;
2189 case 4: op = EOpConstructDMat3x4; break;
2190 default: break; // some compilers want this
2194 switch (type.getMatrixRows()) {
2195 case 2: op = EOpConstructDMat4x2; break;
2196 case 3: op = EOpConstructDMat4x3; break;
2197 case 4: op = EOpConstructDMat4x4; break;
2198 default: break; // some compilers want this
2203 switch(type.getVectorSize()) {
2204 case 1: op = EOpConstructDouble; break;
2205 case 2: op = EOpConstructDVec2; break;
2206 case 3: op = EOpConstructDVec3; break;
2207 case 4: op = EOpConstructDVec4; break;
2208 default: break; // some compilers want this
2213 if (type.getMatrixCols()) {
2214 switch (type.getMatrixCols()) {
2216 switch (type.getMatrixRows()) {
2217 case 2: op = EOpConstructF16Mat2x2; break;
2218 case 3: op = EOpConstructF16Mat2x3; break;
2219 case 4: op = EOpConstructF16Mat2x4; break;
2220 default: break; // some compilers want this
2224 switch (type.getMatrixRows()) {
2225 case 2: op = EOpConstructF16Mat3x2; break;
2226 case 3: op = EOpConstructF16Mat3x3; break;
2227 case 4: op = EOpConstructF16Mat3x4; break;
2228 default: break; // some compilers want this
2232 switch (type.getMatrixRows()) {
2233 case 2: op = EOpConstructF16Mat4x2; break;
2234 case 3: op = EOpConstructF16Mat4x3; break;
2235 case 4: op = EOpConstructF16Mat4x4; break;
2236 default: break; // some compilers want this
2242 switch (type.getVectorSize()) {
2243 case 1: op = EOpConstructFloat16; break;
2244 case 2: op = EOpConstructF16Vec2; break;
2245 case 3: op = EOpConstructF16Vec3; break;
2246 case 4: op = EOpConstructF16Vec4; break;
2247 default: break; // some compilers want this
2252 switch(type.getVectorSize()) {
2253 case 1: op = EOpConstructInt8; break;
2254 case 2: op = EOpConstructI8Vec2; break;
2255 case 3: op = EOpConstructI8Vec3; break;
2256 case 4: op = EOpConstructI8Vec4; break;
2257 default: break; // some compilers want this
2261 switch(type.getVectorSize()) {
2262 case 1: op = EOpConstructUint8; break;
2263 case 2: op = EOpConstructU8Vec2; break;
2264 case 3: op = EOpConstructU8Vec3; break;
2265 case 4: op = EOpConstructU8Vec4; break;
2266 default: break; // some compilers want this
2270 switch(type.getVectorSize()) {
2271 case 1: op = EOpConstructInt16; break;
2272 case 2: op = EOpConstructI16Vec2; break;
2273 case 3: op = EOpConstructI16Vec3; break;
2274 case 4: op = EOpConstructI16Vec4; break;
2275 default: break; // some compilers want this
2279 switch(type.getVectorSize()) {
2280 case 1: op = EOpConstructUint16; break;
2281 case 2: op = EOpConstructU16Vec2; break;
2282 case 3: op = EOpConstructU16Vec3; break;
2283 case 4: op = EOpConstructU16Vec4; break;
2284 default: break; // some compilers want this
2288 switch(type.getVectorSize()) {
2289 case 1: op = EOpConstructInt64; break;
2290 case 2: op = EOpConstructI64Vec2; break;
2291 case 3: op = EOpConstructI64Vec3; break;
2292 case 4: op = EOpConstructI64Vec4; break;
2293 default: break; // some compilers want this
2297 switch(type.getVectorSize()) {
2298 case 1: op = EOpConstructUint64; break;
2299 case 2: op = EOpConstructU64Vec2; break;
2300 case 3: op = EOpConstructU64Vec3; break;
2301 case 4: op = EOpConstructU64Vec4; break;
2302 default: break; // some compilers want this
2306 op = EOpConstructReference;
2317 // Safe way to combine two nodes into an aggregate. Works with null pointers,
2318 // a node that's not an aggregate yet, etc.
2320 // Returns the resulting aggregate, unless nullptr was passed in for
2321 // both existing nodes.
2323 TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right)
// NOTE(review): elided listing — braces and 'return' lines are not shown.
2325 if (left == nullptr && right == nullptr)
// Reuse 'left' when it is already an op-less aggregate; otherwise start a
// fresh aggregate and seed it with 'left'.
2328 TIntermAggregate* aggNode = nullptr;
2329 if (left != nullptr)
2330 aggNode = left->getAsAggregate();
2331 if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
2332 aggNode = new TIntermAggregate;
2333 if (left != nullptr)
2334 aggNode->getSequence().push_back(left);
2337 if (right != nullptr)
2338 aggNode->getSequence().push_back(right);
// Overload that also stamps a source location on the grown aggregate.
2343 TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc& loc)
2345 TIntermAggregate* aggNode = growAggregate(left, right);
2347 aggNode->setLoc(loc);
2353 // Turn an existing node into an aggregate.
2355 // Returns an aggregate, unless nullptr was passed in for the existing node.
2357 TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node)
// Wrap a single node in a new aggregate, inheriting the node's location.
2359 if (node == nullptr)
2362 TIntermAggregate* aggNode = new TIntermAggregate;
2363 aggNode->getSequence().push_back(node);
2364 aggNode->setLoc(node->getLoc());
2369 TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node, const TSourceLoc& loc)
// Wrap a single node in a new aggregate, using the caller-supplied location.
2371 if (node == nullptr)
2374 TIntermAggregate* aggNode = new TIntermAggregate;
2375 aggNode->getSequence().push_back(node);
2376 aggNode->setLoc(loc);
2382 // Make an aggregate with an empty sequence.
// Make an aggregate with an empty sequence, stamped with 'loc'.
2384 TIntermAggregate* TIntermediate::makeAggregate(const TSourceLoc& loc)
2386 TIntermAggregate* aggNode = new TIntermAggregate;
2387 aggNode->setLoc(loc);
2393 // For "if" test nodes. There are three children; a condition,
2394 // a true path, and a false path. The two paths are in the
2397 // Returns the selection node created.
2399 TIntermSelection* TIntermediate::addSelection(TIntermTyped* cond, TIntermNodePair nodePair, const TSourceLoc& loc)
// NOTE(review): the tail of this function (location stamp / return) is elided
// in this listing.
2402 // Don't prune the false path for compile-time constants; it's needed
2403 // for static access analysis.
2406 TIntermSelection* node = new TIntermSelection(cond, nodePair.node1, nodePair.node2);
2412 TIntermTyped* TIntermediate::addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc)
2414 // However, the lowest precedence operators of the sequence operator ( , ) and the assignment operators
2415 // ... are not included in the operators that can create a constant expression.
2417 // if (left->getType().getQualifier().storage == EvqConst &&
2418 // right->getType().getQualifier().storage == EvqConst) {
// Represent the comma as an aggregate; its value/type is the right operand's.
2423 TIntermTyped *commaAggregate = growAggregate(left, right, loc);
2424 commaAggregate->getAsAggregate()->setOperator(EOpComma);
2425 commaAggregate->setType(right->getType());
2426 commaAggregate->getWritableType().getQualifier().makeTemporary();
2428 return commaAggregate;
// Build a method node (object.name) with the given result type and location.
// NOTE(review): the trailing 'return' is elided in this listing.
2431 TIntermTyped* TIntermediate::addMethod(TIntermTyped* object, const TType& type, const TString* name, const TSourceLoc& loc)
2433 TIntermMethod* method = new TIntermMethod(object, type, *name);
2434 method->setLoc(loc);
2440 // For "?:" test nodes. There are three children; a condition,
2441 // a true path, and a false path. The two paths are specified
2442 // as separate parameters. For vector 'cond', the true and false
2443 // are not paths, but vectors to mix.
2445 // Specialization constant operations include
2446 // - The ternary operator ( ? : )
2448 // Returns the selection node created, or nullptr if one could not be.
2450 TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock,
2451 const TSourceLoc& loc)
// NOTE(review): elided listing — braces and several 'return' lines between
// the numbered lines are not shown.
2453 // If it's void, go to the if-then-else selection()
2454 if (trueBlock->getBasicType() == EbtVoid && falseBlock->getBasicType() == EbtVoid) {
2455 TIntermNodePair pair = { trueBlock, falseBlock };
2456 TIntermSelection* selection = addSelection(cond, pair, loc);
2457 if (getSource() == EShSourceHlsl)
2458 selection->setNoShortCircuit();
2464 // Get compatible types.
2466 auto children = addConversion(EOpSequence, trueBlock, falseBlock);
2467 trueBlock = std::get<0>(children);
2468 falseBlock = std::get<1>(children);
2470 if (trueBlock == nullptr || falseBlock == nullptr)
2473 // Handle a vector condition as a mix
2474 if (!cond->getType().isScalarOrVec1()) {
2475 TType targetVectorType(trueBlock->getType().getBasicType(), EvqTemporary,
2476 cond->getType().getVectorSize());
2477 // smear true/false operands as needed
2478 trueBlock = addUniShapeConversion(EOpMix, targetVectorType, trueBlock);
2479 falseBlock = addUniShapeConversion(EOpMix, targetVectorType, falseBlock);
2481 // After conversion, types have to match.
2482 if (falseBlock->getType() != trueBlock->getType())
2485 // make the mix operation
2486 TIntermAggregate* mix = makeAggregate(loc);
2487 mix = growAggregate(mix, falseBlock);
2488 mix = growAggregate(mix, trueBlock);
2489 mix = growAggregate(mix, cond);
2490 mix->setType(targetVectorType);
2496 // Now have a scalar condition...
2498 // Convert true and false expressions to matching types
2499 addBiShapeConversion(EOpMix, trueBlock, falseBlock);
2501 // After conversion, types have to match.
2502 if (falseBlock->getType() != trueBlock->getType())
2505 // Eliminate the selection when the condition is a scalar and all operands are constant.
2506 if (cond->getAsConstantUnion() && trueBlock->getAsConstantUnion() && falseBlock->getAsConstantUnion()) {
2507 if (cond->getAsConstantUnion()->getConstArray()[0].getBConst())
2514 // Make a selection node.
2516 TIntermSelection* node = new TIntermSelection(cond, trueBlock, falseBlock, trueBlock->getType());
// Result precision is the higher of the two branch precisions.
2518 node->getQualifier().precision = std::max(trueBlock->getQualifier().precision, falseBlock->getQualifier().precision);
2520 if ((cond->getQualifier().isConstant() && specConstantPropagates(*trueBlock, *falseBlock)) ||
2521 (cond->getQualifier().isSpecConstant() && trueBlock->getQualifier().isConstant() &&
2522 falseBlock->getQualifier().isConstant()))
2523 node->getQualifier().makeSpecConstant();
2525 node->getQualifier().makeTemporary();
2527 if (getSource() == EShSourceHlsl)
2528 node->setNoShortCircuit();
2534 // Constant terminal nodes. Has a union that contains bool, float or int constants
2536 // Returns the constant union node created.
2539 TIntermConstantUnion* TIntermediate::addConstantUnion(const TConstUnionArray& unionArray, const TType& t, const TSourceLoc& loc, bool literal) const
// Build the constant leaf and force const storage. The remainder of the body
// (setting the source location, the literal flag, and the return) is not
// visible in this extract.
2541 TIntermConstantUnion* node = new TIntermConstantUnion(unionArray, t);
2542 node->getQualifier().storage = EvqConst;
2549 TIntermConstantUnion* TIntermediate::addConstantUnion(signed char i8, const TSourceLoc& loc, bool literal) const
2551 TConstUnionArray unionArray(1);
2552 unionArray[0].setI8Const(i8);
2554 return addConstantUnion(unionArray, TType(EbtInt8, EvqConst), loc, literal);
2557 TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned char u8, const TSourceLoc& loc, bool literal) const
2559 TConstUnionArray unionArray(1);
2560 unionArray[0].setUConst(u8);
2562 return addConstantUnion(unionArray, TType(EbtUint8, EvqConst), loc, literal);
// Make a single-component signed 16-bit integer constant node.
2565 TIntermConstantUnion* TIntermediate::addConstantUnion(signed short i16, const TSourceLoc& loc, bool literal) const
2567 TConstUnionArray unionArray(1);
2568 unionArray[0].setI16Const(i16);
2570 return addConstantUnion(unionArray, TType(EbtInt16, EvqConst), loc, literal);
// Make a single-component unsigned 16-bit integer constant node.
2573 TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned short u16, const TSourceLoc& loc, bool literal) const
2575 TConstUnionArray unionArray(1);
2576 unionArray[0].setU16Const(u16);
2578 return addConstantUnion(unionArray, TType(EbtUint16, EvqConst), loc, literal);
2581 TIntermConstantUnion* TIntermediate::addConstantUnion(int i, const TSourceLoc& loc, bool literal) const
2583 TConstUnionArray unionArray(1);
2584 unionArray[0].setIConst(i);
2586 return addConstantUnion(unionArray, TType(EbtInt, EvqConst), loc, literal);
// Make a single-component 32-bit unsigned integer constant node.
2589 TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned int u, const TSourceLoc& loc, bool literal) const
2591 TConstUnionArray unionArray(1);
2592 unionArray[0].setUConst(u);
2594 return addConstantUnion(unionArray, TType(EbtUint, EvqConst), loc, literal);
// Make a single-component 64-bit signed integer constant node.
2597 TIntermConstantUnion* TIntermediate::addConstantUnion(long long i64, const TSourceLoc& loc, bool literal) const
2599 TConstUnionArray unionArray(1);
2600 unionArray[0].setI64Const(i64);
2602 return addConstantUnion(unionArray, TType(EbtInt64, EvqConst), loc, literal);
// Make a single-component 64-bit unsigned integer constant node.
2605 TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned long long u64, const TSourceLoc& loc, bool literal) const
2607 TConstUnionArray unionArray(1);
2608 unionArray[0].setU64Const(u64);
2610 return addConstantUnion(unionArray, TType(EbtUint64, EvqConst), loc, literal);
2613 TIntermConstantUnion* TIntermediate::addConstantUnion(bool b, const TSourceLoc& loc, bool literal) const
2615 TConstUnionArray unionArray(1);
2616 unionArray[0].setBConst(b);
2618 return addConstantUnion(unionArray, TType(EbtBool, EvqConst), loc, literal);
// Make a single-component floating-point constant node; the caller chooses
// the concrete basic type (float, double, or float16).
2621 TIntermConstantUnion* TIntermediate::addConstantUnion(double d, TBasicType baseType, const TSourceLoc& loc, bool literal) const
2623 assert(baseType == EbtFloat || baseType == EbtDouble || baseType == EbtFloat16);
// All float widths are carried as a double inside the union.
2625 TConstUnionArray unionArray(1);
2626 unionArray[0].setDConst(d);
2628 return addConstantUnion(unionArray, TType(baseType, EvqConst), loc, literal);
// Make a single-component string constant node (pointer is stored, not copied).
2631 TIntermConstantUnion* TIntermediate::addConstantUnion(const TString* s, const TSourceLoc& loc, bool literal) const
2633 TConstUnionArray unionArray(1);
2634 unionArray[0].setSConst(s);
2636 return addConstantUnion(unionArray, TType(EbtString, EvqConst), loc, literal);
2639 // Put vector swizzle selectors onto the given sequence
2640 void TIntermediate::pushSelector(TIntermSequence& sequence, const TVectorSelector& selector, const TSourceLoc& loc)
2642 TIntermConstantUnion* constIntNode = addConstantUnion(selector, loc);
2643 sequence.push_back(constIntNode);
2646 // Put matrix swizzle selectors onto the given sequence
2647 void TIntermediate::pushSelector(TIntermSequence& sequence, const TMatrixSelector& selector, const TSourceLoc& loc)
2649 TIntermConstantUnion* constIntNode = addConstantUnion(selector.coord1, loc);
2650 sequence.push_back(constIntNode);
2651 constIntNode = addConstantUnion(selector.coord2, loc);
2652 sequence.push_back(constIntNode);
2655 // Make an aggregate node that has a sequence of all selectors.
// Explicit instantiations for the two selector kinds the parser uses.
2656 template TIntermTyped* TIntermediate::addSwizzle<TVectorSelector>(TSwizzleSelectors<TVectorSelector>& selector, const TSourceLoc& loc);
2657 template TIntermTyped* TIntermediate::addSwizzle<TMatrixSelector>(TSwizzleSelectors<TMatrixSelector>& selector, const TSourceLoc& loc);
2658 template<typename selectorType>
2659 TIntermTyped* TIntermediate::addSwizzle(TSwizzleSelectors<selectorType>& selector, const TSourceLoc& loc)
// Wrap all selectors in an EOpSequence aggregate; each selector is pushed
// via pushSelector (one int node per vector selector, two per matrix
// selector). The trailing return statement is not visible in this extract.
2661 TIntermAggregate* node = new TIntermAggregate(EOpSequence);
2664 TIntermSequence &sequenceVector = node->getSequence();
2666 for (int i = 0; i < selector.size(); i++)
2667 pushSelector(sequenceVector, selector[i], loc);
2673 // Follow the left branches down to the root of an l-value
2674 // expression (just "." and []).
2676 // Return the base of the l-value (where following indexing quits working).
2677 // Return nullptr if a chain following dereferences cannot be followed.
2679 // 'swizzleOkay' says whether or not it is okay to consider a swizzle
2680 // a valid part of the dereference chain.
2682 const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay)
// NOTE(review): the enclosing loop construct and the early-return statements
// are missing from this extract; only the per-iteration tests remain.
2685 const TIntermBinary* binary = node->getAsBinaryNode();
2686 if (binary == nullptr)
// Only indexing/member-selection/swizzle operators continue the chain.
2688 TOperator op = binary->getOp();
2689 if (op != EOpIndexDirect && op != EOpIndexIndirect && op != EOpIndexDirectStruct && op != EOpVectorSwizzle && op != EOpMatrixSwizzle)
2691 if (! swizzleOkay) {
2692 if (op == EOpVectorSwizzle || op == EOpMatrixSwizzle)
// Indexing a non-array vector or scalar is treated like a swizzle here.
2694 if ((op == EOpIndexDirect || op == EOpIndexIndirect) &&
2695 (binary->getLeft()->getType().isVector() || binary->getLeft()->getType().isScalar()) &&
2696 ! binary->getLeft()->getType().isArray())
// Walk one step down the left spine of the expression.
2699 node = node->getAsBinaryNode()->getLeft();
2704 // Create while and do-while loop nodes.
2706 TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst,
2707 const TSourceLoc& loc)
// Build the loop node; 'testFirst' distinguishes while (true) from do-while
// (false). The location-setting and return lines are not visible here.
2709 TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst);
2716 // Create a for-loop sequence.
2718 TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test,
2719 TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TIntermLoop*& node)
// The loop node is also returned to the caller through the 'node' out-param.
2721 node = new TIntermLoop(body, test, terminal, testFirst);
2724 // make a sequence of the initializer and statement, but try to reuse the
2725 // aggregate already created for whatever is in the initializer, if there is one
2726 TIntermAggregate* loopSequence = (initializer == nullptr ||
2727 initializer->getAsAggregate() == nullptr) ? makeAggregate(initializer, loc)
2728 : initializer->getAsAggregate();
// Temporarily clear the op so growAggregate appends into the existing
// aggregate instead of nesting it.
2729 if (loopSequence != nullptr && loopSequence->getOp() == EOpSequence)
2730 loopSequence->setOp(EOpNull);
2731 loopSequence = growAggregate(loopSequence, node);
2732 loopSequence->setOperator(EOpSequence);
2734 return loopSequence;
// Create a branch node with no associated expression (e.g. break/continue).
2740 TIntermBranch* TIntermediate::addBranch(TOperator branchOp, const TSourceLoc& loc)
2742 return addBranch(branchOp, nullptr, loc);
// Create a branch node carrying an expression (e.g. return <expr>).
// The location-setting and return lines are not visible in this extract.
2745 TIntermBranch* TIntermediate::addBranch(TOperator branchOp, TIntermTyped* expression, const TSourceLoc& loc)
2747 TIntermBranch* node = new TIntermBranch(branchOp, expression);
2754 // This is to be executed after the final root is put on top by the parsing
2757 bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/)
// NOTE(review): the early-return and switch 'break' lines are missing from
// this extract.
2759 if (root == nullptr)
2762 // Finish off the top-level sequence
2763 TIntermAggregate* aggRoot = root->getAsAggregate()
2764 if (aggRoot && aggRoot->getOp() == EOpNull)
2765 aggRoot->setOperator(EOpSequence);
2768 // Propagate 'noContraction' label in backward from 'precise' variables.
2769 glslang::PropagateNoContraction(*this);
2771 switch (textureSamplerTransformMode) {
2772 case EShTexSampTransKeep:
2774 case EShTexSampTransUpgradeTextureRemoveSampler:
2775 performTextureUpgradeAndSamplerRemovalTransformation(root);
2777 case EShTexSampTransCount:
2786 void TIntermediate::addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage language, TSymbolTable& symbolTable)
2788 // Add top-level nodes for declarations that must be checked cross
2789 // compilation unit by a linker, yet might not have been referenced
2792 // Almost entirely, translation of symbols is driven by what's present
2793 // in the AST traversal, not by translating the symbol table.
2795 // However, there are some special cases:
2796 // - From the specification: "Special built-in inputs gl_VertexID and
2797 // gl_InstanceID are also considered active vertex attributes."
2798 // - Linker-based type mismatch error reporting needs to see all
2799 // uniforms/ins/outs variables and blocks.
2800 // - ftransform() can make gl_Vertex and gl_ModelViewProjectionMatrix active.
2803 // if (ftransformUsed) {
2804 // TODO: 1.1 lowering functionality: track ftransform() usage
2805 // addSymbolLinkageNode(root, symbolTable, "gl_Vertex");
2806 // addSymbolLinkageNode(root, symbolTable, "gl_ModelViewProjectionMatrix");
2809 if (language == EShLangVertex) {
2810 // the names won't be found in the symbol table unless the versions are right,
2811 // so version logic does not need to be repeated here
2812 addSymbolLinkageNode(linkage, symbolTable, "gl_VertexID");
2813 addSymbolLinkageNode(linkage, symbolTable, "gl_InstanceID");
2816 // Add a child to the root node for the linker objects
2817 linkage->setOperator(EOpLinkerObjects);
2818 treeRoot = growAggregate(treeRoot, linkage);
2822 // Add the given name or symbol to the list of nodes at the end of the tree used
2823 // for link-time checking and external linkage.
2826 void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable& symbolTable, const TString& name)
// Look up the symbol; a null-check guarding the call below appears to have
// been dropped from this extract — verify before editing.
2828 TSymbol* symbol = symbolTable.find(name);
2830 addSymbolLinkageNode(linkage, *symbol->getAsVariable());
2833 void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol& symbol)
// NOTE(review): the condition guarding the anonymous-member fallback
// (variable == nullptr) is missing from this extract.
2835 const TVariable* variable = symbol.getAsVariable();
2837 // This must be a member of an anonymous block, and we need to add the whole block
2838 const TAnonMember* anon = symbol.getAsAnonMember();
2839 variable = &anon->getAnonContainer();
// Append a symbol node for the variable to the linkage aggregate.
2841 TIntermSymbol* node = addSymbol(*variable);
2842 linkage = growAggregate(linkage, node);
2846 // Add a caller->callee relationship to the call graph.
2847 // Assumes the strings are unique per signature.
2849 void TIntermediate::addToCallGraph(TInfoSink& /*infoSink*/, const TString& caller, const TString& callee)
2851 // Duplicates are okay, but faster to not keep them, and they come grouped by caller,
2852 // as long as new ones are push on the same end we check on for duplicates
// NOTE(review): the loop's break/return statements are missing from this
// extract; the scan stops at the first duplicate or at the end of the
// current caller's group.
2853 for (TGraph::const_iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
2854 if (call->caller != caller)
2856 if (call->callee == callee)
// New edges are pushed on the front, the same end scanned above.
2860 callGraph.push_front(TCall(caller, callee));
2864 // This deletes the tree.
2866 void TIntermediate::removeTree()
// Recursively free every node reachable from the root (see RemoveTree.h).
2869 RemoveAllTreeNodes(treeRoot);
2873 // Implement the part of KHR_vulkan_glsl that lists the set of operations
2874 // that can result in a specialization constant operation.
2876 // "5.x Specialization Constant Operations"
2878 // Only some operations discussed in this section may be applied to a
2879 // specialization constant and still yield a result that is a
2880 // specialization constant. The operations allowed are listed below.
2881 // When a specialization constant is operated on with one of these
2882 // operators and with another constant or specialization constant, the
2883 // result is implicitly a specialization constant.
2885 // - int(), uint(), and bool() constructors for type conversions
2886 // from any of the following types to any of the following types:
2890 // - vector versions of the above conversion constructors
2891 // - allowed implicit conversions of the above
2892 // - swizzles (e.g., foo.yx)
2893 // - The following when applied to integer or unsigned integer types:
2894 // * unary negative ( - )
2895 // * binary operations ( + , - , * , / , % )
2896 // * shift ( <<, >> )
2897 // * bitwise operations ( & , | , ^ )
2898 // - The following when applied to integer or unsigned integer scalar types:
2899 // * comparison ( == , != , > , >= , < , <= )
2900 // - The following when applied to the Boolean scalar type:
2902 // * logical operations ( && , || , ^^ )
2903 // * comparison ( == , != )"
2905 // This function just handles binary and unary nodes. Construction
2906 // rules are handled in construction paths that are not covered by the unary
2907 // and binary paths, while required conversions will still show up here
2908 // as unary converters stemming from a construction operator.
2910 bool TIntermediate::isSpecializationOperation(const TIntermOperator& node) const
// NOTE(review): the 'return true/false' and 'default:' lines for the switch
// groups below are missing from this extract; each run of case labels ends
// in a result line not visible here.
2912 // The operations resulting in floating point are quite limited
2913 // (However, some floating-point operations result in bool, like ">",
2914 // so are handled later.)
2915 if (node.getType().isFloatingDomain()) {
2916 switch (node.getOp()) {
2917 case EOpIndexDirect:
2918 case EOpIndexIndirect:
2919 case EOpIndexDirectStruct:
2920 case EOpVectorSwizzle:
2921 case EOpConvFloatToDouble:
2922 case EOpConvDoubleToFloat:
2923 case EOpConvFloat16ToFloat:
2924 case EOpConvFloatToFloat16:
2925 case EOpConvFloat16ToDouble:
2926 case EOpConvDoubleToFloat16:
2933 // Check for floating-point arguments
2934 if (const TIntermBinary* bin = node.getAsBinaryNode())
2935 if (bin->getLeft() ->getType().isFloatingDomain() ||
2936 bin->getRight()->getType().isFloatingDomain())
2939 // So, for now, we can assume everything left is non-floating-point...
2941 // Now check for integer/bool-based operations
2942 switch (node.getOp()) {
2944 // dereference/swizzle
2945 case EOpIndexDirect:
2946 case EOpIndexIndirect:
2947 case EOpIndexDirectStruct:
2948 case EOpVectorSwizzle:
2951 case EOpConvInt8ToBool:
2952 case EOpConvInt16ToBool:
2953 case EOpConvIntToBool:
2954 case EOpConvInt64ToBool:
2955 case EOpConvUint8ToBool:
2956 case EOpConvUint16ToBool:
2957 case EOpConvUintToBool:
2958 case EOpConvUint64ToBool:
2961 case EOpConvBoolToInt8:
2962 case EOpConvBoolToInt16:
2963 case EOpConvBoolToInt:
2964 case EOpConvBoolToInt64:
2965 case EOpConvBoolToUint8:
2966 case EOpConvBoolToUint16:
2967 case EOpConvBoolToUint:
2968 case EOpConvBoolToUint64:
2970 // int8_t -> (u)int*
2971 case EOpConvInt8ToInt16:
2972 case EOpConvInt8ToInt:
2973 case EOpConvInt8ToInt64:
2974 case EOpConvInt8ToUint8:
2975 case EOpConvInt8ToUint16:
2976 case EOpConvInt8ToUint:
2977 case EOpConvInt8ToUint64:
2979 // int16_t -> (u)int*
2980 case EOpConvInt16ToInt8:
2981 case EOpConvInt16ToInt:
2982 case EOpConvInt16ToInt64:
2983 case EOpConvInt16ToUint8:
2984 case EOpConvInt16ToUint16:
2985 case EOpConvInt16ToUint:
2986 case EOpConvInt16ToUint64:
2988 // int32_t -> (u)int*
2989 case EOpConvIntToInt8:
2990 case EOpConvIntToInt16:
2991 case EOpConvIntToInt64:
2992 case EOpConvIntToUint8:
2993 case EOpConvIntToUint16:
2994 case EOpConvIntToUint:
2995 case EOpConvIntToUint64:
2997 // int64_t -> (u)int*
2998 case EOpConvInt64ToInt8:
2999 case EOpConvInt64ToInt16:
3000 case EOpConvInt64ToInt:
3001 case EOpConvInt64ToUint8:
3002 case EOpConvInt64ToUint16:
3003 case EOpConvInt64ToUint:
3004 case EOpConvInt64ToUint64:
3006 // uint8_t -> (u)int*
3007 case EOpConvUint8ToInt8:
3008 case EOpConvUint8ToInt16:
3009 case EOpConvUint8ToInt:
3010 case EOpConvUint8ToInt64:
3011 case EOpConvUint8ToUint16:
3012 case EOpConvUint8ToUint:
3013 case EOpConvUint8ToUint64:
3015 // uint16_t -> (u)int*
3016 case EOpConvUint16ToInt8:
3017 case EOpConvUint16ToInt16:
3018 case EOpConvUint16ToInt:
3019 case EOpConvUint16ToInt64:
3020 case EOpConvUint16ToUint8:
3021 case EOpConvUint16ToUint:
3022 case EOpConvUint16ToUint64:
3024 // uint32_t -> (u)int*
3025 case EOpConvUintToInt8:
3026 case EOpConvUintToInt16:
3027 case EOpConvUintToInt:
3028 case EOpConvUintToInt64:
3029 case EOpConvUintToUint8:
3030 case EOpConvUintToUint16:
3031 case EOpConvUintToUint64:
3033 // uint64_t -> (u)int*
3034 case EOpConvUint64ToInt8:
3035 case EOpConvUint64ToInt16:
3036 case EOpConvUint64ToInt:
3037 case EOpConvUint64ToInt64:
3038 case EOpConvUint64ToUint8:
3039 case EOpConvUint64ToUint16:
3040 case EOpConvUint64ToUint:
3047 // binary operations
3051 case EOpVectorTimesScalar:
3057 case EOpInclusiveOr:
3058 case EOpExclusiveOr:
3065 case EOpGreaterThan:
3066 case EOpLessThanEqual:
3067 case EOpGreaterThanEqual:
3074 // Is the operation one that must propagate nonuniform?
3075 bool TIntermediate::isNonuniformPropagating(TOperator op) const
// NOTE(review): the 'switch (op)' line and the 'return true/false' lines for
// the case groups below are missing from this extract.
3077 // "* All Operators in Section 5.1 (Operators), except for assignment,
3078 // arithmetic assignment, and sequence
3079 // * Component selection in Section 5.5
3080 // * Matrix components in Section 5.6
3081 // * Structure and Array Operations in Section 5.7, except for the length
3084 case EOpPostIncrement:
3085 case EOpPostDecrement:
3086 case EOpPreIncrement:
3087 case EOpPreDecrement:
3091 case EOpVectorLogicalNot:
3102 case EOpInclusiveOr:
3103 case EOpExclusiveOr:
3107 case EOpGreaterThan:
3108 case EOpLessThanEqual:
3109 case EOpGreaterThanEqual:
3110 case EOpVectorTimesScalar:
3111 case EOpVectorTimesMatrix:
3112 case EOpMatrixTimesVector:
3113 case EOpMatrixTimesScalar:
3119 case EOpIndexDirect:
3120 case EOpIndexIndirect:
3121 case EOpIndexDirectStruct:
3122 case EOpVectorSwizzle:
3132 ////////////////////////////////////////////////////////////////
3134 // Member functions of the nodes used for building the tree.
3136 ////////////////////////////////////////////////////////////////
3139 // Say whether or not an operation node changes the value of a variable.
3141 // Returns true if state is modified.
3143 bool TIntermOperator::modifiesState() const
// NOTE(review): the 'switch (op)' line and the result lines are missing from
// this extract; the listed cases are the mutating operators
// (inc/dec and the compound assignments).
3146 case EOpPostIncrement:
3147 case EOpPostDecrement:
3148 case EOpPreIncrement:
3149 case EOpPreDecrement:
3154 case EOpVectorTimesMatrixAssign:
3155 case EOpVectorTimesScalarAssign:
3156 case EOpMatrixTimesScalarAssign:
3157 case EOpMatrixTimesMatrixAssign:
3161 case EOpInclusiveOrAssign:
3162 case EOpExclusiveOrAssign:
3163 case EOpLeftShiftAssign:
3164 case EOpRightShiftAssign:
3172 // returns true if the operator is for one of the constructors
3174 bool TIntermOperator::isConstructor() const
3176 return op > EOpConstructGuardStart && op < EOpConstructGuardEnd;
3180 // Make sure the type of an operator is appropriate for its
3181 // combination of operation and operand type. This will invoke
3182 // promoteUnary, promoteBinary, etc as needed.
3184 // Returns false if nothing makes sense.
3186 bool TIntermediate::promote(TIntermOperator* node)
// Dispatch on the concrete node kind; the null-case and fall-through result
// lines are not visible in this extract.
3188 if (node == nullptr)
3191 if (node->getAsUnaryNode())
3192 return promoteUnary(*node->getAsUnaryNode());
3194 if (node->getAsBinaryNode())
3195 return promoteBinary(*node->getAsBinaryNode());
3197 if (node->getAsAggregate())
3198 return promoteAggregate(*node->getAsAggregate());
3204 // See TIntermediate::promote
3206 bool TIntermediate::promoteUnary(TIntermUnary& node)
// NOTE(review): the enclosing 'switch (op)' structure, its case labels for
// the logical-not/bitwise-not groups, and the return statements are missing
// from this extract.
3208 const TOperator op = node.getOp();
3209 TIntermTyped* operand = node.getOperand();
3213 // Convert operand to a boolean type
3214 if (operand->getBasicType() != EbtBool) {
3215 // Add constructor to boolean type. If that fails, we can't do it, so return false.
3216 TIntermTyped* converted = addConversion(op, TType(EbtBool), operand);
3217 if (converted == nullptr)
3220 // Use the result of converting the node to a bool.
3221 node.setOperand(operand = converted); // also updates stack variable
// Bitwise-not requires an integer operand.
3225 if (!isTypeInt(operand->getBasicType()))
3229 case EOpPostIncrement:
3230 case EOpPostDecrement:
3231 case EOpPreIncrement:
3232 case EOpPreDecrement:
// Inc/dec works on any integer or floating-point scalar domain.
3233 if (!isTypeInt(operand->getBasicType()) &&
3234 operand->getBasicType() != EbtFloat &&
3235 operand->getBasicType() != EbtFloat16 &&
3236 operand->getBasicType() != EbtDouble)
3241 // HLSL uses this path for initial function signature finding for built-ins
3242 // taking a single argument, which generally don't participate in
3243 // operator-based type promotion (type conversion will occur later).
3244 // For now, scalar argument cases are relying on the setType() call below.
3245 if (getSource() == EShSourceHlsl)
3248 // GLSL only allows integer arguments for the cases identified above in the
3250 if (operand->getBasicType() != EbtFloat)
// Result takes the operand's type, as a temporary.
3254 node.setType(operand->getType());
3255 node.getWritableType().getQualifier().makeTemporary();
// Raise this node's precision to the operand's precision, for the basic
// types that carry a precision qualifier.
3260 void TIntermUnary::updatePrecision()
3262 if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
3263 if (operand->getQualifier().precision > getQualifier().precision)
3264 getQualifier().precision = operand->getQualifier().precision;
3269 // See TIntermediate::promote
3271 bool TIntermediate::promoteBinary(TIntermBinary& node)
// NOTE(review): many 'switch (op)' headers, case labels, 'return true/false'
// and 'break' lines are missing from this extract; the visible lines are the
// decision structure only — verify against upstream before editing.
3273 TOperator op = node.getOp();
3274 TIntermTyped* left = node.getLeft();
3275 TIntermTyped* right = node.getRight();
3277 // Arrays and structures have to be exact matches.
3278 if ((left->isArray() || right->isArray() || left->getBasicType() == EbtStruct || right->getBasicType() == EbtStruct)
3279 && left->getType() != right->getType())
3282 // Base assumption: just make the type the same as the left
3283 // operand. Only deviations from this will be coded.
3284 node.setType(left->getType());
3285 node.getWritableType().getQualifier().clear();
3287 // Composite and opaque types don't having pending operator changes, e.g.,
3288 // array, structure, and samplers. Just establish final type and correctness.
3289 if (left->isArray() || left->getBasicType() == EbtStruct || left->getBasicType() == EbtSampler) {
3293 if (left->getBasicType() == EbtSampler) {
3294 // can't compare samplers
3297 // Promote to conditional
3298 node.setType(TType(EbtBool));
3304 // Keep type from above
3314 // We now have only scalars, vectors, and matrices to worry about.
3317 // HLSL implicitly promotes bool -> int for numeric operations.
3318 // (Implicit conversions to make the operands match each other's types were already done.)
3319 if (getSource() == EShSourceHlsl &&
3320 (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)) {
3323 case EOpGreaterThan:
3324 case EOpLessThanEqual:
3325 case EOpGreaterThanEqual:
3333 case EOpInclusiveOr:
3334 case EOpExclusiveOr:
// Convert whichever side is bool to int, then re-derive the result type.
3340 if (left->getBasicType() == EbtBool)
3341 left = createConversion(EbtInt, left)
3342 if (right->getBasicType() == EbtBool)
3343 right = createConversion(EbtInt, right);
3344 if (left == nullptr || right == nullptr)
3347 node.setRight(right);
3349 // Update the original base assumption on result type..
3350 node.setType(left->getType());
3351 node.getWritableType().getQualifier().clear();
3360 // Do general type checks against individual operands (comparing left and right is coming up, checking mixed shapes after that)
3363 case EOpGreaterThan:
3364 case EOpLessThanEqual:
3365 case EOpGreaterThanEqual:
3366 // Relational comparisons need numeric types and will promote to scalar Boolean.
3367 if (left->getBasicType() == EbtBool)
3370 node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));
3375 if (getSource() == EShSourceHlsl) {
3376 const int resultWidth = std::max(left->getVectorSize(), right->getVectorSize());
3378 // In HLSL, == or != on vectors means component-wise comparison.
3379 if (resultWidth > 1) {
3380 op = (op == EOpEqual) ? EOpVectorEqual : EOpVectorNotEqual;
3384 node.setType(TType(EbtBool, EvqTemporary, resultWidth));
3386 // All the above comparisons result in a bool (but not the vector compares)
3387 node.setType(TType(EbtBool));
3394 // logical ops operate only on Booleans or vectors of Booleans.
3395 if (left->getBasicType() != EbtBool || left->isMatrix())
3398 if (getSource() == EShSourceGlsl) {
3399 // logical ops operate only on scalar Booleans and will promote to scalar Boolean.
3400 if (left->isVector())
3404 node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));
3409 case EOpRightShiftAssign:
3410 case EOpLeftShiftAssign:
3416 case EOpInclusiveOr:
3417 case EOpExclusiveOr:
3419 case EOpInclusiveOrAssign:
3420 case EOpExclusiveOrAssign:
3421 if (getSource() == EShSourceHlsl)
3424 // Check for integer-only operands.
3425 if (!isTypeInt(left->getBasicType()) && !isTypeInt(right->getBasicType()))
3427 if (left->isMatrix() || right->isMatrix())
3440 // check for non-Boolean operands
3441 if (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)
3448 // Compare left and right, and finish with the cases where the operand types must match
3451 case EOpGreaterThan:
3452 case EOpLessThanEqual:
3453 case EOpGreaterThanEqual:
3457 case EOpVectorEqual:
3458 case EOpVectorNotEqual:
3463 return left->getType() == right->getType();
3469 case EOpInclusiveOr:
3470 case EOpExclusiveOr:
3472 case EOpInclusiveOrAssign:
3473 case EOpExclusiveOrAssign:
3482 // Quick out in case the types do match
3483 if (left->getType() == right->getType())
3490 // At least the basic type has to match
3491 if (left->getBasicType() != right->getBasicType())
// Cooperative-matrix operands: type parameters must match, and only
// scalar-scaling forms of multiply are allowed below.
3498 if (left->getType().isCoopMat() || right->getType().isCoopMat()) {
3499 if (left->getType().isCoopMat() && right->getType().isCoopMat() &&
3500 *left->getType().getTypeParameters() != *right->getType().getTypeParameters()) {
3506 if (left->getType().isCoopMat() && right->getType().isCoopMat()) {
3509 if (op == EOpMulAssign && right->getType().isCoopMat()) {
3512 node.setOp(op == EOpMulAssign ? EOpMatrixTimesScalarAssign : EOpMatrixTimesScalar);
3513 if (right->getType().isCoopMat()) {
3514 node.setType(right->getType());
3521 // These require both to be cooperative matrices
3522 if (!left->getType().isCoopMat() || !right->getType().isCoopMat()) {
3532 // Finish handling the case, for all ops, where both operands are scalars.
3533 if (left->isScalar() && right->isScalar())
3536 // Finish handling the case, for all ops, where there are two vectors of different sizes
3537 if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize() && right->getVectorSize() > 1)
3541 // We now have a mix of scalars, vectors, or matrices, for non-relational operations.
3544 // Can these two operands be combined, what is the resulting type?
3545 TBasicType basicType = left->getBasicType();
// EOpMul: pick the specialized multiply op from the operand shapes.
3548 if (!left->isMatrix() && right->isMatrix()) {
3549 if (left->isVector()) {
3550 if (left->getVectorSize() != right->getMatrixRows())
3552 node.setOp(op = EOpVectorTimesMatrix);
3553 node.setType(TType(basicType, EvqTemporary, right->getMatrixCols()));
3555 node.setOp(op = EOpMatrixTimesScalar);
3556 node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), right->getMatrixRows()));
3558 } else if (left->isMatrix() && !right->isMatrix()) {
3559 if (right->isVector()) {
3560 if (left->getMatrixCols() != right->getVectorSize())
3562 node.setOp(op = EOpMatrixTimesVector);
3563 node.setType(TType(basicType, EvqTemporary, left->getMatrixRows()));
3565 node.setOp(op = EOpMatrixTimesScalar);
3567 } else if (left->isMatrix() && right->isMatrix()) {
3568 if (left->getMatrixCols() != right->getMatrixRows())
3570 node.setOp(op = EOpMatrixTimesMatrix);
3571 node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), left->getMatrixRows()));
3572 } else if (! left->isMatrix() && ! right->isMatrix()) {
3573 if (left->isVector() && right->isVector()) {
3574 ; // leave as component product
3575 } else if (left->isVector() || right->isVector()) {
3576 node.setOp(op = EOpVectorTimesScalar);
3577 if (right->isVector())
3578 node.setType(TType(basicType, EvqTemporary, right->getVectorSize()));
// EOpMulAssign: like above, but the result shape must stay assignable to
// the left operand.
3585 if (! left->isMatrix() && right->isMatrix()) {
3586 if (left->isVector()) {
3587 if (left->getVectorSize() != right->getMatrixRows() || left->getVectorSize() != right->getMatrixCols())
3589 node.setOp(op = EOpVectorTimesMatrixAssign);
3593 } else if (left->isMatrix() && !right->isMatrix()) {
3594 if (right->isVector()) {
3597 node.setOp(op = EOpMatrixTimesScalarAssign);
3599 } else if (left->isMatrix() && right->isMatrix()) {
3600 if (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixCols() != right->getMatrixRows())
3602 node.setOp(op = EOpMatrixTimesMatrixAssign);
3603 } else if (!left->isMatrix() && !right->isMatrix()) {
3604 if (left->isVector() && right->isVector()) {
3605 // leave as component product
3606 } else if (left->isVector() || right->isVector()) {
3607 if (! left->isVector())
3609 node.setOp(op = EOpVectorTimesScalarAssign);
// Shifts: a vector shift amount requires a matching vector left operand.
3618 case EOpRightShiftAssign:
3619 case EOpLeftShiftAssign:
3620 if (right->isVector() && (! left->isVector() || right->getVectorSize() != left->getVectorSize()))
3625 if (left->getVectorSize() != right->getVectorSize() || left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows())
3634 case EOpInclusiveOr:
3635 case EOpExclusiveOr:
3641 case EOpInclusiveOrAssign:
3642 case EOpExclusiveOrAssign:
// Remaining componentwise ops: shapes must be compatible; a vector/matrix
// on the right widens the result type.
3644 if ((left->isMatrix() && right->isVector()) ||
3645 (left->isVector() && right->isMatrix()) ||
3646 left->getBasicType() != right->getBasicType())
3648 if (left->isMatrix() && right->isMatrix() && (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows()))
3650 if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize())
3652 if (right->isVector() || right->isMatrix()) {
3653 node.getWritableType().shallowCopy(right->getType());
3654 node.getWritableType().getQualifier().makeTemporary();
3663 // One more check for assignment.
3666 // The resulting type has to match the left operand.
3674 case EOpInclusiveOrAssign:
3675 case EOpExclusiveOrAssign:
3676 case EOpLeftShiftAssign:
3677 case EOpRightShiftAssign:
3678 if (node.getType() != left->getType())
3689 // See TIntermediate::promote
3691 bool TIntermediate::promoteAggregate(TIntermAggregate& node)
3693 TOperator op = node.getOp();
3694 TIntermSequence& args = node.getSequence();
3695 const int numArgs = static_cast<int>(args.size());
3697 // Presently, only hlsl does intrinsic promotions.
3698 if (getSource() != EShSourceHlsl)
3701 // set of opcodes that can be promoted in this manner.
// NOTE(review): most case labels of this opcode filter and the early
// returns are missing from this extract.
3709 case EOpFaceForward:
3710 // case EOpFindMSB: TODO:
3711 // case EOpFindLSB: TODO:
3721 // case EOpGenMul: TODO:
3725 // case EOpSinCos: TODO:
3733 // TODO: array and struct behavior
3735 // Try converting all nodes to the given node's type
3736 TIntermSequence convertedArgs(numArgs, nullptr);
3738 // Try to convert all types to the nonConvArg type.
3739 for (int nonConvArg = 0; nonConvArg < numArgs; ++nonConvArg) {
3740 // Try converting all args to this arg's type
3741 for (int convArg = 0; convArg < numArgs; ++convArg) {
3742 convertedArgs[convArg] = addConversion(op, args[nonConvArg]->getAsTyped()->getType(),
3743 args[convArg]->getAsTyped());
3746 // If we successfully converted all the args, use the result.
3747 if (std::all_of(convertedArgs.begin(), convertedArgs.end(),
3748 [](const TIntermNode* node) { return node != nullptr; })) {
3750 std::swap(args, convertedArgs);
// Take the higher of the two operand precisions as this node's precision,
// then push that precision back down into both subtrees.
3758 void TIntermBinary::updatePrecision()
3760 if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
3761 getQualifier().precision = std::max(right->getQualifier().precision, left->getQualifier().precision);
3762 if (getQualifier().precision != EpqNone) {
3763 left->propagatePrecision(getQualifier().precision);
3764 right->propagatePrecision(getQualifier().precision);
// Recursively assign newPrecision to this node and its children.
// Stops early when this node already has an explicit precision, or when its
// basic type is not one of the precision-qualified types
// (int/uint/float/float16). Recurses into binary, unary, aggregate, and
// selection children.
// NOTE(review): this excerpt is elided — null checks / early returns between
// the visible lines (e.g. before dereferencing binaryNode/unaryNode/typedNode)
// are not shown; confirm against the full source before reasoning about them.
3769 void TIntermTyped::propagatePrecision(TPrecisionQualifier newPrecision)
3771 if (getQualifier().precision != EpqNone || (getBasicType() != EbtInt && getBasicType() != EbtUint && getBasicType() != EbtFloat && getBasicType() != EbtFloat16))
3774 getQualifier().precision = newPrecision;
// Binary node: propagate into both operands.
3776 TIntermBinary* binaryNode = getAsBinaryNode();
3778 binaryNode->getLeft()->propagatePrecision(newPrecision);
3779 binaryNode->getRight()->propagatePrecision(newPrecision);
// Unary node: propagate into the single operand.
3784 TIntermUnary* unaryNode = getAsUnaryNode();
3786 unaryNode->getOperand()->propagatePrecision(newPrecision);
// Aggregate node: propagate into every typed operand in the sequence.
3791 TIntermAggregate* aggregateNode = getAsAggregate();
3792 if (aggregateNode) {
3793 TIntermSequence operands = aggregateNode->getSequence();
3794 for (unsigned int i = 0; i < operands.size(); ++i) {
3795 TIntermTyped* typedNode = operands[i]->getAsTyped();
3798 typedNode->propagatePrecision(newPrecision);
// Selection node (?: / if-else value): propagate into both branches.
3804 TIntermSelection* selectionNode = getAsSelectionNode();
3805 if (selectionNode) {
3806 TIntermTyped* typedNode = selectionNode->getTrueBlock()->getAsTyped();
3808 typedNode->propagatePrecision(newPrecision);
3809 typedNode = selectionNode->getFalseBlock()->getAsTyped();
3811 typedNode->propagatePrecision(newPrecision);
// Fold a type promotion of a constant-union node: produce a new constant node
// whose components are the input's components converted to promoteTo.
// Returns the original node unchanged when either the source or destination
// basic type is not handled (the macros' default cases return node).
3818 TIntermTyped* TIntermediate::promoteConstantUnion(TBasicType promoteTo, TIntermConstantUnion* node) const
3820 const TConstUnionArray& rightUnionArray = node->getConstArray();
// Component count covers vectors and matrices (computeNumComponents).
3821 int size = node->getType().computeNumComponents();
3823 TConstUnionArray leftUnionArray(size);
3825 for (int i=0; i < size; i++) {
// PROMOTE: store component i converted to CType via the given setter/getter.
3827 #define PROMOTE(Set, CType, Get) leftUnionArray[i].Set(static_cast<CType>(rightUnionArray[i].Get()))
// PROMOTE_TO_BOOL: any nonzero source value becomes true.
3828 #define PROMOTE_TO_BOOL(Get) leftUnionArray[i].setBConst(rightUnionArray[i].Get() != 0)
// TO_ALL: dispatch on the destination type. Two definitions are visible;
// NOTE(review): the preprocessor conditional separating them (presumably a
// reduced-type-set vs. full-type-set build) is elided from this view — confirm
// against the full source.
3831 #define TO_ALL(Get) \
3832 switch (promoteTo) { \
3833 case EbtFloat: PROMOTE(setDConst, double, Get); break; \
3834 case EbtInt: PROMOTE(setIConst, int, Get); break; \
3835 case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \
3836 case EbtBool: PROMOTE_TO_BOOL(Get); break; \
3837 default: return node; \
3840 #define TO_ALL(Get) \
3841 switch (promoteTo) { \
3842 case EbtFloat16: PROMOTE(setDConst, double, Get); break; \
3843 case EbtFloat: PROMOTE(setDConst, double, Get); break; \
3844 case EbtDouble: PROMOTE(setDConst, double, Get); break; \
3845 case EbtInt8: PROMOTE(setI8Const, char, Get); break; \
3846 case EbtInt16: PROMOTE(setI16Const, short, Get); break; \
3847 case EbtInt: PROMOTE(setIConst, int, Get); break; \
3848 case EbtInt64: PROMOTE(setI64Const, long long, Get); break; \
3849 case EbtUint8: PROMOTE(setU8Const, unsigned char, Get); break; \
3850 case EbtUint16: PROMOTE(setU16Const, unsigned short, Get); break; \
3851 case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \
3852 case EbtUint64: PROMOTE(setU64Const, unsigned long long, Get); break; \
3853 case EbtBool: PROMOTE_TO_BOOL(Get); break; \
3854 default: return node; \
// Dispatch on the source type, selecting the matching getter for TO_ALL.
3858 switch (node->getType().getBasicType()) {
3859 case EbtFloat: TO_ALL(getDConst); break;
3860 case EbtInt: TO_ALL(getIConst); break;
3861 case EbtUint: TO_ALL(getUConst); break;
3862 case EbtBool: TO_ALL(getBConst); break;
3864 case EbtFloat16: TO_ALL(getDConst); break;
3865 case EbtDouble: TO_ALL(getDConst); break;
3866 case EbtInt8: TO_ALL(getI8Const); break;
3867 case EbtInt16: TO_ALL(getI16Const); break;
3868 case EbtInt64: TO_ALL(getI64Const); break;
3869 case EbtUint8: TO_ALL(getU8Const); break;
3870 case EbtUint16: TO_ALL(getU16Const); break;
3871 case EbtUint64: TO_ALL(getU64Const); break;
// Unhandled source type: return the node unpromoted.
3873 default: return node;
// Build the result with the promoted basic type but the original node's
// storage qualifier, vector size, and matrix dimensions preserved.
3877 const TType& t = node->getType();
3879 return addConstantUnion(leftUnionArray, TType(promoteTo, t.getQualifier().storage, t.getVectorSize(), t.getMatrixCols(), t.getMatrixRows()),
// Attach a copy of the given pragma table to this aggregate node.
// May only be called once per node (asserts the table is not already set).
// The node owns the freshly allocated copy.
3883 void TIntermAggregate::setPragmaTable(const TPragmaTable& pTable)
3885 assert(pragmaTable == nullptr);
3886 pragmaTable = new TPragmaTable;
3887 *pragmaTable = pTable;
3890 // If either node is a specialization constant, while the other is
3891 // a constant (or specialization constant), the result is still
3892 // a specialization constant.
// Returns true when an operation combining node1 and node2 should itself be
// marked as a specialization constant. Note isConstant() is satisfied by spec
// constants too, so (specConst, specConst) pairs also return true.
3893 bool TIntermediate::specConstantPropagates(const TIntermTyped& node1, const TIntermTyped& node2)
3895 return (node1.getType().getQualifier().isSpecConstant() && node2.getType().getQualifier().isConstant()) ||
3896 (node2.getType().getQualifier().isSpecConstant() && node1.getType().getQualifier().isConstant());
// Traverser that (a) upgrades texture symbols to combined texture-samplers and
// (b) removes now-redundant pure-sampler variables from aggregate sequences,
// replacing EOpConstructTextureSampler constructors with their texture operand.
// NOTE(review): this excerpt is elided — the compaction bookkeeping (the
// 'write' index initialization/increment and the final sequence resize) is not
// visible here; confirm against the full source.
3899 struct TextureUpgradeAndSamplerRemovalTransform : public TIntermTraverser {
// Mark every texture-typed sampler symbol as a combined texture-sampler.
3900 void visitSymbol(TIntermSymbol* symbol) override {
3901 if (symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isTexture()) {
3902 symbol->getWritableType().getSampler().setCombined(true);
// Compact the aggregate's child sequence in place, dropping pure samplers.
3905 bool visitAggregate(TVisit, TIntermAggregate* ag) override {
3906 using namespace std;
3907 TIntermSequence& seq = ag->getSequence();
3908 TQualifierList& qual = ag->getQualifierList();
3910 // qual and seq are indexed using the same indices, so we have to modify both in lock-step
3911 assert(seq.size() == qual.size() || qual.empty());
3914 for (size_t i = 0; i < seq.size(); ++i) {
3915 TIntermSymbol* symbol = seq[i]->getAsSymbolNode();
3916 if (symbol && symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isPureSampler()) {
3917 // remove pure sampler variables
3921 TIntermNode* result = seq[i];
3923 // replace constructors with sampler/textures
// A texture-sampler constructor collapses to its first operand (the texture).
3924 TIntermAggregate *constructor = seq[i]->getAsAggregate();
3925 if (constructor && constructor->getOp() == EOpConstructTextureSampler) {
3926 if (!constructor->getSequence().empty())
3927 result = constructor->getSequence()[0];
3930 // write new node & qualifier
// Compaction: surviving entries are written back at the 'write' cursor so
// seq and qual stay aligned after removals.
3931 seq[write] = result;
3933 qual[write] = qual[i];
// Apply TextureUpgradeAndSamplerRemovalTransform over the whole tree rooted at
// 'root' (HLSL combined texture-sampler lowering entry point).
3945 void TIntermediate::performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root)
3947 TextureUpgradeAndSamplerRemovalTransform transform;
3948 root->traverse(&transform);
// Map a TResourceType to the name of its shift-binding command-line option.
// Asserts (and falls through, in the elided tail) on an invalid enum value —
// callers must pass a valid resource type.
3951 const char* TIntermediate::getResourceName(TResourceType res)
3954 case EResSampler: return "shift-sampler-binding";
3955 case EResTexture: return "shift-texture-binding";
3956 case EResImage: return "shift-image-binding";
3957 case EResUbo: return "shift-UBO-binding";
3958 case EResSsbo: return "shift-ssbo-binding";
3959 case EResUav: return "shift-uav-binding";
3961 assert(0); // internal error: should only be called with valid resource types.
3967 } // end namespace glslang