#endif
GenTreePtr Compiler::fgMorphSmpOp(GenTreePtr tree, MorphAddrContext* mac)
{
- // this extra scope is a workaround for a gcc bug
- // the inline destructor for ALLOCA_CHECK confuses the control
- // flow and gcc thinks that the function never returns
- {
- ALLOCA_CHECK();
- assert(tree->OperKind() & GTK_SMPOP);
-
- /* The steps in this function are :
- o Perform required preorder processing
- o Process the first, then second operand, if any
- o Perform required postorder morphing
- o Perform optional postorder morphing if optimizing
- */
+ ALLOCA_CHECK();
+ assert(tree->OperKind() & GTK_SMPOP);
+
+ /* The steps in this function are :
+ o Perform required preorder processing
+ o Process the first, then second operand, if any
+ o Perform required postorder morphing
+ o Perform optional postorder morphing if optimizing
+ */
- bool isQmarkColon = false;
+ bool isQmarkColon = false;
#if LOCAL_ASSERTION_PROP
- AssertionIndex origAssertionCount = DUMMY_INIT(0);
- AssertionDsc* origAssertionTab = DUMMY_INIT(NULL);
+ AssertionIndex origAssertionCount = DUMMY_INIT(0);
+ AssertionDsc* origAssertionTab = DUMMY_INIT(NULL);
- AssertionIndex thenAssertionCount = DUMMY_INIT(0);
- AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL);
+ AssertionIndex thenAssertionCount = DUMMY_INIT(0);
+ AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL);
#endif
- if (fgGlobalMorph)
- {
- tree = fgMorphForRegisterFP(tree);
- }
+ if (fgGlobalMorph)
+ {
+ tree = fgMorphForRegisterFP(tree);
+ }
- genTreeOps oper = tree->OperGet();
- var_types typ = tree->TypeGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2IfPresent();
+ genTreeOps oper = tree->OperGet();
+ var_types typ = tree->TypeGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2IfPresent();
- /*-------------------------------------------------------------------------
- * First do any PRE-ORDER processing
- */
+ /*-------------------------------------------------------------------------
+ * First do any PRE-ORDER processing
+ */
- switch (oper)
- {
- // Some arithmetic operators need to use a helper call to the EE
- int helper;
+ switch (oper)
+ {
+ // Some arithmetic operators need to use a helper call to the EE
+ int helper;
- case GT_ASG:
- tree = fgDoNormalizeOnStore(tree);
- /* fgDoNormalizeOnStore can change op2 */
- noway_assert(op1 == tree->gtOp.gtOp1);
- op2 = tree->gtOp.gtOp2;
+ case GT_ASG:
+ tree = fgDoNormalizeOnStore(tree);
+ /* fgDoNormalizeOnStore can change op2 */
+ noway_assert(op1 == tree->gtOp.gtOp1);
+ op2 = tree->gtOp.gtOp2;
#ifdef FEATURE_SIMD
- {
- // We should check whether op2 should be assigned to a SIMD field or not.
- // If it is, we should tranlate the tree to simd intrinsic.
- assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0));
- GenTreePtr newTree = fgMorphFieldAssignToSIMDIntrinsicSet(tree);
- typ = tree->TypeGet();
- op1 = tree->gtGetOp1();
- op2 = tree->gtGetOp2();
+ {
+ // We should check whether op2 should be assigned to a SIMD field or not.
+ // If it is, we should translate the tree to simd intrinsic.
+ assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0));
+ GenTreePtr newTree = fgMorphFieldAssignToSIMDIntrinsicSet(tree);
+ typ = tree->TypeGet();
+ op1 = tree->gtGetOp1();
+ op2 = tree->gtGetOp2();
#ifdef DEBUG
- assert((tree == newTree) && (tree->OperGet() == oper));
- if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0)
- {
- tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
- }
-#endif // DEBUG
+ assert((tree == newTree) && (tree->OperGet() == oper));
+ if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0)
+ {
+ tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
}
+#endif // DEBUG
+ }
#endif
- __fallthrough;
+ __fallthrough;
- case GT_ASG_ADD:
- case GT_ASG_SUB:
- case GT_ASG_MUL:
- case GT_ASG_DIV:
- case GT_ASG_MOD:
- case GT_ASG_UDIV:
- case GT_ASG_UMOD:
- case GT_ASG_OR:
- case GT_ASG_XOR:
- case GT_ASG_AND:
- case GT_ASG_LSH:
- case GT_ASG_RSH:
- case GT_ASG_RSZ:
- case GT_CHS:
-
- // We can't CSE the LHS of an assignment. Only r-values can be CSEed.
- // Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former
- // behavior, allow CSE'ing if is a struct type (or a TYP_REF transformed from a struct type)
- // TODO-1stClassStructs: improve this.
- if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
- {
- op1->gtFlags |= GTF_DONT_CSE;
- }
- break;
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ case GT_ASG_MUL:
+ case GT_ASG_DIV:
+ case GT_ASG_MOD:
+ case GT_ASG_UDIV:
+ case GT_ASG_UMOD:
+ case GT_ASG_OR:
+ case GT_ASG_XOR:
+ case GT_ASG_AND:
+ case GT_ASG_LSH:
+ case GT_ASG_RSH:
+ case GT_ASG_RSZ:
+ case GT_CHS:
+
+ // We can't CSE the LHS of an assignment. Only r-values can be CSEed.
+ // Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former
+ // behavior, allow CSE'ing if is a struct type (or a TYP_REF transformed from a struct type)
+ // TODO-1stClassStructs: improve this.
+ if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
+ {
+ op1->gtFlags |= GTF_DONT_CSE;
+ }
+ break;
- case GT_ADDR:
+ case GT_ADDR:
- /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
- op1->gtFlags |= GTF_DONT_CSE;
- break;
+ /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
+ op1->gtFlags |= GTF_DONT_CSE;
+ break;
- case GT_QMARK:
- case GT_JTRUE:
+ case GT_QMARK:
+ case GT_JTRUE:
- noway_assert(op1);
+ noway_assert(op1);
- if (op1->OperKind() & GTK_RELOP)
- {
- noway_assert((oper == GT_JTRUE) || (op1->gtFlags & GTF_RELOP_QMARK));
- /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
- not need to materialize the result as a 0 or 1. */
+ if (op1->OperKind() & GTK_RELOP)
+ {
+ noway_assert((oper == GT_JTRUE) || (op1->gtFlags & GTF_RELOP_QMARK));
+ /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
+ not need to materialize the result as a 0 or 1. */
- /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
- op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
+ /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
+ op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
- // Request that the codegen for op1 sets the condition flags
- // when it generates the code for op1.
- //
- // Codegen for op1 must set the condition flags if
- // this method returns true.
- //
- op1->gtRequestSetFlags();
- }
- else
- {
- GenTreePtr effOp1 = op1->gtEffectiveVal();
- noway_assert((effOp1->gtOper == GT_CNS_INT) &&
- (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
- }
- break;
+ // Request that the codegen for op1 sets the condition flags
+ // when it generates the code for op1.
+ //
+ // Codegen for op1 must set the condition flags if
+ // this method returns true.
+ //
+ op1->gtRequestSetFlags();
+ }
+ else
+ {
+ GenTreePtr effOp1 = op1->gtEffectiveVal();
+ noway_assert((effOp1->gtOper == GT_CNS_INT) &&
+ (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
+ }
+ break;
- case GT_COLON:
+ case GT_COLON:
#if LOCAL_ASSERTION_PROP
- if (optLocalAssertionProp)
+ if (optLocalAssertionProp)
#endif
- {
- isQmarkColon = true;
- }
- break;
+ {
+ isQmarkColon = true;
+ }
+ break;
- case GT_INDEX:
- return fgMorphArrayIndex(tree);
+ case GT_INDEX:
+ return fgMorphArrayIndex(tree);
- case GT_CAST:
- return fgMorphCast(tree);
+ case GT_CAST:
+ return fgMorphCast(tree);
- case GT_MUL:
+ case GT_MUL:
#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
- {
- /* For (long)int1 * (long)int2, we dont actually do the
- casts, and just multiply the 32 bit values, which will
- give us the 64 bit result in edx:eax */
+ if (typ == TYP_LONG)
+ {
+ /* For (long)int1 * (long)int2, we dont actually do the
+ casts, and just multiply the 32 bit values, which will
+ give us the 64 bit result in edx:eax */
- noway_assert(op2);
- if ((op1->gtOper == GT_CAST && op2->gtOper == GT_CAST &&
- genActualType(op1->CastFromType()) == TYP_INT &&
- genActualType(op2->CastFromType()) == TYP_INT) &&
- !op1->gtOverflow() && !op2->gtOverflow())
+ noway_assert(op2);
+ if ((op1->gtOper == GT_CAST && op2->gtOper == GT_CAST &&
+ genActualType(op1->CastFromType()) == TYP_INT && genActualType(op2->CastFromType()) == TYP_INT) &&
+ !op1->gtOverflow() && !op2->gtOverflow())
+ {
+ // The casts have to be of the same signedness.
+ if ((op1->gtFlags & GTF_UNSIGNED) != (op2->gtFlags & GTF_UNSIGNED))
{
- // The casts have to be of the same signedness.
- if ((op1->gtFlags & GTF_UNSIGNED) != (op2->gtFlags & GTF_UNSIGNED))
- {
- // We see if we can force an int constant to change its signedness
- GenTreePtr constOp;
- if (op1->gtCast.CastOp()->gtOper == GT_CNS_INT)
- constOp = op1;
- else if (op2->gtCast.CastOp()->gtOper == GT_CNS_INT)
- constOp = op2;
- else
- goto NO_MUL_64RSLT;
-
- if (((unsigned)(constOp->gtCast.CastOp()->gtIntCon.gtIconVal) < (unsigned)(0x80000000)))
- constOp->gtFlags ^= GTF_UNSIGNED;
- else
- goto NO_MUL_64RSLT;
- }
+ // We see if we can force an int constant to change its signedness
+ GenTreePtr constOp;
+ if (op1->gtCast.CastOp()->gtOper == GT_CNS_INT)
+ constOp = op1;
+ else if (op2->gtCast.CastOp()->gtOper == GT_CNS_INT)
+ constOp = op2;
+ else
+ goto NO_MUL_64RSLT;
- // The only combination that can overflow
- if (tree->gtOverflow() && (tree->gtFlags & GTF_UNSIGNED) && !(op1->gtFlags & GTF_UNSIGNED))
+ if (((unsigned)(constOp->gtCast.CastOp()->gtIntCon.gtIconVal) < (unsigned)(0x80000000)))
+ constOp->gtFlags ^= GTF_UNSIGNED;
+ else
goto NO_MUL_64RSLT;
+ }
- /* Remaining combinations can never overflow during long mul. */
+ // The only combination that can overflow
+ if (tree->gtOverflow() && (tree->gtFlags & GTF_UNSIGNED) && !(op1->gtFlags & GTF_UNSIGNED))
+ goto NO_MUL_64RSLT;
- tree->gtFlags &= ~GTF_OVERFLOW;
+ /* Remaining combinations can never overflow during long mul. */
- /* Do unsigned mul only if the casts were unsigned */
+ tree->gtFlags &= ~GTF_OVERFLOW;
- tree->gtFlags &= ~GTF_UNSIGNED;
- tree->gtFlags |= op1->gtFlags & GTF_UNSIGNED;
+ /* Do unsigned mul only if the casts were unsigned */
- /* Since we are committing to GTF_MUL_64RSLT, we don't want
- the casts to be folded away. So morph the castees directly */
+ tree->gtFlags &= ~GTF_UNSIGNED;
+ tree->gtFlags |= op1->gtFlags & GTF_UNSIGNED;
- op1->gtOp.gtOp1 = fgMorphTree(op1->gtOp.gtOp1);
- op2->gtOp.gtOp1 = fgMorphTree(op2->gtOp.gtOp1);
+ /* Since we are committing to GTF_MUL_64RSLT, we don't want
+ the casts to be folded away. So morph the castees directly */
- // Propagate side effect flags up the tree
- op1->gtFlags &= ~GTF_ALL_EFFECT;
- op1->gtFlags |= (op1->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
- op2->gtFlags &= ~GTF_ALL_EFFECT;
- op2->gtFlags |= (op2->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
+ op1->gtOp.gtOp1 = fgMorphTree(op1->gtOp.gtOp1);
+ op2->gtOp.gtOp1 = fgMorphTree(op2->gtOp.gtOp1);
- // If the GT_MUL can be altogether folded away, we should do that.
+ // Propagate side effect flags up the tree
+ op1->gtFlags &= ~GTF_ALL_EFFECT;
+ op1->gtFlags |= (op1->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
+ op2->gtFlags &= ~GTF_ALL_EFFECT;
+ op2->gtFlags |= (op2->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
- if ((op1->gtCast.CastOp()->OperKind() & op2->gtCast.CastOp()->OperKind() & GTK_CONST) &&
- opts.OptEnabled(CLFLG_CONSTANTFOLD))
- {
- tree->gtOp.gtOp1 = op1 = gtFoldExprConst(op1);
- tree->gtOp.gtOp2 = op2 = gtFoldExprConst(op2);
- noway_assert(op1->OperKind() & op2->OperKind() & GTK_CONST);
- tree = gtFoldExprConst(tree);
- noway_assert(tree->OperIsConst());
- return tree;
- }
+ // If the GT_MUL can be altogether folded away, we should do that.
- tree->gtFlags |= GTF_MUL_64RSLT;
+ if ((op1->gtCast.CastOp()->OperKind() & op2->gtCast.CastOp()->OperKind() & GTK_CONST) &&
+ opts.OptEnabled(CLFLG_CONSTANTFOLD))
+ {
+ tree->gtOp.gtOp1 = op1 = gtFoldExprConst(op1);
+ tree->gtOp.gtOp2 = op2 = gtFoldExprConst(op2);
+ noway_assert(op1->OperKind() & op2->OperKind() & GTK_CONST);
+ tree = gtFoldExprConst(tree);
+ noway_assert(tree->OperIsConst());
+ return tree;
+ }
- // If op1 and op2 are unsigned casts, we need to do an unsigned mult
- tree->gtFlags |= (op1->gtFlags & GTF_UNSIGNED);
+ tree->gtFlags |= GTF_MUL_64RSLT;
- // Insert GT_NOP nodes for the cast operands so that they do not get folded
- // And propagate the new flags. We don't want to CSE the casts because
- // codegen expects GTF_MUL_64RSLT muls to have a certain layout.
+ // If op1 and op2 are unsigned casts, we need to do an unsigned mult
+ tree->gtFlags |= (op1->gtFlags & GTF_UNSIGNED);
- if (op1->gtCast.CastOp()->OperGet() != GT_NOP)
- {
- op1->gtOp.gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op1->gtCast.CastOp());
- op1->gtFlags &= ~GTF_ALL_EFFECT;
- op1->gtFlags |= (op1->gtCast.CastOp()->gtFlags & GTF_ALL_EFFECT);
- }
+ // Insert GT_NOP nodes for the cast operands so that they do not get folded
+ // And propagate the new flags. We don't want to CSE the casts because
+ // codegen expects GTF_MUL_64RSLT muls to have a certain layout.
- if (op2->gtCast.CastOp()->OperGet() != GT_NOP)
- {
- op2->gtOp.gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op2->gtCast.CastOp());
- op2->gtFlags &= ~GTF_ALL_EFFECT;
- op2->gtFlags |= (op2->gtCast.CastOp()->gtFlags & GTF_ALL_EFFECT);
- }
+ if (op1->gtCast.CastOp()->OperGet() != GT_NOP)
+ {
+ op1->gtOp.gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op1->gtCast.CastOp());
+ op1->gtFlags &= ~GTF_ALL_EFFECT;
+ op1->gtFlags |= (op1->gtCast.CastOp()->gtFlags & GTF_ALL_EFFECT);
+ }
- op1->gtFlags |= GTF_DONT_CSE;
- op2->gtFlags |= GTF_DONT_CSE;
+ if (op2->gtCast.CastOp()->OperGet() != GT_NOP)
+ {
+ op2->gtOp.gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op2->gtCast.CastOp());
+ op2->gtFlags &= ~GTF_ALL_EFFECT;
+ op2->gtFlags |= (op2->gtCast.CastOp()->gtFlags & GTF_ALL_EFFECT);
+ }
- tree->gtFlags &= ~GTF_ALL_EFFECT;
- tree->gtFlags |= ((op1->gtFlags | op2->gtFlags) & GTF_ALL_EFFECT);
+ op1->gtFlags |= GTF_DONT_CSE;
+ op2->gtFlags |= GTF_DONT_CSE;
- goto DONE_MORPHING_CHILDREN;
- }
- else if ((tree->gtFlags & GTF_MUL_64RSLT) == 0)
- {
- NO_MUL_64RSLT:
- if (tree->gtOverflow())
- helper = (tree->gtFlags & GTF_UNSIGNED) ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
- else
- helper = CORINFO_HELP_LMUL;
+ tree->gtFlags &= ~GTF_ALL_EFFECT;
+ tree->gtFlags |= ((op1->gtFlags | op2->gtFlags) & GTF_ALL_EFFECT);
- goto USE_HELPER_FOR_ARITH;
- }
+ goto DONE_MORPHING_CHILDREN;
+ }
+ else if ((tree->gtFlags & GTF_MUL_64RSLT) == 0)
+ {
+ NO_MUL_64RSLT:
+ if (tree->gtOverflow())
+ helper = (tree->gtFlags & GTF_UNSIGNED) ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
else
- {
- /* We are seeing this node again. We have decided to use
- GTF_MUL_64RSLT, so leave it alone. */
+ helper = CORINFO_HELP_LMUL;
- assert(tree->gtIsValid64RsltMul());
- }
+ goto USE_HELPER_FOR_ARITH;
+ }
+ else
+ {
+ /* We are seeing this node again. We have decided to use
+ GTF_MUL_64RSLT, so leave it alone. */
+
+ assert(tree->gtIsValid64RsltMul());
}
+ }
#endif // !_TARGET_64BIT_
- break;
+ break;
- case GT_DIV:
+ case GT_DIV:
#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
- {
- helper = CORINFO_HELP_LDIV;
- goto USE_HELPER_FOR_ARITH;
- }
+ if (typ == TYP_LONG)
+ {
+ helper = CORINFO_HELP_LDIV;
+ goto USE_HELPER_FOR_ARITH;
+ }
#if USE_HELPERS_FOR_INT_DIV
- if (typ == TYP_INT && !fgIsSignedDivOptimizable(op2))
- {
- helper = CORINFO_HELP_DIV;
- goto USE_HELPER_FOR_ARITH;
- }
+ if (typ == TYP_INT && !fgIsSignedDivOptimizable(op2))
+ {
+ helper = CORINFO_HELP_DIV;
+ goto USE_HELPER_FOR_ARITH;
+ }
#endif
#endif // !_TARGET_64BIT_
#ifndef LEGACY_BACKEND
- if (op2->gtOper == GT_CAST && op2->gtOp.gtOp1->IsCnsIntOrI())
- {
- op2 = gtFoldExprConst(op2);
- }
+ if (op2->gtOper == GT_CAST && op2->gtOp.gtOp1->IsCnsIntOrI())
+ {
+ op2 = gtFoldExprConst(op2);
+ }
#endif // !LEGACY_BACKEND
- break;
+ break;
- case GT_UDIV:
+ case GT_UDIV:
#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
- {
- helper = CORINFO_HELP_ULDIV;
- goto USE_HELPER_FOR_ARITH;
- }
+ if (typ == TYP_LONG)
+ {
+ helper = CORINFO_HELP_ULDIV;
+ goto USE_HELPER_FOR_ARITH;
+ }
#if USE_HELPERS_FOR_INT_DIV
- if (typ == TYP_INT && !fgIsUnsignedDivOptimizable(op2))
- {
- helper = CORINFO_HELP_UDIV;
- goto USE_HELPER_FOR_ARITH;
- }
+ if (typ == TYP_INT && !fgIsUnsignedDivOptimizable(op2))
+ {
+ helper = CORINFO_HELP_UDIV;
+ goto USE_HELPER_FOR_ARITH;
+ }
#endif
#endif // _TARGET_64BIT_
- break;
+ break;
- case GT_MOD:
+ case GT_MOD:
- if (varTypeIsFloating(typ))
+ if (varTypeIsFloating(typ))
+ {
+ helper = CORINFO_HELP_DBLREM;
+ noway_assert(op2);
+ if (op1->TypeGet() == TYP_FLOAT)
{
- helper = CORINFO_HELP_DBLREM;
- noway_assert(op2);
- if (op1->TypeGet() == TYP_FLOAT)
+ if (op2->TypeGet() == TYP_FLOAT)
{
- if (op2->TypeGet() == TYP_FLOAT)
- {
- helper = CORINFO_HELP_FLTREM;
- }
- else
- {
- tree->gtOp.gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
- }
+ helper = CORINFO_HELP_FLTREM;
}
- else if (op2->TypeGet() == TYP_FLOAT)
+ else
{
- tree->gtOp.gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
+ tree->gtOp.gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
}
- goto USE_HELPER_FOR_ARITH;
}
+ else if (op2->TypeGet() == TYP_FLOAT)
+ {
+ tree->gtOp.gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
+ }
+ goto USE_HELPER_FOR_ARITH;
+ }
- // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
- // A similar optimization for signed mod will not work for a negative perfectly divisible
- // HI-word. To make it correct, we would need to divide without the sign and then flip the
- // result sign after mod. This requires 18 opcodes + flow making it not worthy to inline.
- goto ASSIGN_HELPER_FOR_MOD;
+ // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
+ // A similar optimization for signed mod will not work for a negative perfectly divisible
+ // HI-word. To make it correct, we would need to divide without the sign and then flip the
+ // result sign after mod. This requires 18 opcodes + flow making it not worthy to inline.
+ goto ASSIGN_HELPER_FOR_MOD;
- case GT_UMOD:
+ case GT_UMOD:
#ifdef _TARGET_ARMARCH_
//
// Note for _TARGET_ARMARCH_ we don't have a remainder instruction, so we don't do this optimization
//
#else // _TARGET_XARCH
- /* If this is an unsigned long mod with op2 which is a cast to long from a
- constant int, then don't morph to a call to the helper. This can be done
- faster inline using idiv.
- */
+ /* If this is an unsigned long mod with op2 which is a cast to long from a
+ constant int, then don't morph to a call to the helper. This can be done
+ faster inline using idiv.
+ */
- noway_assert(op2);
- if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD) &&
- ((tree->gtFlags & GTF_UNSIGNED) == (op1->gtFlags & GTF_UNSIGNED)) &&
- ((tree->gtFlags & GTF_UNSIGNED) == (op2->gtFlags & GTF_UNSIGNED)))
- {
- if (op2->gtOper == GT_CAST && op2->gtCast.CastOp()->gtOper == GT_CNS_INT &&
- op2->gtCast.CastOp()->gtIntCon.gtIconVal >= 2 &&
- op2->gtCast.CastOp()->gtIntCon.gtIconVal <= 0x3fffffff &&
- (tree->gtFlags & GTF_UNSIGNED) == (op2->gtCast.CastOp()->gtFlags & GTF_UNSIGNED))
- {
- tree->gtOp.gtOp2 = op2 = fgMorphCast(op2);
- noway_assert(op2->gtOper == GT_CNS_NATIVELONG);
- }
+ noway_assert(op2);
+ if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD) &&
+ ((tree->gtFlags & GTF_UNSIGNED) == (op1->gtFlags & GTF_UNSIGNED)) &&
+ ((tree->gtFlags & GTF_UNSIGNED) == (op2->gtFlags & GTF_UNSIGNED)))
+ {
+ if (op2->gtOper == GT_CAST && op2->gtCast.CastOp()->gtOper == GT_CNS_INT &&
+ op2->gtCast.CastOp()->gtIntCon.gtIconVal >= 2 &&
+ op2->gtCast.CastOp()->gtIntCon.gtIconVal <= 0x3fffffff &&
+ (tree->gtFlags & GTF_UNSIGNED) == (op2->gtCast.CastOp()->gtFlags & GTF_UNSIGNED))
+ {
+ tree->gtOp.gtOp2 = op2 = fgMorphCast(op2);
+ noway_assert(op2->gtOper == GT_CNS_NATIVELONG);
+ }
- if (op2->gtOper == GT_CNS_NATIVELONG && op2->gtIntConCommon.LngValue() >= 2 &&
- op2->gtIntConCommon.LngValue() <= 0x3fffffff)
- {
- tree->gtOp.gtOp1 = op1 = fgMorphTree(op1);
- noway_assert(op1->TypeGet() == TYP_LONG);
+ if (op2->gtOper == GT_CNS_NATIVELONG && op2->gtIntConCommon.LngValue() >= 2 &&
+ op2->gtIntConCommon.LngValue() <= 0x3fffffff)
+ {
+ tree->gtOp.gtOp1 = op1 = fgMorphTree(op1);
+ noway_assert(op1->TypeGet() == TYP_LONG);
- // Update flags for op1 morph
- tree->gtFlags &= ~GTF_ALL_EFFECT;
+ // Update flags for op1 morph
+ tree->gtFlags &= ~GTF_ALL_EFFECT;
- tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // Only update with op1 as op2 is a constant
+ tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // Only update with op1 as op2 is a constant
- // If op1 is a constant, then do constant folding of the division operator
- if (op1->gtOper == GT_CNS_NATIVELONG)
- {
- tree = gtFoldExpr(tree);
- }
- return tree;
+ // If op1 is a constant, then do constant folding of the division operator
+ if (op1->gtOper == GT_CNS_NATIVELONG)
+ {
+ tree = gtFoldExpr(tree);
}
+ return tree;
}
+ }
#endif // _TARGET_XARCH
- ASSIGN_HELPER_FOR_MOD:
+ ASSIGN_HELPER_FOR_MOD:
- // For "val % 1", return 0 if op1 doesn't have any side effects
- // and we are not in the CSE phase, we cannot discard 'tree'
- // because it may contain CSE expressions that we haven't yet examined.
- //
- if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
+ // For "val % 1", return 0 if op1 doesn't have any side effects
+ // and we are not in the CSE phase, we cannot discard 'tree'
+ // because it may contain CSE expressions that we haven't yet examined.
+ //
+ if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
+ {
+ if (op2->IsIntegralConst(1))
{
- if (op2->IsIntegralConst(1))
- {
- GenTreePtr zeroNode = gtNewZeroConNode(typ);
+ GenTreePtr zeroNode = gtNewZeroConNode(typ);
#ifdef DEBUG
- zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- DEBUG_DESTROY_NODE(tree);
- return zeroNode;
- }
+ DEBUG_DESTROY_NODE(tree);
+ return zeroNode;
}
+ }
#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
+ if (typ == TYP_LONG)
+ {
+ helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
+ goto USE_HELPER_FOR_ARITH;
+ }
+
+#if USE_HELPERS_FOR_INT_DIV
+ if (typ == TYP_INT)
+ {
+ if (oper == GT_UMOD && !fgIsUnsignedModOptimizable(op2))
{
- helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
+ helper = CORINFO_HELP_UMOD;
goto USE_HELPER_FOR_ARITH;
}
-
-#if USE_HELPERS_FOR_INT_DIV
- if (typ == TYP_INT)
+ else if (oper == GT_MOD && !fgIsSignedModOptimizable(op2))
{
- if (oper == GT_UMOD && !fgIsUnsignedModOptimizable(op2))
- {
- helper = CORINFO_HELP_UMOD;
- goto USE_HELPER_FOR_ARITH;
- }
- else if (oper == GT_MOD && !fgIsSignedModOptimizable(op2))
- {
- helper = CORINFO_HELP_MOD;
- goto USE_HELPER_FOR_ARITH;
- }
+ helper = CORINFO_HELP_MOD;
+ goto USE_HELPER_FOR_ARITH;
}
+ }
#endif
#endif // !_TARGET_64BIT_
#ifndef LEGACY_BACKEND
- if (op2->gtOper == GT_CAST && op2->gtOp.gtOp1->IsCnsIntOrI())
- {
- op2 = gtFoldExprConst(op2);
- }
+ if (op2->gtOper == GT_CAST && op2->gtOp.gtOp1->IsCnsIntOrI())
+ {
+ op2 = gtFoldExprConst(op2);
+ }
#ifdef _TARGET_ARM64_
- // For ARM64 we don't have a remainder instruction,
- // The architecture manual suggests the following transformation to
- // generate code for such operator:
- //
- // a % b = a - (a / b) * b;
- //
- // We will use the suggested transform except in the special case
- // when the modulo operation is unsigned and the divisor is a
- // integer constant power of two. In this case, we will rely on lower
- // to make the transform:
- //
- // a % b = a & (b - 1);
- //
- // Note: We must always perform one or the other of these transforms.
- // Therefore we must also detect the special cases where lower does not do the
- // % to & transform. In our case there is only currently one extra condition:
- //
- // * Dividend must not be constant. Lower disables this rare const % const case
- //
- {
- // Do "a % b = a - (a / b) * b" morph if ...........................
- bool doMorphModToSubMulDiv =
- (tree->OperGet() == GT_MOD) || // Modulo operation is signed
- !op2->IsIntegralConst() || // Divisor is not an integer constant
- !isPow2(op2->AsIntCon()->IconValue()) || // Divisor is not a power of two
- op1->IsCnsIntOrI(); // Dividend is constant
+ // For ARM64 we don't have a remainder instruction,
+ // The architecture manual suggests the following transformation to
+ // generate code for such operator:
+ //
+ // a % b = a - (a / b) * b;
+ //
+ // We will use the suggested transform except in the special case
+ // when the modulo operation is unsigned and the divisor is a
+ // integer constant power of two. In this case, we will rely on lower
+ // to make the transform:
+ //
+ // a % b = a & (b - 1);
+ //
+ // Note: We must always perform one or the other of these transforms.
+ // Therefore we must also detect the special cases where lower does not do the
+ // % to & transform. In our case there is only currently one extra condition:
+ //
+ // * Dividend must not be constant. Lower disables this rare const % const case
+ //
+ {
+ // Do "a % b = a - (a / b) * b" morph if ...........................
+ bool doMorphModToSubMulDiv = (tree->OperGet() == GT_MOD) || // Modulo operation is signed
+ !op2->IsIntegralConst() || // Divisor is not an integer constant
+ !isPow2(op2->AsIntCon()->IconValue()) || // Divisor is not a power of two
+ op1->IsCnsIntOrI(); // Dividend is constant
- if (doMorphModToSubMulDiv)
- {
- assert(!optValnumCSE_phase);
+ if (doMorphModToSubMulDiv)
+ {
+ assert(!optValnumCSE_phase);
- tree = fgMorphModToSubMulDiv(tree->AsOp());
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtOp.gtOp2;
- }
+ tree = fgMorphModToSubMulDiv(tree->AsOp());
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtOp.gtOp2;
}
+ }
#else // !_TARGET_ARM64_
- // If b is not a power of 2 constant then lowering replaces a % b
- // with a - (a / b) * b and applies magic division optimization to
- // a / b. The code may already contain an a / b expression (e.g.
- // x = a / 10; y = a % 10;) and then we end up with redundant code.
- // If we convert % to / here we give CSE the opportunity to eliminate
- // the redundant division. If there's no redundant division then
- // nothing is lost, lowering would have done this transform anyway.
+ // If b is not a power of 2 constant then lowering replaces a % b
+ // with a - (a / b) * b and applies magic division optimization to
+ // a / b. The code may already contain an a / b expression (e.g.
+ // x = a / 10; y = a % 10;) and then we end up with redundant code.
+ // If we convert % to / here we give CSE the opportunity to eliminate
+ // the redundant division. If there's no redundant division then
+ // nothing is lost, lowering would have done this transform anyway.
- if (!optValnumCSE_phase && ((tree->OperGet() == GT_MOD) && op2->IsIntegralConst()))
- {
- ssize_t divisorValue = op2->AsIntCon()->IconValue();
- size_t absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue)
- : static_cast<size_t>(abs(divisorValue));
+ if (!optValnumCSE_phase && ((tree->OperGet() == GT_MOD) && op2->IsIntegralConst()))
+ {
+ ssize_t divisorValue = op2->AsIntCon()->IconValue();
+ size_t absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue)
+ : static_cast<size_t>(abs(divisorValue));
- if (!isPow2(absDivisorValue))
- {
- tree = fgMorphModToSubMulDiv(tree->AsOp());
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtOp.gtOp2;
- }
+ if (!isPow2(absDivisorValue))
+ {
+ tree = fgMorphModToSubMulDiv(tree->AsOp());
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtOp.gtOp2;
}
+ }
#endif //_TARGET_ARM64_
#endif // !LEGACY_BACKEND
- break;
+ break;
- USE_HELPER_FOR_ARITH:
- {
- /* We have to morph these arithmetic operations into helper calls
- before morphing the arguments (preorder), else the arguments
- won't get correct values of fgPtrArgCntCur.
- However, try to fold the tree first in case we end up with a
- simple node which won't need a helper call at all */
+ USE_HELPER_FOR_ARITH:
+ {
+ /* We have to morph these arithmetic operations into helper calls
+ before morphing the arguments (preorder), else the arguments
+ won't get correct values of fgPtrArgCntCur.
+ However, try to fold the tree first in case we end up with a
+ simple node which won't need a helper call at all */
- noway_assert(tree->OperIsBinary());
+ noway_assert(tree->OperIsBinary());
- GenTreePtr oldTree = tree;
+ GenTreePtr oldTree = tree;
- tree = gtFoldExpr(tree);
+ tree = gtFoldExpr(tree);
- // Were we able to fold it ?
- // Note that gtFoldExpr may return a non-leaf even if successful
- // e.g. for something like "expr / 1" - see also bug #290853
- if (tree->OperIsLeaf() || (oldTree != tree))
- {
- return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
- }
+ // Were we able to fold it ?
+ // Note that gtFoldExpr may return a non-leaf even if successful
+ // e.g. for something like "expr / 1" - see also bug #290853
+ if (tree->OperIsLeaf() || (oldTree != tree))
+ {
+ return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
+ }
- // Did we fold it into a comma node with throw?
- if (tree->gtOper == GT_COMMA)
- {
- noway_assert(fgIsCommaThrow(tree));
- return fgMorphTree(tree);
- }
+ // Did we fold it into a comma node with throw?
+ if (tree->gtOper == GT_COMMA)
+ {
+ noway_assert(fgIsCommaThrow(tree));
+ return fgMorphTree(tree);
}
- return fgMorphIntoHelperCall(tree, helper, gtNewArgList(op1, op2));
+ }
+ return fgMorphIntoHelperCall(tree, helper, gtNewArgList(op1, op2));
- case GT_RETURN:
- // normalize small integer return values
- if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) &&
- (op1->TypeGet() != TYP_VOID) && fgCastNeeded(op1, info.compRetType))
- {
- // Small-typed return values are normalized by the callee
- op1 = gtNewCastNode(TYP_INT, op1, info.compRetType);
+ case GT_RETURN:
+ // normalize small integer return values
+ if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && (op1->TypeGet() != TYP_VOID) &&
+ fgCastNeeded(op1, info.compRetType))
+ {
+ // Small-typed return values are normalized by the callee
+ op1 = gtNewCastNode(TYP_INT, op1, info.compRetType);
- // Propagate GTF_COLON_COND
- op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
+ // Propagate GTF_COLON_COND
+ op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
- tree->gtOp.gtOp1 = fgMorphCast(op1);
+ tree->gtOp.gtOp1 = fgMorphCast(op1);
- // Propagate side effect flags
- tree->gtFlags &= ~GTF_ALL_EFFECT;
- tree->gtFlags |= (tree->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
+ // Propagate side effect flags
+ tree->gtFlags &= ~GTF_ALL_EFFECT;
+ tree->gtFlags |= (tree->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
- return tree;
- }
- break;
+ return tree;
+ }
+ break;
- case GT_EQ:
- case GT_NE:
+ case GT_EQ:
+ case GT_NE:
- // Check for typeof(...) == obj.GetType()
- // Also check for typeof(...) == typeof(...)
- // IMPORTANT NOTE: this optimization relies on a one-to-one mapping between
- // type handles and instances of System.Type
- // If this invariant is ever broken, the optimization will need updating
- CLANG_FORMAT_COMMENT_ANCHOR;
+ // Check for typeof(...) == obj.GetType()
+ // Also check for typeof(...) == typeof(...)
+ // IMPORTANT NOTE: this optimization relies on a one-to-one mapping between
+ // type handles and instances of System.Type
+ // If this invariant is ever broken, the optimization will need updating
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef LEGACY_BACKEND
- if (op1->gtOper == GT_CALL && op2->gtOper == GT_CALL &&
- ((op1->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) ||
- (op1->gtCall.gtCallType == CT_HELPER)) &&
- ((op2->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) ||
- (op2->gtCall.gtCallType == CT_HELPER)))
+ if (op1->gtOper == GT_CALL && op2->gtOper == GT_CALL &&
+ ((op1->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) ||
+ (op1->gtCall.gtCallType == CT_HELPER)) &&
+ ((op2->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) || (op2->gtCall.gtCallType == CT_HELPER)))
#else
- if ((((op1->gtOper == GT_INTRINSIC) &&
- (op1->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType)) ||
- ((op1->gtOper == GT_CALL) && (op1->gtCall.gtCallType == CT_HELPER))) &&
- (((op2->gtOper == GT_INTRINSIC) &&
- (op2->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType)) ||
- ((op2->gtOper == GT_CALL) && (op2->gtCall.gtCallType == CT_HELPER))))
+ if ((((op1->gtOper == GT_INTRINSIC) &&
+ (op1->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType)) ||
+ ((op1->gtOper == GT_CALL) && (op1->gtCall.gtCallType == CT_HELPER))) &&
+ (((op2->gtOper == GT_INTRINSIC) &&
+ (op2->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType)) ||
+ ((op2->gtOper == GT_CALL) && (op2->gtCall.gtCallType == CT_HELPER))))
#endif
- {
- GenTreePtr pGetClassFromHandle;
- GenTreePtr pGetType;
+ {
+ GenTreePtr pGetClassFromHandle;
+ GenTreePtr pGetType;
#ifdef LEGACY_BACKEND
- bool bOp1ClassFromHandle = gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall());
- bool bOp2ClassFromHandle = gtIsTypeHandleToRuntimeTypeHelper(op2->AsCall());
+ bool bOp1ClassFromHandle = gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall());
+ bool bOp2ClassFromHandle = gtIsTypeHandleToRuntimeTypeHelper(op2->AsCall());
#else
- bool bOp1ClassFromHandle =
- op1->gtOper == GT_CALL ? gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()) : false;
- bool bOp2ClassFromHandle =
- op2->gtOper == GT_CALL ? gtIsTypeHandleToRuntimeTypeHelper(op2->AsCall()) : false;
+ bool bOp1ClassFromHandle =
+ op1->gtOper == GT_CALL ? gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()) : false;
+ bool bOp2ClassFromHandle =
+ op2->gtOper == GT_CALL ? gtIsTypeHandleToRuntimeTypeHelper(op2->AsCall()) : false;
#endif
- // Optimize typeof(...) == typeof(...)
- // Typically this occurs in generic code that attempts a type switch
- // e.g. typeof(T) == typeof(int)
+ // Optimize typeof(...) == typeof(...)
+ // Typically this occurs in generic code that attempts a type switch
+ // e.g. typeof(T) == typeof(int)
- if (bOp1ClassFromHandle && bOp2ClassFromHandle)
- {
- JITDUMP("Optimizing compare of types-from-handles to instead compare handles\n");
+ if (bOp1ClassFromHandle && bOp2ClassFromHandle)
+ {
+ JITDUMP("Optimizing compare of types-from-handles to instead compare handles\n");
- GenTreePtr classFromHandleArg1 = tree->gtOp.gtOp1->gtCall.gtCallArgs->gtOp.gtOp1;
- GenTreePtr classFromHandleArg2 = tree->gtOp.gtOp2->gtCall.gtCallArgs->gtOp.gtOp1;
+ GenTreePtr classFromHandleArg1 = tree->gtOp.gtOp1->gtCall.gtCallArgs->gtOp.gtOp1;
+ GenTreePtr classFromHandleArg2 = tree->gtOp.gtOp2->gtCall.gtCallArgs->gtOp.gtOp1;
- GenTreePtr compare = gtNewOperNode(oper, TYP_INT, classFromHandleArg1, classFromHandleArg2);
+ GenTreePtr compare = gtNewOperNode(oper, TYP_INT, classFromHandleArg1, classFromHandleArg2);
- compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
+ compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
+
+ // Morph and return
+ return fgMorphTree(compare);
+ }
+ else if (bOp1ClassFromHandle || bOp2ClassFromHandle)
+ {
+ //
+ // Now check for GetClassFromHandle(handle) == obj.GetType()
+ //
- // Morph and return
- return fgMorphTree(compare);
+ if (bOp1ClassFromHandle)
+ {
+ pGetClassFromHandle = tree->gtOp.gtOp1;
+ pGetType = op2;
}
- else if (bOp1ClassFromHandle || bOp2ClassFromHandle)
+ else
{
- //
- // Now check for GetClassFromHandle(handle) == obj.GetType()
- //
-
- if (bOp1ClassFromHandle)
- {
- pGetClassFromHandle = tree->gtOp.gtOp1;
- pGetType = op2;
- }
- else
- {
- pGetClassFromHandle = tree->gtOp.gtOp2;
- pGetType = op1;
- }
+ pGetClassFromHandle = tree->gtOp.gtOp2;
+ pGetType = op1;
+ }
- GenTreePtr pGetClassFromHandleArgument = pGetClassFromHandle->gtCall.gtCallArgs->gtOp.gtOp1;
- GenTreePtr pConstLiteral = pGetClassFromHandleArgument;
+ GenTreePtr pGetClassFromHandleArgument = pGetClassFromHandle->gtCall.gtCallArgs->gtOp.gtOp1;
+ GenTreePtr pConstLiteral = pGetClassFromHandleArgument;
- // Unwrap GT_NOP node used to prevent constant folding
- if (pConstLiteral->gtOper == GT_NOP && pConstLiteral->gtType == TYP_I_IMPL)
- {
- pConstLiteral = pConstLiteral->gtOp.gtOp1;
- }
+ // Unwrap GT_NOP node used to prevent constant folding
+ if (pConstLiteral->gtOper == GT_NOP && pConstLiteral->gtType == TYP_I_IMPL)
+ {
+ pConstLiteral = pConstLiteral->gtOp.gtOp1;
+ }
- // In the ngen case, we have to go thru an indirection to get the right handle.
- if (pConstLiteral->gtOper == GT_IND)
- {
- pConstLiteral = pConstLiteral->gtOp.gtOp1;
- }
+ // In the ngen case, we have to go thru an indirection to get the right handle.
+ if (pConstLiteral->gtOper == GT_IND)
+ {
+ pConstLiteral = pConstLiteral->gtOp.gtOp1;
+ }
#ifdef LEGACY_BACKEND
- if (pGetType->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC &&
- info.compCompHnd->getIntrinsicID(pGetType->gtCall.gtCallMethHnd) ==
- CORINFO_INTRINSIC_Object_GetType &&
+ if (pGetType->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC &&
+ info.compCompHnd->getIntrinsicID(pGetType->gtCall.gtCallMethHnd) ==
+ CORINFO_INTRINSIC_Object_GetType &&
#else
- if ((pGetType->gtOper == GT_INTRINSIC) &&
- (pGetType->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType) &&
+ if ((pGetType->gtOper == GT_INTRINSIC) &&
+ (pGetType->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType) &&
#endif
- pConstLiteral->gtOper == GT_CNS_INT && pConstLiteral->gtType == TYP_I_IMPL)
- {
- CORINFO_CLASS_HANDLE clsHnd =
- CORINFO_CLASS_HANDLE(pConstLiteral->gtIntCon.gtCompileTimeHandle);
+ pConstLiteral->gtOper == GT_CNS_INT && pConstLiteral->gtType == TYP_I_IMPL)
+ {
+ CORINFO_CLASS_HANDLE clsHnd = CORINFO_CLASS_HANDLE(pConstLiteral->gtIntCon.gtCompileTimeHandle);
- if (info.compCompHnd->canInlineTypeCheckWithObjectVTable(clsHnd))
- {
- // Fetch object method table from the object itself
- JITDUMP("Optimizing compare of obj.GetType()"
- " and type-from-handle to compare handles\n");
+ if (info.compCompHnd->canInlineTypeCheckWithObjectVTable(clsHnd))
+ {
+ // Fetch object method table from the object itself
+ JITDUMP("Optimizing compare of obj.GetType()"
+ " and type-from-handle to compare handles\n");
- // Method table constant
- GenTree* cnsMT = pGetClassFromHandleArgument;
+ // Method table constant
+ GenTree* cnsMT = pGetClassFromHandleArgument;
#ifdef LEGACY_BACKEND
- // Method table from object
- GenTree* objMT = gtNewOperNode(GT_IND, TYP_I_IMPL, pGetType->gtCall.gtCallObjp);
+ // Method table from object
+ GenTree* objMT = gtNewOperNode(GT_IND, TYP_I_IMPL, pGetType->gtCall.gtCallObjp);
#else
- // Method table from object
- GenTree* objMT = gtNewOperNode(GT_IND, TYP_I_IMPL, pGetType->gtUnOp.gtOp1);
+ // Method table from object
+ GenTree* objMT = gtNewOperNode(GT_IND, TYP_I_IMPL, pGetType->gtUnOp.gtOp1);
#endif
- objMT->gtFlags |= GTF_EXCEPT; // Null ref exception if object is null
- compCurBB->bbFlags |= BBF_HAS_VTABREF;
- optMethodFlags |= OMF_HAS_VTABLEREF;
+ objMT->gtFlags |= GTF_EXCEPT; // Null ref exception if object is null
+ compCurBB->bbFlags |= BBF_HAS_VTABREF;
+ optMethodFlags |= OMF_HAS_VTABLEREF;
- GenTreePtr compare = gtNewOperNode(oper, TYP_INT, objMT, cnsMT);
+ GenTreePtr compare = gtNewOperNode(oper, TYP_INT, objMT, cnsMT);
- compare->gtFlags |=
- tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
+ compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
- // Morph and return
- return fgMorphTree(compare);
- }
+ // Morph and return
+ return fgMorphTree(compare);
}
}
}
+ }
- __fallthrough;
+ __fallthrough;
- case GT_GT:
+ case GT_GT:
- // Try to optimize away calls to CORINFO_HELP_BOX_NULLABLE for GT_EQ, GT_NE, and unsigned GT_GT.
- if ((oper != GT_GT) || tree->IsUnsigned())
- {
- fgMorphRecognizeBoxNullable(tree);
- }
+ // Try to optimize away calls to CORINFO_HELP_BOX_NULLABLE for GT_EQ, GT_NE, and unsigned GT_GT.
+ if ((oper != GT_GT) || tree->IsUnsigned())
+ {
+ fgMorphRecognizeBoxNullable(tree);
+ }
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtGetOp2IfPresent();
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtGetOp2IfPresent();
- break;
+ break;
#ifdef _TARGET_ARM_
- case GT_INTRINSIC:
- if (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round)
+ case GT_INTRINSIC:
+ if (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round)
+ {
+ switch (tree->TypeGet())
{
- switch (tree->TypeGet())
- {
- case TYP_DOUBLE:
- return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewArgList(op1));
- case TYP_FLOAT:
- return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewArgList(op1));
- default:
- unreached();
- }
+ case TYP_DOUBLE:
+ return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewArgList(op1));
+ case TYP_FLOAT:
+ return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewArgList(op1));
+ default:
+ unreached();
}
- break;
+ }
+ break;
#endif
- default:
- break;
- }
+ default:
+ break;
+ }
#if !CPU_HAS_FP_SUPPORT
- tree = fgMorphToEmulatedFP(tree);
+ tree = fgMorphToEmulatedFP(tree);
#endif
- /*-------------------------------------------------------------------------
- * Process the first operand, if any
- */
+ /*-------------------------------------------------------------------------
+ * Process the first operand, if any
+ */
- if (op1)
- {
+ if (op1)
+ {
#if LOCAL_ASSERTION_PROP
- // If we are entering the "then" part of a Qmark-Colon we must
- // save the state of the current copy assignment table
- // so that we can restore this state when entering the "else" part
- if (isQmarkColon)
+ // If we are entering the "then" part of a Qmark-Colon we must
+ // save the state of the current copy assignment table
+ // so that we can restore this state when entering the "else" part
+ if (isQmarkColon)
+ {
+ noway_assert(optLocalAssertionProp);
+ if (optAssertionCount)
{
- noway_assert(optLocalAssertionProp);
- if (optAssertionCount)
- {
- noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
- unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
- origAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
- origAssertionCount = optAssertionCount;
- memcpy(origAssertionTab, optAssertionTabPrivate, tabSize);
- }
- else
- {
- origAssertionCount = 0;
- origAssertionTab = nullptr;
- }
+ noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
+ unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
+ origAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
+ origAssertionCount = optAssertionCount;
+ memcpy(origAssertionTab, optAssertionTabPrivate, tabSize);
+ }
+ else
+ {
+ origAssertionCount = 0;
+ origAssertionTab = nullptr;
}
+ }
#endif // LOCAL_ASSERTION_PROP
- // We might need a new MorphAddressContext context. (These are used to convey
- // parent context about how addresses being calculated will be used; see the
- // specification comment for MorphAddrContext for full details.)
- // Assume it's an Ind context to start.
- MorphAddrContext subIndMac1(MACK_Ind);
- MorphAddrContext* subMac1 = mac;
- if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind)
+ // We might need a new MorphAddressContext context. (These are used to convey
+ // parent context about how addresses being calculated will be used; see the
+ // specification comment for MorphAddrContext for full details.)
+ // Assume it's an Ind context to start.
+ MorphAddrContext subIndMac1(MACK_Ind);
+ MorphAddrContext* subMac1 = mac;
+ if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind)
+ {
+ switch (tree->gtOper)
{
- switch (tree->gtOper)
- {
- case GT_ADDR:
- if (subMac1 == nullptr)
- {
- subMac1 = &subIndMac1;
- subMac1->m_kind = MACK_Addr;
- }
- break;
- case GT_COMMA:
- // In a comma, the incoming context only applies to the rightmost arg of the
- // comma list. The left arg (op1) gets a fresh context.
- subMac1 = nullptr;
- break;
- case GT_OBJ:
- case GT_BLK:
- case GT_DYN_BLK:
- case GT_IND:
- subMac1 = &subIndMac1;
- break;
- default:
- break;
- }
+ case GT_ADDR:
+ if (subMac1 == nullptr)
+ {
+ subMac1 = &subIndMac1;
+ subMac1->m_kind = MACK_Addr;
+ }
+ break;
+ case GT_COMMA:
+ // In a comma, the incoming context only applies to the rightmost arg of the
+ // comma list. The left arg (op1) gets a fresh context.
+ subMac1 = nullptr;
+ break;
+ case GT_OBJ:
+ case GT_BLK:
+ case GT_DYN_BLK:
+ case GT_IND:
+ subMac1 = &subIndMac1;
+ break;
+ default:
+ break;
}
+ }
- // For additions, if we're in an IND context keep track of whether
- // all offsets added to the address are constant, and their sum.
- if (tree->gtOper == GT_ADD && subMac1 != nullptr)
+ // For additions, if we're in an IND context keep track of whether
+ // all offsets added to the address are constant, and their sum.
+ if (tree->gtOper == GT_ADD && subMac1 != nullptr)
+ {
+ assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock.
+ GenTreePtr otherOp = tree->gtOp.gtOp2;
+ // Is the other operator a constant?
+ if (otherOp->IsCnsIntOrI())
{
- assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock.
- GenTreePtr otherOp = tree->gtOp.gtOp2;
- // Is the other operator a constant?
- if (otherOp->IsCnsIntOrI())
+ ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset);
+ totalOffset += otherOp->gtIntConCommon.IconValue();
+ if (totalOffset.IsOverflow())
{
- ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset);
- totalOffset += otherOp->gtIntConCommon.IconValue();
- if (totalOffset.IsOverflow())
- {
- // We will consider an offset so large as to overflow as "not a constant" --
- // we will do a null check.
- subMac1->m_allConstantOffsets = false;
- }
- else
- {
- subMac1->m_totalOffset += otherOp->gtIntConCommon.IconValue();
- }
+ // We will consider an offset so large as to overflow as "not a constant" --
+ // we will do a null check.
+ subMac1->m_allConstantOffsets = false;
}
else
{
- subMac1->m_allConstantOffsets = false;
+ subMac1->m_totalOffset += otherOp->gtIntConCommon.IconValue();
}
}
-
- // If gtOp1 is a GT_FIELD, we need to pass down the mac if
- // its parent is GT_ADDR, since the address of the field
- // is part of an ongoing address computation. Otherwise
- // op1 represents the value of the field and so any address
- // calculations it does are in a new context.
- if ((op1->gtOper == GT_FIELD) && (tree->gtOper != GT_ADDR))
+ else
{
- subMac1 = nullptr;
-
- // The impact of this field's value to any ongoing
- // address computation is handled below when looking
- // at op2.
+ subMac1->m_allConstantOffsets = false;
}
+ }
- tree->gtOp.gtOp1 = op1 = fgMorphTree(op1, subMac1);
+ // If gtOp1 is a GT_FIELD, we need to pass down the mac if
+ // its parent is GT_ADDR, since the address of the field
+ // is part of an ongoing address computation. Otherwise
+ // op1 represents the value of the field and so any address
+ // calculations it does are in a new context.
+ if ((op1->gtOper == GT_FIELD) && (tree->gtOper != GT_ADDR))
+ {
+ subMac1 = nullptr;
+
+ // The impact of this field's value to any ongoing
+ // address computation is handled below when looking
+ // at op2.
+ }
+
+ tree->gtOp.gtOp1 = op1 = fgMorphTree(op1, subMac1);
#if LOCAL_ASSERTION_PROP
- // If we are exiting the "then" part of a Qmark-Colon we must
- // save the state of the current copy assignment table
- // so that we can merge this state with the "else" part exit
- if (isQmarkColon)
+ // If we are exiting the "then" part of a Qmark-Colon we must
+ // save the state of the current copy assignment table
+ // so that we can merge this state with the "else" part exit
+ if (isQmarkColon)
+ {
+ noway_assert(optLocalAssertionProp);
+ if (optAssertionCount)
{
- noway_assert(optLocalAssertionProp);
- if (optAssertionCount)
- {
- noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
- unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
- thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
- thenAssertionCount = optAssertionCount;
- memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize);
- }
- else
- {
- thenAssertionCount = 0;
- thenAssertionTab = nullptr;
- }
+ noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
+ unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
+ thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
+ thenAssertionCount = optAssertionCount;
+ memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize);
+ }
+ else
+ {
+ thenAssertionCount = 0;
+ thenAssertionTab = nullptr;
}
+ }
#endif // LOCAL_ASSERTION_PROP
- /* Morphing along with folding and inlining may have changed the
- * side effect flags, so we have to reset them
- *
- * NOTE: Don't reset the exception flags on nodes that may throw */
+ /* Morphing along with folding and inlining may have changed the
+ * side effect flags, so we have to reset them
+ *
+ * NOTE: Don't reset the exception flags on nodes that may throw */
- assert(tree->gtOper != GT_CALL);
+ assert(tree->gtOper != GT_CALL);
- if ((tree->gtOper != GT_INTRINSIC) || !IsIntrinsicImplementedByUserCall(tree->gtIntrinsic.gtIntrinsicId))
- {
- tree->gtFlags &= ~GTF_CALL;
- }
+ if ((tree->gtOper != GT_INTRINSIC) || !IsIntrinsicImplementedByUserCall(tree->gtIntrinsic.gtIntrinsicId))
+ {
+ tree->gtFlags &= ~GTF_CALL;
+ }
- /* Propagate the new flags */
- tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
+ /* Propagate the new flags */
+ tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
- // &aliasedVar doesn't need GTF_GLOB_REF, though alisasedVar does
- // Similarly for clsVar
- if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR))
- {
- tree->gtFlags &= ~GTF_GLOB_REF;
- }
- } // if (op1)
+ // &aliasedVar doesn't need GTF_GLOB_REF, though alisasedVar does
+ // Similarly for clsVar
+ if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR))
+ {
+ tree->gtFlags &= ~GTF_GLOB_REF;
+ }
+ } // if (op1)
- /*-------------------------------------------------------------------------
- * Process the second operand, if any
- */
+ /*-------------------------------------------------------------------------
+ * Process the second operand, if any
+ */
- if (op2)
- {
+ if (op2)
+ {
#if LOCAL_ASSERTION_PROP
- // If we are entering the "else" part of a Qmark-Colon we must
- // reset the state of the current copy assignment table
- if (isQmarkColon)
+ // If we are entering the "else" part of a Qmark-Colon we must
+ // reset the state of the current copy assignment table
+ if (isQmarkColon)
+ {
+ noway_assert(optLocalAssertionProp);
+ optAssertionReset(0);
+ if (origAssertionCount)
{
- noway_assert(optLocalAssertionProp);
- optAssertionReset(0);
- if (origAssertionCount)
- {
- size_t tabSize = origAssertionCount * sizeof(AssertionDsc);
- memcpy(optAssertionTabPrivate, origAssertionTab, tabSize);
- optAssertionReset(origAssertionCount);
- }
+ size_t tabSize = origAssertionCount * sizeof(AssertionDsc);
+ memcpy(optAssertionTabPrivate, origAssertionTab, tabSize);
+ optAssertionReset(origAssertionCount);
}
+ }
#endif // LOCAL_ASSERTION_PROP
- // We might need a new MorphAddressContext context to use in evaluating op2.
- // (These are used to convey parent context about how addresses being calculated
- // will be used; see the specification comment for MorphAddrContext for full details.)
- // Assume it's an Ind context to start.
- switch (tree->gtOper)
- {
- case GT_ADD:
- if (mac != nullptr && mac->m_kind == MACK_Ind)
+ // We might need a new MorphAddressContext context to use in evaluating op2.
+ // (These are used to convey parent context about how addresses being calculated
+ // will be used; see the specification comment for MorphAddrContext for full details.)
+ // Assume it's an Ind context to start.
+ switch (tree->gtOper)
+ {
+ case GT_ADD:
+ if (mac != nullptr && mac->m_kind == MACK_Ind)
+ {
+ GenTreePtr otherOp = tree->gtOp.gtOp1;
+ // Is the other operator a constant?
+ if (otherOp->IsCnsIntOrI())
{
- GenTreePtr otherOp = tree->gtOp.gtOp1;
- // Is the other operator a constant?
- if (otherOp->IsCnsIntOrI())
- {
- mac->m_totalOffset += otherOp->gtIntConCommon.IconValue();
- }
- else
- {
- mac->m_allConstantOffsets = false;
- }
+ mac->m_totalOffset += otherOp->gtIntConCommon.IconValue();
}
- break;
- default:
- break;
- }
+ else
+ {
+ mac->m_allConstantOffsets = false;
+ }
+ }
+ break;
+ default:
+ break;
+ }
- // If gtOp2 is a GT_FIELD, we must be taking its value,
- // so it should evaluate its address in a new context.
- if (op2->gtOper == GT_FIELD)
- {
- // The impact of this field's value to any ongoing
- // address computation is handled above when looking
- // at op1.
- mac = nullptr;
- }
+ // If gtOp2 is a GT_FIELD, we must be taking its value,
+ // so it should evaluate its address in a new context.
+ if (op2->gtOper == GT_FIELD)
+ {
+ // The impact of this field's value to any ongoing
+ // address computation is handled above when looking
+ // at op1.
+ mac = nullptr;
+ }
- tree->gtOp.gtOp2 = op2 = fgMorphTree(op2, mac);
+ tree->gtOp.gtOp2 = op2 = fgMorphTree(op2, mac);
- /* Propagate the side effect flags from op2 */
+ /* Propagate the side effect flags from op2 */
- tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
+ tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
#if LOCAL_ASSERTION_PROP
- // If we are exiting the "else" part of a Qmark-Colon we must
- // merge the state of the current copy assignment table with
- // that of the exit of the "then" part.
- if (isQmarkColon)
+ // If we are exiting the "else" part of a Qmark-Colon we must
+ // merge the state of the current copy assignment table with
+ // that of the exit of the "then" part.
+ if (isQmarkColon)
+ {
+ noway_assert(optLocalAssertionProp);
+ // If either exit table has zero entries then
+ // the merged table also has zero entries
+ if (optAssertionCount == 0 || thenAssertionCount == 0)
{
- noway_assert(optLocalAssertionProp);
- // If either exit table has zero entries then
- // the merged table also has zero entries
- if (optAssertionCount == 0 || thenAssertionCount == 0)
- {
- optAssertionReset(0);
- }
- else
+ optAssertionReset(0);
+ }
+ else
+ {
+ size_t tabSize = optAssertionCount * sizeof(AssertionDsc);
+ if ((optAssertionCount != thenAssertionCount) ||
+ (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0))
{
- size_t tabSize = optAssertionCount * sizeof(AssertionDsc);
- if ((optAssertionCount != thenAssertionCount) ||
- (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0))
+ // Yes they are different so we have to find the merged set
+ // Iterate over the copy asgn table removing any entries
+ // that do not have an exact match in the thenAssertionTab
+ AssertionIndex index = 1;
+ while (index <= optAssertionCount)
{
- // Yes they are different so we have to find the merged set
- // Iterate over the copy asgn table removing any entries
- // that do not have an exact match in the thenAssertionTab
- AssertionIndex index = 1;
- while (index <= optAssertionCount)
+ AssertionDsc* curAssertion = optGetAssertion(index);
+
+ for (unsigned j = 0; j < thenAssertionCount; j++)
{
- AssertionDsc* curAssertion = optGetAssertion(index);
+ AssertionDsc* thenAssertion = &thenAssertionTab[j];
- for (unsigned j = 0; j < thenAssertionCount; j++)
+ // Do the left sides match?
+ if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) &&
+ (curAssertion->assertionKind == thenAssertion->assertionKind))
{
- AssertionDsc* thenAssertion = &thenAssertionTab[j];
-
- // Do the left sides match?
- if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) &&
- (curAssertion->assertionKind == thenAssertion->assertionKind))
+ // Do the right sides match?
+ if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
+ (curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
{
- // Do the right sides match?
- if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
- (curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
- {
- goto KEEP;
- }
- else
- {
- goto REMOVE;
- }
+ goto KEEP;
+ }
+ else
+ {
+ goto REMOVE;
}
}
- //
- // If we fall out of the loop above then we didn't find
- // any matching entry in the thenAssertionTab so it must
- // have been killed on that path so we remove it here
- //
- REMOVE:
- // The data at optAssertionTabPrivate[i] is to be removed
- CLANG_FORMAT_COMMENT_ANCHOR;
+ }
+ //
+ // If we fall out of the loop above then we didn't find
+ // any matching entry in the thenAssertionTab so it must
+ // have been killed on that path so we remove it here
+ //
+ REMOVE:
+ // The data at optAssertionTabPrivate[i] is to be removed
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- if (verbose)
- {
- printf("The QMARK-COLON ");
- printTreeID(tree);
- printf(" removes assertion candidate #%d\n", index);
- }
-#endif
- optAssertionRemove(index);
- continue;
- KEEP:
- // The data at optAssertionTabPrivate[i] is to be kept
- index++;
+ if (verbose)
+ {
+ printf("The QMARK-COLON ");
+ printTreeID(tree);
+ printf(" removes assertion candidate #%d\n", index);
}
+#endif
+ optAssertionRemove(index);
+ continue;
+ KEEP:
+ // The data at optAssertionTabPrivate[i] is to be kept
+ index++;
}
}
}
-#endif // LOCAL_ASSERTION_PROP
- } // if (op2)
+ }
+#endif // LOCAL_ASSERTION_PROP
+ } // if (op2)
- DONE_MORPHING_CHILDREN:
+DONE_MORPHING_CHILDREN:
- if (tree->OperMayThrow(this))
+ if (tree->OperMayThrow(this))
+ {
+ // Mark the tree node as potentially throwing an exception
+ tree->gtFlags |= GTF_EXCEPT;
+ }
+ else
+ {
+ if (tree->OperIsIndirOrArrLength())
{
- // Mark the tree node as potentially throwing an exception
- tree->gtFlags |= GTF_EXCEPT;
+ tree->gtFlags |= GTF_IND_NONFAULTING;
}
- else
+ if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) &&
+ ((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0)))
{
- if (tree->OperIsIndirOrArrLength())
- {
- tree->gtFlags |= GTF_IND_NONFAULTING;
- }
- if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) &&
- ((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0)))
- {
- tree->gtFlags &= ~GTF_EXCEPT;
- }
+ tree->gtFlags &= ~GTF_EXCEPT;
}
+ }
- if (tree->OperRequiresAsgFlag())
- {
- tree->gtFlags |= GTF_ASG;
- }
- else
+ if (tree->OperRequiresAsgFlag())
+ {
+ tree->gtFlags |= GTF_ASG;
+ }
+ else
+ {
+ if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) &&
+ ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
{
- if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) &&
- ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
- {
- tree->gtFlags &= ~GTF_ASG;
- }
+ tree->gtFlags &= ~GTF_ASG;
}
+ }
/*-------------------------------------------------------------------------
* Now do POST-ORDER processing
*/
#if FEATURE_FIXED_OUT_ARGS && !defined(_TARGET_64BIT_)
- // Variable shifts of a long end up being helper calls, so mark the tree as such. This
- // is potentially too conservative, since they'll get treated as having side effects.
- // It is important to mark them as calls so if they are part of an argument list,
- // they will get sorted and processed properly (for example, it is important to handle
- // all nested calls before putting struct arguments in the argument registers). We
- // could mark the trees just before argument processing, but it would require a full
- // tree walk of the argument tree, so we just do it here, instead, even though we'll
- // mark non-argument trees (that will still get converted to calls, anyway).
- if (GenTree::OperIsShift(oper) && (tree->TypeGet() == TYP_LONG) && (op2->OperGet() != GT_CNS_INT))
- {
- tree->gtFlags |= GTF_CALL;
- }
+ // Variable shifts of a long end up being helper calls, so mark the tree as such. This
+ // is potentially too conservative, since they'll get treated as having side effects.
+ // It is important to mark them as calls so if they are part of an argument list,
+ // they will get sorted and processed properly (for example, it is important to handle
+ // all nested calls before putting struct arguments in the argument registers). We
+ // could mark the trees just before argument processing, but it would require a full
+ // tree walk of the argument tree, so we just do it here, instead, even though we'll
+ // mark non-argument trees (that will still get converted to calls, anyway).
+ if (GenTree::OperIsShift(oper) && (tree->TypeGet() == TYP_LONG) && (op2->OperGet() != GT_CNS_INT))
+ {
+ tree->gtFlags |= GTF_CALL;
+ }
#endif // FEATURE_FIXED_OUT_ARGS && !_TARGET_64BIT_
- if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) &&
- (op2 && !varTypeIsGC(op2->TypeGet())))
- {
- // The tree is really not GC but was marked as such. Now that the
- // children have been unmarked, unmark the tree too.
+ if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet())))
+ {
+ // The tree is really not GC but was marked as such. Now that the
+ // children have been unmarked, unmark the tree too.
- // Remember that GT_COMMA inherits it's type only from op2
- if (tree->gtOper == GT_COMMA)
- {
- tree->gtType = genActualType(op2->TypeGet());
- }
- else
- {
- tree->gtType = genActualType(op1->TypeGet());
- }
+ // Remember that GT_COMMA inherits it's type only from op2
+ if (tree->gtOper == GT_COMMA)
+ {
+ tree->gtType = genActualType(op2->TypeGet());
}
-
- GenTreePtr oldTree = tree;
-
- GenTreePtr qmarkOp1 = nullptr;
- GenTreePtr qmarkOp2 = nullptr;
-
- if ((tree->OperGet() == GT_QMARK) && (tree->gtOp.gtOp2->OperGet() == GT_COLON))
+ else
{
- qmarkOp1 = oldTree->gtOp.gtOp2->gtOp.gtOp1;
- qmarkOp2 = oldTree->gtOp.gtOp2->gtOp.gtOp2;
+ tree->gtType = genActualType(op1->TypeGet());
}
+ }
- // Try to fold it, maybe we get lucky,
- tree = gtFoldExpr(tree);
+ GenTreePtr oldTree = tree;
- if (oldTree != tree)
- {
- /* if gtFoldExpr returned op1 or op2 then we are done */
- if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
- {
- return tree;
- }
+ GenTreePtr qmarkOp1 = nullptr;
+ GenTreePtr qmarkOp2 = nullptr;
- /* If we created a comma-throw tree then we need to morph op1 */
- if (fgIsCommaThrow(tree))
- {
- tree->gtOp.gtOp1 = fgMorphTree(tree->gtOp.gtOp1);
- fgMorphTreeDone(tree);
- return tree;
- }
+ if ((tree->OperGet() == GT_QMARK) && (tree->gtOp.gtOp2->OperGet() == GT_COLON))
+ {
+ qmarkOp1 = oldTree->gtOp.gtOp2->gtOp.gtOp1;
+ qmarkOp2 = oldTree->gtOp.gtOp2->gtOp.gtOp2;
+ }
+
+ // Try to fold it, maybe we get lucky,
+ tree = gtFoldExpr(tree);
+ if (oldTree != tree)
+ {
+ /* if gtFoldExpr returned op1 or op2 then we are done */
+ if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
+ {
return tree;
}
- else if (tree->OperKind() & GTK_CONST)
+
+ /* If we created a comma-throw tree then we need to morph op1 */
+ if (fgIsCommaThrow(tree))
{
+ tree->gtOp.gtOp1 = fgMorphTree(tree->gtOp.gtOp1);
+ fgMorphTreeDone(tree);
return tree;
}
- /* gtFoldExpr could have used setOper to change the oper */
- oper = tree->OperGet();
- typ = tree->TypeGet();
+ return tree;
+ }
+ else if (tree->OperKind() & GTK_CONST)
+ {
+ return tree;
+ }
- /* gtFoldExpr could have changed op1 and op2 */
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtGetOp2IfPresent();
+ /* gtFoldExpr could have used setOper to change the oper */
+ oper = tree->OperGet();
+ typ = tree->TypeGet();
- // Do we have an integer compare operation?
+ /* gtFoldExpr could have changed op1 and op2 */
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtGetOp2IfPresent();
+
+ // Do we have an integer compare operation?
+ //
+ if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
+ {
+ // Are we comparing against zero?
//
- if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
+ if (op2->IsIntegralConst(0))
{
- // Are we comparing against zero?
+ // Request that the codegen for op1 sets the condition flags
+ // when it generates the code for op1.
//
- if (op2->IsIntegralConst(0))
- {
- // Request that the codegen for op1 sets the condition flags
- // when it generates the code for op1.
- //
- // Codegen for op1 must set the condition flags if
- // this method returns true.
- //
- op1->gtRequestSetFlags();
- }
+ // Codegen for op1 must set the condition flags if
+ // this method returns true.
+ //
+ op1->gtRequestSetFlags();
}
- /*-------------------------------------------------------------------------
- * Perform the required oper-specific postorder morphing
- */
+ }
+ /*-------------------------------------------------------------------------
+ * Perform the required oper-specific postorder morphing
+ */
- GenTreePtr temp;
- GenTreePtr cns1, cns2;
- GenTreePtr thenNode;
- GenTreePtr elseNode;
- size_t ival1, ival2;
- GenTreePtr lclVarTree;
- GenTreeLclVarCommon* lclVarCmnTree;
- FieldSeqNode* fieldSeq = nullptr;
+ GenTreePtr temp;
+ GenTreePtr cns1, cns2;
+ GenTreePtr thenNode;
+ GenTreePtr elseNode;
+ size_t ival1, ival2;
+ GenTreePtr lclVarTree;
+ GenTreeLclVarCommon* lclVarCmnTree;
+ FieldSeqNode* fieldSeq = nullptr;
- switch (oper)
- {
- case GT_ASG:
+ switch (oper)
+ {
+ case GT_ASG:
- lclVarTree = fgIsIndirOfAddrOfLocal(op1);
- if (lclVarTree != nullptr)
- {
- lclVarTree->gtFlags |= GTF_VAR_DEF;
- }
+ lclVarTree = fgIsIndirOfAddrOfLocal(op1);
+ if (lclVarTree != nullptr)
+ {
+ lclVarTree->gtFlags |= GTF_VAR_DEF;
+ }
- if (op1->gtEffectiveVal()->OperIsConst())
- {
- op1 = gtNewOperNode(GT_IND, tree->TypeGet(), op1);
- tree->gtOp.gtOp1 = op1;
- }
+ if (op1->gtEffectiveVal()->OperIsConst())
+ {
+ op1 = gtNewOperNode(GT_IND, tree->TypeGet(), op1);
+ tree->gtOp.gtOp1 = op1;
+ }
- /* If we are storing a small type, we might be able to omit a cast */
- if ((op1->gtOper == GT_IND) && varTypeIsSmall(op1->TypeGet()))
+ /* If we are storing a small type, we might be able to omit a cast */
+ if ((op1->gtOper == GT_IND) && varTypeIsSmall(op1->TypeGet()))
+ {
+ if (!gtIsActiveCSE_Candidate(op2) && (op2->gtOper == GT_CAST) && !op2->gtOverflow())
{
- if (!gtIsActiveCSE_Candidate(op2) && (op2->gtOper == GT_CAST) && !op2->gtOverflow())
- {
- var_types castType = op2->CastToType();
+ var_types castType = op2->CastToType();
- // If we are performing a narrowing cast and
- // castType is larger or the same as op1's type
- // then we can discard the cast.
+ // If we are performing a narrowing cast and
+ // castType is larger or the same as op1's type
+ // then we can discard the cast.
- if (varTypeIsSmall(castType) && (castType >= op1->TypeGet()))
- {
- tree->gtOp.gtOp2 = op2 = op2->gtCast.CastOp();
- }
- }
- else if (op2->OperIsCompare() && varTypeIsByte(op1->TypeGet()))
+ if (varTypeIsSmall(castType) && (castType >= op1->TypeGet()))
{
- /* We don't need to zero extend the setcc instruction */
- op2->gtType = TYP_BYTE;
+ tree->gtOp.gtOp2 = op2 = op2->gtCast.CastOp();
}
}
- // If we introduced a CSE we may need to undo the optimization above
- // (i.e. " op2->gtType = TYP_BYTE;" which depends upon op1 being a GT_IND of a byte type)
- // When we introduce the CSE we remove the GT_IND and subsitute a GT_LCL_VAR in it place.
- else if (op2->OperIsCompare() && (op2->gtType == TYP_BYTE) && (op1->gtOper == GT_LCL_VAR))
+ else if (op2->OperIsCompare() && varTypeIsByte(op1->TypeGet()))
{
- unsigned varNum = op1->gtLclVarCommon.gtLclNum;
- LclVarDsc* varDsc = &lvaTable[varNum];
-
- /* We again need to zero extend the setcc instruction */
- op2->gtType = varDsc->TypeGet();
+ /* We don't need to zero extend the setcc instruction */
+ op2->gtType = TYP_BYTE;
}
- fgAssignSetVarDef(tree);
+ }
+ // If we introduced a CSE we may need to undo the optimization above
+ // (i.e. " op2->gtType = TYP_BYTE;" which depends upon op1 being a GT_IND of a byte type)
+ // When we introduce the CSE we remove the GT_IND and substitute a GT_LCL_VAR in its place.
+ else if (op2->OperIsCompare() && (op2->gtType == TYP_BYTE) && (op1->gtOper == GT_LCL_VAR))
+ {
+ unsigned varNum = op1->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[varNum];
- __fallthrough;
+ /* We again need to zero extend the setcc instruction */
+ op2->gtType = varDsc->TypeGet();
+ }
+ fgAssignSetVarDef(tree);
- case GT_ASG_ADD:
- case GT_ASG_SUB:
- case GT_ASG_MUL:
- case GT_ASG_DIV:
- case GT_ASG_MOD:
- case GT_ASG_UDIV:
- case GT_ASG_UMOD:
- case GT_ASG_OR:
- case GT_ASG_XOR:
- case GT_ASG_AND:
- case GT_ASG_LSH:
- case GT_ASG_RSH:
- case GT_ASG_RSZ:
-
- /* We can't CSE the LHS of an assignment */
- /* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */
- if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
- {
- op1->gtFlags |= GTF_DONT_CSE;
- }
- break;
+ __fallthrough;
- case GT_EQ:
- case GT_NE:
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ case GT_ASG_MUL:
+ case GT_ASG_DIV:
+ case GT_ASG_MOD:
+ case GT_ASG_UDIV:
+ case GT_ASG_UMOD:
+ case GT_ASG_OR:
+ case GT_ASG_XOR:
+ case GT_ASG_AND:
+ case GT_ASG_LSH:
+ case GT_ASG_RSH:
+ case GT_ASG_RSZ:
+
+ /* We can't CSE the LHS of an assignment */
+ /* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */
+ if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
+ {
+ op1->gtFlags |= GTF_DONT_CSE;
+ }
+ break;
- /* Make sure we're allowed to do this */
+ case GT_EQ:
+ case GT_NE:
- if (optValnumCSE_phase)
- {
- // It is not safe to reorder/delete CSE's
- break;
- }
+ /* Make sure we're allowed to do this */
- cns2 = op2;
+ if (optValnumCSE_phase)
+ {
+ // It is not safe to reorder/delete CSE's
+ break;
+ }
- /* Check for "(expr +/- icon1) ==/!= (non-zero-icon2)" */
+ cns2 = op2;
- if (cns2->gtOper == GT_CNS_INT && cns2->gtIntCon.gtIconVal != 0)
- {
- op1 = tree->gtOp.gtOp1;
+ /* Check for "(expr +/- icon1) ==/!= (non-zero-icon2)" */
- /* Since this can occur repeatedly we use a while loop */
+ if (cns2->gtOper == GT_CNS_INT && cns2->gtIntCon.gtIconVal != 0)
+ {
+ op1 = tree->gtOp.gtOp1;
- while ((op1->gtOper == GT_ADD || op1->gtOper == GT_SUB) &&
- (op1->gtOp.gtOp2->gtOper == GT_CNS_INT) && (op1->gtType == TYP_INT) &&
- (op1->gtOverflow() == false))
- {
- /* Got it; change "x+icon1==icon2" to "x==icon2-icon1" */
+ /* Since this can occur repeatedly we use a while loop */
- ival1 = op1->gtOp.gtOp2->gtIntCon.gtIconVal;
- ival2 = cns2->gtIntCon.gtIconVal;
+ while ((op1->gtOper == GT_ADD || op1->gtOper == GT_SUB) && (op1->gtOp.gtOp2->gtOper == GT_CNS_INT) &&
+ (op1->gtType == TYP_INT) && (op1->gtOverflow() == false))
+ {
+ /* Got it; change "x+icon1==icon2" to "x==icon2-icon1" */
- if (op1->gtOper == GT_ADD)
- {
- ival2 -= ival1;
- }
- else
- {
- ival2 += ival1;
- }
- cns2->gtIntCon.gtIconVal = ival2;
+ ival1 = op1->gtOp.gtOp2->gtIntCon.gtIconVal;
+ ival2 = cns2->gtIntCon.gtIconVal;
+
+ if (op1->gtOper == GT_ADD)
+ {
+ ival2 -= ival1;
+ }
+ else
+ {
+ ival2 += ival1;
+ }
+ cns2->gtIntCon.gtIconVal = ival2;
#ifdef _TARGET_64BIT_
- // we need to properly re-sign-extend or truncate as needed.
- cns2->AsIntCon()->TruncateOrSignExtend32();
+ // we need to properly re-sign-extend or truncate as needed.
+ cns2->AsIntCon()->TruncateOrSignExtend32();
#endif // _TARGET_64BIT_
- op1 = tree->gtOp.gtOp1 = op1->gtOp.gtOp1;
- }
+ op1 = tree->gtOp.gtOp1 = op1->gtOp.gtOp1;
}
+ }
- //
- // Here we look for the following tree
- //
- // EQ/NE
- // / \
+ //
+ // Here we look for the following tree
+ //
+ // EQ/NE
+ // / \
// op1 CNS 0/1
- //
- ival2 = INT_MAX; // The value of INT_MAX for ival2 just means that the constant value is not 0 or 1
+ //
+ ival2 = INT_MAX; // The value of INT_MAX for ival2 just means that the constant value is not 0 or 1
- // cast to unsigned allows test for both 0 and 1
- if ((cns2->gtOper == GT_CNS_INT) && (((size_t)cns2->gtIntConCommon.IconValue()) <= 1U))
- {
- ival2 = (size_t)cns2->gtIntConCommon.IconValue();
- }
- else // cast to UINT64 allows test for both 0 and 1
- if ((cns2->gtOper == GT_CNS_LNG) && (((UINT64)cns2->gtIntConCommon.LngValue()) <= 1ULL))
- {
- ival2 = (size_t)cns2->gtIntConCommon.LngValue();
- }
+ // cast to unsigned allows test for both 0 and 1
+ if ((cns2->gtOper == GT_CNS_INT) && (((size_t)cns2->gtIntConCommon.IconValue()) <= 1U))
+ {
+ ival2 = (size_t)cns2->gtIntConCommon.IconValue();
+ }
+ else // cast to UINT64 allows test for both 0 and 1
+ if ((cns2->gtOper == GT_CNS_LNG) && (((UINT64)cns2->gtIntConCommon.LngValue()) <= 1ULL))
+ {
+ ival2 = (size_t)cns2->gtIntConCommon.LngValue();
+ }
- if (ival2 != INT_MAX)
+ if (ival2 != INT_MAX)
+ {
+ // If we don't have a comma and relop, we can't do this optimization
+ //
+ if ((op1->gtOper == GT_COMMA) && (op1->gtOp.gtOp2->OperIsCompare()))
{
- // If we don't have a comma and relop, we can't do this optimization
+ // Here we look for the following transformation
//
- if ((op1->gtOper == GT_COMMA) && (op1->gtOp.gtOp2->OperIsCompare()))
- {
- // Here we look for the following transformation
- //
- // EQ/NE Possible REVERSE(RELOP)
- // / \ / \
+ // EQ/NE Possible REVERSE(RELOP)
+ // / \ / \
// COMMA CNS 0/1 -> COMMA relop_op2
- // / \ / \
+ // / \ / \
// x RELOP x relop_op1
- // / \
+ // / \
// relop_op1 relop_op2
- //
- //
- //
- GenTreePtr comma = op1;
- GenTreePtr relop = comma->gtOp.gtOp2;
+ //
+ //
+ //
+ GenTreePtr comma = op1;
+ GenTreePtr relop = comma->gtOp.gtOp2;
- GenTreePtr relop_op1 = relop->gtOp.gtOp1;
+ GenTreePtr relop_op1 = relop->gtOp.gtOp1;
- bool reverse = ((ival2 == 0) == (oper == GT_EQ));
+ bool reverse = ((ival2 == 0) == (oper == GT_EQ));
- if (reverse)
- {
- gtReverseCond(relop);
- }
+ if (reverse)
+ {
+ gtReverseCond(relop);
+ }
- relop->gtOp.gtOp1 = comma;
- comma->gtOp.gtOp2 = relop_op1;
+ relop->gtOp.gtOp1 = comma;
+ comma->gtOp.gtOp2 = relop_op1;
- // Comma now has fewer nodes underneath it, so we need to regenerate its flags
- comma->gtFlags &= ~GTF_ALL_EFFECT;
- comma->gtFlags |= (comma->gtOp.gtOp1->gtFlags) & GTF_ALL_EFFECT;
- comma->gtFlags |= (comma->gtOp.gtOp2->gtFlags) & GTF_ALL_EFFECT;
+ // Comma now has fewer nodes underneath it, so we need to regenerate its flags
+ comma->gtFlags &= ~GTF_ALL_EFFECT;
+ comma->gtFlags |= (comma->gtOp.gtOp1->gtFlags) & GTF_ALL_EFFECT;
+ comma->gtFlags |= (comma->gtOp.gtOp2->gtFlags) & GTF_ALL_EFFECT;
- noway_assert((relop->gtFlags & GTF_RELOP_JMP_USED) == 0);
- noway_assert((relop->gtFlags & GTF_REVERSE_OPS) == 0);
- relop->gtFlags |=
- tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE | GTF_ALL_EFFECT);
+ noway_assert((relop->gtFlags & GTF_RELOP_JMP_USED) == 0);
+ noway_assert((relop->gtFlags & GTF_REVERSE_OPS) == 0);
+ relop->gtFlags |=
+ tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE | GTF_ALL_EFFECT);
- return relop;
- }
+ return relop;
+ }
- if (op1->gtOper == GT_COMMA)
- {
- // Here we look for the following tree
- // and when the LCL_VAR is a temp we can fold the tree:
- //
- // EQ/NE EQ/NE
- // / \ / \
+ if (op1->gtOper == GT_COMMA)
+ {
+ // Here we look for the following tree
+ // and when the LCL_VAR is a temp we can fold the tree:
+ //
+ // EQ/NE EQ/NE
+ // / \ / \
// COMMA CNS 0/1 -> RELOP CNS 0/1
- // / \ / \
+ // / \ / \
// ASG LCL_VAR
- // / \
+ // / \
// LCL_VAR RELOP
- // / \
+ // / \
//
- GenTreePtr asg = op1->gtOp.gtOp1;
- GenTreePtr lcl = op1->gtOp.gtOp2;
+ GenTreePtr asg = op1->gtOp.gtOp1;
+ GenTreePtr lcl = op1->gtOp.gtOp2;
- /* Make sure that the left side of the comma is the assignment of the LCL_VAR */
- if (asg->gtOper != GT_ASG)
- {
- goto SKIP;
- }
+ /* Make sure that the left side of the comma is the assignment of the LCL_VAR */
+ if (asg->gtOper != GT_ASG)
+ {
+ goto SKIP;
+ }
- /* The right side of the comma must be a LCL_VAR temp */
- if (lcl->gtOper != GT_LCL_VAR)
- {
- goto SKIP;
- }
+ /* The right side of the comma must be a LCL_VAR temp */
+ if (lcl->gtOper != GT_LCL_VAR)
+ {
+ goto SKIP;
+ }
- unsigned lclNum = lcl->gtLclVarCommon.gtLclNum;
- noway_assert(lclNum < lvaCount);
+ unsigned lclNum = lcl->gtLclVarCommon.gtLclNum;
+ noway_assert(lclNum < lvaCount);
- /* If the LCL_VAR is not a temp then bail, a temp has a single def */
- if (!lvaTable[lclNum].lvIsTemp)
- {
- goto SKIP;
- }
+ /* If the LCL_VAR is not a temp then bail, a temp has a single def */
+ if (!lvaTable[lclNum].lvIsTemp)
+ {
+ goto SKIP;
+ }
#if FEATURE_ANYCSE
- /* If the LCL_VAR is a CSE temp then bail, it could have multiple defs/uses */
- // Fix 383856 X86/ARM ILGEN
- if (lclNumIsCSE(lclNum))
- {
- goto SKIP;
- }
+ /* If the LCL_VAR is a CSE temp then bail, it could have multiple defs/uses */
+ // Fix 383856 X86/ARM ILGEN
+ if (lclNumIsCSE(lclNum))
+ {
+ goto SKIP;
+ }
#endif
- /* We also must be assigning the result of a RELOP */
- if (asg->gtOp.gtOp1->gtOper != GT_LCL_VAR)
- {
- goto SKIP;
- }
-
- /* Both of the LCL_VAR must match */
- if (asg->gtOp.gtOp1->gtLclVarCommon.gtLclNum != lclNum)
- {
- goto SKIP;
- }
-
- /* If right side of asg is not a RELOP then skip */
- if (!asg->gtOp.gtOp2->OperIsCompare())
- {
- goto SKIP;
- }
+ /* We also must be assigning the result of a RELOP */
+ if (asg->gtOp.gtOp1->gtOper != GT_LCL_VAR)
+ {
+ goto SKIP;
+ }
- LclVarDsc* varDsc = lvaTable + lclNum;
-
- /* Set op1 to the right side of asg, (i.e. the RELOP) */
- op1 = asg->gtOp.gtOp2;
-
- DEBUG_DESTROY_NODE(asg->gtOp.gtOp1);
- DEBUG_DESTROY_NODE(lcl);
-
- /* This local variable should never be used again */
- // <BUGNUM>
- // VSW 184221: Make RefCnt to zero to indicate that this local var
- // is not used any more. (Keey the lvType as is.)
- // Otherwise lvOnFrame will be set to true in Compiler::raMarkStkVars
- // And then emitter::emitEndCodeGen will assert in the following line:
- // noway_assert( dsc->lvTracked);
- // </BUGNUM>
- noway_assert(varDsc->lvRefCnt == 0 || // lvRefCnt may not have been set yet.
- varDsc->lvRefCnt == 2 // Or, we assume this tmp should only be used here,
- // and it only shows up twice.
- );
- lvaTable[lclNum].lvRefCnt = 0;
- lvaTable[lclNum].lvaResetSortAgainFlag(this);
+ /* Both of the LCL_VAR must match */
+ if (asg->gtOp.gtOp1->gtLclVarCommon.gtLclNum != lclNum)
+ {
+ goto SKIP;
}
- if (op1->OperIsCompare())
+ /* If right side of asg is not a RELOP then skip */
+ if (!asg->gtOp.gtOp2->OperIsCompare())
{
- // Here we look for the following tree
- //
- // EQ/NE -> RELOP/!RELOP
- // / \ / \
+ goto SKIP;
+ }
+
+ LclVarDsc* varDsc = lvaTable + lclNum;
+
+ /* Set op1 to the right side of asg, (i.e. the RELOP) */
+ op1 = asg->gtOp.gtOp2;
+
+ DEBUG_DESTROY_NODE(asg->gtOp.gtOp1);
+ DEBUG_DESTROY_NODE(lcl);
+
+ /* This local variable should never be used again */
+ // <BUGNUM>
+ // VSW 184221: Make RefCnt to zero to indicate that this local var
+ // is not used any more. (Keep the lvType as is.)
+ // Otherwise lvOnFrame will be set to true in Compiler::raMarkStkVars
+ // And then emitter::emitEndCodeGen will assert in the following line:
+ // noway_assert( dsc->lvTracked);
+ // </BUGNUM>
+ noway_assert(varDsc->lvRefCnt == 0 || // lvRefCnt may not have been set yet.
+ varDsc->lvRefCnt == 2 // Or, we assume this tmp should only be used here,
+ // and it only shows up twice.
+ );
+ lvaTable[lclNum].lvRefCnt = 0;
+ lvaTable[lclNum].lvaResetSortAgainFlag(this);
+ }
+
+ if (op1->OperIsCompare())
+ {
+ // Here we look for the following tree
+ //
+ // EQ/NE -> RELOP/!RELOP
+ // / \ / \
// RELOP CNS 0/1
- // / \
+ // / \
//
- // Note that we will remove/destroy the EQ/NE node and move
- // the RELOP up into it's location.
+ // Note that we will remove/destroy the EQ/NE node and move
+ // the RELOP up into its location.
- /* Here we reverse the RELOP if necessary */
+ /* Here we reverse the RELOP if necessary */
- bool reverse = ((ival2 == 0) == (oper == GT_EQ));
+ bool reverse = ((ival2 == 0) == (oper == GT_EQ));
- if (reverse)
- {
- gtReverseCond(op1);
- }
+ if (reverse)
+ {
+ gtReverseCond(op1);
+ }
- /* Propagate gtType of tree into op1 in case it is TYP_BYTE for setcc optimization */
- op1->gtType = tree->gtType;
+ /* Propagate gtType of tree into op1 in case it is TYP_BYTE for setcc optimization */
+ op1->gtType = tree->gtType;
- noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0);
- op1->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
+ noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0);
+ op1->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
- DEBUG_DESTROY_NODE(tree);
- return op1;
- }
+ DEBUG_DESTROY_NODE(tree);
+ return op1;
+ }
- //
- // Now we check for a compare with the result of an '&' operator
- //
- // Here we look for the following transformation:
- //
- // EQ/NE EQ/NE
- // / \ / \
+ //
+ // Now we check for a compare with the result of an '&' operator
+ //
+ // Here we look for the following transformation:
+ //
+ // EQ/NE EQ/NE
+ // / \ / \
// AND CNS 0/1 -> AND CNS 0
- // / \ / \
+ // / \ / \
// RSZ/RSH CNS 1 x CNS (1 << y)
- // / \
+ // / \
// x CNS_INT +y
- if (op1->gtOper == GT_AND)
+ if (op1->gtOper == GT_AND)
+ {
+ GenTreePtr andOp = op1;
+ GenTreePtr rshiftOp = andOp->gtOp.gtOp1;
+
+ if ((rshiftOp->gtOper != GT_RSZ) && (rshiftOp->gtOper != GT_RSH))
{
- GenTreePtr andOp = op1;
- GenTreePtr rshiftOp = andOp->gtOp.gtOp1;
+ goto SKIP;
+ }
- if ((rshiftOp->gtOper != GT_RSZ) && (rshiftOp->gtOper != GT_RSH))
- {
- goto SKIP;
- }
+ if (!rshiftOp->gtOp.gtOp2->IsCnsIntOrI())
+ {
+ goto SKIP;
+ }
- if (!rshiftOp->gtOp.gtOp2->IsCnsIntOrI())
- {
- goto SKIP;
- }
+ ssize_t shiftAmount = rshiftOp->gtOp.gtOp2->gtIntCon.gtIconVal;
- ssize_t shiftAmount = rshiftOp->gtOp.gtOp2->gtIntCon.gtIconVal;
+ if (shiftAmount < 0)
+ {
+ goto SKIP;
+ }
- if (shiftAmount < 0)
- {
- goto SKIP;
- }
+ if (!andOp->gtOp.gtOp2->IsIntegralConst(1))
+ {
+ goto SKIP;
+ }
- if (!andOp->gtOp.gtOp2->IsIntegralConst(1))
+ if (andOp->gtType == TYP_INT)
+ {
+ if (shiftAmount > 31)
{
goto SKIP;
}
- if (andOp->gtType == TYP_INT)
- {
- if (shiftAmount > 31)
- {
- goto SKIP;
- }
-
- UINT32 newAndOperand = ((UINT32)1) << shiftAmount;
+ UINT32 newAndOperand = ((UINT32)1) << shiftAmount;
- andOp->gtOp.gtOp2->gtIntCon.gtIconVal = newAndOperand;
+ andOp->gtOp.gtOp2->gtIntCon.gtIconVal = newAndOperand;
- // Reverse the cond if necessary
- if (ival2 == 1)
- {
- gtReverseCond(tree);
- cns2->gtIntCon.gtIconVal = 0;
- oper = tree->gtOper;
- }
+ // Reverse the cond if necessary
+ if (ival2 == 1)
+ {
+ gtReverseCond(tree);
+ cns2->gtIntCon.gtIconVal = 0;
+ oper = tree->gtOper;
}
- else if (andOp->gtType == TYP_LONG)
+ }
+ else if (andOp->gtType == TYP_LONG)
+ {
+ if (shiftAmount > 63)
{
- if (shiftAmount > 63)
- {
- goto SKIP;
- }
+ goto SKIP;
+ }
- UINT64 newAndOperand = ((UINT64)1) << shiftAmount;
+ UINT64 newAndOperand = ((UINT64)1) << shiftAmount;
- andOp->gtOp.gtOp2->gtIntConCommon.SetLngValue(newAndOperand);
+ andOp->gtOp.gtOp2->gtIntConCommon.SetLngValue(newAndOperand);
- // Reverse the cond if necessary
- if (ival2 == 1)
- {
- gtReverseCond(tree);
- cns2->gtIntConCommon.SetLngValue(0);
- oper = tree->gtOper;
- }
+ // Reverse the cond if necessary
+ if (ival2 == 1)
+ {
+ gtReverseCond(tree);
+ cns2->gtIntConCommon.SetLngValue(0);
+ oper = tree->gtOper;
}
-
- andOp->gtOp.gtOp1 = rshiftOp->gtOp.gtOp1;
-
- DEBUG_DESTROY_NODE(rshiftOp->gtOp.gtOp2);
- DEBUG_DESTROY_NODE(rshiftOp);
}
- } // END if (ival2 != INT_MAX)
- SKIP:
- /* Now check for compares with small constant longs that can be cast to int */
+ andOp->gtOp.gtOp1 = rshiftOp->gtOp.gtOp1;
- if (!cns2->OperIsConst())
- {
- goto COMPARE;
+ DEBUG_DESTROY_NODE(rshiftOp->gtOp.gtOp2);
+ DEBUG_DESTROY_NODE(rshiftOp);
}
+ } // END if (ival2 != INT_MAX)
- if (cns2->TypeGet() != TYP_LONG)
- {
- goto COMPARE;
- }
+ SKIP:
+ /* Now check for compares with small constant longs that can be cast to int */
- /* Is the constant 31 bits or smaller? */
+ if (!cns2->OperIsConst())
+ {
+ goto COMPARE;
+ }
- if ((cns2->gtIntConCommon.LngValue() >> 31) != 0)
- {
- goto COMPARE;
- }
+ if (cns2->TypeGet() != TYP_LONG)
+ {
+ goto COMPARE;
+ }
- /* Is the first comparand mask operation of type long ? */
+ /* Is the constant 31 bits or smaller? */
- if (op1->gtOper != GT_AND)
- {
- /* Another interesting case: cast from int */
+ if ((cns2->gtIntConCommon.LngValue() >> 31) != 0)
+ {
+ goto COMPARE;
+ }
- if (op1->gtOper == GT_CAST && op1->CastFromType() == TYP_INT &&
- !gtIsActiveCSE_Candidate(op1) && // op1 cannot be a CSE candidate
- !op1->gtOverflow()) // cannot be an overflow checking cast
- {
- /* Simply make this into an integer comparison */
+ /* Is the first comparand mask operation of type long ? */
- tree->gtOp.gtOp1 = op1->gtCast.CastOp();
- tree->gtOp.gtOp2 = gtNewIconNode((int)cns2->gtIntConCommon.LngValue(), TYP_INT);
- }
+ if (op1->gtOper != GT_AND)
+ {
+ /* Another interesting case: cast from int */
- goto COMPARE;
+ if (op1->gtOper == GT_CAST && op1->CastFromType() == TYP_INT &&
+ !gtIsActiveCSE_Candidate(op1) && // op1 cannot be a CSE candidate
+ !op1->gtOverflow()) // cannot be an overflow checking cast
+ {
+ /* Simply make this into an integer comparison */
+
+ tree->gtOp.gtOp1 = op1->gtCast.CastOp();
+ tree->gtOp.gtOp2 = gtNewIconNode((int)cns2->gtIntConCommon.LngValue(), TYP_INT);
}
- noway_assert(op1->TypeGet() == TYP_LONG && op1->OperGet() == GT_AND);
+ goto COMPARE;
+ }
+
+ noway_assert(op1->TypeGet() == TYP_LONG && op1->OperGet() == GT_AND);
- /* Is the result of the mask effectively an INT ? */
+ /* Is the result of the mask effectively an INT ? */
- GenTreePtr andMask;
- andMask = op1->gtOp.gtOp2;
- if (andMask->gtOper != GT_CNS_NATIVELONG)
- {
- goto COMPARE;
- }
- if ((andMask->gtIntConCommon.LngValue() >> 32) != 0)
- {
- goto COMPARE;
- }
+ GenTreePtr andMask;
+ andMask = op1->gtOp.gtOp2;
+ if (andMask->gtOper != GT_CNS_NATIVELONG)
+ {
+ goto COMPARE;
+ }
+ if ((andMask->gtIntConCommon.LngValue() >> 32) != 0)
+ {
+ goto COMPARE;
+ }
- /* Now we know that we can cast gtOp.gtOp1 of AND to int */
+ /* Now we know that we can cast gtOp.gtOp1 of AND to int */
- op1->gtOp.gtOp1 = gtNewCastNode(TYP_INT, op1->gtOp.gtOp1, TYP_INT);
+ op1->gtOp.gtOp1 = gtNewCastNode(TYP_INT, op1->gtOp.gtOp1, TYP_INT);
- /* now replace the mask node (gtOp.gtOp2 of AND node) */
+ /* now replace the mask node (gtOp.gtOp2 of AND node) */
- noway_assert(andMask == op1->gtOp.gtOp2);
+ noway_assert(andMask == op1->gtOp.gtOp2);
- ival1 = (int)andMask->gtIntConCommon.LngValue();
- andMask->SetOper(GT_CNS_INT);
- andMask->gtType = TYP_INT;
- andMask->gtIntCon.gtIconVal = ival1;
+ ival1 = (int)andMask->gtIntConCommon.LngValue();
+ andMask->SetOper(GT_CNS_INT);
+ andMask->gtType = TYP_INT;
+ andMask->gtIntCon.gtIconVal = ival1;
- /* now change the type of the AND node */
+ /* now change the type of the AND node */
- op1->gtType = TYP_INT;
+ op1->gtType = TYP_INT;
- /* finally we replace the comparand */
+ /* finally we replace the comparand */
- ival2 = (int)cns2->gtIntConCommon.LngValue();
- cns2->SetOper(GT_CNS_INT);
- cns2->gtType = TYP_INT;
+ ival2 = (int)cns2->gtIntConCommon.LngValue();
+ cns2->SetOper(GT_CNS_INT);
+ cns2->gtType = TYP_INT;
- noway_assert(cns2 == op2);
- cns2->gtIntCon.gtIconVal = ival2;
+ noway_assert(cns2 == op2);
+ cns2->gtIntCon.gtIconVal = ival2;
- goto COMPARE;
+ goto COMPARE;
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
- if ((tree->gtFlags & GTF_UNSIGNED) == 0)
+ if ((tree->gtFlags & GTF_UNSIGNED) == 0)
+ {
+ if (op2->gtOper == GT_CNS_INT)
{
- if (op2->gtOper == GT_CNS_INT)
+ cns2 = op2;
+ /* Check for "expr relop 1" */
+ if (cns2->IsIntegralConst(1))
{
- cns2 = op2;
- /* Check for "expr relop 1" */
- if (cns2->IsIntegralConst(1))
+ /* Check for "expr >= 1" */
+ if (oper == GT_GE)
{
- /* Check for "expr >= 1" */
- if (oper == GT_GE)
- {
- /* Change to "expr > 0" */
- oper = GT_GT;
- goto SET_OPER;
- }
- /* Check for "expr < 1" */
- else if (oper == GT_LT)
- {
- /* Change to "expr <= 0" */
- oper = GT_LE;
- goto SET_OPER;
- }
+ /* Change to "expr > 0" */
+ oper = GT_GT;
+ goto SET_OPER;
}
- /* Check for "expr relop -1" */
- else if (cns2->IsIntegralConst(-1) && ((oper == GT_LE) || (oper == GT_GT)))
+ /* Check for "expr < 1" */
+ else if (oper == GT_LT)
{
- /* Check for "expr <= -1" */
- if (oper == GT_LE)
- {
- /* Change to "expr < 0" */
- oper = GT_LT;
- goto SET_OPER;
- }
- /* Check for "expr > -1" */
- else if (oper == GT_GT)
- {
- /* Change to "expr >= 0" */
- oper = GT_GE;
-
- SET_OPER:
- // IF we get here we should be changing 'oper'
- assert(tree->OperGet() != oper);
+ /* Change to "expr <= 0" */
+ oper = GT_LE;
+ goto SET_OPER;
+ }
+ }
+ /* Check for "expr relop -1" */
+ else if (cns2->IsIntegralConst(-1) && ((oper == GT_LE) || (oper == GT_GT)))
+ {
+ /* Check for "expr <= -1" */
+ if (oper == GT_LE)
+ {
+ /* Change to "expr < 0" */
+ oper = GT_LT;
+ goto SET_OPER;
+ }
+ /* Check for "expr > -1" */
+ else if (oper == GT_GT)
+ {
+ /* Change to "expr >= 0" */
+ oper = GT_GE;
- // Keep the old ValueNumber for 'tree' as the new expr
- // will still compute the same value as before
- tree->SetOper(oper, GenTree::PRESERVE_VN);
- cns2->gtIntCon.gtIconVal = 0;
+ SET_OPER:
+ // IF we get here we should be changing 'oper'
+ assert(tree->OperGet() != oper);
- // vnStore is null before the ValueNumber phase has run
- if (vnStore != nullptr)
- {
- // Update the ValueNumber for 'cns2', as we just changed it to 0
- fgValueNumberTreeConst(cns2);
- }
+ // Keep the old ValueNumber for 'tree' as the new expr
+ // will still compute the same value as before
+ tree->SetOper(oper, GenTree::PRESERVE_VN);
+ cns2->gtIntCon.gtIconVal = 0;
- op2 = tree->gtOp.gtOp2 = gtFoldExpr(op2);
+ // vnStore is null before the ValueNumber phase has run
+ if (vnStore != nullptr)
+ {
+ // Update the ValueNumber for 'cns2', as we just changed it to 0
+ fgValueNumberTreeConst(cns2);
}
+
+ op2 = tree->gtOp.gtOp2 = gtFoldExpr(op2);
}
}
}
- else // we have an unsigned comparison
+ }
+ else // we have an unsigned comparison
+ {
+ if (op2->IsIntegralConst(0))
{
- if (op2->IsIntegralConst(0))
+ if ((oper == GT_GT) || (oper == GT_LE))
{
- if ((oper == GT_GT) || (oper == GT_LE))
- {
- // IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT
- // recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails
- // if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0)
- // and (x GT_LE.unsigned 0) into (x GT_EQ 0). The later case is rare, it sometimes
- // occurs as a result of branch inversion.
- oper = (oper == GT_LE) ? GT_EQ : GT_NE;
- tree->SetOper(oper, GenTree::PRESERVE_VN);
- tree->gtFlags &= ~GTF_UNSIGNED;
- }
+ // IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT
+ // recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails
+ // if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0)
+ // and (x GT_LE.unsigned 0) into (x GT_EQ 0). The latter case is rare, it sometimes
+ // occurs as a result of branch inversion.
+ oper = (oper == GT_LE) ? GT_EQ : GT_NE;
+ tree->SetOper(oper, GenTree::PRESERVE_VN);
+ tree->gtFlags &= ~GTF_UNSIGNED;
}
}
+ }
- COMPARE:
+ COMPARE:
- noway_assert(tree->OperKind() & GTK_RELOP);
+ noway_assert(tree->OperKind() & GTK_RELOP);
#ifdef LEGACY_BACKEND
- /* Check if the result of the comparison is used for a jump.
- * If not then only the int (i.e. 32 bit) case is handled in
- * the code generator through the (x86) "set" instructions.
- * For the rest of the cases, the simplest way is to
- * "simulate" the comparison with ?:
- *
- * On ARM, we previously used the IT instruction, but the IT instructions
- * have mostly been declared obsolete and off-limits, so all cases on ARM
- * get converted to ?: */
+ /* Check if the result of the comparison is used for a jump.
+ * If not then only the int (i.e. 32 bit) case is handled in
+ * the code generator through the (x86) "set" instructions.
+ * For the rest of the cases, the simplest way is to
+ * "simulate" the comparison with ?:
+ *
+ * On ARM, we previously used the IT instruction, but the IT instructions
+ * have mostly been declared obsolete and off-limits, so all cases on ARM
+ * get converted to ?: */
- if (!(tree->gtFlags & GTF_RELOP_JMP_USED) && fgMorphRelopToQmark(op1))
- {
- /* We convert it to "(CMP_TRUE) ? (1):(0)" */
+ if (!(tree->gtFlags & GTF_RELOP_JMP_USED) && fgMorphRelopToQmark(op1))
+ {
+ /* We convert it to "(CMP_TRUE) ? (1):(0)" */
- op1 = tree;
- op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
- op1->gtRequestSetFlags();
+ op1 = tree;
+ op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
+ op1->gtRequestSetFlags();
- op2 = new (this, GT_COLON) GenTreeColon(TYP_INT, gtNewIconNode(1), gtNewIconNode(0));
- op2 = fgMorphTree(op2);
+ op2 = new (this, GT_COLON) GenTreeColon(TYP_INT, gtNewIconNode(1), gtNewIconNode(0));
+ op2 = fgMorphTree(op2);
- tree = gtNewQmarkNode(TYP_INT, op1, op2);
+ tree = gtNewQmarkNode(TYP_INT, op1, op2);
- fgMorphTreeDone(tree);
+ fgMorphTreeDone(tree);
- return tree;
- }
+ return tree;
+ }
#endif // LEGACY_BACKEND
- break;
+ break;
#ifdef LEGACY_BACKEND
- case GT_QMARK:
+ case GT_QMARK:
- /* If op1 is a comma throw node then we won't be keeping op2 */
- if (fgIsCommaThrow(op1))
- {
- break;
- }
+ /* If op1 is a comma throw node then we won't be keeping op2 */
+ if (fgIsCommaThrow(op1))
+ {
+ break;
+ }
- /* Get hold of the two branches */
+ /* Get hold of the two branches */
- noway_assert(op2->OperGet() == GT_COLON);
- elseNode = op2->AsColon()->ElseNode();
- thenNode = op2->AsColon()->ThenNode();
+ noway_assert(op2->OperGet() == GT_COLON);
+ elseNode = op2->AsColon()->ElseNode();
+ thenNode = op2->AsColon()->ThenNode();
- /* Try to hoist assignments out of qmark colon constructs.
- ie. replace (cond?(x=a):(x=b)) with (x=(cond?a:b)). */
+ /* Try to hoist assignments out of qmark colon constructs.
+ ie. replace (cond?(x=a):(x=b)) with (x=(cond?a:b)). */
- if (tree->TypeGet() == TYP_VOID && thenNode->OperGet() == GT_ASG && elseNode->OperGet() == GT_ASG &&
- thenNode->TypeGet() != TYP_LONG && GenTree::Compare(thenNode->gtOp.gtOp1, elseNode->gtOp.gtOp1) &&
- thenNode->gtOp.gtOp2->TypeGet() == elseNode->gtOp.gtOp2->TypeGet())
- {
- noway_assert(thenNode->TypeGet() == elseNode->TypeGet());
+ if (tree->TypeGet() == TYP_VOID && thenNode->OperGet() == GT_ASG && elseNode->OperGet() == GT_ASG &&
+ thenNode->TypeGet() != TYP_LONG && GenTree::Compare(thenNode->gtOp.gtOp1, elseNode->gtOp.gtOp1) &&
+ thenNode->gtOp.gtOp2->TypeGet() == elseNode->gtOp.gtOp2->TypeGet())
+ {
+ noway_assert(thenNode->TypeGet() == elseNode->TypeGet());
- GenTreePtr asg = thenNode;
- GenTreePtr colon = op2;
- colon->gtOp.gtOp1 = thenNode->gtOp.gtOp2;
- colon->gtOp.gtOp2 = elseNode->gtOp.gtOp2;
- tree->gtType = colon->gtType = asg->gtOp.gtOp2->gtType;
- asg->gtOp.gtOp2 = tree;
+ GenTreePtr asg = thenNode;
+ GenTreePtr colon = op2;
+ colon->gtOp.gtOp1 = thenNode->gtOp.gtOp2;
+ colon->gtOp.gtOp2 = elseNode->gtOp.gtOp2;
+ tree->gtType = colon->gtType = asg->gtOp.gtOp2->gtType;
+ asg->gtOp.gtOp2 = tree;
- // Asg will have all the flags that the QMARK had
- asg->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
+ // Asg will have all the flags that the QMARK had
+ asg->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
- // Colon flag won't have the flags that x had.
- colon->gtFlags &= ~GTF_ALL_EFFECT;
- colon->gtFlags |= (colon->gtOp.gtOp1->gtFlags | colon->gtOp.gtOp2->gtFlags) & GTF_ALL_EFFECT;
+ // Colon flag won't have the flags that x had.
+ colon->gtFlags &= ~GTF_ALL_EFFECT;
+ colon->gtFlags |= (colon->gtOp.gtOp1->gtFlags | colon->gtOp.gtOp2->gtFlags) & GTF_ALL_EFFECT;
- DEBUG_DESTROY_NODE(elseNode->gtOp.gtOp1);
- DEBUG_DESTROY_NODE(elseNode);
+ DEBUG_DESTROY_NODE(elseNode->gtOp.gtOp1);
+ DEBUG_DESTROY_NODE(elseNode);
- return asg;
- }
+ return asg;
+ }
- /* If the 'else' branch is empty swap the two branches and reverse the condition */
+ /* If the 'else' branch is empty swap the two branches and reverse the condition */
- if (elseNode->IsNothingNode())
+ if (elseNode->IsNothingNode())
+ {
+ /* This can only happen for VOID ?: */
+ noway_assert(op2->gtType == TYP_VOID);
+
+ /* If the thenNode and elseNode are both nop nodes then optimize away the QMARK */
+ if (thenNode->IsNothingNode())
{
- /* This can only happen for VOID ?: */
- noway_assert(op2->gtType == TYP_VOID);
+ // We may be able to throw away op1 (unless it has side-effects)
- /* If the thenNode and elseNode are both nop nodes then optimize away the QMARK */
- if (thenNode->IsNothingNode())
+ if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
{
- // We may be able to throw away op1 (unless it has side-effects)
-
- if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
- {
- /* Just return a a Nop Node */
- return thenNode;
- }
- else
- {
- /* Just return the relop, but clear the special flags. Note
- that we can't do that for longs and floats (see code under
- COMPARE label above) */
-
- if (!fgMorphRelopToQmark(op1->gtOp.gtOp1))
- {
- op1->gtFlags &= ~(GTF_RELOP_QMARK | GTF_RELOP_JMP_USED);
- return op1;
- }
- }
+ /* Just return a Nop Node */
+ return thenNode;
}
else
{
- GenTreePtr tmp = elseNode;
+ /* Just return the relop, but clear the special flags. Note
+ that we can't do that for longs and floats (see code under
+ COMPARE label above) */
- op2->AsColon()->ElseNode() = elseNode = thenNode;
- op2->AsColon()->ThenNode() = thenNode = tmp;
- gtReverseCond(op1);
+ if (!fgMorphRelopToQmark(op1->gtOp.gtOp1))
+ {
+ op1->gtFlags &= ~(GTF_RELOP_QMARK | GTF_RELOP_JMP_USED);
+ return op1;
+ }
}
}
+ else
+ {
+ GenTreePtr tmp = elseNode;
+
+ op2->AsColon()->ElseNode() = elseNode = thenNode;
+ op2->AsColon()->ThenNode() = thenNode = tmp;
+ gtReverseCond(op1);
+ }
+ }
#if !defined(_TARGET_ARM_)
- // If we have (cond)?0:1, then we just return "cond" for TYP_INTs
- //
- // Don't do this optimization for ARM: we always require assignment
- // to boolean to remain ?:, since we don't have any way to generate
- // this with straight-line code, like x86 does using setcc (at least
- // after the IT instruction is deprecated).
+ // If we have (cond)?0:1, then we just return "cond" for TYP_INTs
+ //
+ // Don't do this optimization for ARM: we always require assignment
+ // to boolean to remain ?:, since we don't have any way to generate
+ // this with straight-line code, like x86 does using setcc (at least
+ // after the IT instruction is deprecated).
- if (genActualType(op1->gtOp.gtOp1->gtType) == TYP_INT && genActualType(typ) == TYP_INT &&
- thenNode->gtOper == GT_CNS_INT && elseNode->gtOper == GT_CNS_INT)
- {
- ival1 = thenNode->gtIntCon.gtIconVal;
- ival2 = elseNode->gtIntCon.gtIconVal;
+ if (genActualType(op1->gtOp.gtOp1->gtType) == TYP_INT && genActualType(typ) == TYP_INT &&
+ thenNode->gtOper == GT_CNS_INT && elseNode->gtOper == GT_CNS_INT)
+ {
+ ival1 = thenNode->gtIntCon.gtIconVal;
+ ival2 = elseNode->gtIntCon.gtIconVal;
- // Is one constant 0 and the other 1?
- if ((ival1 | ival2) == 1 && (ival1 & ival2) == 0)
+ // Is one constant 0 and the other 1?
+ if ((ival1 | ival2) == 1 && (ival1 & ival2) == 0)
+ {
+ // If the constants are {1, 0}, reverse the condition
+ if (ival1 == 1)
{
- // If the constants are {1, 0}, reverse the condition
- if (ival1 == 1)
- {
- gtReverseCond(op1);
- }
+ gtReverseCond(op1);
+ }
- // Unmark GTF_RELOP_JMP_USED on the condition node so it knows that it
- // needs to materialize the result as a 0 or 1.
- noway_assert(op1->gtFlags & (GTF_RELOP_QMARK | GTF_RELOP_JMP_USED));
- op1->gtFlags &= ~(GTF_RELOP_QMARK | GTF_RELOP_JMP_USED);
+ // Unmark GTF_RELOP_JMP_USED on the condition node so it knows that it
+ // needs to materialize the result as a 0 or 1.
+ noway_assert(op1->gtFlags & (GTF_RELOP_QMARK | GTF_RELOP_JMP_USED));
+ op1->gtFlags &= ~(GTF_RELOP_QMARK | GTF_RELOP_JMP_USED);
- DEBUG_DESTROY_NODE(tree);
- DEBUG_DESTROY_NODE(op2);
+ DEBUG_DESTROY_NODE(tree);
+ DEBUG_DESTROY_NODE(op2);
- return op1;
- }
+ return op1;
}
+ }
#endif // !_TARGET_ARM_
- break; // end case GT_QMARK
-#endif // LEGACY_BACKEND
+ break; // end case GT_QMARK
+#endif // LEGACY_BACKEND
- case GT_MUL:
+ case GT_MUL:
#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
+ if (typ == TYP_LONG)
+ {
+ // This must be GTF_MUL_64RSLT
+ assert(tree->gtIsValid64RsltMul());
+ return tree;
+ }
+#endif // _TARGET_64BIT_
+ goto CM_OVF_OP;
+
+ case GT_SUB:
+
+ if (tree->gtOverflow())
+ {
+ goto CM_OVF_OP;
+ }
+
+ // TODO #4104: there are a lot of other places where
+ // this condition is not checked before transformations.
+ if (fgGlobalMorph)
+ {
+ /* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */
+
+ noway_assert(op2);
+ if (op2->IsCnsIntOrI())
{
- // This must be GTF_MUL_64RSLT
- assert(tree->gtIsValid64RsltMul());
- return tree;
+ /* Negate the constant and change the node to be "+" */
+
+ op2->gtIntConCommon.SetIconValue(-op2->gtIntConCommon.IconValue());
+ oper = GT_ADD;
+ tree->ChangeOper(oper);
+ goto CM_ADD_OP;
}
-#endif // _TARGET_64BIT_
- goto CM_OVF_OP;
- case GT_SUB:
+ /* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */
- if (tree->gtOverflow())
+ noway_assert(op1);
+ if (op1->IsCnsIntOrI())
{
- goto CM_OVF_OP;
- }
+ noway_assert(varTypeIsIntOrI(tree));
- // TODO #4104: there are a lot of other places where
- // this condition is not checked before transformations.
- if (fgGlobalMorph)
- {
- /* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */
+ tree->gtOp.gtOp2 = op2 = gtNewOperNode(GT_NEG, tree->gtType, op2); // The type of the new GT_NEG
+ // node should be the same
+ // as the type of the tree, i.e. tree->gtType.
+ fgMorphTreeDone(op2);
- noway_assert(op2);
- if (op2->IsCnsIntOrI())
- {
- /* Negate the constant and change the node to be "+" */
+ oper = GT_ADD;
+ tree->ChangeOper(oper);
+ goto CM_ADD_OP;
+ }
- op2->gtIntConCommon.SetIconValue(-op2->gtIntConCommon.IconValue());
- oper = GT_ADD;
- tree->ChangeOper(oper);
- goto CM_ADD_OP;
- }
+ /* No match - exit */
+ }
+ break;
- /* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */
+#ifdef _TARGET_ARM64_
+ case GT_DIV:
+ if (!varTypeIsFloating(tree->gtType))
+ {
+ // Codegen for this instruction needs to be able to throw two exceptions:
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW, fgPtrArgCntCur);
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO, fgPtrArgCntCur);
+ }
+ break;
+ case GT_UDIV:
+ // Codegen for this instruction needs to be able to throw one exception:
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO, fgPtrArgCntCur);
+ break;
+#endif
- noway_assert(op1);
- if (op1->IsCnsIntOrI())
- {
- noway_assert(varTypeIsIntOrI(tree));
+ case GT_ADD:
- tree->gtOp.gtOp2 = op2 = gtNewOperNode(GT_NEG, tree->gtType, op2); // The type of the new GT_NEG
- // node should be the same
- // as the type of the tree, i.e. tree->gtType.
- fgMorphTreeDone(op2);
+ CM_OVF_OP:
+ if (tree->gtOverflow())
+ {
+ tree->gtRequestSetFlags();
- oper = GT_ADD;
- tree->ChangeOper(oper);
- goto CM_ADD_OP;
- }
+ // Add the excptn-throwing basic block to jump to on overflow
- /* No match - exit */
- }
- break;
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW, fgPtrArgCntCur);
-#ifdef _TARGET_ARM64_
- case GT_DIV:
- if (!varTypeIsFloating(tree->gtType))
- {
- // Codegen for this instruction needs to be able to throw two exceptions:
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW, fgPtrArgCntCur);
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO, fgPtrArgCntCur);
- }
- break;
- case GT_UDIV:
- // Codegen for this instruction needs to be able to throw one exception:
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO, fgPtrArgCntCur);
- break;
-#endif
+ // We can't do any commutative morphing for overflow instructions
- case GT_ADD:
+ break;
+ }
- CM_OVF_OP:
- if (tree->gtOverflow())
- {
- tree->gtRequestSetFlags();
+ CM_ADD_OP:
- // Add the excptn-throwing basic block to jump to on overflow
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW, fgPtrArgCntCur);
+ /* Commute any non-REF constants to the right */
- // We can't do any commutative morphing for overflow instructions
+ noway_assert(op1);
+ if (op1->OperIsConst() && (op1->gtType != TYP_REF))
+ {
+ // TODO-Review: We used to assert here that
+ // noway_assert(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD));
+ // With modifications to AddrTaken==>AddrExposed, we did more assertion propagation,
+ // and would sometimes hit this assertion. This may indicate a missed "remorph".
+ // Task is to re-enable this assertion and investigate.
- break;
- }
+ /* Swap the operands */
+ tree->gtOp.gtOp1 = op2;
+ tree->gtOp.gtOp2 = op1;
- CM_ADD_OP:
+ op1 = op2;
+ op2 = tree->gtOp.gtOp2;
+ }
- case GT_OR:
- case GT_XOR:
- case GT_AND:
+ /* See if we can fold GT_ADD nodes. */
- /* Commute any non-REF constants to the right */
+ if (oper == GT_ADD)
+ {
+ /* Fold "((x+icon1)+(y+icon2)) to ((x+y)+(icon1+icon2))" */
- noway_assert(op1);
- if (op1->OperIsConst() && (op1->gtType != TYP_REF))
+ if (op1->gtOper == GT_ADD && op2->gtOper == GT_ADD && !gtIsActiveCSE_Candidate(op2) &&
+ op1->gtOp.gtOp2->gtOper == GT_CNS_INT && op2->gtOp.gtOp2->gtOper == GT_CNS_INT &&
+ !op1->gtOverflow() && !op2->gtOverflow())
{
- // TODO-Review: We used to assert here that
- // noway_assert(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD));
- // With modifications to AddrTaken==>AddrExposed, we did more assertion propagation,
- // and would sometimes hit this assertion. This may indicate a missed "remorph".
- // Task is to re-enable this assertion and investigate.
+ cns1 = op1->gtOp.gtOp2;
+ cns2 = op2->gtOp.gtOp2;
+ cns1->gtIntCon.gtIconVal += cns2->gtIntCon.gtIconVal;
+#ifdef _TARGET_64BIT_
+ if (cns1->TypeGet() == TYP_INT)
+ {
+ // we need to properly re-sign-extend or truncate after adding two int constants above
+ cns1->AsIntCon()->TruncateOrSignExtend32();
+ }
+#endif //_TARGET_64BIT_
- /* Swap the operands */
- tree->gtOp.gtOp1 = op2;
- tree->gtOp.gtOp2 = op1;
+ tree->gtOp.gtOp2 = cns1;
+ DEBUG_DESTROY_NODE(cns2);
- op1 = op2;
+ op1->gtOp.gtOp2 = op2->gtOp.gtOp1;
+ op1->gtFlags |= (op1->gtOp.gtOp2->gtFlags & GTF_ALL_EFFECT);
+ DEBUG_DESTROY_NODE(op2);
op2 = tree->gtOp.gtOp2;
}
- /* See if we can fold GT_ADD nodes. */
-
- if (oper == GT_ADD)
+ if (op2->IsCnsIntOrI() && varTypeIsIntegralOrI(typ))
{
- /* Fold "((x+icon1)+(y+icon2)) to ((x+y)+(icon1+icon2))" */
+ /* Fold "((x+icon1)+icon2) to (x+(icon1+icon2))" */
- if (op1->gtOper == GT_ADD && op2->gtOper == GT_ADD && !gtIsActiveCSE_Candidate(op2) &&
- op1->gtOp.gtOp2->gtOper == GT_CNS_INT && op2->gtOp.gtOp2->gtOper == GT_CNS_INT &&
- !op1->gtOverflow() && !op2->gtOverflow())
+ if (op1->gtOper == GT_ADD && !gtIsActiveCSE_Candidate(op1) && op1->gtOp.gtOp2->IsCnsIntOrI() &&
+ !op1->gtOverflow() && op1->gtOp.gtOp2->OperGet() == op2->OperGet())
{
cns1 = op1->gtOp.gtOp2;
- cns2 = op2->gtOp.gtOp2;
- cns1->gtIntCon.gtIconVal += cns2->gtIntCon.gtIconVal;
+ op2->gtIntConCommon.SetIconValue(cns1->gtIntConCommon.IconValue() +
+ op2->gtIntConCommon.IconValue());
#ifdef _TARGET_64BIT_
- if (cns1->TypeGet() == TYP_INT)
+ if (op2->TypeGet() == TYP_INT)
{
// we need to properly re-sign-extend or truncate after adding two int constants above
- cns1->AsIntCon()->TruncateOrSignExtend32();
+ op2->AsIntCon()->TruncateOrSignExtend32();
}
#endif //_TARGET_64BIT_
- tree->gtOp.gtOp2 = cns1;
- DEBUG_DESTROY_NODE(cns2);
+ if (cns1->OperGet() == GT_CNS_INT)
+ {
+ op2->gtIntCon.gtFieldSeq =
+ GetFieldSeqStore()->Append(cns1->gtIntCon.gtFieldSeq, op2->gtIntCon.gtFieldSeq);
+ }
+ DEBUG_DESTROY_NODE(cns1);
- op1->gtOp.gtOp2 = op2->gtOp.gtOp1;
- op1->gtFlags |= (op1->gtOp.gtOp2->gtFlags & GTF_ALL_EFFECT);
- DEBUG_DESTROY_NODE(op2);
- op2 = tree->gtOp.gtOp2;
+ tree->gtOp.gtOp1 = op1->gtOp.gtOp1;
+ DEBUG_DESTROY_NODE(op1);
+ op1 = tree->gtOp.gtOp1;
}
- if (op2->IsCnsIntOrI() && varTypeIsIntegralOrI(typ))
- {
- /* Fold "((x+icon1)+icon2) to (x+(icon1+icon2))" */
+ // Fold (x + 0).
- if (op1->gtOper == GT_ADD && !gtIsActiveCSE_Candidate(op1) && op1->gtOp.gtOp2->IsCnsIntOrI() &&
- !op1->gtOverflow() && op1->gtOp.gtOp2->OperGet() == op2->OperGet())
- {
- cns1 = op1->gtOp.gtOp2;
- op2->gtIntConCommon.SetIconValue(cns1->gtIntConCommon.IconValue() +
- op2->gtIntConCommon.IconValue());
-#ifdef _TARGET_64BIT_
- if (op2->TypeGet() == TYP_INT)
- {
- // we need to properly re-sign-extend or truncate after adding two int constants above
- op2->AsIntCon()->TruncateOrSignExtend32();
- }
-#endif //_TARGET_64BIT_
+ if ((op2->gtIntConCommon.IconValue() == 0) && !gtIsActiveCSE_Candidate(tree))
+ {
- if (cns1->OperGet() == GT_CNS_INT)
- {
- op2->gtIntCon.gtFieldSeq =
- GetFieldSeqStore()->Append(cns1->gtIntCon.gtFieldSeq, op2->gtIntCon.gtFieldSeq);
- }
- DEBUG_DESTROY_NODE(cns1);
+ // If this addition is adding an offset to a null pointer,
+ // avoid the work and yield the null pointer immediately.
+ // Dereferencing the pointer in either case will have the
+ // same effect.
- tree->gtOp.gtOp1 = op1->gtOp.gtOp1;
+ if (!optValnumCSE_phase && varTypeIsGC(op2->TypeGet()) &&
+ ((op1->gtFlags & GTF_ALL_EFFECT) == 0))
+ {
+ op2->gtType = tree->gtType;
DEBUG_DESTROY_NODE(op1);
- op1 = tree->gtOp.gtOp1;
+ DEBUG_DESTROY_NODE(tree);
+ return op2;
}
- // Fold (x + 0).
+ // Remove the addition iff it won't change the tree type
+ // to TYP_REF.
- if ((op2->gtIntConCommon.IconValue() == 0) && !gtIsActiveCSE_Candidate(tree))
+ if (!gtIsActiveCSE_Candidate(op2) &&
+ ((op1->TypeGet() == tree->TypeGet()) || (op1->TypeGet() != TYP_REF)))
{
-
- // If this addition is adding an offset to a null pointer,
- // avoid the work and yield the null pointer immediately.
- // Dereferencing the pointer in either case will have the
- // same effect.
-
- if (!optValnumCSE_phase && varTypeIsGC(op2->TypeGet()) &&
- ((op1->gtFlags & GTF_ALL_EFFECT) == 0))
+ if (fgGlobalMorph && (op2->OperGet() == GT_CNS_INT) &&
+ (op2->gtIntCon.gtFieldSeq != nullptr) &&
+ (op2->gtIntCon.gtFieldSeq != FieldSeqStore::NotAField()))
{
- op2->gtType = tree->gtType;
- DEBUG_DESTROY_NODE(op1);
- DEBUG_DESTROY_NODE(tree);
- return op2;
+ fgAddFieldSeqForZeroOffset(op1, op2->gtIntCon.gtFieldSeq);
}
- // Remove the addition iff it won't change the tree type
- // to TYP_REF.
-
- if (!gtIsActiveCSE_Candidate(op2) &&
- ((op1->TypeGet() == tree->TypeGet()) || (op1->TypeGet() != TYP_REF)))
- {
- if (fgGlobalMorph && (op2->OperGet() == GT_CNS_INT) &&
- (op2->gtIntCon.gtFieldSeq != nullptr) &&
- (op2->gtIntCon.gtFieldSeq != FieldSeqStore::NotAField()))
- {
- fgAddFieldSeqForZeroOffset(op1, op2->gtIntCon.gtFieldSeq);
- }
-
- DEBUG_DESTROY_NODE(op2);
- DEBUG_DESTROY_NODE(tree);
+ DEBUG_DESTROY_NODE(op2);
+ DEBUG_DESTROY_NODE(tree);
- return op1;
- }
+ return op1;
}
}
}
- /* See if we can fold GT_MUL by const nodes */
- else if (oper == GT_MUL && op2->IsCnsIntOrI() && !optValnumCSE_phase)
- {
+ }
+ /* See if we can fold GT_MUL by const nodes */
+ else if (oper == GT_MUL && op2->IsCnsIntOrI() && !optValnumCSE_phase)
+ {
#ifndef _TARGET_64BIT_
- noway_assert(typ <= TYP_UINT);
+ noway_assert(typ <= TYP_UINT);
#endif // _TARGET_64BIT_
- noway_assert(!tree->gtOverflow());
+ noway_assert(!tree->gtOverflow());
+
+ ssize_t mult = op2->gtIntConCommon.IconValue();
+ bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->gtIntCon.gtFieldSeq != nullptr &&
+ op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq();
+
+ assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr);
+
+ if (mult == 0)
+ {
+ // We may be able to throw away op1 (unless it has side-effects)
+
+ if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
+ {
+ DEBUG_DESTROY_NODE(op1);
+ DEBUG_DESTROY_NODE(tree);
+ return op2; // Just return the "0" node
+ }
+
+ // We need to keep op1 for the side-effects. Hang it off
+ // a GT_COMMA node
- ssize_t mult = op2->gtIntConCommon.IconValue();
- bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->gtIntCon.gtFieldSeq != nullptr &&
- op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq();
+ tree->ChangeOper(GT_COMMA);
+ return tree;
+ }
- assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr);
+ size_t abs_mult = (mult >= 0) ? mult : -mult;
+ size_t lowestBit = genFindLowestBit(abs_mult);
+ bool changeToShift = false;
- if (mult == 0)
+ // is it a power of two? (positive or negative)
+ if (abs_mult == lowestBit)
+ {
+ // if negative negate (min-int does not need negation)
+ if (mult < 0 && mult != SSIZE_T_MIN)
{
- // We may be able to throw away op1 (unless it has side-effects)
+ tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_NEG, op1->gtType, op1);
+ fgMorphTreeDone(op1);
+ }
- if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
+ // If "op2" is a constant array index, the other multiplicand must be a constant.
+ // Transfer the annotation to the other one.
+ if (op2->OperGet() == GT_CNS_INT && op2->gtIntCon.gtFieldSeq != nullptr &&
+ op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
+ {
+ assert(op2->gtIntCon.gtFieldSeq->m_next == nullptr);
+ GenTreePtr otherOp = op1;
+ if (otherOp->OperGet() == GT_NEG)
{
- DEBUG_DESTROY_NODE(op1);
- DEBUG_DESTROY_NODE(tree);
- return op2; // Just return the "0" node
+ otherOp = otherOp->gtOp.gtOp1;
}
+ assert(otherOp->OperGet() == GT_CNS_INT);
+ assert(otherOp->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField());
+ otherOp->gtIntCon.gtFieldSeq = op2->gtIntCon.gtFieldSeq;
+ }
- // We need to keep op1 for the side-effects. Hang it off
- // a GT_COMMA node
-
- tree->ChangeOper(GT_COMMA);
- return tree;
+ if (abs_mult == 1)
+ {
+ DEBUG_DESTROY_NODE(op2);
+ DEBUG_DESTROY_NODE(tree);
+ return op1;
}
- size_t abs_mult = (mult >= 0) ? mult : -mult;
- size_t lowestBit = genFindLowestBit(abs_mult);
- bool changeToShift = false;
+ /* Change the multiplication into a shift by log2(val) bits */
+ op2->gtIntConCommon.SetIconValue(genLog2(abs_mult));
+ changeToShift = true;
+ }
+#if LEA_AVAILABLE
+ else if ((lowestBit > 1) && jitIsScaleIndexMul(lowestBit) && optAvoidIntMult())
+ {
+ int shift = genLog2(lowestBit);
+ ssize_t factor = abs_mult >> shift;
- // is it a power of two? (positive or negative)
- if (abs_mult == lowestBit)
+ if (factor == 3 || factor == 5 || factor == 9)
{
// if negative negate (min-int does not need negation)
if (mult < 0 && mult != SSIZE_T_MIN)
fgMorphTreeDone(op1);
}
- // If "op2" is a constant array index, the other multiplicand must be a constant.
- // Transfer the annotation to the other one.
- if (op2->OperGet() == GT_CNS_INT && op2->gtIntCon.gtFieldSeq != nullptr &&
- op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
+ GenTreePtr factorIcon = gtNewIconNode(factor, TYP_I_IMPL);
+ if (op2IsConstIndex)
{
- assert(op2->gtIntCon.gtFieldSeq->m_next == nullptr);
- GenTreePtr otherOp = op1;
- if (otherOp->OperGet() == GT_NEG)
- {
- otherOp = otherOp->gtOp.gtOp1;
- }
- assert(otherOp->OperGet() == GT_CNS_INT);
- assert(otherOp->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField());
- otherOp->gtIntCon.gtFieldSeq = op2->gtIntCon.gtFieldSeq;
+ factorIcon->AsIntCon()->gtFieldSeq =
+ GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
}
- if (abs_mult == 1)
- {
- DEBUG_DESTROY_NODE(op2);
- DEBUG_DESTROY_NODE(tree);
- return op1;
- }
+ // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift
+ tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_MUL, tree->gtType, op1, factorIcon);
+ fgMorphTreeDone(op1);
- /* Change the multiplication into a shift by log2(val) bits */
- op2->gtIntConCommon.SetIconValue(genLog2(abs_mult));
+ op2->gtIntConCommon.SetIconValue(shift);
changeToShift = true;
}
-#if LEA_AVAILABLE
- else if ((lowestBit > 1) && jitIsScaleIndexMul(lowestBit) && optAvoidIntMult())
- {
- int shift = genLog2(lowestBit);
- ssize_t factor = abs_mult >> shift;
-
- if (factor == 3 || factor == 5 || factor == 9)
- {
- // if negative negate (min-int does not need negation)
- if (mult < 0 && mult != SSIZE_T_MIN)
- {
- tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_NEG, op1->gtType, op1);
- fgMorphTreeDone(op1);
- }
-
- GenTreePtr factorIcon = gtNewIconNode(factor, TYP_I_IMPL);
- if (op2IsConstIndex)
- {
- factorIcon->AsIntCon()->gtFieldSeq =
- GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
- }
-
- // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift
- tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_MUL, tree->gtType, op1, factorIcon);
- fgMorphTreeDone(op1);
-
- op2->gtIntConCommon.SetIconValue(shift);
- changeToShift = true;
- }
- }
+ }
#endif // LEA_AVAILABLE
- if (changeToShift)
+ if (changeToShift)
+ {
+ // vnStore is null before the ValueNumber phase has run
+ if (vnStore != nullptr)
{
- // vnStore is null before the ValueNumber phase has run
- if (vnStore != nullptr)
- {
- // Update the ValueNumber for 'op2', as we just changed the constant
- fgValueNumberTreeConst(op2);
- }
- oper = GT_LSH;
- // Keep the old ValueNumber for 'tree' as the new expr
- // will still compute the same value as before
- tree->ChangeOper(oper, GenTree::PRESERVE_VN);
-
- goto DONE_MORPHING_CHILDREN;
+ // Update the ValueNumber for 'op2', as we just changed the constant
+ fgValueNumberTreeConst(op2);
}
- }
- else if (fgOperIsBitwiseRotationRoot(oper))
- {
- tree = fgRecognizeAndMorphBitwiseRotation(tree);
+ oper = GT_LSH;
+ // Keep the old ValueNumber for 'tree' as the new expr
+ // will still compute the same value as before
+ tree->ChangeOper(oper, GenTree::PRESERVE_VN);
- // fgRecognizeAndMorphBitwiseRotation may return a new tree
- oper = tree->OperGet();
- typ = tree->TypeGet();
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtOp.gtOp2;
+ goto DONE_MORPHING_CHILDREN;
}
+ }
+ else if (fgOperIsBitwiseRotationRoot(oper))
+ {
+ tree = fgRecognizeAndMorphBitwiseRotation(tree);
- break;
+ // fgRecognizeAndMorphBitwiseRotation may return a new tree
+ oper = tree->OperGet();
+ typ = tree->TypeGet();
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtOp.gtOp2;
+ }
+
+ break;
- case GT_CHS:
- case GT_NOT:
- case GT_NEG:
+ case GT_CHS:
+ case GT_NOT:
+ case GT_NEG:
- /* Any constant cases should have been folded earlier */
- noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase);
- break;
+ /* Any constant cases should have been folded earlier */
+ noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase);
+ break;
- case GT_CKFINITE:
+ case GT_CKFINITE:
- noway_assert(varTypeIsFloating(op1->TypeGet()));
+ noway_assert(varTypeIsFloating(op1->TypeGet()));
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN, fgPtrArgCntCur);
- break;
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN, fgPtrArgCntCur);
+ break;
- case GT_OBJ:
- // If we have GT_OBJ(GT_ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on
- // the GT_OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X
- // is a local or clsVar, even if it has been address-exposed.
- if (op1->OperGet() == GT_ADDR)
- {
- tree->gtFlags |= (op1->gtGetOp1()->gtFlags & GTF_GLOB_REF);
- }
+ case GT_OBJ:
+ // If we have GT_OBJ(GT_ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on
+ // the GT_OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X
+ // is a local or clsVar, even if it has been address-exposed.
+ if (op1->OperGet() == GT_ADDR)
+ {
+ tree->gtFlags |= (op1->gtGetOp1()->gtFlags & GTF_GLOB_REF);
+ }
+ break;
+
+ case GT_IND:
+
+ // Can not remove a GT_IND if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(tree))
+ {
break;
+ }
- case GT_IND:
+ bool foldAndReturnTemp;
+ foldAndReturnTemp = false;
+ temp = nullptr;
+ ival1 = 0;
- // Can not remove a GT_IND if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(tree))
+ /* Try to Fold *(&X) into X */
+ if (op1->gtOper == GT_ADDR)
+ {
+ // Can not remove a GT_ADDR if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(op1))
{
break;
}
- bool foldAndReturnTemp;
- foldAndReturnTemp = false;
- temp = nullptr;
- ival1 = 0;
+ temp = op1->gtOp.gtOp1; // X
- /* Try to Fold *(&X) into X */
- if (op1->gtOper == GT_ADDR)
+ // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that
+ // they are the *same* struct type. In fact, they almost certainly aren't. If the
+ // address has an associated field sequence, that identifies this case; go through
+ // the "lcl_fld" path rather than this one.
+ FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below.
+ if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
{
- // Can not remove a GT_ADDR if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(op1))
- {
- break;
- }
-
- temp = op1->gtOp.gtOp1; // X
+ foldAndReturnTemp = true;
+ }
+ else if (temp->OperIsLocal())
+ {
+ unsigned lclNum = temp->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[lclNum];
- // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that
- // they are the *same* struct type. In fact, they almost certainly aren't. If the
- // address has an associated field sequence, that identifies this case; go through
- // the "lcl_fld" path rather than this one.
- FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below.
- if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
- {
- foldAndReturnTemp = true;
- }
- else if (temp->OperIsLocal())
+ // We will try to optimize when we have a promoted struct promoted with a zero lvFldOffset
+ if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
{
- unsigned lclNum = temp->gtLclVarCommon.gtLclNum;
- LclVarDsc* varDsc = &lvaTable[lclNum];
+ noway_assert(varTypeIsStruct(varDsc));
- // We will try to optimize when we have a promoted struct promoted with a zero lvFldOffset
- if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
+ // We will try to optimize when we have a single field struct that is being struct promoted
+ if (varDsc->lvFieldCnt == 1)
{
- noway_assert(varTypeIsStruct(varDsc));
+ unsigned lclNumFld = varDsc->lvFieldLclStart;
+ // just grab the promoted field
+ LclVarDsc* fieldVarDsc = &lvaTable[lclNumFld];
- // We will try to optimize when we have a single field struct that is being struct promoted
- if (varDsc->lvFieldCnt == 1)
+ // Also make sure that the tree type matches the fieldVarType and that its lvFldOffset
+ // is zero
+ if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
{
- unsigned lclNumFld = varDsc->lvFieldLclStart;
- // just grab the promoted field
- LclVarDsc* fieldVarDsc = &lvaTable[lclNumFld];
+ // We can just use the existing promoted field LclNum
+ temp->gtLclVarCommon.SetLclNum(lclNumFld);
+ temp->gtType = fieldVarDsc->TypeGet();
- // Also make sure that the tree type matches the fieldVarType and that it's lvFldOffset
- // is zero
- if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
- {
- // We can just use the existing promoted field LclNum
- temp->gtLclVarCommon.SetLclNum(lclNumFld);
- temp->gtType = fieldVarDsc->TypeGet();
-
- foldAndReturnTemp = true;
- }
+ foldAndReturnTemp = true;
}
}
- // If the type of the IND (typ) is a "small int", and the type of the local has the
- // same width, then we can reduce to just the local variable -- it will be
- // correctly normalized, and signed/unsigned differences won't matter.
- //
- // The below transformation cannot be applied if the local var needs to be normalized on load.
- else if (varTypeIsSmall(typ) && (genTypeSize(lvaTable[lclNum].lvType) == genTypeSize(typ)) &&
- !lvaTable[lclNum].lvNormalizeOnLoad())
- {
- tree->gtType = typ = temp->TypeGet();
- foldAndReturnTemp = true;
- }
- else
- {
- // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
- // nullptr)
- assert(fieldSeq == nullptr);
- bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
- assert(b || fieldSeq == nullptr);
+ }
+ // If the type of the IND (typ) is a "small int", and the type of the local has the
+ // same width, then we can reduce to just the local variable -- it will be
+ // correctly normalized, and signed/unsigned differences won't matter.
+ //
+ // The below transformation cannot be applied if the local var needs to be normalized on load.
+ else if (varTypeIsSmall(typ) && (genTypeSize(lvaTable[lclNum].lvType) == genTypeSize(typ)) &&
+ !lvaTable[lclNum].lvNormalizeOnLoad())
+ {
+ tree->gtType = typ = temp->TypeGet();
+ foldAndReturnTemp = true;
+ }
+ else
+ {
+ // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
+ // nullptr)
+ assert(fieldSeq == nullptr);
+ bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
+ assert(b || fieldSeq == nullptr);
- if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
- {
- // Append the field sequence, change the type.
- temp->AsLclFld()->gtFieldSeq =
- GetFieldSeqStore()->Append(temp->AsLclFld()->gtFieldSeq, fieldSeq);
- temp->gtType = typ;
+ if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
+ {
+ // Append the field sequence, change the type.
+ temp->AsLclFld()->gtFieldSeq =
+ GetFieldSeqStore()->Append(temp->AsLclFld()->gtFieldSeq, fieldSeq);
+ temp->gtType = typ;
- foldAndReturnTemp = true;
- }
+ foldAndReturnTemp = true;
}
- // Otherwise will will fold this into a GT_LCL_FLD below
- // where we check (temp != nullptr)
}
- else // !temp->OperIsLocal()
+ // Otherwise we will fold this into a GT_LCL_FLD below
+ // where we check (temp != nullptr)
+ }
+ else // !temp->OperIsLocal()
+ {
+ // We don't try to fold away the GT_IND/GT_ADDR for this case
+ temp = nullptr;
+ }
+ }
+ else if (op1->OperGet() == GT_ADD)
+ {
+ /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
+
+ if (op1->gtOp.gtOp1->OperGet() == GT_ADDR && op1->gtOp.gtOp2->OperGet() == GT_CNS_INT &&
+ (!(opts.MinOpts() || opts.compDbgCode)))
+ {
+ // No overflow arithmetic with pointers
+ noway_assert(!op1->gtOverflow());
+
+ temp = op1->gtOp.gtOp1->gtOp.gtOp1;
+ if (!temp->OperIsLocal())
{
- // We don't try to fold away the GT_IND/GT_ADDR for this case
temp = nullptr;
+ break;
}
- }
- else if (op1->OperGet() == GT_ADD)
- {
- /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
- if (op1->gtOp.gtOp1->OperGet() == GT_ADDR && op1->gtOp.gtOp2->OperGet() == GT_CNS_INT &&
- (!(opts.MinOpts() || opts.compDbgCode)))
+ // Can not remove the GT_ADDR if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(op1->gtOp.gtOp1))
{
- // No overflow arithmetic with pointers
- noway_assert(!op1->gtOverflow());
+ break;
+ }
- temp = op1->gtOp.gtOp1->gtOp.gtOp1;
- if (!temp->OperIsLocal())
- {
- temp = nullptr;
- break;
- }
+ ival1 = op1->gtOp.gtOp2->gtIntCon.gtIconVal;
+ fieldSeq = op1->gtOp.gtOp2->gtIntCon.gtFieldSeq;
+
+ // Does the address have an associated zero-offset field sequence?
+ FieldSeqNode* addrFieldSeq = nullptr;
+ if (GetZeroOffsetFieldMap()->Lookup(op1->gtOp.gtOp1, &addrFieldSeq))
+ {
+ fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq);
+ }
- // Can not remove the GT_ADDR if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(op1->gtOp.gtOp1))
+ if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT)
+ {
+ noway_assert(!varTypeIsGC(temp->TypeGet()));
+ foldAndReturnTemp = true;
+ }
+ else
+ {
+ // The emitter can't handle large offsets
+ if (ival1 != (unsigned short)ival1)
{
break;
}
- ival1 = op1->gtOp.gtOp2->gtIntCon.gtIconVal;
- fieldSeq = op1->gtOp.gtOp2->gtIntCon.gtFieldSeq;
-
- // Does the address have an associated zero-offset field sequence?
- FieldSeqNode* addrFieldSeq = nullptr;
- if (GetZeroOffsetFieldMap()->Lookup(op1->gtOp.gtOp1, &addrFieldSeq))
+ // The emitter can get confused by invalid offsets
+ if (ival1 >= Compiler::lvaLclSize(temp->gtLclVarCommon.gtLclNum))
{
- fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq);
+ break;
}
- if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT)
- {
- noway_assert(!varTypeIsGC(temp->TypeGet()));
- foldAndReturnTemp = true;
- }
- else
+#ifdef _TARGET_ARM_
+ // Check for a LclVar TYP_STRUCT with misalignment on a Floating Point field
+ //
+ if (varTypeIsFloating(typ))
{
- // The emitter can't handle large offsets
- if (ival1 != (unsigned short)ival1)
- {
- break;
- }
-
- // The emitter can get confused by invalid offsets
- if (ival1 >= Compiler::lvaLclSize(temp->gtLclVarCommon.gtLclNum))
+ if ((ival1 % emitTypeSize(typ)) != 0)
{
+ tree->gtFlags |= GTF_IND_UNALIGNED;
break;
}
-
-#ifdef _TARGET_ARM_
- // Check for a LclVar TYP_STRUCT with misalignment on a Floating Point field
- //
- if (varTypeIsFloating(typ))
- {
- if ((ival1 % emitTypeSize(typ)) != 0)
- {
- tree->gtFlags |= GTF_IND_UNALIGNED;
- break;
- }
- }
-#endif
}
- // Now we can fold this into a GT_LCL_FLD below
- // where we check (temp != nullptr)
+#endif
}
+ // Now we can fold this into a GT_LCL_FLD below
+ // where we check (temp != nullptr)
}
+ }
- // At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging:
- // - We may have a load of a local where the load has a different type than the local
- // - We may have a load of a local plus an offset
- //
- // In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and
- // offset if doing so is legal. The only cases in which this transformation is illegal are if the load
- // begins before the local or if the load extends beyond the end of the local (i.e. if the load is
- // out-of-bounds w.r.t. the local).
- if ((temp != nullptr) && !foldAndReturnTemp)
- {
- assert(temp->OperIsLocal());
+ // At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging:
+ // - We may have a load of a local where the load has a different type than the local
+ // - We may have a load of a local plus an offset
+ //
+ // In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and
+ // offset if doing so is legal. The only cases in which this transformation is illegal are if the load
+ // begins before the local or if the load extends beyond the end of the local (i.e. if the load is
+ // out-of-bounds w.r.t. the local).
+ if ((temp != nullptr) && !foldAndReturnTemp)
+ {
+ assert(temp->OperIsLocal());
- const unsigned lclNum = temp->AsLclVarCommon()->gtLclNum;
- LclVarDsc* const varDsc = &lvaTable[lclNum];
+ const unsigned lclNum = temp->AsLclVarCommon()->gtLclNum;
+ LclVarDsc* const varDsc = &lvaTable[lclNum];
- const var_types tempTyp = temp->TypeGet();
- const bool useExactSize =
- varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK);
- const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp);
+ const var_types tempTyp = temp->TypeGet();
+ const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK);
+ const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp);
- // Make sure we do not enregister this lclVar.
- lvaSetVarDoNotEnregister(lclNum DEBUGARG(DNER_LocalField));
+ // Make sure we do not enregister this lclVar.
+ lvaSetVarDoNotEnregister(lclNum DEBUGARG(DNER_LocalField));
- // If the size of the load is greater than the size of the lclVar, we cannot fold this access into
- // a lclFld: the access represented by an lclFld node must begin at or after the start of the
- // lclVar and must not extend beyond the end of the lclVar.
- if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize))
+ // If the size of the load is greater than the size of the lclVar, we cannot fold this access into
+ // a lclFld: the access represented by an lclFld node must begin at or after the start of the
+ // lclVar and must not extend beyond the end of the lclVar.
+ if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize))
+ {
+ // We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival'
+ // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival'
+ // Then we change the type of the GT_LCL_FLD to match the orginal GT_IND type.
+ //
+ if (temp->OperGet() == GT_LCL_FLD)
+ {
+ temp->AsLclFld()->gtLclOffs += (unsigned short)ival1;
+ temp->AsLclFld()->gtFieldSeq =
+ GetFieldSeqStore()->Append(temp->AsLclFld()->gtFieldSeq, fieldSeq);
+ }
+ else
{
- // We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival'
- // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival'
- // Then we change the type of the GT_LCL_FLD to match the orginal GT_IND type.
- //
- if (temp->OperGet() == GT_LCL_FLD)
- {
- temp->AsLclFld()->gtLclOffs += (unsigned short)ival1;
- temp->AsLclFld()->gtFieldSeq =
- GetFieldSeqStore()->Append(temp->AsLclFld()->gtFieldSeq, fieldSeq);
- }
- else
- {
- temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField"...
- temp->AsLclFld()->gtLclOffs = (unsigned short)ival1;
- if (fieldSeq != nullptr)
- { // If it does represent a field, note that.
- temp->AsLclFld()->gtFieldSeq = fieldSeq;
- }
+ temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField"...
+ temp->AsLclFld()->gtLclOffs = (unsigned short)ival1;
+ if (fieldSeq != nullptr)
+ { // If it does represent a field, note that.
+ temp->AsLclFld()->gtFieldSeq = fieldSeq;
}
- temp->gtType = tree->gtType;
- foldAndReturnTemp = true;
}
+ temp->gtType = tree->gtType;
+ foldAndReturnTemp = true;
}
+ }
- if (foldAndReturnTemp)
- {
- assert(temp != nullptr);
- assert(temp->TypeGet() == typ);
- assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR));
+ if (foldAndReturnTemp)
+ {
+ assert(temp != nullptr);
+ assert(temp->TypeGet() == typ);
+ assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR));
- // Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for
- // 'temp' because a GT_ADDR always marks it for its operand.
- temp->gtFlags &= ~GTF_DONT_CSE;
- temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE);
+ // Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for
+ // 'temp' because a GT_ADDR always marks it for its operand.
+ temp->gtFlags &= ~GTF_DONT_CSE;
+ temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE);
- if (op1->OperGet() == GT_ADD)
- {
- DEBUG_DESTROY_NODE(op1->gtOp.gtOp1); // GT_ADDR
- DEBUG_DESTROY_NODE(op1->gtOp.gtOp2); // GT_CNS_INT
- }
- DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR
- DEBUG_DESTROY_NODE(tree); // GT_IND
+ if (op1->OperGet() == GT_ADD)
+ {
+ DEBUG_DESTROY_NODE(op1->gtOp.gtOp1); // GT_ADDR
+ DEBUG_DESTROY_NODE(op1->gtOp.gtOp2); // GT_CNS_INT
+ }
+ DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR
+ DEBUG_DESTROY_NODE(tree); // GT_IND
- // If the result of the fold is a local var, we may need to perform further adjustments e.g. for
- // normalization.
- if (temp->OperIs(GT_LCL_VAR))
- {
+ // If the result of the fold is a local var, we may need to perform further adjustments e.g. for
+ // normalization.
+ if (temp->OperIs(GT_LCL_VAR))
+ {
#ifdef DEBUG
- // We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear
- // and the node in question must have this bit set (as it has already been morphed).
- temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
+ // We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear
+ // and the node in question must have this bit set (as it has already been morphed).
+ temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
- const bool forceRemorph = true;
- temp = fgMorphLocalVar(temp, forceRemorph);
+ const bool forceRemorph = true;
+ temp = fgMorphLocalVar(temp, forceRemorph);
#ifdef DEBUG
- // We then set this flag on `temp` because `fgMorhpLocalVar` may not set it itself, and the
- // caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function
- // returns.
- temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ // We then set this flag on `temp` because `fgMorhpLocalVar` may not set it itself, and the
+ // caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function
+ // returns.
+ temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
- }
-
- return temp;
}
- // Only do this optimization when we are in the global optimizer. Doing this after value numbering
- // could result in an invalid value number for the newly generated GT_IND node.
- if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph)
- {
- // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)).
- // TBD: this transformation is currently necessary for correctness -- it might
- // be good to analyze the failures that result if we don't do this, and fix them
- // in other ways. Ideally, this should be optional.
- GenTreePtr commaNode = op1;
- unsigned treeFlags = tree->gtFlags;
- commaNode->gtType = typ;
- commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is
- // dangerous, clear the GTF_REVERSE_OPS at
- // least.
-#ifdef DEBUG
- commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
-#endif
- while (commaNode->gtOp.gtOp2->gtOper == GT_COMMA)
- {
- commaNode = commaNode->gtOp.gtOp2;
- commaNode->gtType = typ;
- commaNode->gtFlags =
- (treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG); // Bashing the GT_COMMA flags here is
+ return temp;
+ }
+
+ // Only do this optimization when we are in the global optimizer. Doing this after value numbering
+ // could result in an invalid value number for the newly generated GT_IND node.
+ if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph)
+ {
+ // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)).
+ // TBD: this transformation is currently necessary for correctness -- it might
+ // be good to analyze the failures that result if we don't do this, and fix them
+ // in other ways. Ideally, this should be optional.
+ GenTreePtr commaNode = op1;
+ unsigned treeFlags = tree->gtFlags;
+ commaNode->gtType = typ;
+ commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is
// dangerous, clear the GTF_REVERSE_OPS at
// least.
- commaNode->gtFlags |=
- ((commaNode->gtOp.gtOp1->gtFlags & GTF_ASG) | (commaNode->gtOp.gtOp2->gtFlags & GTF_ASG));
#ifdef DEBUG
- commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- }
- bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0;
- ArrayInfo arrInfo;
- if (wasArrIndex)
- {
- bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
- assert(b);
- GetArrayInfoMap()->Remove(tree);
- }
- tree = op1;
- GenTree* addr = commaNode->gtOp.gtOp2;
- op1 = gtNewIndir(typ, addr);
- // This is very conservative
- op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING;
- op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
-
- if (wasArrIndex)
- {
- GetArrayInfoMap()->Set(op1, arrInfo);
- }
+ while (commaNode->gtOp.gtOp2->gtOper == GT_COMMA)
+ {
+ commaNode = commaNode->gtOp.gtOp2;
+ commaNode->gtType = typ;
+ commaNode->gtFlags =
+ (treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG); // Bashing the GT_COMMA flags here is
+ // dangerous, clear the GTF_REVERSE_OPS at
+ // least.
+ commaNode->gtFlags |=
+ ((commaNode->gtOp.gtOp1->gtFlags & GTF_ASG) | (commaNode->gtOp.gtOp2->gtFlags & GTF_ASG));
#ifdef DEBUG
- op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- commaNode->gtOp.gtOp2 = op1;
- commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
- return tree;
}
-
- break;
-
- case GT_ADDR:
-
- // Can not remove op1 if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(op1))
+ bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0;
+ ArrayInfo arrInfo;
+ if (wasArrIndex)
{
- break;
+ bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
+ assert(b);
+ GetArrayInfoMap()->Remove(tree);
}
+ tree = op1;
+ GenTree* addr = commaNode->gtOp.gtOp2;
+ op1 = gtNewIndir(typ, addr);
+ // This is very conservative
+ op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING;
+ op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
- if (op1->OperGet() == GT_IND)
+ if (wasArrIndex)
{
- if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0)
- {
- // Can not remove a GT_ADDR if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(tree))
- {
- break;
- }
+ GetArrayInfoMap()->Set(op1, arrInfo);
+ }
+#ifdef DEBUG
+ op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+#endif
+ commaNode->gtOp.gtOp2 = op1;
+ commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
+ return tree;
+ }
- // Perform the transform ADDR(IND(...)) == (...).
- GenTreePtr addr = op1->gtOp.gtOp1;
+ break;
- noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
+ case GT_ADDR:
- DEBUG_DESTROY_NODE(op1);
- DEBUG_DESTROY_NODE(tree);
+ // Can not remove op1 if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(op1))
+ {
+ break;
+ }
- return addr;
- }
- }
- else if (op1->OperGet() == GT_OBJ)
+ if (op1->OperGet() == GT_IND)
+ {
+ if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
break;
}
- // Perform the transform ADDR(OBJ(...)) == (...).
- GenTreePtr addr = op1->AsObj()->Addr();
+ // Perform the transform ADDR(IND(...)) == (...).
+ GenTreePtr addr = op1->gtOp.gtOp1;
noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
return addr;
}
- else if (op1->gtOper == GT_CAST)
+ }
+ else if (op1->OperGet() == GT_OBJ)
+ {
+ // Can not remove a GT_ADDR if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(tree))
+ {
+ break;
+ }
+
+ // Perform the transform ADDR(OBJ(...)) == (...).
+ GenTreePtr addr = op1->AsObj()->Addr();
+
+ noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
+
+ DEBUG_DESTROY_NODE(op1);
+ DEBUG_DESTROY_NODE(tree);
+
+ return addr;
+ }
+ else if (op1->gtOper == GT_CAST)
+ {
+ GenTreePtr casting = op1->gtCast.CastOp();
+ if (casting->gtOper == GT_LCL_VAR || casting->gtOper == GT_CLS_VAR)
{
- GenTreePtr casting = op1->gtCast.CastOp();
- if (casting->gtOper == GT_LCL_VAR || casting->gtOper == GT_CLS_VAR)
- {
- DEBUG_DESTROY_NODE(op1);
- tree->gtOp.gtOp1 = op1 = casting;
- }
+ DEBUG_DESTROY_NODE(op1);
+ tree->gtOp.gtOp1 = op1 = casting;
}
- else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase)
+ }
+ else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase)
+ {
+ // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)).
+ // (Be sure to mark "z" as an l-value...)
+ GenTreePtr commaNode = op1;
+ while (commaNode->gtOp.gtOp2->gtOper == GT_COMMA)
{
- // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)).
- // (Be sure to mark "z" as an l-value...)
- GenTreePtr commaNode = op1;
- while (commaNode->gtOp.gtOp2->gtOper == GT_COMMA)
- {
- commaNode = commaNode->gtOp.gtOp2;
- }
- // The top-level addr might be annotated with a zeroOffset field.
- FieldSeqNode* zeroFieldSeq = nullptr;
- bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq);
- tree = op1;
- commaNode->gtOp.gtOp2->gtFlags |= GTF_DONT_CSE;
-
- // If the node we're about to put under a GT_ADDR is an indirection, it
- // doesn't need to be materialized, since we only want the addressing mode. Because
- // of this, this GT_IND is not a faulting indirection and we don't have to extract it
- // as a side effect.
- GenTree* commaOp2 = commaNode->gtOp.gtOp2;
- if (commaOp2->OperIsBlk())
- {
- commaOp2 = fgMorphBlkToInd(commaOp2->AsBlk(), commaOp2->TypeGet());
- }
- if (commaOp2->gtOper == GT_IND)
- {
- commaOp2->gtFlags |= GTF_IND_NONFAULTING;
- commaOp2->gtFlags &= ~GTF_EXCEPT;
- commaOp2->gtFlags |= (commaOp2->gtOp.gtOp1->gtFlags & GTF_EXCEPT);
- }
+ commaNode = commaNode->gtOp.gtOp2;
+ }
+ // The top-level addr might be annotated with a zeroOffset field.
+ FieldSeqNode* zeroFieldSeq = nullptr;
+ bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq);
+ tree = op1;
+ commaNode->gtOp.gtOp2->gtFlags |= GTF_DONT_CSE;
- op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
+ // If the node we're about to put under a GT_ADDR is an indirection, it
+ // doesn't need to be materialized, since we only want the addressing mode. Because
+ // of this, this GT_IND is not a faulting indirection and we don't have to extract it
+ // as a side effect.
+ GenTree* commaOp2 = commaNode->gtOp.gtOp2;
+ if (commaOp2->OperIsBlk())
+ {
+ commaOp2 = fgMorphBlkToInd(commaOp2->AsBlk(), commaOp2->TypeGet());
+ }
+ if (commaOp2->gtOper == GT_IND)
+ {
+ commaOp2->gtFlags |= GTF_IND_NONFAULTING;
+ commaOp2->gtFlags &= ~GTF_EXCEPT;
+ commaOp2->gtFlags |= (commaOp2->gtOp.gtOp1->gtFlags & GTF_EXCEPT);
+ }
- if (isZeroOffset)
- {
- // Transfer the annotation to the new GT_ADDR node.
- GetZeroOffsetFieldMap()->Set(op1, zeroFieldSeq);
- }
- commaNode->gtOp.gtOp2 = op1;
- // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform
- // might give op1 a type different from byref (like, say, native int). So now go back and give
- // all the comma nodes the type of op1.
- // TODO: the comma flag update below is conservative and can be improved.
- // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to
- // get rid of some of the the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF).
- commaNode = tree;
- while (commaNode->gtOper == GT_COMMA)
- {
- commaNode->gtType = op1->gtType;
- commaNode->gtFlags |= op1->gtFlags;
+ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
+
+ if (isZeroOffset)
+ {
+ // Transfer the annotation to the new GT_ADDR node.
+ GetZeroOffsetFieldMap()->Set(op1, zeroFieldSeq);
+ }
+ commaNode->gtOp.gtOp2 = op1;
+ // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform
+ // might give op1 a type different from byref (like, say, native int). So now go back and give
+ // all the comma nodes the type of op1.
+ // TODO: the comma flag update below is conservative and can be improved.
+ // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to
+ // get rid of some of the the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF).
+ commaNode = tree;
+ while (commaNode->gtOper == GT_COMMA)
+ {
+ commaNode->gtType = op1->gtType;
+ commaNode->gtFlags |= op1->gtFlags;
#ifdef DEBUG
- commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- commaNode = commaNode->gtOp.gtOp2;
- }
+ commaNode = commaNode->gtOp.gtOp2;
+ }
- tree->gtFlags &= ~GTF_EXCEPT;
+ tree->gtFlags &= ~GTF_EXCEPT;
- // Propagate the new flags
- tree->gtFlags |= (tree->gtOp.gtOp1->gtFlags & GTF_EXCEPT);
- tree->gtFlags |= (tree->gtOp.gtOp2->gtFlags & GTF_EXCEPT);
+ // Propagate the new flags
+ tree->gtFlags |= (tree->gtOp.gtOp1->gtFlags & GTF_EXCEPT);
+ tree->gtFlags |= (tree->gtOp.gtOp2->gtFlags & GTF_EXCEPT);
- return tree;
- }
+ return tree;
+ }
- /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
- op1->gtFlags |= GTF_DONT_CSE;
- break;
+ /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
+ op1->gtFlags |= GTF_DONT_CSE;
+ break;
- case GT_COLON:
- if (fgGlobalMorph)
- {
- /* Mark the nodes that are conditionally executed */
- fgWalkTreePre(&tree, gtMarkColonCond);
- }
- /* Since we're doing this postorder we clear this if it got set by a child */
- fgRemoveRestOfBlock = false;
- break;
+ case GT_COLON:
+ if (fgGlobalMorph)
+ {
+ /* Mark the nodes that are conditionally executed */
+ fgWalkTreePre(&tree, gtMarkColonCond);
+ }
+ /* Since we're doing this postorder we clear this if it got set by a child */
+ fgRemoveRestOfBlock = false;
+ break;
- case GT_COMMA:
+ case GT_COMMA:
+
+ /* Special case: trees that don't produce a value */
+ if ((op2->OperKind() & GTK_ASGOP) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) ||
+ fgIsThrow(op2))
+ {
+ typ = tree->gtType = TYP_VOID;
+ }
+
+ // If we are in the Valuenum CSE phase then don't morph away anything as these
+ // nodes may have CSE defs/uses in them.
+ //
+ if (!optValnumCSE_phase)
+ {
+ // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this
+ // is all we need.
- /* Special case: trees that don't produce a value */
- if ((op2->OperKind() & GTK_ASGOP) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) ||
- fgIsThrow(op2))
+ GenTreePtr op1SideEffects = nullptr;
+ // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example)
+ // hoisted expressions in loops.
+ gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE));
+ if (op1SideEffects)
{
- typ = tree->gtType = TYP_VOID;
+ // Replace the left hand side with the side effect list.
+ tree->gtOp.gtOp1 = op1SideEffects;
+ tree->gtFlags |= (op1SideEffects->gtFlags & GTF_ALL_EFFECT);
}
-
- // If we are in the Valuenum CSE phase then don't morph away anything as these
- // nodes may have CSE defs/uses in them.
- //
- if (!optValnumCSE_phase)
+ else
{
- // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this
- // is all we need.
-
- GenTreePtr op1SideEffects = nullptr;
- // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example)
- // hoisted expressions in loops.
- gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE));
- if (op1SideEffects)
- {
- // Replace the left hand side with the side effect list.
- tree->gtOp.gtOp1 = op1SideEffects;
- tree->gtFlags |= (op1SideEffects->gtFlags & GTF_ALL_EFFECT);
- }
- else
+ /* The left operand is worthless, throw it away */
+ if (lvaLocalVarRefCounted)
{
- /* The left operand is worthless, throw it away */
- if (lvaLocalVarRefCounted)
- {
- lvaRecursiveDecRefCounts(op1);
- }
- op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
- DEBUG_DESTROY_NODE(tree);
- DEBUG_DESTROY_NODE(op1);
- return op2;
+ lvaRecursiveDecRefCounts(op1);
}
+ op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
+ DEBUG_DESTROY_NODE(tree);
+ DEBUG_DESTROY_NODE(op1);
+ return op2;
+ }
- /* If the right operand is just a void nop node, throw it away */
- if (op2->IsNothingNode() && op1->gtType == TYP_VOID)
- {
- op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
- DEBUG_DESTROY_NODE(tree);
- DEBUG_DESTROY_NODE(op2);
- return op1;
- }
+ /* If the right operand is just a void nop node, throw it away */
+ if (op2->IsNothingNode() && op1->gtType == TYP_VOID)
+ {
+ op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
+ DEBUG_DESTROY_NODE(tree);
+ DEBUG_DESTROY_NODE(op2);
+ return op1;
}
+ }
- break;
+ break;
- case GT_JTRUE:
+ case GT_JTRUE:
- /* Special case if fgRemoveRestOfBlock is set to true */
- if (fgRemoveRestOfBlock)
+ /* Special case if fgRemoveRestOfBlock is set to true */
+ if (fgRemoveRestOfBlock)
+ {
+ if (fgIsCommaThrow(op1, true))
{
- if (fgIsCommaThrow(op1, true))
- {
- GenTreePtr throwNode = op1->gtOp.gtOp1;
- noway_assert(throwNode->gtType == TYP_VOID);
+ GenTreePtr throwNode = op1->gtOp.gtOp1;
+ noway_assert(throwNode->gtType == TYP_VOID);
- return throwNode;
- }
+ return throwNode;
+ }
- noway_assert(op1->OperKind() & GTK_RELOP);
- noway_assert(op1->gtFlags & GTF_EXCEPT);
+ noway_assert(op1->OperKind() & GTK_RELOP);
+ noway_assert(op1->gtFlags & GTF_EXCEPT);
- // We need to keep op1 for the side-effects. Hang it off
- // a GT_COMMA node
+ // We need to keep op1 for the side-effects. Hang it off
+ // a GT_COMMA node
- tree->ChangeOper(GT_COMMA);
- tree->gtOp.gtOp2 = op2 = gtNewNothingNode();
+ tree->ChangeOper(GT_COMMA);
+ tree->gtOp.gtOp2 = op2 = gtNewNothingNode();
- // Additionally since we're eliminating the JTRUE
- // codegen won't like it if op1 is a RELOP of longs, floats or doubles.
- // So we change it into a GT_COMMA as well.
- op1->ChangeOper(GT_COMMA);
- op1->gtType = op1->gtOp.gtOp1->gtType;
+ // Additionally since we're eliminating the JTRUE
+ // codegen won't like it if op1 is a RELOP of longs, floats or doubles.
+ // So we change it into a GT_COMMA as well.
+ op1->ChangeOper(GT_COMMA);
+ op1->gtType = op1->gtOp.gtOp1->gtType;
- return tree;
- }
+ return tree;
+ }
- default:
- break;
- }
+ default:
+ break;
+ }
- assert(oper == tree->gtOper);
+ assert(oper == tree->gtOper);
- // If we are in the Valuenum CSE phase then don't morph away anything as these
- // nodes may have CSE defs/uses in them.
- //
- if (!optValnumCSE_phase && (oper != GT_ASG) && (oper != GT_COLON) && !tree->OperIsAnyList())
+ // If we are in the Valuenum CSE phase then don't morph away anything as these
+ // nodes may have CSE defs/uses in them.
+ //
+ if (!optValnumCSE_phase && (oper != GT_ASG) && (oper != GT_COLON) && !tree->OperIsAnyList())
+ {
+ /* Check for op1 as a GT_COMMA with a unconditional throw node */
+ if (op1 && fgIsCommaThrow(op1, true))
{
- /* Check for op1 as a GT_COMMA with a unconditional throw node */
- if (op1 && fgIsCommaThrow(op1, true))
+ if ((op1->gtFlags & GTF_COLON_COND) == 0)
{
- if ((op1->gtFlags & GTF_COLON_COND) == 0)
- {
- /* We can safely throw out the rest of the statements */
- fgRemoveRestOfBlock = true;
- }
+ /* We can safely throw out the rest of the statements */
+ fgRemoveRestOfBlock = true;
+ }
- GenTreePtr throwNode = op1->gtOp.gtOp1;
- noway_assert(throwNode->gtType == TYP_VOID);
+ GenTreePtr throwNode = op1->gtOp.gtOp1;
+ noway_assert(throwNode->gtType == TYP_VOID);
- if (oper == GT_COMMA)
- {
- /* Both tree and op1 are GT_COMMA nodes */
- /* Change the tree's op1 to the throw node: op1->gtOp.gtOp1 */
- tree->gtOp.gtOp1 = throwNode;
+ if (oper == GT_COMMA)
+ {
+ /* Both tree and op1 are GT_COMMA nodes */
+ /* Change the tree's op1 to the throw node: op1->gtOp.gtOp1 */
+ tree->gtOp.gtOp1 = throwNode;
- // Possibly reset the assignment flag
- if (((throwNode->gtFlags & GTF_ASG) == 0) && ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
- {
- tree->gtFlags &= ~GTF_ASG;
- }
+ // Possibly reset the assignment flag
+ if (((throwNode->gtFlags & GTF_ASG) == 0) && ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
+ {
+ tree->gtFlags &= ~GTF_ASG;
+ }
- return tree;
+ return tree;
+ }
+ else if (oper != GT_NOP)
+ {
+ if (genActualType(typ) == genActualType(op1->gtType))
+ {
+ /* The types match so, return the comma throw node as the new tree */
+ return op1;
}
- else if (oper != GT_NOP)
+ else
{
- if (genActualType(typ) == genActualType(op1->gtType))
+ if (typ == TYP_VOID)
{
- /* The types match so, return the comma throw node as the new tree */
- return op1;
+ // Return the throw node
+ return throwNode;
}
else
{
- if (typ == TYP_VOID)
+ GenTreePtr commaOp2 = op1->gtOp.gtOp2;
+
+ // need type of oper to be same as tree
+ if (typ == TYP_LONG)
+ {
+ commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
+ commaOp2->gtIntConCommon.SetLngValue(0);
+ /* Change the types of oper and commaOp2 to TYP_LONG */
+ op1->gtType = commaOp2->gtType = TYP_LONG;
+ }
+ else if (varTypeIsFloating(typ))
{
- // Return the throw node
- return throwNode;
+ commaOp2->ChangeOperConst(GT_CNS_DBL);
+ commaOp2->gtDblCon.gtDconVal = 0.0;
+ /* Change the types of oper and commaOp2 to TYP_DOUBLE */
+ op1->gtType = commaOp2->gtType = TYP_DOUBLE;
}
else
{
- GenTreePtr commaOp2 = op1->gtOp.gtOp2;
-
- // need type of oper to be same as tree
- if (typ == TYP_LONG)
- {
- commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
- commaOp2->gtIntConCommon.SetLngValue(0);
- /* Change the types of oper and commaOp2 to TYP_LONG */
- op1->gtType = commaOp2->gtType = TYP_LONG;
- }
- else if (varTypeIsFloating(typ))
- {
- commaOp2->ChangeOperConst(GT_CNS_DBL);
- commaOp2->gtDblCon.gtDconVal = 0.0;
- /* Change the types of oper and commaOp2 to TYP_DOUBLE */
- op1->gtType = commaOp2->gtType = TYP_DOUBLE;
- }
- else
- {
- commaOp2->ChangeOperConst(GT_CNS_INT);
- commaOp2->gtIntConCommon.SetIconValue(0);
- /* Change the types of oper and commaOp2 to TYP_INT */
- op1->gtType = commaOp2->gtType = TYP_INT;
- }
-
- /* Return the GT_COMMA node as the new tree */
- return op1;
+ commaOp2->ChangeOperConst(GT_CNS_INT);
+ commaOp2->gtIntConCommon.SetIconValue(0);
+ /* Change the types of oper and commaOp2 to TYP_INT */
+ op1->gtType = commaOp2->gtType = TYP_INT;
}
+
+ /* Return the GT_COMMA node as the new tree */
+ return op1;
}
}
}
+ }
- /* Check for op2 as a GT_COMMA with a unconditional throw */
+ /* Check for op2 as a GT_COMMA with a unconditional throw */
- if (op2 && fgIsCommaThrow(op2, true))
+ if (op2 && fgIsCommaThrow(op2, true))
+ {
+ if ((op2->gtFlags & GTF_COLON_COND) == 0)
+ {
+ /* We can safely throw out the rest of the statements */
+ fgRemoveRestOfBlock = true;
+ }
+
+ // If op1 has no side-effects
+ if ((op1->gtFlags & GTF_ALL_EFFECT) == 0)
{
- if ((op2->gtFlags & GTF_COLON_COND) == 0)
+ // If tree is an asg node
+ if (tree->OperIsAssignment())
{
- /* We can safely throw out the rest of the statements */
- fgRemoveRestOfBlock = true;
+ /* Return the throw node as the new tree */
+ return op2->gtOp.gtOp1;
}
- // If op1 has no side-effects
- if ((op1->gtFlags & GTF_ALL_EFFECT) == 0)
+ if (tree->OperGet() == GT_ARR_BOUNDS_CHECK)
{
- // If tree is an asg node
- if (tree->OperIsAssignment())
- {
- /* Return the throw node as the new tree */
- return op2->gtOp.gtOp1;
- }
-
- if (tree->OperGet() == GT_ARR_BOUNDS_CHECK)
- {
- /* Return the throw node as the new tree */
- return op2->gtOp.gtOp1;
- }
+ /* Return the throw node as the new tree */
+ return op2->gtOp.gtOp1;
+ }
- // If tree is a comma node
- if (tree->OperGet() == GT_COMMA)
- {
- /* Return the throw node as the new tree */
- return op2->gtOp.gtOp1;
- }
+ // If tree is a comma node
+ if (tree->OperGet() == GT_COMMA)
+ {
+ /* Return the throw node as the new tree */
+ return op2->gtOp.gtOp1;
+ }
- /* for the shift nodes the type of op2 can differ from the tree type */
- if ((typ == TYP_LONG) && (genActualType(op2->gtType) == TYP_INT))
- {
- noway_assert(GenTree::OperIsShiftOrRotate(oper));
+ /* for the shift nodes the type of op2 can differ from the tree type */
+ if ((typ == TYP_LONG) && (genActualType(op2->gtType) == TYP_INT))
+ {
+ noway_assert(GenTree::OperIsShiftOrRotate(oper));
- GenTreePtr commaOp2 = op2->gtOp.gtOp2;
+ GenTreePtr commaOp2 = op2->gtOp.gtOp2;
- commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
- commaOp2->gtIntConCommon.SetLngValue(0);
+ commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
+ commaOp2->gtIntConCommon.SetLngValue(0);
- /* Change the types of oper and commaOp2 to TYP_LONG */
- op2->gtType = commaOp2->gtType = TYP_LONG;
- }
+ /* Change the types of oper and commaOp2 to TYP_LONG */
+ op2->gtType = commaOp2->gtType = TYP_LONG;
+ }
- if ((genActualType(typ) == TYP_INT) &&
- (genActualType(op2->gtType) == TYP_LONG || varTypeIsFloating(op2->TypeGet())))
- {
- // An example case is comparison (say GT_GT) of two longs or floating point values.
+ if ((genActualType(typ) == TYP_INT) &&
+ (genActualType(op2->gtType) == TYP_LONG || varTypeIsFloating(op2->TypeGet())))
+ {
+ // An example case is comparison (say GT_GT) of two longs or floating point values.
- GenTreePtr commaOp2 = op2->gtOp.gtOp2;
+ GenTreePtr commaOp2 = op2->gtOp.gtOp2;
- commaOp2->ChangeOperConst(GT_CNS_INT);
- commaOp2->gtIntCon.gtIconVal = 0;
- /* Change the types of oper and commaOp2 to TYP_INT */
- op2->gtType = commaOp2->gtType = TYP_INT;
- }
+ commaOp2->ChangeOperConst(GT_CNS_INT);
+ commaOp2->gtIntCon.gtIconVal = 0;
+ /* Change the types of oper and commaOp2 to TYP_INT */
+ op2->gtType = commaOp2->gtType = TYP_INT;
+ }
- if ((typ == TYP_BYREF) && (genActualType(op2->gtType) == TYP_I_IMPL))
- {
- noway_assert(tree->OperGet() == GT_ADD);
+ if ((typ == TYP_BYREF) && (genActualType(op2->gtType) == TYP_I_IMPL))
+ {
+ noway_assert(tree->OperGet() == GT_ADD);
- GenTreePtr commaOp2 = op2->gtOp.gtOp2;
+ GenTreePtr commaOp2 = op2->gtOp.gtOp2;
- commaOp2->ChangeOperConst(GT_CNS_INT);
- commaOp2->gtIntCon.gtIconVal = 0;
- /* Change the types of oper and commaOp2 to TYP_BYREF */
- op2->gtType = commaOp2->gtType = TYP_BYREF;
- }
+ commaOp2->ChangeOperConst(GT_CNS_INT);
+ commaOp2->gtIntCon.gtIconVal = 0;
+ /* Change the types of oper and commaOp2 to TYP_BYREF */
+ op2->gtType = commaOp2->gtType = TYP_BYREF;
+ }
- /* types should now match */
- noway_assert((genActualType(typ) == genActualType(op2->gtType)));
+ /* types should now match */
+ noway_assert((genActualType(typ) == genActualType(op2->gtType)));
- /* Return the GT_COMMA node as the new tree */
- return op2;
- }
+ /* Return the GT_COMMA node as the new tree */
+ return op2;
}
}
+ }
- /*-------------------------------------------------------------------------
- * Optional morphing is done if tree transformations is permitted
- */
+ /*-------------------------------------------------------------------------
+ * Optional morphing is done if tree transformations is permitted
+ */
- if ((opts.compFlags & CLFLG_TREETRANS) == 0)
- {
- return tree;
- }
+ if ((opts.compFlags & CLFLG_TREETRANS) == 0)
+ {
+ return tree;
+ }
- tree = fgMorphSmpOpOptional(tree->AsOp());
+ tree = fgMorphSmpOpOptional(tree->AsOp());
- } // extra scope for gcc workaround
return tree;
}
#ifdef _PREFAST_