switch (op1->gtType)
{
- case TYP_INT:
+ case TYP_INT:
- /* Fold constant INT unary operator */
- assert(op1->gtIntCon.ImmedValCanBeFolded(this, tree->OperGet()));
- i1 = (int) op1->gtIntCon.gtIconVal;
+ /* Fold constant INT unary operator */
+ assert(op1->gtIntCon.ImmedValCanBeFolded(this, tree->OperGet()));
+ i1 = (int)op1->gtIntCon.gtIconVal;
- // If we fold a unary oper, then the folded constant
- // is considered a ConstantIndexField if op1 was one
- //
+ // If we fold a unary oper, then the folded constant
+ // is considered a ConstantIndexField if op1 was one
+ //
- if ((op1->gtIntCon.gtFieldSeq != nullptr) &&
- op1->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
- {
- fieldSeq = op1->gtIntCon.gtFieldSeq;
- }
+ if ((op1->gtIntCon.gtFieldSeq != nullptr) && op1->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
+ {
+ fieldSeq = op1->gtIntCon.gtFieldSeq;
+ }
- switch (tree->gtOper)
- {
- case GT_NOT: i1 = ~i1; break;
+ switch (tree->gtOper)
+ {
+ case GT_NOT:
+ i1 = ~i1;
+ break;
- case GT_NEG:
- case GT_CHS: i1 = -i1; break;
+ case GT_NEG:
+ case GT_CHS:
+ i1 = -i1;
+ break;
- case GT_CAST:
- // assert (genActualType(tree->CastToType()) == tree->gtType);
- switch (tree->CastToType())
- {
- case TYP_BYTE:
- itemp = INT32(INT8(i1));
- goto CHK_OVF;
+ case GT_CAST:
+ // assert (genActualType(tree->CastToType()) == tree->gtType);
+ switch (tree->CastToType())
+ {
+ case TYP_BYTE:
+ itemp = INT32(INT8(i1));
+ goto CHK_OVF;
+
+ case TYP_SHORT:
+ itemp = INT32(INT16(i1));
+ CHK_OVF:
+ if (tree->gtOverflow() && ((itemp != i1) || ((tree->gtFlags & GTF_UNSIGNED) && i1 < 0)))
+ {
+ goto INT_OVF;
+ }
+ i1 = itemp;
+ goto CNS_INT;
- case TYP_SHORT:
- itemp = INT32(INT16(i1));
- CHK_OVF:
- if (tree->gtOverflow() &&
- ((itemp != i1) ||
- ((tree->gtFlags & GTF_UNSIGNED) && i1 < 0)))
- {
- goto INT_OVF;
- }
- i1 = itemp; goto CNS_INT;
+ case TYP_CHAR:
+ itemp = INT32(UINT16(i1));
+ if (tree->gtOverflow())
+ {
+ if (itemp != i1)
+ {
+ goto INT_OVF;
+ }
+ }
+ i1 = itemp;
+ goto CNS_INT;
- case TYP_CHAR:
- itemp = INT32(UINT16(i1));
- if (tree->gtOverflow())
- if (itemp != i1) goto INT_OVF;
- i1 = itemp;
- goto CNS_INT;
+ case TYP_BOOL:
+ case TYP_UBYTE:
+ itemp = INT32(UINT8(i1));
+ if (tree->gtOverflow())
+ {
+ if (itemp != i1)
+ {
+ goto INT_OVF;
+ }
+ }
+ i1 = itemp;
+ goto CNS_INT;
+
+ case TYP_UINT:
+ if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
+ {
+ goto INT_OVF;
+ }
+ goto CNS_INT;
- case TYP_BOOL:
- case TYP_UBYTE:
- itemp = INT32(UINT8(i1));
- if (tree->gtOverflow()) if (itemp != i1) goto INT_OVF;
- i1 = itemp; goto CNS_INT;
+ case TYP_INT:
+ if ((tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
+ {
+ goto INT_OVF;
+ }
+ goto CNS_INT;
- case TYP_UINT:
- if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
- goto INT_OVF;
- goto CNS_INT;
+ case TYP_ULONG:
+ if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
+ {
+ op1->ChangeOperConst(GT_CNS_NATIVELONG); // need type of oper to be same as tree
+ op1->gtType = TYP_LONG;
+ // We don't care about the value as we are throwing an exception
+ goto LNG_OVF;
+ }
+ lval1 = UINT64(UINT32(i1));
+ goto CNS_LONG;
- case TYP_INT:
- if ((tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
- goto INT_OVF;
- goto CNS_INT;
+ case TYP_LONG:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ lval1 = INT64(UINT32(i1));
+ }
+ else
+ {
+ lval1 = INT64(INT32(i1));
+ }
+ goto CNS_LONG;
- case TYP_ULONG:
- if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
- {
- op1->ChangeOperConst(GT_CNS_NATIVELONG); // need type of oper to be same as tree
- op1->gtType = TYP_LONG;
- // We don't care about the value as we are throwing an exception
- goto LNG_OVF;
- }
- lval1 = UINT64(UINT32(i1));
- goto CNS_LONG;
+ case TYP_FLOAT:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ f1 = forceCastToFloat(UINT32(i1));
+ }
+ else
+ {
+ f1 = forceCastToFloat(INT32(i1));
+ }
+ d1 = f1;
+ goto CNS_DOUBLE;
- case TYP_LONG:
- if (tree->gtFlags & GTF_UNSIGNED)
- {
- lval1 = INT64(UINT32(i1));
- }
- else
- {
- lval1 = INT64(INT32(i1));
- }
- goto CNS_LONG;
+ case TYP_DOUBLE:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ d1 = (double)UINT32(i1);
+ }
+ else
+ {
+ d1 = (double)INT32(i1);
+ }
+ goto CNS_DOUBLE;
- case TYP_FLOAT:
- if (tree->gtFlags & GTF_UNSIGNED)
- f1 = forceCastToFloat(UINT32(i1));
- else
- f1 = forceCastToFloat(INT32(i1));
- d1 = f1;
- goto CNS_DOUBLE;
-
- case TYP_DOUBLE:
- if (tree->gtFlags & GTF_UNSIGNED)
- d1 = (double) UINT32(i1);
- else
- d1 = (double) INT32(i1);
- goto CNS_DOUBLE;
+ default:
+ assert(!"BAD_TYP");
+ break;
+ }
+ return tree;
- default:
- assert(!"BAD_TYP");
- break;
+ default:
+ return tree;
}
- return tree;
- default:
- return tree;
- }
+ goto CNS_INT;
- goto CNS_INT;
+ case TYP_LONG:
- case TYP_LONG:
+ /* Fold constant LONG unary operator */
- /* Fold constant LONG unary operator */
+ assert(op1->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
+ lval1 = op1->gtIntConCommon.LngValue();
- assert(op1->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
- lval1 = op1->gtIntConCommon.LngValue();
+ switch (tree->gtOper)
+ {
+ case GT_NOT:
+ lval1 = ~lval1;
+ break;
- switch (tree->gtOper)
- {
- case GT_NOT: lval1 = ~lval1; break;
+ case GT_NEG:
+ case GT_CHS:
+ lval1 = -lval1;
+ break;
- case GT_NEG:
- case GT_CHS: lval1 = -lval1; break;
+ case GT_CAST:
+ assert(genActualType(tree->CastToType()) == tree->gtType);
+ switch (tree->CastToType())
+ {
+ case TYP_BYTE:
+ i1 = INT32(INT8(lval1));
+ goto CHECK_INT_OVERFLOW;
- case GT_CAST:
- assert (genActualType(tree->CastToType()) == tree->gtType);
- switch (tree->CastToType())
- {
- case TYP_BYTE:
- i1 = INT32(INT8(lval1));
- goto CHECK_INT_OVERFLOW;
+ case TYP_SHORT:
+ i1 = INT32(INT16(lval1));
+ goto CHECK_INT_OVERFLOW;
- case TYP_SHORT:
- i1 = INT32(INT16(lval1));
- goto CHECK_INT_OVERFLOW;
+ case TYP_CHAR:
+ i1 = INT32(UINT16(lval1));
+ goto CHECK_UINT_OVERFLOW;
- case TYP_CHAR:
- i1 = INT32(UINT16(lval1));
- goto CHECK_UINT_OVERFLOW;
+ case TYP_UBYTE:
+ i1 = INT32(UINT8(lval1));
+ goto CHECK_UINT_OVERFLOW;
- case TYP_UBYTE:
- i1 = INT32(UINT8(lval1));
- goto CHECK_UINT_OVERFLOW;
+ case TYP_INT:
+ i1 = INT32(lval1);
- case TYP_INT:
- i1 = INT32(lval1);
+ CHECK_INT_OVERFLOW:
+ if (tree->gtOverflow())
+ {
+ if (i1 != lval1)
+ {
+ goto INT_OVF;
+ }
+ if ((tree->gtFlags & GTF_UNSIGNED) && i1 < 0)
+ {
+ goto INT_OVF;
+ }
+ }
+ goto CNS_INT;
- CHECK_INT_OVERFLOW:
- if (tree->gtOverflow())
- {
- if (i1 != lval1)
- goto INT_OVF;
- if ((tree->gtFlags & GTF_UNSIGNED) && i1 < 0)
- goto INT_OVF;
- }
- goto CNS_INT;
+ case TYP_UINT:
+ i1 = UINT32(lval1);
- case TYP_UINT:
- i1 = UINT32(lval1);
+ CHECK_UINT_OVERFLOW:
+ if (tree->gtOverflow() && UINT32(i1) != lval1)
+ {
+ goto INT_OVF;
+ }
+ goto CNS_INT;
- CHECK_UINT_OVERFLOW:
- if (tree->gtOverflow() && UINT32(i1) != lval1)
- goto INT_OVF;
- goto CNS_INT;
+ case TYP_ULONG:
+ if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && lval1 < 0)
+ {
+ goto LNG_OVF;
+ }
+ goto CNS_LONG;
- case TYP_ULONG:
- if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && lval1 < 0)
- goto LNG_OVF;
- goto CNS_LONG;
+ case TYP_LONG:
+ if ((tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && lval1 < 0)
+ {
+ goto LNG_OVF;
+ }
+ goto CNS_LONG;
- case TYP_LONG:
- if ( (tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && lval1 < 0)
- goto LNG_OVF;
- goto CNS_LONG;
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ if ((tree->gtFlags & GTF_UNSIGNED) && lval1 < 0)
+ {
+ d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
+ }
+ else
+ {
+ d1 = (double)lval1;
+ }
- case TYP_FLOAT:
- case TYP_DOUBLE:
- if ((tree->gtFlags & GTF_UNSIGNED) && lval1 < 0)
- {
- d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
- }
- else
- {
- d1 = (double)lval1;
- }
+ if (tree->CastToType() == TYP_FLOAT)
+ {
+ f1 = forceCastToFloat(d1); // truncate precision
+ d1 = f1;
+ }
+ goto CNS_DOUBLE;
+ default:
+ assert(!"BAD_TYP");
+ break;
+ }
+ return tree;
- if (tree->CastToType() == TYP_FLOAT)
- {
- f1 = forceCastToFloat(d1); // truncate precision
- d1 = f1;
- }
- goto CNS_DOUBLE;
- default:
- assert(!"BAD_TYP");
- break;
+ default:
+ return tree;
}
- return tree;
- default:
- return tree;
- }
+ goto CNS_LONG;
- goto CNS_LONG;
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ assert(op1->gtOper == GT_CNS_DBL);
- case TYP_FLOAT:
- case TYP_DOUBLE:
- assert(op1->gtOper == GT_CNS_DBL);
+ /* Fold constant DOUBLE unary operator */
- /* Fold constant DOUBLE unary operator */
-
- d1 = op1->gtDblCon.gtDconVal;
-
- switch (tree->gtOper)
- {
- case GT_NEG:
- case GT_CHS:
- d1 = -d1;
- break;
+ d1 = op1->gtDblCon.gtDconVal;
- case GT_CAST:
+ switch (tree->gtOper)
+ {
+ case GT_NEG:
+ case GT_CHS:
+ d1 = -d1;
+ break;
- if (tree->gtOverflowEx())
- return tree;
+ case GT_CAST:
- assert (genActualType(tree->CastToType()) == tree->gtType);
-
- if ((op1->gtType == TYP_FLOAT && !_finite(forceCastToFloat(d1))) ||
- (op1->gtType == TYP_DOUBLE && !_finite(d1)))
- {
- // The floating point constant is not finite. The ECMA spec says, in
- // III 3.27, that "...if overflow occurs converting a floating point type
- // to an integer, ..., the value returned is unspecified." However, it would
- // at least be desirable to have the same value returned for casting an overflowing
- // constant to an int as would obtained by passing that constant as a parameter
- // then casting that parameter to an int type. We will assume that the C compiler's
- // cast logic will yield the desired result (and trust testing to tell otherwise).
- // Cross-compilation is an issue here; if that becomes an important scenario, we should
- // capture the target-specific values of overflow casts to the various integral types as
- // constants in a target-specific function.
- CLANG_FORMAT_COMMENT_ANCHOR;
-
- // Don't fold conversions of +inf/-inf to integral value as the value returned by JIT helper
- // doesn't match with the C compiler's cast result.
- return tree;
- }
+ if (tree->gtOverflowEx())
+ {
+ return tree;
+ }
- if (d1 < 0.0) {
- if (tree->CastToType() == TYP_CHAR || tree->CastToType() == TYP_UBYTE ||
- tree->CastToType() == TYP_UINT || tree->CastToType() == TYP_ULONG)
- return tree;
- }
+ assert(genActualType(tree->CastToType()) == tree->gtType);
+
+ if ((op1->gtType == TYP_FLOAT && !_finite(forceCastToFloat(d1))) ||
+ (op1->gtType == TYP_DOUBLE && !_finite(d1)))
+ {
+ // The floating point constant is not finite. The ECMA spec says, in
+ // III 3.27, that "...if overflow occurs converting a floating point type
+ // to an integer, ..., the value returned is unspecified." However, it would
+ // at least be desirable to have the same value returned for casting an overflowing
+ // constant to an int as would obtained by passing that constant as a parameter
+ // then casting that parameter to an int type. We will assume that the C compiler's
+ // cast logic will yield the desired result (and trust testing to tell otherwise).
+ // Cross-compilation is an issue here; if that becomes an important scenario, we should
+ // capture the target-specific values of overflow casts to the various integral types as
+ // constants in a target-specific function.
+ CLANG_FORMAT_COMMENT_ANCHOR;
+
-#ifdef _TARGET_XARCH_
+ // Don't fold conversions of +inf/-inf to integral value as the value returned by JIT helper
+ // doesn't match with the C compiler's cast result.
+ return tree;
-#else //!_TARGET_XARCH_
-
- switch (tree->CastToType())
- {
- case TYP_BYTE:
- i1 = ssize_t(INT8(d1));
- goto CNS_INT;
- case TYP_UBYTE:
- i1 = ssize_t(UINT8(d1));
- goto CNS_INT;
- case TYP_SHORT:
- i1 = ssize_t(INT16(d1));
- goto CNS_INT;
- case TYP_CHAR:
- i1 = ssize_t(UINT16(d1));
- goto CNS_INT;
- case TYP_INT:
- i1 = ssize_t(INT32(d1));
- goto CNS_INT;
- case TYP_UINT:
- i1 = ssize_t(UINT32(d1));
- goto CNS_INT;
- case TYP_LONG:
- lval1 = INT64(d1);
- goto CNS_LONG;
- case TYP_ULONG:
- lval1 = UINT64(d1);
- goto CNS_LONG;
- case TYP_FLOAT:
- case TYP_DOUBLE:
- if (op1->gtType == TYP_FLOAT)
- d1 = forceCastToFloat(d1); // it's only !_finite() after this conversion
- goto CNS_DOUBLE;
- default:
- unreached();
- }
-#endif //!_TARGET_XARCH_
+ }
+
+ if (d1 < 0.0)
+ {
+     if (tree->CastToType() == TYP_CHAR || tree->CastToType() == TYP_UBYTE ||
+         tree->CastToType() == TYP_UINT || tree->CastToType() == TYP_ULONG)
+     {
+         return tree;
+     }
+ }
+
- switch (tree->CastToType())
- {
- case TYP_BYTE:
- i1 = INT32(INT8(d1)); goto CNS_INT;
+ switch (tree->CastToType())
+ {
+ case TYP_BYTE:
+ i1 = INT32(INT8(d1));
+ goto CNS_INT;
+
+ case TYP_SHORT:
+ i1 = INT32(INT16(d1));
+ goto CNS_INT;
- case TYP_SHORT:
- i1 = INT32(INT16(d1)); goto CNS_INT;
+ case TYP_CHAR:
+ i1 = INT32(UINT16(d1));
+ goto CNS_INT;
- case TYP_CHAR:
- i1 = INT32(UINT16(d1)); goto CNS_INT;
+ case TYP_UBYTE:
+ i1 = INT32(UINT8(d1));
+ goto CNS_INT;
- case TYP_UBYTE:
- i1 = INT32(UINT8(d1)); goto CNS_INT;
+ case TYP_INT:
+ i1 = INT32(d1);
+ goto CNS_INT;
- case TYP_INT:
- i1 = INT32(d1); goto CNS_INT;
+ case TYP_UINT:
+ i1 = forceCastToUInt32(d1);
+ goto CNS_INT;
- case TYP_UINT:
- i1 = forceCastToUInt32(d1); goto CNS_INT;
+ case TYP_LONG:
+ lval1 = INT64(d1);
+ goto CNS_LONG;
- case TYP_LONG:
- lval1 = INT64(d1); goto CNS_LONG;
+ case TYP_ULONG:
+ lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
+ goto CNS_LONG;
- case TYP_ULONG:
- lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
- goto CNS_LONG;
+ case TYP_FLOAT:
+ d1 = forceCastToFloat(d1);
+ goto CNS_DOUBLE;
- case TYP_FLOAT:
- d1 = forceCastToFloat(d1);
- goto CNS_DOUBLE;
+ case TYP_DOUBLE:
+ if (op1->gtType == TYP_FLOAT)
+ {
+ d1 = forceCastToFloat(d1); // truncate precision
+ }
+ goto CNS_DOUBLE; // redundant cast
- case TYP_DOUBLE:
- if (op1->gtType == TYP_FLOAT)
- d1 = forceCastToFloat(d1); // truncate precision
- goto CNS_DOUBLE; // redundant cast
+ default:
+ assert(!"BAD_TYP");
+ break;
+ }
+ return tree;
- default:
- assert(!"BAD_TYP");
- break;
+ default:
+ return tree;
}
- return tree;
+ goto CNS_DOUBLE;
default:
+ /* not a foldable typ - e.g. RET const */
return tree;
- }
- goto CNS_DOUBLE;
-
- default:
- /* not a foldable typ - e.g. RET const */
- return tree;
}
}