Record the result of genJumpKindForOper() in a local before generating the jump instruction
Added support for using Logical instructions with genJumpKindForOper()
else
return REG_SPBASE; }
- static emitJumpKind genJumpKindForOper(genTreeOps cmp, bool isUnsigned);
-
+ enum CompareKind { CK_SIGNED, CK_UNSIGNED, CK_LOGICAL };
+ static emitJumpKind genJumpKindForOper(genTreeOps cmp, CompareKind compareKind);
// For a given compare oper tree, returns the conditions to use with jmp/set in 'jmpKind' array.
// The corresponding elements of jmpToTrueLabel indicate whether the target of the jump is to the
// Get the "kind" and type of the comparison. Note that whether it is an unsigned cmp
// is governed by a flag NOT by the inherent type of the node
// TODO-ARM-CQ: Check if we can use the currently set flags.
+ CompareKind compareKind = ((cmp->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
- emitJumpKind jmpKind = genJumpKindForOper(cmp->gtOper, (cmp->gtFlags & GTF_UNSIGNED) != 0);
+ emitJumpKind jmpKind = genJumpKindForOper(cmp->gtOper, compareKind);
BasicBlock * jmpTarget = compiler->compCurBB->bbJumpDest;
inst_JMP(jmpKind, jmpTarget);
BasicBlock* skipLabel = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), skipLabel);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN);
// constant operand in the second position
src1 = arrLen;
src2 = arrIdx;
- jmpKind = genJumpKindForOper(GT_LE, true); // unsigned compare
+ jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED);
}
else
{
src1 = arrIdx;
src2 = arrLen;
- jmpKind = genJumpKindForOper(GT_GE, true); // unsigned compare
+ jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
}
genConsumeIfReg(src1);
getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue);
BasicBlock *gsCheckBlk = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), gsCheckBlk);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
}
if (divisorOp->IsZero())
{
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_EQ, false), SCK_DIV_BY_ZERO);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpEqual, SCK_DIV_BY_ZERO);
// We don't need to generate the sdiv/udiv instruction
}
else
{
// Check if the divisor is zero throw a DivideByZeroException
emit->emitIns_R_I(INS_cmp, cmpSize, divisorReg, 0);
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_EQ, false), SCK_DIV_BY_ZERO);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpEqual, SCK_DIV_BY_ZERO);
// Check if the divisor is not -1 branch to 'sdivLabel'
emit->emitIns_R_I(INS_cmp, cmpSize, divisorReg, -1);
- inst_JMP(genJumpKindForOper(GT_NE, false), sdivLabel);
+
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ inst_JMP(jmpNotEqual, sdivLabel);
// If control flow continues past here the 'divisorReg' is known to be -1
}
// this will set the Z and V flags only when dividendReg is MinInt
//
emit->emitIns_R_R_R(INS_adds, cmpSize, REG_ZR, dividendReg, dividendReg);
- inst_JMP(genJumpKindForOper(GT_NE, false), sdivLabel); // goto sdiv if the Z flag is clear
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ inst_JMP(jmpNotEqual, sdivLabel); // goto sdiv if the Z flag is clear
genJumpToThrowHlpBlk(EJ_vs, SCK_ARITH_EXCPN); // if the V flags is set throw ArithmeticException
}
if (!divisorOp->isContainedIntOrIImmed())
{
emit->emitIns_R_I(INS_cmp, cmpSize, divisorReg, 0);
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_EQ, false), SCK_DIV_BY_ZERO);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpEqual, SCK_DIV_BY_ZERO);
}
genCodeForBinary(treeNode); // Generate the udiv instruction
// Get the "kind" and type of the comparison. Note that whether it is an unsigned cmp
// is governed by a flag NOT by the inherent type of the node
- // TODO-XArch-CQ: Check if we can use the currently set flags.
emitJumpKind jumpKind[2];
bool branchToTrueLabel[2];
genJumpKindsForTree(cmp, jumpKind, branchToTrueLabel);
- BasicBlock* skipLabel = nullptr;
if (jumpKind[0] != EJ_NONE)
{
- BasicBlock *jmpTarget;
- if (branchToTrueLabel[0])
- {
- jmpTarget = compiler->compCurBB->bbJumpDest;
- }
- else
- {
- // This case arises only for ordered GT_EQ right now
- assert((cmp->gtOper == GT_EQ) && ((cmp->gtFlags & GTF_RELOP_NAN_UN) == 0));
- skipLabel = genCreateTempLabel();
- jmpTarget = skipLabel;
- }
-
- inst_JMP(jumpKind[0], jmpTarget);
+ // On Arm64 the branches will always branch to the true label
+ assert(branchToTrueLabel[0]);
+ inst_JMP(jumpKind[0], compiler->compCurBB->bbJumpDest);
}
if (jumpKind[1] != EJ_NONE)
assert(branchToTrueLabel[1]);
inst_JMP(jumpKind[1], compiler->compCurBB->bbJumpDest);
}
-
- if (skipLabel != nullptr)
- genDefineTempLabel(skipLabel);
}
break;
BasicBlock* skipLabel = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), skipLabel);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN);
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
getEmitter()->emitIns_R_R(INS_TEST, easz, targetReg, targetReg);
- inst_JMP(genJumpKindForOper(GT_EQ, false), endLabel);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, endLabel);
// Compute the size of the block to allocate and perform alignment.
// If the method has no PSPSym and compInitMem=true, we can reuse targetReg as regcnt,
// Therefore we need to subtract 16 from regcnt here.
assert(genIsValidIntReg(regCnt));
inst_RV_IV(INS_subs, regCnt, 16, emitActualTypeSize(type));
- inst_JMP(genJumpKindForOper(GT_NE, false), loop);
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ inst_JMP(jmpNotEqual, loop);
}
else
{
getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, CORINFO_PAGE_SIZE);
getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
- inst_JMP(genJumpKindForOper(GT_LT, true), done);
+ emitJumpKind jmpLTU = genJumpKindForOper(GT_LT, CK_UNSIGNED);
+ inst_JMP(jmpLTU, done);
// Update SP to be at the next page of stack that we will tickle
getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt);
// constant operand in the second position
src1 = arrLen;
src2 = arrIndex;
- jmpKind = genJumpKindForOper(GT_LE, true); // unsigned compare
+ jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED);
}
else
{
src1 = arrIndex;
src2 = arrLen;
- jmpKind = genJumpKindForOper(GT_GE, true); // unsigned compare
+ jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
}
GenTreeIntConCommon* intConst = nullptr;
emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_GE, true), SCK_RNGCHK_FAIL);
+ emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
+ genJumpToThrowHlpBlk(jmpGEU, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
/*****************************************************************************
* The condition to use for (the jmp/set for) the given type of compare operation are
- * returned in 'jmpKind' array. The corresponding elements of jmpToTrueLabel indicate
- * the branch target when the condition being true.
- *
- * jmpToTrueLabel[i]= true implies branch to the target when the compare operation is true.
- * jmpToTrueLabel[i]= false implies branch to the target when the compare operation is false.
+ * returned in 'jmpKind' array.
+ * If only one branch is necessary the value of jmpKind[1] will be EJ_NONE
+ * On Arm64 both branches will always branch to the true label
*/
// static
void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
emitJumpKind jmpKind[2],
bool jmpToTrueLabel[2])
{
- // Except for BEQ (ordered GT_EQ) both jumps are to the true label.
+ // On Arm64 both branches will always branch to the true label
jmpToTrueLabel[0] = true;
jmpToTrueLabel[1] = true;
// For integer comparisons just use genJumpKindForOper
if (!varTypeIsFloating(cmpTree->gtOp.gtOp1->gtEffectiveVal()))
{
- jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, (cmpTree->gtFlags & GTF_UNSIGNED) != 0);
+ CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
+ jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, compareKind);
jmpKind[1] = EJ_NONE;
}
- else
+ else // We have a Floating Point Compare operation
{
assert(cmpTree->OperIsCompare());
// For details on this mapping, see the ARM64 Condition Code
// table at section C1.2.3 in the ARMV8 architecture manual
//
+
+ // We must check the GTF_RELOP_NAN_UN to find out
+ // if we need to branch when we have a NaN operand.
+ //
if ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) != 0)
{
// Must branch if we have an NaN, unordered
unreached();
}
}
- else
+ else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0)
{
// Do not branch if we have an NaN, unordered
switch (cmpTree->gtOper)
{
// We only need to check for a negative value in sourceReg
emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, 0);
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
if (dstType == TYP_ULONG)
{
// cast to TYP_ULONG:
noway_assert(castInfo.typeMask != 0);
emit->emitIns_R_I(INS_tst, cmpSize, sourceReg, castInfo.typeMask);
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
}
else
{
emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
}
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_GT, false), SCK_OVERFLOW);
+ emitJumpKind jmpGTS = genJumpKindForOper(GT_GT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpGTS, SCK_OVERFLOW);
// Compare with the MIN
emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
}
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
}
ins = INS_mov;
}
emit->emitIns_R_I(INS_cmp, EA_4BYTE, intReg, expMask);
// If exponent is all 1's, throw ArithmeticException
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_EQ, false), SCK_ARITH_EXCPN);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpEqual, SCK_ARITH_EXCPN);
// if it is a finite value copy it to targetReg
if (treeNode->gtRegNum != fpReg)
*/
// static
-emitJumpKind CodeGen::genJumpKindForOper(genTreeOps cmp, bool isUnsigned)
+emitJumpKind CodeGen::genJumpKindForOper(genTreeOps cmp, CompareKind compareKind)
{
const static
BYTE genJCCinsSgn[] =
EJ_hi, // GT_GT
#endif
};
+
+ const static
+ BYTE genJCCinsLog[] = /* logical operation */
+ {
+#if defined(_TARGET_XARCH_)
+ EJ_je, // GT_EQ (Z == 1)
+ EJ_jne, // GT_NE (Z == 0)
+ EJ_js, // GT_LT (S == 1)
+ EJ_NONE, // GT_LE
+ EJ_jns, // GT_GE (S == 0)
+ EJ_NONE, // GT_GT
+#elif defined(_TARGET_ARMARCH_)
+ EJ_eq, // GT_EQ (Z == 1)
+ EJ_ne, // GT_NE (Z == 0)
+ EJ_mi, // GT_LT (N == 1)
+ EJ_NONE, // GT_LE
+ EJ_pl, // GT_GE (N == 0)
+ EJ_NONE, // GT_GT
+#endif
+ };
+
#if defined(_TARGET_XARCH_)
assert(genJCCinsSgn[GT_EQ - GT_EQ] == EJ_je);
assert(genJCCinsSgn[GT_NE - GT_EQ] == EJ_jne);
assert(genJCCinsUns[GT_LE - GT_EQ] == EJ_jbe);
assert(genJCCinsUns[GT_GE - GT_EQ] == EJ_jae);
assert(genJCCinsUns[GT_GT - GT_EQ] == EJ_ja);
+
+ assert(genJCCinsLog[GT_EQ - GT_EQ] == EJ_je);
+ assert(genJCCinsLog[GT_NE - GT_EQ] == EJ_jne);
+ assert(genJCCinsLog[GT_LT - GT_EQ] == EJ_js);
+ assert(genJCCinsLog[GT_GE - GT_EQ] == EJ_jns);
#elif defined(_TARGET_ARMARCH_)
assert(genJCCinsSgn[GT_EQ - GT_EQ] == EJ_eq);
assert(genJCCinsSgn[GT_NE - GT_EQ] == EJ_ne);
assert(genJCCinsUns[GT_LE - GT_EQ] == EJ_ls);
assert(genJCCinsUns[GT_GE - GT_EQ] == EJ_hs);
assert(genJCCinsUns[GT_GT - GT_EQ] == EJ_hi);
+
+ assert(genJCCinsLog[GT_EQ - GT_EQ] == EJ_eq);
+ assert(genJCCinsLog[GT_NE - GT_EQ] == EJ_ne);
+ assert(genJCCinsLog[GT_LT - GT_EQ] == EJ_mi);
+ assert(genJCCinsLog[GT_GE - GT_EQ] == EJ_pl);
#else
assert(!"unknown arch");
#endif
assert(GenTree::OperIsCompare(cmp));
- if (isUnsigned)
+ emitJumpKind result = EJ_COUNT;
+
+ if (compareKind == CK_UNSIGNED)
{
- return (emitJumpKind)genJCCinsUns[cmp - GT_EQ];
+ result = (emitJumpKind)genJCCinsUns[cmp - GT_EQ];
}
- else
+ else if (compareKind == CK_SIGNED)
{
- return (emitJumpKind)genJCCinsSgn[cmp - GT_EQ];
+ result = (emitJumpKind)genJCCinsSgn[cmp - GT_EQ];
}
+ else if (compareKind == CK_LOGICAL)
+ {
+ result = (emitJumpKind)genJCCinsLog[cmp - GT_EQ];
+ }
+ assert(result != EJ_COUNT);
+ return result;
}
/*****************************************************************************
clab_nostop = genCreateTempLabel();
/* Generate the conditional jump */
-
- inst_JMP(genJumpKindForOper(GT_EQ, false), clab_nostop);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, clab_nostop);
#ifdef _TARGET_ARM_
// The helper preserves the return value on ARM
/* Generate "jae <fail_label>" */
noway_assert(oper->gtOper == GT_ARR_BOUNDS_CHECK);
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_GE, true), SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
+ emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
+ genJumpToThrowHlpBlk(jmpGEU, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
}
else
{
/* Generate "cmp [arrRef+LenOffs], ixv" */
inst_AT_IV(INS_cmp, EA_4BYTE, arrRef, ixv, lenOffset);
// Generate "jbe <fail_label>"
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_LE, true), SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
+ emitJumpKind jmpLEU = genJumpKindForOper(GT_LE, CK_UNSIGNED);
+ genJumpToThrowHlpBlk(jmpLEU, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
}
else if (arrLen->IsCnsIntOrI())
{
/* Generate "cmp arrLen, ixv" */
inst_RV_IV(INS_cmp, arrLen->gtRegNum, ixv, EA_4BYTE);
// Generate "jbe <fail_label>"
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_LE, true), SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
+ emitJumpKind jmpLEU = genJumpKindForOper(GT_LE, CK_UNSIGNED);
+ genJumpToThrowHlpBlk(jmpLEU, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
}
}
arrReg,
compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
#endif
-
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_GE, true), SCK_RNGCHK_FAIL);
+ emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
+ genJumpToThrowHlpBlk(jmpGEU, SCK_RNGCHK_FAIL);
if (dim == 0)
{
}
gsCheckBlk = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), gsCheckBlk);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
jumpTrue = genTransitionBlockStackFP(&compCurFPState, compiler->compCurBB, jumpTrue);
}
#endif
- if (cmp == GT_EQ)
- {
- inst_JMP(genJumpKindForOper(GT_EQ, false), jumpTrue);
- }
- else
- {
- inst_JMP(genJumpKindForOper(GT_NE, false), jumpTrue);
- }
+ emitJumpKind jmpKind = genJumpKindForOper(cmp, CK_SIGNED);
+ inst_JMP(jmpKind, jumpTrue);
}
else // specialCaseCmp == false
{
regMaskTP regNeed;
regMaskTP addrReg1 = RBM_NONE;
regMaskTP addrReg2 = RBM_NONE;
- emitJumpKind jumpKind = EJ_jmp; // We borrow EJ_jmp for the cases where we don't know yet
- // which conditional instruction to use.
-
+ emitJumpKind jumpKind = EJ_COUNT; // Initialize with an invalid value
+
bool byteCmp;
bool shortCmp;
#ifdef _TARGET_ARM_
case GT_EQ: jumpKind = EJ_eq; break;
case GT_NE: jumpKind = EJ_ne; break;
- case GT_LT: break;
+ case GT_LT: jumpKind = EJ_NONE; break;
case GT_LE: jumpKind = EJ_eq; break;
- case GT_GE: break;
+ case GT_GE: jumpKind = EJ_NONE; break;
case GT_GT: jumpKind = EJ_ne; break;
#elif defined(_TARGET_X86_)
case GT_EQ: jumpKind = EJ_je; break;
case GT_NE: jumpKind = EJ_jne; break;
- case GT_LT: break;
+ case GT_LT: jumpKind = EJ_NONE; break;
case GT_LE: jumpKind = EJ_je; break;
- case GT_GE: break;
+ case GT_GE: jumpKind = EJ_NONE; break;
case GT_GT: jumpKind = EJ_jne; break;
#endif // TARGET
default:
-----------------------------------------------------
| > 0 | N/A | N/A |
-----------------------------------------------------
- */
+ */
+
switch (cmp)
{
#ifdef _TARGET_ARM_
case GT_EQ: jumpKind = EJ_eq; break;
case GT_NE: jumpKind = EJ_ne; break;
case GT_LT: jumpKind = EJ_mi; break;
- case GT_LE: break;
+ case GT_LE: jumpKind = EJ_NONE; break;
case GT_GE: jumpKind = EJ_pl; break;
- case GT_GT: break;
+ case GT_GT: jumpKind = EJ_NONE; break;
#elif defined(_TARGET_X86_)
case GT_EQ: jumpKind = EJ_je; break;
case GT_NE: jumpKind = EJ_jne; break;
case GT_LT: jumpKind = EJ_js; break;
- case GT_LE: break;
+ case GT_LE: jumpKind = EJ_NONE; break;
case GT_GE: jumpKind = EJ_jns; break;
- case GT_GT: break;
+ case GT_GT: jumpKind = EJ_NONE; break;
#endif // TARGET
- default:
- noway_assert(!"Unexpected comparison OpCode");
- break;
+ default:
+ noway_assert(!"Unexpected comparison OpCode");
+ break;
}
+ assert(jumpKind == genJumpKindForOper(cmp, CK_LOGICAL));
}
+ assert(jumpKind != EJ_COUNT); // Ensure that it was assigned a valid value above
/* Is the value a simple local variable? */
if (genFlagsAreVar(op1->gtLclVarCommon.gtLclNum))
{
- if (jumpKind != EJ_jmp)
+ if (jumpKind != EJ_NONE)
{
addrReg1 = RBM_NONE;
genUpdateLife(op1);
if (flags)
{
- if (jumpKind != EJ_jmp)
+ if (jumpKind != EJ_NONE)
{
goto DONE_FLAGS;
}
DONE:
- jumpKind = genJumpKindForOper(cmp, unsignedCmp);
+ jumpKind = genJumpKindForOper(cmp, unsignedCmp ? CK_UNSIGNED : CK_SIGNED);
DONE_FLAGS: // We have determined what jumpKind to use
genDoneAddressable(op1, addrReg1, RegSet::KEEP_REG);
genDoneAddressable(op2, addrReg2, RegSet::KEEP_REG);
- noway_assert(jumpKind != EJ_jmp);
+ noway_assert(jumpKind != EJ_COUNT); // Ensure that it was assigned a valid value
return jumpKind;
}
getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, regTmpHi, 0);
// Jump to the block which will throw the exception
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
// Unlock regLo [and regHi] after generating code for the gtOverflow() case
//
regTracker.rsTrackRegTrash(reg);
- /* Generate "jns skip" */
-#ifdef _TARGET_ARM_
- inst_JMP(EJ_pl, skip);
-#else
- inst_JMP(EJ_jns, skip);
-#endif
+ /* Check and branch for a positive value */
+ emitJumpKind jmpGEL = genJumpKindForOper(GT_GE, CK_LOGICAL);
+ inst_JMP(jmpGEL, skip);
+
/* Generate the rest of the sequence and we're done */
genIncRegBy(reg, -1, NULL, treeType);
inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, emitTypeSize(treeType), reg, genLog2(ival), INS_FLAGS_SET);
- /* Generate "jns onNegDivisee" followed by "adc reg, 0" */
-#ifdef _TARGET_ARM_
- inst_JMP(EJ_pl, onNegDivisee);
-#else
- inst_JMP(EJ_jns, onNegDivisee);
-#endif
+ // Check and branch for a positive value, skipping the INS_ADDC instruction
+ emitJumpKind jmpGEL = genJumpKindForOper(GT_GE, CK_LOGICAL);
+ inst_JMP(jmpGEL, onNegDivisee);
+
+ // Add the carry flag to 'reg'
inst_RV_IV(INS_ADDC, reg, 0, emitActualTypeSize(treeType));
/* Define the 'onNegDivisee' label and we're done */
onNegDivisee:
sar reg, log2(ival)
*/
+
instGen_Compare_Reg_To_Zero(emitTypeSize(treeType), reg);
-#ifdef _TARGET_ARM_
- inst_JMP(EJ_pl, onNegDivisee);
-#else
- inst_JMP(EJ_jns, onNegDivisee);
-#endif
+
+ // Check and branch for a positive value, skipping the INS_add instruction
+ emitJumpKind jmpGEL = genJumpKindForOper(GT_GE, CK_LOGICAL);
+ inst_JMP(jmpGEL, onNegDivisee);
+
inst_RV_IV(INS_add, reg, (int)ival-1, emitActualTypeSize(treeType));
/* Define the 'onNegDivisee' label and we're done */
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
getEmitter()->emitIns_R_I(INS_add, dstType, regDst, 2 * TARGET_POINTER_SIZE);
regTracker.rsTrackRegTrash(regDst);
getEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, regLoopIndex, 1, INS_FLAGS_SET);
- inst_JMP(genJumpKindForOper(GT_GT, false), loopTopBlock);
+ emitJumpKind jmpGTS = genJumpKindForOper(GT_GT, CK_SIGNED);
+ inst_JMP(jmpGTS, loopTopBlock);
regTracker.rsTrackRegIntCns(regLoopIndex, 0);
regNumber hiReg = (op1->gtFlags & GTF_REG_VAL) ? genRegPairHi(op1->gtRegPair)
: REG_NA;
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+
switch (dstType)
{
case TYP_INT: // conv.ovf.i8.i4
instGen_Compare_Reg_To_Zero(EA_4BYTE, reg);
if (tree->gtFlags & GTF_UNSIGNED) // conv.ovf.u8.i4 (i4 > 0 and upper bits 0)
{
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
goto UPPER_BITS_ZERO;
}
done = genCreateTempLabel();
// Is the loDWord positive or negative
- inst_JMP(genJumpKindForOper(GT_LT, false), neg);
+ inst_JMP(jmpLTS, neg);
// If loDWord is positive, hiDWord should be 0 (sign extended loDWord)
inst_TT_IV(INS_cmp, op1, 0x00000000, 4);
}
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
inst_JMP(EJ_jmp, done);
// If loDWord is negative, hiDWord should be -1 (sign extended loDWord)
{
inst_TT_IV(INS_cmp, op1, 0xFFFFFFFFL, 4);
}
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
// Done
{
inst_TT_IV(INS_cmp, op1, 0, 4);
}
-
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
+
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
break;
default:
if (unsv)
{
inst_RV_IV(INS_TEST, reg, typeMask, emitActualTypeSize(baseType));
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
}
else
{
noway_assert(typeMin != DUMMY_INIT(~0) && typeMax != DUMMY_INIT(0));
inst_RV_IV(INS_cmp, reg, typeMax, emitActualTypeSize(baseType));
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_GT, false), SCK_OVERFLOW);
+ emitJumpKind jmpGTS = genJumpKindForOper(GT_GT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpGTS, SCK_OVERFLOW);
// Compare with the MIN
inst_RV_IV(INS_cmp, reg, typeMin, emitActualTypeSize(baseType));
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
}
genCodeForTree_DONE(tree, reg);
{
noway_assert((op2->gtFlags & GTF_UNSIGNED) == 0); // conv.ovf.u8.un should be bashed to conv.u8.un
instGen_Compare_Reg_To_Zero(EA_4BYTE, regHi); // set flags
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
}
/* Move the value into the target */
{
regNumber hiReg = genRegPairHi(regPair);
instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
}
}
goto DONE;
#endif
case TYP_LONG:
case TYP_ULONG:
+ {
+ noway_assert(tree->gtOverflow()); // conv.ovf.u8 or conv.ovf.i8
- noway_assert(tree->gtOverflow()); // conv.ovf.u8 or conv.ovf.i8
+ genComputeRegPair(op1, REG_PAIR_NONE, RBM_ALLINT & ~needReg, RegSet::FREE_REG);
+ regPair = op1->gtRegPair;
- genComputeRegPair(op1, REG_PAIR_NONE, RBM_ALLINT & ~needReg, RegSet::FREE_REG);
- regPair = op1->gtRegPair;
+ // Do we need to set the sign-flag, or can we check whether it is
+ // already set, and skip this "test" if so.
- // Do we need to set the sign-flag, or can be check if it
- // set, and not do this "test" if so.
+ if (op1->gtFlags & GTF_REG_VAL)
+ {
+ regNumber hiReg = genRegPairHi(op1->gtRegPair);
+ noway_assert(hiReg != REG_STK);
+ instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
+ }
+ else
+ {
+ inst_TT_IV(INS_cmp, op1, 0, sizeof(int));
+ }
- if (op1->gtFlags & GTF_REG_VAL)
- {
- regNumber hiReg = genRegPairHi(op1->gtRegPair);
- noway_assert(hiReg != REG_STK);
- instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
- }
- else
- {
- inst_TT_IV(INS_cmp, op1, 0, sizeof(int));
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
}
- genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
goto DONE;
default:
if (jumpCnt < minSwitchTabJumpCnt)
{
/* Does the first case label follow? */
- emitJumpKind jmpIfEqual = genJumpKindForOper(GT_EQ, false);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
if (fFirstCaseFollows)
{
/* Check for the default case */
inst_RV_IV(INS_cmp, reg, jumpCnt - 1, EA_4BYTE);
- inst_JMP(genJumpKindForOper(GT_GE, true), jumpTab[jumpCnt - 1]);
+ emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
+ inst_JMP(jmpGEU, jumpTab[jumpCnt - 1]);
/* No need to jump to the first case */
while (jumpCnt > 0)
{
inst_RV_IV(INS_sub, reg, 1, EA_4BYTE, INS_FLAGS_SET);
- inst_JMP(jmpIfEqual, *jumpTab++);
+ inst_JMP(jmpEqual, *jumpTab++);
jumpCnt--;
}
}
{
/* Check for case0 first */
instGen_Compare_Reg_To_Zero(EA_4BYTE, reg); // set flags
- inst_JMP(jmpIfEqual, *jumpTab);
+ inst_JMP(jmpEqual, *jumpTab);
/* No need to jump to the first case or the default */
while (jumpCnt > 0)
{
inst_RV_IV(INS_sub, reg, 1, EA_4BYTE, INS_FLAGS_SET);
- inst_JMP(jmpIfEqual, *jumpTab++);
+ inst_JMP(jmpEqual, *jumpTab++);
jumpCnt--;
}
/* First take care of the default case */
inst_RV_IV(INS_cmp, reg, jumpCnt - 1, EA_4BYTE);
- inst_JMP(genJumpKindForOper(GT_GE, true), jumpTab[jumpCnt - 1]);
+ emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
+ inst_JMP(jmpGEU, jumpTab[jumpCnt - 1]);
/* Generate the jump table contents */
esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
getEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_SPBASE, compiler->lvaCallEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
// If 0 we bail out
instGen_Compare_Reg_To_Zero(easz, regCnt); // set flags
- inst_JMP(genJumpKindForOper(GT_EQ, false), endLabel);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, endLabel);
// Align to STACK_ALIGN
inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
assert(!"Codegen missing");
#endif // TARGETS
- inst_JMP(genJumpKindForOper(GT_NE, false), loop);
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ inst_JMP(jmpNotEqual, loop);
// Move the final value of ESP into regCnt
inst_RV_RV(INS_mov, regCnt, REG_SPBASE);
noway_assert(size->gtFlags & GTF_REG_VAL);
regCnt = size->gtRegNum;
inst_RV_RV(INS_cmp, REG_SPBASE, regCnt, TYP_I_IMPL);
- inst_JMP(genJumpKindForOper(GT_GE, true), loop);
+ emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
+ inst_JMP(jmpGEU, loop);
// Move the final value to ESP
inst_RV_RV(INS_mov, REG_SPBASE, regCnt);
}
BasicBlock *gsCheckBlk = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), gsCheckBlk);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
}
BasicBlock* skipLabel = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), skipLabel);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
assert(treeNode->gtRsvdRegs != RBM_NONE);
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
// For integer comparisons just use genJumpKindForOper
if (!varTypeIsFloating(cmpTree->gtOp.gtOp1->gtEffectiveVal()))
{
- jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, (cmpTree->gtFlags & GTF_UNSIGNED) != 0);
+ CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
+ jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, compareKind);
jmpKind[1] = EJ_NONE;
}
else
jmpToTrueLabel[1] = true;
assert(cmpTree->OperIsCompare());
- jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, true);
+ jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, CK_UNSIGNED);
jmpKind[1] = EJ_NONE;
}
inst_RV_IV(INS_cmp, reg, expMask, EA_4BYTE);
// If exponent was all 1's, we need to throw ArithExcep
- genJumpToThrowHlpBlk(EJ_eq, SCK_ARITH_EXCPN);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpEqual, SCK_ARITH_EXCPN);
genCodeForTreeFloat_DONE(tree, op1->gtRegNum);
}
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3151
+Categories=JIT;EXPECTED_FAIL;ISSUE_3151;REL_PASS
[attributector.exe_2053]
RelativePath=CoreMangLib\cti\system\resources\satellitecontractversionattribute\AttributeCtor\AttributeCtor.exe
WorkingDir=CoreMangLib\cti\system\resources\satellitecontractversionattribute\AttributeCtor
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3151
+Categories=JIT;EXPECTED_FAIL;ISSUE_3151;REL_PASS
[ldfldahack.exe_5577]
RelativePath=JIT\Regression\CLR-x86-JIT\v2.1\DDB\B168384\LdfldaHack\LdfldaHack.exe
WorkingDir=JIT\Regression\CLR-x86-JIT\v2.1\DDB\B168384\LdfldaHack
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_2988;ISSUE_2987
+Categories=JIT;EXPECTED_FAIL;ISSUE_2988;ISSUE_2987;REL_PASS
[_il_relconv_i8_i.exe_3892]
RelativePath=JIT\Methodical\ELEMENT_TYPE_IU\_il_relconv_i8_i\_il_relconv_i8_i.exe
WorkingDir=JIT\Methodical\ELEMENT_TYPE_IU\_il_relconv_i8_i
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_2988;ISSUE_2987
+Categories=JIT;EXPECTED_FAIL;ISSUE_2988;ISSUE_2987;REL_PASS
[weakreferenceisaliveb_psc.exe_2559]
RelativePath=CoreMangLib\cti\system\weakreference\WeakReferenceIsAliveb_PSC\WeakReferenceIsAliveb_PSC.exe
WorkingDir=CoreMangLib\cti\system\weakreference\WeakReferenceIsAliveb_PSC
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;NEED_TRIAGE
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[int16iconvertibletoboolean.exe_1323]
RelativePath=CoreMangLib\cti\system\int16\Int16IConvertibleToBoolean\Int16IConvertibleToBoolean.exe
WorkingDir=CoreMangLib\cti\system\int16\Int16IConvertibleToBoolean
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3151
+Categories=JIT;EXPECTED_FAIL;ISSUE_3151;REL_PASS
[b15222.exe_4865]
RelativePath=JIT\Regression\CLR-x86-JIT\V1-M09.5-PDC\b15222\b15222\b15222.exe
WorkingDir=JIT\Regression\CLR-x86-JIT\V1-M09.5-PDC\b15222\b15222