Updated the enum emitJumpKind to use Arm condition codes rather than x86 conditional branches
This change impacts the Arm32 JIT and was verified to have no asm diffs for Arm32
Implemented and tested for Arm64 the full set of ordered and unordered floating point branches
Implemented and tested for Arm64 genSetRegToCond for ordered and unordered floating point compares
On Arm64 we now use genJumpKindsForTree to return up to two conditional branches for floating point compares
Cleanup: we now use genJumpKindForOper to select conditional branches, and prefer
isUnsigned=false when creating branches for GT_EQ and GT_NE
Removed the unused fourth 'condcode' portion of the JMP_SMALL macro and
the fourth column from the Arm/Arm64 part of "emitjmps.h"
Unified the Arm32 and Arm64 conditionals in "emitjmps.h"
Reordered the Arm32 and Arm64 conditionals in "emitjmps.h" to match the ARMv8 docs
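For context, a minimal sketch (not the CoreCLR sources verbatim) of how the
three-column JMP_SMALL X-macro drives both expansions shown in this change; it
assumes "emitjmps.h" #undefs JMP_SMALL after each expansion, as X-macro headers
conventionally do:

    // Each JMP_SMALL(en, rev, ins) row contributes one entry per expansion;
    // the old fourth 'condcode' column is gone. On Arm the names are now
    // condition codes (eq, ne, hs, ...) rather than x86-style jb/jae.
    enum emitJumpKind {
        EJ_NONE,
        #define JMP_SMALL(en, rev, ins) EJ_##en,  // column 1: the jump kind
        #include "emitjmps.h"
        EJ_COUNT
    };

    static const emitJumpKind emitReverseJumpKinds[] = {
        EJ_NONE,
        #define JMP_SMALL(en, rev, ins) EJ_##rev, // column 2: the reversed kind
        #include "emitjmps.h"
    };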
static emitJumpKind genJumpKindForOper(genTreeOps cmp, bool isUnsigned);
-#ifdef _TARGET_XARCH_
+
// For a given compare oper tree, returns the conditions to use with jmp/set in 'jmpKind' array.
// The corresponding elements of jmpToTrueLabel indicate whether the target of the jump is to the
// 'true' label or a 'false' label. The 'true' label corresponds to the target to
// branch to on the compare condition being true. The 'false' label corresponds to the target to
// branch to on condition being false.
static void genJumpKindsForTree(GenTreePtr cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2]);
+
#if !defined(_TARGET_64BIT_)
static void genJumpKindsForTreeLongHi(GenTreePtr cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2]);
static void genJumpKindsForTreeLongLo(GenTreePtr cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2]);
#endif //!defined(_TARGET_64BIT_)
-#endif // _TARGET_XARCH_
static bool genShouldRoundFP();
#include "gcinfoencoder.h"
#endif
+
// Get the register assigned to the given node
regNumber CodeGenInterface::genGetAssignedReg(GenTreePtr tree)
BasicBlock* skipLabel = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), skipLabel);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN);
if (arrIdx->isContainedIntOrIImmed())
{
+ // To encode using a cmp immediate, we place the
+ // constant operand in the second position
src1 = arrLen;
src2 = arrIdx;
- jmpKind = EJ_jbe;
+ jmpKind = genJumpKindForOper(GT_LE, true); // unsigned compare
}
else
{
src1 = arrIdx;
src2 = arrLen;
- jmpKind = EJ_jae;
+ jmpKind = genJumpKindForOper(GT_GE, true); // unsigned compare
}
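// Illustration of the two shapes above (hypothetical registers and labels,
// not emitted by this change verbatim): swapping the operands so that the
// immediate can be encoded also flips the sense of the unsigned branch.
//   cmp  x0, #100        ; src1 = arrLen, src2 = constant arrIdx
//   bls  L_rngChkFail    ; arrLen <= arrIdx (unsigned) => out of range
// versus, with both operands in registers:
//   cmp  x1, x0          ; src1 = arrIdx, src2 = arrLen
//   bhs  L_rngChkFail    ; arrIdx >= arrLen (unsigned) => out of range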
genConsumeIfReg(src1);
getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue);
BasicBlock *gsCheckBlk = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), gsCheckBlk);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
}
if (divisorOp->IsZero())
{
- genJumpToThrowHlpBlk(EJ_je, SCK_DIV_BY_ZERO);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_EQ, false), SCK_DIV_BY_ZERO);
// We don't need to generate the sdiv/udiv instruction
}
else
{
// Check if the divisor is zero throw a DivideByZeroException
emit->emitIns_R_I(INS_cmp, cmpSize, divisorReg, 0);
- genJumpToThrowHlpBlk(EJ_je, SCK_DIV_BY_ZERO);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_EQ, false), SCK_DIV_BY_ZERO);
// Check if the divisor is not -1 branch to 'sdivLabel'
emit->emitIns_R_I(INS_cmp, cmpSize, divisorReg, -1);
- inst_JMP(genJumpKindForOper(GT_NE, true), sdivLabel);
+ inst_JMP(genJumpKindForOper(GT_NE, false), sdivLabel);
// If control flow continues past here the 'divisorReg' is known to be -1
}
// this will set the Z and V flags only when dividendReg is MinInt
//
emit->emitIns_R_R_R(INS_adds, cmpSize, REG_ZR, dividendReg, dividendReg);
- inst_JMP(genJumpKindForOper(GT_NE, true), sdivLabel); // goto sdiv if Z flag is clear
- genJumpToThrowHlpBlk(EJ_jo, SCK_ARITH_EXCPN); // if the V flags is set throw ArithmeticException
+ inst_JMP(genJumpKindForOper(GT_NE, false), sdivLabel); // goto sdiv if the Z flag is clear
+ genJumpToThrowHlpBlk(EJ_vs, SCK_ARITH_EXCPN); // if the V flag is set throw ArithmeticException
}
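// Note on the 'adds' trick above: doubling the dividend wraps to zero with
// signed overflow exactly when it is MinInt (MinInt + MinInt sets both Z and
// V); for any other value at least one of Z or V stays clear, so the NE
// branch plus the VS throw detect precisely the MinInt / -1 overflow case.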
genDefineTempLabel(sdivLabel);
if (!divisorOp->isContainedIntOrIImmed())
{
emit->emitIns_R_I(INS_cmp, cmpSize, divisorReg, 0);
- genJumpToThrowHlpBlk(EJ_je, SCK_DIV_BY_ZERO);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_EQ, false), SCK_DIV_BY_ZERO);
}
genCodeForBinary(treeNode); // Generate the udiv instruction
assert(cmp->OperIsCompare());
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
- // Get the "jmpKind" using the gtOper kind
- // Note that whether it is an unsigned cmp is governed by the GTF_UNSIGNED flags
+ // Get the "kind" and type of the comparison. Note that whether it is an unsigned cmp
+ // is governed by the GTF_UNSIGNED flag, NOT by the inherent type of the node
+ // TODO-XArch-CQ: Check if we can use the currently set flags.
+ emitJumpKind jumpKind[2];
+ bool branchToTrueLabel[2];
+ genJumpKindsForTree(cmp, jumpKind, branchToTrueLabel);
- emitJumpKind jmpKind = genJumpKindForOper(cmp->gtOper, (cmp->gtFlags & GTF_UNSIGNED) != 0);
- BasicBlock * jmpTarget = compiler->compCurBB->bbJumpDest;
+ BasicBlock* skipLabel = nullptr;
+ if (jumpKind[0] != EJ_NONE)
+ {
+ BasicBlock *jmpTarget;
+ if (branchToTrueLabel[0])
+ {
+ jmpTarget = compiler->compCurBB->bbJumpDest;
+ }
+ else
+ {
+ // This case arises only for ordered GT_EQ right now
+ assert((cmp->gtOper == GT_EQ) && ((cmp->gtFlags & GTF_RELOP_NAN_UN) == 0));
+ skipLabel = genCreateTempLabel();
+ jmpTarget = skipLabel;
+ }
+
+ inst_JMP(jumpKind[0], jmpTarget);
+ }
- inst_JMP(jmpKind, jmpTarget);
+ if (jumpKind[1] != EJ_NONE)
+ {
+ // the second conditional branch always has to be to the true label
+ assert(branchToTrueLabel[1]);
+ inst_JMP(jumpKind[1], compiler->compCurBB->bbJumpDest);
+ }
+
+ if (skipLabel != nullptr)
+ genDefineTempLabel(skipLabel);
}
break;
BasicBlock* skipLabel = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), skipLabel);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN);
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), esp_check);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
getEmitter()->emitIns_R_R(INS_TEST, easz, targetReg, targetReg);
- inst_JMP(EJ_je, endLabel);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), endLabel);
// Compute the size of the block to allocate and perform alignment.
// If the method has no PSPSym and compInitMem=true, we can reuse targetReg as regcnt,
// Therefore we need to subtract 16 from regcnt here.
assert(genIsValidIntReg(regCnt));
inst_RV_IV(INS_subs, regCnt, 16, emitActualTypeSize(type));
- inst_JMP(EJ_jne, loop);
+ inst_JMP(genJumpKindForOper(GT_NE, false), loop);
}
else
{
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
getEmitter()->emitIns_R_R_R(INS_subs, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt);
- inst_JMP(EJ_jno, loop); // branch if the V flag is not set
+ inst_JMP(EJ_vc, loop); // branch if the V flag is not set
// Overflow, set regCnt to lowest possible value
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, CORINFO_PAGE_SIZE);
getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
- inst_JMP(EJ_jb, done);
-
+ inst_JMP(genJumpKindForOper(GT_LT, true), done);
+
// Update SP to be at the next page of stack that we will tickle
getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt);
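// Context for the unsigned LT branch above: a large allocation must touch
// ("tickle") each intervening stack page so the OS guard page can grow the
// stack; regCnt holds the ultimate SP computed by the earlier 'subs', and
// probing stops once the next page would pass it.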
if (arrIndex->isContainedIntOrIImmed())
{
+ // To encode using a cmp immediate, we place the
+ // constant operand in the second position
src1 = arrLen;
src2 = arrIndex;
- jmpKind = EJ_jbe;
+ jmpKind = genJumpKindForOper(GT_LE, true); // unsigned compare
}
else
{
src1 = arrIndex;
src2 = arrLen;
- jmpKind = EJ_jae;
+ jmpKind = genJumpKindForOper(GT_GE, true); // unsigned compare
}
GenTreeIntConCommon* intConst = nullptr;
emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
- genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_GE, true), SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
genProduceReg(lea);
}
+/*****************************************************************************
+ * The conditions to use for (the jmp/set for) the given type of compare operation
+ * are returned in the 'jmpKind' array. The corresponding elements of jmpToTrueLabel
+ * indicate whether the branch targets the 'true' label or the 'false' label.
+ *
+ * jmpToTrueLabel[i] = true  implies branch to the target when the compare operation is true.
+ * jmpToTrueLabel[i] = false implies branch to the target when the compare operation is false.
+ */
+// static
+void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
+ emitJumpKind jmpKind[2],
+ bool jmpToTrueLabel[2])
+{
+ // Except for BEQ (ordered GT_EQ) both jumps are to the true label.
+ jmpToTrueLabel[0] = true;
+ jmpToTrueLabel[1] = true;
+
+ // For integer comparisons just use genJumpKindForOper
+ if (!varTypeIsFloating(cmpTree->gtOp.gtOp1->gtEffectiveVal()))
+ {
+ jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, (cmpTree->gtFlags & GTF_UNSIGNED) != 0);
+ jmpKind[1] = EJ_NONE;
+ }
+ else
+ {
+ assert(cmpTree->OperIsCompare());
+
+ // For details on this mapping, see the ARM64 Condition Code
+ // table at section C1.2.3 in the ARMV8 architecture manual
+ //
+ if ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) != 0)
+ {
+ // Must branch if we have a NaN, unordered
+ switch (cmpTree->gtOper)
+ {
+ case GT_EQ:
+ jmpKind[0] = EJ_eq; // branch or set when equal (and no NaN's)
+ jmpKind[1] = EJ_vs; // branch or set when we have a NaN
+ break;
+
+ case GT_NE:
+ jmpKind[0] = EJ_ne; // branch or set when not equal (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ case GT_LT:
+ jmpKind[0] = EJ_lt; // branch or set when less than (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ case GT_LE:
+ jmpKind[0] = EJ_le; // branch or set when less than or equal (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ case GT_GT:
+ jmpKind[0] = EJ_hi; // branch or set when greater than (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ case GT_GE:
+ jmpKind[0] = EJ_hs; // branch or set when greater than or equal (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ default:
+ unreached();
+ }
+ }
+ else
+ {
+ // Do not branch if we have a NaN, unordered
+ switch (cmpTree->gtOper)
+ {
+ case GT_EQ:
+ jmpKind[0] = EJ_eq; // branch or set when equal (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ case GT_NE:
+ jmpKind[0] = EJ_gt; // branch or set when greater than (and no NaN's)
+ jmpKind[1] = EJ_lo; // branch or set when less than (and no NaN's)
+ break;
+
+ case GT_LT:
+ jmpKind[0] = EJ_lo; // branch or set when less than (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ case GT_LE:
+ jmpKind[0] = EJ_ls; // branch or set when less than or equal (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ case GT_GT:
+ jmpKind[0] = EJ_gt; // branch or set when greater than (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ case GT_GE:
+ jmpKind[0] = EJ_ge; // branch or set when greater than or equal (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ default:
+ unreached();
+ }
+ }
+ }
+}
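Worked example of the two-branch case above (hypothetical registers and
labels; a sketch, not emitted verbatim by this change): an ordered GT_NE
floating point compare maps to EJ_gt followed by EJ_lo, and a NaN input takes
neither branch because fcmp sets NZCV = 0011 for unordered operands.

    fcmp d0, d1
    bgt  L_true          ; greater than, ordered (Z clear and N == V)
    blo  L_true          ; less than, ordered (C clear; a NaN sets C, so not taken)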
+
// Generate code to materialize a condition into a register
// (the condition codes must already have been appropriately set)
void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
{
- // Get the "jmpKind" using the gtOper kind
- // Note that whether it is an unsigned cmp is governed by the GTF_UNSIGNED flags
+ emitJumpKind jumpKind[2];
+ bool branchToTrueLabel[2];
+ genJumpKindsForTree(tree, jumpKind, branchToTrueLabel);
- emitJumpKind jmpKind = genJumpKindForOper(tree->gtOper, (tree->gtFlags & GTF_UNSIGNED) != 0);
+ // Set the reg according to the flags
+ inst_SET(jumpKind[0], dstReg);
- inst_SET(jmpKind, dstReg);
+ // Do we need to use two operations to set the register?
+ //
+ if (jumpKind[1] != EJ_NONE)
+ {
+ emitter * emit = getEmitter();
+ bool ordered = ((tree->gtFlags & GTF_RELOP_NAN_UN) == 0);
+ insCond secondCond;
+
+ // The only ones that require two operations are the
+ // floating point compare operations of BEQ or BNE.UN
+ //
+ if (tree->gtOper == GT_EQ)
+ {
+ // This must be an ordered comparison.
+ assert(ordered);
+ assert(jumpKind[1] == EJ_vs); // We complement this value
+ secondCond = INS_COND_VC; // for the secondCond
+ }
+ else // gtOper == GT_NE
+ {
+ // This must be BNE.UN (unordered comparison)
+ assert((tree->gtOper == GT_NE) && !ordered);
+ assert(jumpKind[1] == EJ_lo); // We complement this value
+ secondCond = INS_COND_HS; // for the secondCond
+ }
+
+ // The second instruction is a 'csinc' instruction that either selects the previous dstReg
+ // or increments the ZR register, which produces a 1 result.
+
+ emit->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, dstReg, dstReg, REG_ZR, secondCond);
+ }
}
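For illustration, the two-instruction materialization above for an unordered
GT_NE (BNE.UN) comes out as follows (hypothetical registers; a sketch under
the mapping in genJumpKindsForTree):

    fcmp  s0, s1
    cset  w0, ne             ; inst_SET(EJ_ne, dstReg)
    csinc w0, w0, wzr, hs    ; keep w0 if HS holds, else w0 = wzr + 1 = 1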
//------------------------------------------------------------------------
{
// We only need to check for a negative value in sourceReg
emit->emitIns_R_I(INS_cmp, cmpSize, sourceReg, 0);
- genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
if (dstType == TYP_ULONG)
{
// cast to TYP_ULONG:
noway_assert(castInfo.typeMask != 0);
emit->emitIns_R_I(INS_tst, cmpSize, sourceReg, castInfo.typeMask);
- genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
}
else
{
emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
}
- genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_GT, false), SCK_OVERFLOW);
// Compare with the MIN
emit->emitIns_R_R(INS_cmp, cmpSize, sourceReg, tmpReg);
}
- genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
}
ins = INS_mov;
}
emit->emitIns_R_I(INS_cmp, EA_4BYTE, intReg, expMask);
// If exponent is all 1's, throw ArithmeticException
- genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_EQ, false), SCK_ARITH_EXCPN);
// if it is a finite value copy it to targetReg
if (treeNode->gtRegNum != fpReg)
return regMask;
}
+
/*****************************************************************************
* TRACKING OF FLAGS
*****************************************************************************/
return true;
}
-
-
/*****************************************************************************
- * The condition to use for (the jmp/set for) the given type of operation
- *
- * In case of amd64, this routine should be used when there is no gentree available
- * and one needs to generate jumps based on integer comparisons. When gentree is
- * available always use its overloaded version.
- *
- */
+* The condition to use for (the jmp/set for) the given type of operation
+*
+* In case of amd64, this routine should be used when there is no gentree available
+* and one needs to generate jumps based on integer comparisons. When gentree is
+* available always use its overloaded version.
+*
+*/
// static
-emitJumpKind CodeGen::genJumpKindForOper(genTreeOps cmp,
- bool isUnsigned)
+emitJumpKind CodeGen::genJumpKindForOper(genTreeOps cmp, bool isUnsigned)
{
const static
- BYTE genJCCinsSgn[] =
+ BYTE genJCCinsSgn[] =
{
+#if defined(_TARGET_XARCH_)
EJ_je, // GT_EQ
EJ_jne, // GT_NE
EJ_jl, // GT_LT
EJ_jle, // GT_LE
EJ_jge, // GT_GE
EJ_jg, // GT_GT
+#elif defined(_TARGET_ARMARCH_)
+ EJ_eq, // GT_EQ
+ EJ_ne, // GT_NE
+ EJ_lt, // GT_LT
+ EJ_le, // GT_LE
+ EJ_ge, // GT_GE
+ EJ_gt, // GT_GT
+#endif
};
const static
- BYTE genJCCinsUns[] = /* unsigned comparison */
+ BYTE genJCCinsUns[] = /* unsigned comparison */
{
+#if defined(_TARGET_XARCH_)
EJ_je, // GT_EQ
EJ_jne, // GT_NE
EJ_jb, // GT_LT
EJ_jbe, // GT_LE
EJ_jae, // GT_GE
EJ_ja, // GT_GT
+#elif defined(_TARGET_ARMARCH_)
+ EJ_eq, // GT_EQ
+ EJ_ne, // GT_NE
+ EJ_lo, // GT_LT
+ EJ_ls, // GT_LE
+ EJ_hs, // GT_GE
+ EJ_hi, // GT_GT
+#endif
};
-
- assert(genJCCinsSgn[GT_EQ - GT_EQ] == EJ_je );
+#if defined(_TARGET_XARCH_)
+ assert(genJCCinsSgn[GT_EQ - GT_EQ] == EJ_je);
assert(genJCCinsSgn[GT_NE - GT_EQ] == EJ_jne);
- assert(genJCCinsSgn[GT_LT - GT_EQ] == EJ_jl );
+ assert(genJCCinsSgn[GT_LT - GT_EQ] == EJ_jl);
assert(genJCCinsSgn[GT_LE - GT_EQ] == EJ_jle);
assert(genJCCinsSgn[GT_GE - GT_EQ] == EJ_jge);
- assert(genJCCinsSgn[GT_GT - GT_EQ] == EJ_jg );
+ assert(genJCCinsSgn[GT_GT - GT_EQ] == EJ_jg);
- assert(genJCCinsUns[GT_EQ - GT_EQ] == EJ_je );
+ assert(genJCCinsUns[GT_EQ - GT_EQ] == EJ_je);
assert(genJCCinsUns[GT_NE - GT_EQ] == EJ_jne);
- assert(genJCCinsUns[GT_LT - GT_EQ] == EJ_jb );
+ assert(genJCCinsUns[GT_LT - GT_EQ] == EJ_jb);
assert(genJCCinsUns[GT_LE - GT_EQ] == EJ_jbe);
assert(genJCCinsUns[GT_GE - GT_EQ] == EJ_jae);
- assert(genJCCinsUns[GT_GT - GT_EQ] == EJ_ja );
-
+ assert(genJCCinsUns[GT_GT - GT_EQ] == EJ_ja);
+#elif defined(_TARGET_ARMARCH_)
+ assert(genJCCinsSgn[GT_EQ - GT_EQ] == EJ_eq);
+ assert(genJCCinsSgn[GT_NE - GT_EQ] == EJ_ne);
+ assert(genJCCinsSgn[GT_LT - GT_EQ] == EJ_lt);
+ assert(genJCCinsSgn[GT_LE - GT_EQ] == EJ_le);
+ assert(genJCCinsSgn[GT_GE - GT_EQ] == EJ_ge);
+ assert(genJCCinsSgn[GT_GT - GT_EQ] == EJ_gt);
+
+ assert(genJCCinsUns[GT_EQ - GT_EQ] == EJ_eq);
+ assert(genJCCinsUns[GT_NE - GT_EQ] == EJ_ne);
+ assert(genJCCinsUns[GT_LT - GT_EQ] == EJ_lo);
+ assert(genJCCinsUns[GT_LE - GT_EQ] == EJ_ls);
+ assert(genJCCinsUns[GT_GE - GT_EQ] == EJ_hs);
+ assert(genJCCinsUns[GT_GT - GT_EQ] == EJ_hi);
+#else
+ assert(!"unknown arch");
+#endif
assert(GenTree::OperIsCompare(cmp));
if (isUnsigned)
{
- return (emitJumpKind) genJCCinsUns[cmp - GT_EQ];
+ return (emitJumpKind)genJCCinsUns[cmp - GT_EQ];
}
else
{
- return (emitJumpKind) genJCCinsSgn[cmp - GT_EQ];
+ return (emitJumpKind)genJCCinsSgn[cmp - GT_EQ];
}
}
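A hypothetical call site, to show the cross-target behavior of the
table-driven lookup (the identifiers below come from this change; the
comments give each target's column):

    // Unsigned GE: EJ_jae on xarch, EJ_hs on Arm/Arm64.
    emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, true);

    // Equality: EJ_je on xarch, EJ_eq on Arm/Arm64; per this change,
    // GT_EQ/GT_NE branches are created with isUnsigned=false.
    emitJumpKind jmpEQ = genJumpKindForOper(GT_EQ, false);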
#ifdef _TARGET_ARM64_
if (tree->OperGet() == GT_MUL)
{
- jumpKind = EJ_jne;
+ jumpKind = EJ_ne;
}
else
#endif
{
- bool isUnsignedOverflow = ((tree->gtFlags & GTF_UNSIGNED) != 0);
+ bool isUnsignedOverflow = ((tree->gtFlags & GTF_UNSIGNED) != 0);
+
+#if defined(_TARGET_XARCH_)
+
+ jumpKind = isUnsignedOverflow ? EJ_jb : EJ_jo;
- if (isUnsignedOverflow)
+#elif defined(_TARGET_ARMARCH_)
+
+ jumpKind = isUnsignedOverflow ? EJ_lo : EJ_vs;
+
+ if (jumpKind == EJ_lo)
{
- jumpKind = EJ_jb;
-#ifdef _TARGET_ARMARCH_
if ((tree->OperGet() != GT_SUB) && (tree->gtOper != GT_ASG_SUB))
{
- jumpKind = EJ_jae;
+ jumpKind = EJ_hs;
}
-#endif
- }
- else
- {
- jumpKind = EJ_jo;
}
+
+#endif // defined(_TARGET_ARMARCH_)
}
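// Note on the Arm mapping above: ADDS sets the carry flag on unsigned
// overflow, so non-subtract operations branch on HS (C set); SUBS instead
// clears carry on a borrow, so unsigned subtract overflow branches on LO
// (C clear).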
// Jump to the block which will throw the exception
/* Generate the conditional jump */
- inst_JMP(genJumpKindForOper(GT_EQ, true), clab_nostop);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), clab_nostop);
#ifdef _TARGET_ARM_
// The helper preserves the return value on ARM
/* Generate "jae <fail_label>" */
noway_assert(oper->gtOper == GT_ARR_BOUNDS_CHECK);
- genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_GE, true), SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
}
else
{
/* Generate "cmp [arrRef+LenOffs], ixv" */
inst_AT_IV(INS_cmp, EA_4BYTE, arrRef, ixv, lenOffset);
// Generate "jbe <fail_label>"
- genJumpToThrowHlpBlk(EJ_jbe, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_LE, true), SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
}
else if (arrLen->IsCnsIntOrI())
{
/* Generate "cmp arrLen, ixv" */
inst_RV_IV(INS_cmp, arrLen->gtRegNum, ixv, EA_4BYTE);
// Generate "jbe <fail_label>"
- genJumpToThrowHlpBlk(EJ_jbe, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_LE, true), SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
}
}
compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
#endif
- genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_GE, true), SCK_RNGCHK_FAIL);
if (dim == 0)
{
}
gsCheckBlk = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), gsCheckBlk);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
}
-
+#ifdef _TARGET_X86_
/*****************************************************************************
*
* Generate the appropriate conditional jump(s) right after the low 32 bits
noway_assert(!"expected comparison");
}
}
+#elif defined(_TARGET_ARM_)
+/*****************************************************************************
+*
+* Generate the appropriate conditional jump(s) right after the high 32 bits
+* of two long values have been compared.
+*/
+
+void CodeGen::genJccLongHi(genTreeOps cmp,
+ BasicBlock * jumpTrue,
+ BasicBlock * jumpFalse,
+ bool unsOper)
+{
+ if (cmp != GT_NE)
+ jumpFalse->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
+
+ switch (cmp)
+ {
+ case GT_EQ:
+ inst_JMP(EJ_ne, jumpFalse);
+ break;
+
+ case GT_NE:
+ inst_JMP(EJ_ne, jumpTrue);
+ break;
+
+ case GT_LT:
+ case GT_LE:
+ if (unsOper)
+ {
+ inst_JMP(EJ_hi, jumpFalse);
+ inst_JMP(EJ_lo, jumpTrue);
+ }
+ else
+ {
+ inst_JMP(EJ_gt, jumpFalse);
+ inst_JMP(EJ_lt, jumpTrue);
+ }
+ break;
+
+ case GT_GE:
+ case GT_GT:
+ if (unsOper)
+ {
+ inst_JMP(EJ_lo, jumpFalse);
+ inst_JMP(EJ_hi, jumpTrue);
+ }
+ else
+ {
+ inst_JMP(EJ_lt, jumpFalse);
+ inst_JMP(EJ_gt, jumpTrue);
+ }
+ break;
+
+ default:
+ noway_assert(!"expected a comparison operator");
+ }
+}
+
+/*****************************************************************************
+*
+* Generate the appropriate conditional jump(s) right after the low 32 bits
+* of two long values have been compared.
+*/
+
+void CodeGen::genJccLongLo(genTreeOps cmp,
+ BasicBlock* jumpTrue,
+ BasicBlock* jumpFalse)
+{
+ switch (cmp)
+ {
+ case GT_EQ:
+ inst_JMP(EJ_eq, jumpTrue);
+ break;
+ case GT_NE:
+ inst_JMP(EJ_ne, jumpTrue);
+ break;
+
+ case GT_LT:
+ inst_JMP(EJ_lo, jumpTrue);
+ break;
+
+ case GT_LE:
+ inst_JMP(EJ_ls, jumpTrue);
+ break;
+
+ case GT_GE:
+ inst_JMP(EJ_hs, jumpTrue);
+ break;
+
+ case GT_GT:
+ inst_JMP(EJ_hi, jumpTrue);
+ break;
+
+ default:
+ noway_assert(!"expected comparison");
+ }
+}
+#endif
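Putting the two helpers together (hypothetical registers and labels; a sketch
of the shape, not code from this change), an unsigned long GT_LT compare on
Arm32 becomes:

    cmp  r1, r3         ; genJccLongHi: compare the high halves
    bhi  L_false        ; hi(op1) > hi(op2)  => not less than
    blo  L_true         ; hi(op1) < hi(op2)  => less than
    cmp  r0, r2         ; high halves equal: compare the low halves
    blo  L_true         ; genJccLongLo: unsigned low-half less-than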
/*****************************************************************************
*
* Called by genCondJump() for TYP_LONG.
#endif
if (cmp == GT_EQ)
{
- inst_JMP(EJ_je, jumpTrue);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), jumpTrue);
}
else
{
- inst_JMP(EJ_jne, jumpTrue);
+ inst_JMP(genJumpKindForOper(GT_NE, false), jumpTrue);
}
}
else // specialCaseCmp == false
*/
switch (cmp)
{
- case GT_EQ: jumpKind = EJ_je; break;
- case GT_NE: jumpKind = EJ_jne; break;
- case GT_LT: break;
- case GT_LE: jumpKind = EJ_je; break;
- case GT_GE: break;
- case GT_GT: jumpKind = EJ_jne; break;
- default:
- noway_assert(!"Unexpected comparison OpCode");
- break;
+#ifdef _TARGET_ARM_
+ case GT_EQ: jumpKind = EJ_eq; break;
+ case GT_NE: jumpKind = EJ_ne; break;
+ case GT_LT: break;
+ case GT_LE: jumpKind = EJ_eq; break;
+ case GT_GE: break;
+ case GT_GT: jumpKind = EJ_ne; break;
+#elif defined(_TARGET_X86_)
+ case GT_EQ: jumpKind = EJ_je; break;
+ case GT_NE: jumpKind = EJ_jne; break;
+ case GT_LT: break;
+ case GT_LE: jumpKind = EJ_je; break;
+ case GT_GE: break;
+ case GT_GT: jumpKind = EJ_jne; break;
+#endif // TARGET
+ default:
+ noway_assert(!"Unexpected comparison OpCode");
+ break;
}
}
else
*/
switch (cmp)
{
- case GT_EQ: jumpKind = EJ_je; break;
- case GT_NE: jumpKind = EJ_jne; break;
- case GT_LT: jumpKind = EJ_js; break;
- case GT_LE: break;
- case GT_GE: jumpKind = EJ_jns; break;
- case GT_GT: break;
+#ifdef _TARGET_ARM_
+ case GT_EQ: jumpKind = EJ_eq; break;
+ case GT_NE: jumpKind = EJ_ne; break;
+ case GT_LT: jumpKind = EJ_mi; break;
+ case GT_LE: break;
+ case GT_GE: jumpKind = EJ_pl; break;
+ case GT_GT: break;
+#elif defined(_TARGET_X86_)
+ case GT_EQ: jumpKind = EJ_je; break;
+ case GT_NE: jumpKind = EJ_jne; break;
+ case GT_LT: jumpKind = EJ_js; break;
+ case GT_LE: break;
+ case GT_GE: jumpKind = EJ_jns; break;
+ case GT_GT: break;
+#endif // TARGET
default:
noway_assert(!"Unexpected comparison OpCode");
break;
getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, regTmpHi, 0);
// Jump to the block which will throw the exception
- genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
// Unlock regLo [and regHi] after generating code for the gtOverflow() case
//
regTracker.rsTrackRegTrash(reg);
/* Generate "jns skip" */
-
+#ifdef _TARGET_ARM_
+ inst_JMP(EJ_pl, skip);
+#else
inst_JMP(EJ_jns, skip);
-
+#endif
/* Generate the rest of the sequence and we're done */
genIncRegBy(reg, -1, NULL, treeType);
inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, emitTypeSize(treeType), reg, genLog2(ival), INS_FLAGS_SET);
/* Generate "jns onNegDivisee" followed by "adc reg, 0" */
-
- inst_JMP (EJ_jns, onNegDivisee);
+#ifdef _TARGET_ARM_
+ inst_JMP(EJ_pl, onNegDivisee);
+#else
+ inst_JMP(EJ_jns, onNegDivisee);
+#endif
inst_RV_IV(INS_ADDC, reg, 0, emitActualTypeSize(treeType));
/* Define the 'onNegDivisee' label and we're done */
sar reg, log2(ival)
*/
instGen_Compare_Reg_To_Zero(emitTypeSize(treeType), reg);
-
- inst_JMP (EJ_jns, onNegDivisee);
+#ifdef _TARGET_ARM_
+ inst_JMP(EJ_pl, onNegDivisee);
+#else
+ inst_JMP(EJ_jns, onNegDivisee);
+#endif
inst_RV_IV(INS_add, reg, (int)ival-1, emitActualTypeSize(treeType));
/* Define the 'onNegDivisee' label and we're done */
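// Background for the adjustment above: an arithmetic right shift rounds
// toward negative infinity, while the division being compiled truncates
// toward zero, so a negative dividend is first biased by (ival - 1):
//   cmp  reg, #0
//   bpl  onNegDivisee          ; non-negative dividend needs no bias
//   add  reg, reg, #(ival-1)
// onNegDivisee:
//   asr  reg, reg, #log2(ival)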
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), esp_check);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
getEmitter()->emitIns_R_I(INS_add, dstType, regDst, 2 * TARGET_POINTER_SIZE);
regTracker.rsTrackRegTrash(regDst);
getEmitter()->emitIns_R_I(INS_sub, EA_4BYTE, regLoopIndex, 1, INS_FLAGS_SET);
- inst_JMP(EJ_jg, loopTopBlock);
+ inst_JMP(genJumpKindForOper(GT_GT, false), loopTopBlock);
+
regTracker.rsTrackRegIntCns(regLoopIndex, 0);
length -= (pairStoreLoopCount * (2 * TARGET_POINTER_SIZE));
instGen_Compare_Reg_To_Zero(EA_4BYTE, reg);
if (tree->gtFlags & GTF_UNSIGNED) // conv.ovf.u8.i4 (i4 > 0 and upper bits 0)
{
- genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
goto UPPER_BITS_ZERO;
}
done = genCreateTempLabel();
// Is the loDWord positive or negative
-
- inst_JMP(EJ_jl, neg);
+ inst_JMP(genJumpKindForOper(GT_LT, false), neg);
// If loDWord is positive, hiDWord should be 0 (sign extended loDWord)
inst_TT_IV(INS_cmp, op1, 0x00000000, 4);
}
- genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
inst_JMP(EJ_jmp, done);
// If loDWord is negative, hiDWord should be -1 (sign extended loDWord)
{
inst_TT_IV(INS_cmp, op1, 0xFFFFFFFFL, 4);
}
- genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
// Done
inst_TT_IV(INS_cmp, op1, 0, 4);
}
- genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
break;
default:
if (unsv)
{
inst_RV_IV(INS_TEST, reg, typeMask, emitActualTypeSize(baseType));
- genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_NE, false), SCK_OVERFLOW);
}
else
{
noway_assert(typeMin != DUMMY_INIT(~0) && typeMax != DUMMY_INIT(0));
inst_RV_IV(INS_cmp, reg, typeMax, emitActualTypeSize(baseType));
- genJumpToThrowHlpBlk(EJ_jg, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_GT, false), SCK_OVERFLOW);
// Compare with the MIN
inst_RV_IV(INS_cmp, reg, typeMin, emitActualTypeSize(baseType));
- genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
}
genCodeForTree_DONE(tree, reg);
{
noway_assert((op2->gtFlags & GTF_UNSIGNED) == 0); // conv.ovf.u8.un should be bashed to conv.u8.un
instGen_Compare_Reg_To_Zero(EA_4BYTE, regHi); // set flags
- genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
}
/* Move the value into the target */
{
regNumber hiReg = genRegPairHi(regPair);
instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
- genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
}
}
goto DONE;
{
inst_TT_IV(INS_cmp, op1, 0, sizeof(int));
}
-
- genJumpToThrowHlpBlk(EJ_jl, SCK_OVERFLOW);
+ genJumpToThrowHlpBlk(genJumpKindForOper(GT_LT, false), SCK_OVERFLOW);
goto DONE;
default:
if (jumpCnt < minSwitchTabJumpCnt)
{
/* Does the first case label follow? */
+ emitJumpKind jmpIfEqual = genJumpKindForOper(GT_EQ, false);
if (fFirstCaseFollows)
{
/* Check for the default case */
-
inst_RV_IV(INS_cmp, reg, jumpCnt - 1, EA_4BYTE);
- inst_JMP (EJ_jae, jumpTab[jumpCnt-1]);
+ inst_JMP(genJumpKindForOper(GT_GE, true), jumpTab[jumpCnt - 1]);
/* No need to jump to the first case */
while (jumpCnt > 0)
{
inst_RV_IV(INS_sub, reg, 1, EA_4BYTE, INS_FLAGS_SET);
- inst_JMP(EJ_je, *jumpTab++);
+ inst_JMP(jmpIfEqual, *jumpTab++);
jumpCnt--;
}
}
{
/* Check for case0 first */
instGen_Compare_Reg_To_Zero(EA_4BYTE, reg); // set flags
- inst_JMP (EJ_je, *jumpTab);
+ inst_JMP(jmpIfEqual, *jumpTab);
/* No need to jump to the first case or the default */
while (jumpCnt > 0)
{
inst_RV_IV(INS_sub, reg, 1, EA_4BYTE, INS_FLAGS_SET);
- inst_JMP(EJ_je, *jumpTab++);
+ inst_JMP(jmpIfEqual, *jumpTab++);
jumpCnt--;
}
/* First take care of the default case */
inst_RV_IV(INS_cmp, reg, jumpCnt - 1, EA_4BYTE);
- inst_JMP (EJ_jae, jumpTab[jumpCnt-1]);
+ inst_JMP(genJumpKindForOper(GT_GE, true), jumpTab[jumpCnt - 1]);
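// Note on the unsigned GT_GE compare above: treating the switch value as
// unsigned folds the 'index < 0' and 'index >= jumpCnt - 1' checks into a
// single branch to the default case, since a negative index reinterpreted
// as unsigned exceeds any valid case number.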
/* Generate the jump table contents */
esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), esp_check);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
getEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_SPBASE, compiler->lvaCallEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), esp_check);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), esp_check);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
// If 0 we bail out
instGen_Compare_Reg_To_Zero(easz, regCnt); // set flags
- inst_JMP(EJ_je, endLabel);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), endLabel);
// Align to STACK_ALIGN
inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
#if defined(_TARGET_X86_)
inst_IV(INS_push_hide, 0); // --- push 0
-
// Are we done?
inst_RV(INS_dec, regCnt, type);
#elif defined(_TARGET_ARM_)
inst_IV(INS_push, (unsigned) (genRegMask(regZero1) | genRegMask(regZero2)));
-
// Are we done?
inst_RV_IV(INS_sub, regCnt, 2, emitActualTypeSize(type), INS_FLAGS_SET);
assert(!"Codegen missing");
#endif // TARGETS
- inst_JMP(EJ_jne, loop);
+ inst_JMP(genJumpKindForOper(GT_NE, false), loop);
// Move the final value of ESP into regCnt
inst_RV_RV(INS_mov, regCnt, REG_SPBASE);
*/
#ifdef _TARGET_ARM_
inst_RV_RV_RV(INS_sub, regCnt, REG_SPBASE, regCnt, EA_4BYTE, INS_FLAGS_SET);
- inst_JMP(EJ_jae, loop);
+ inst_JMP(EJ_hs, loop);
#else
inst_RV(INS_NEG, regCnt, TYP_I_IMPL);
inst_RV_RV(INS_add, regCnt, REG_SPBASE, TYP_I_IMPL);
noway_assert(size->gtFlags & GTF_REG_VAL);
regCnt = size->gtRegNum;
inst_RV_RV(INS_cmp, REG_SPBASE, regCnt, TYP_I_IMPL);
- inst_JMP(EJ_jae, loop);
+ inst_JMP(genJumpKindForOper(GT_GE, true), loop);
// Move the final value to ESP
inst_RV_RV(INS_mov, REG_SPBASE, regCnt);
}
BasicBlock *gsCheckBlk = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), gsCheckBlk);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
}
BasicBlock* skipLabel = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), skipLabel);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
assert(treeNode->gtRsvdRegs != RBM_NONE);
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
BasicBlock * esp_check = genCreateTempLabel();
- inst_JMP(genJumpKindForOper(GT_EQ, true), esp_check);
+ inst_JMP(genJumpKindForOper(GT_EQ, false), esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
{
INS_nop,
- #define JMP_SMALL(en, rev, ins, condcode) INS_##ins,
+ #define JMP_SMALL(en, rev, ins) INS_##ins,
#include "emitjmps.h"
};
{
EJ_NONE,
- #define JMP_SMALL(en, rev, ins, condcode) EJ_##rev,
- #include "emitjmps.h"
-};
-
-
-const unsigned emitJumpKindCondCodes[] =
-{
- 15, // illegal
-
- #define JMP_SMALL(en, rev, ins, condcode) condcode,
+ #define JMP_SMALL(en, rev, ins) EJ_##rev,
#include "emitjmps.h"
};
return emitReverseJumpKinds[jumpKind];
}
-/*****************************************************************************
- * Look up the condition code for a give jump kind
- */
-
-/*static*/ unsigned emitter::emitJumpKindCondCode(emitJumpKind jumpKind)
-{
- assert(EJ_NONE < jumpKind && jumpKind < EJ_COUNT);
- return emitJumpKindCondCodes[jumpKind];
-}
-
/*****************************************************************************
*
* Return the allocated size (in bytes) of the given instruction descriptor.
{
INS_nop,
- #define JMP_SMALL(en, rev, ins, condcode) INS_##ins,
+ #define JMP_SMALL(en, rev, ins) INS_##ins,
#include "emitjmps.h"
};
{
EJ_NONE,
- #define JMP_SMALL(en, rev, ins, condcode) EJ_##rev,
+ #define JMP_SMALL(en, rev, ins) EJ_##rev,
#include "emitjmps.h"
};
JMP_SMALL(jle , jg , jle )
JMP_SMALL(jg , jle , jg )
-#elif defined(_TARGET_ARM_)
+#elif defined(_TARGET_ARMARCH_)
// jump reverse instruction
-JMP_SMALL(jmp , jmp , b , 15 ) // illegal condcode
-JMP_SMALL(jo , jno , bvs , 6 ) // VS
-JMP_SMALL(jno , jo , bvc , 7 ) // VC
-JMP_SMALL(jb , jae , blo , 3 ) // LO also CC
-JMP_SMALL(jae , jb , bhs , 2 ) // HS also CS
-JMP_SMALL(je , jne , beq , 0 ) // EQ
-JMP_SMALL(jne , je , bne , 1 ) // NE
-JMP_SMALL(jbe , ja , bls , 9 ) // LS
-JMP_SMALL(ja , jbe , bhi , 8 ) // HI
-JMP_SMALL(js , jns , bmi , 4 ) // MI
-JMP_SMALL(jns , js , bpl , 5 ) // PL
-JMP_SMALL(jl , jge , blt , 11 ) // LT
-JMP_SMALL(jge , jl , bge , 10 ) // GE
-JMP_SMALL(jle , jg , ble , 13 ) // LE
-JMP_SMALL(jg , jle , bgt , 12 ) // GT
-
-#elif defined(_TARGET_ARM64_)
-
-// jump reverse instruction condcode
-JMP_SMALL(jmp , jmp , b , 15 ) // illegal condcode
-JMP_SMALL(jo , jno , bvs , 6 ) // VS
-JMP_SMALL(jno , jo , bvc , 7 ) // VC
-JMP_SMALL(jb , jae , blo , 3 ) // LO also CC
-JMP_SMALL(jae , jb , bhs , 2 ) // HS also CS
-JMP_SMALL(je , jne , beq , 0 ) // EQ
-JMP_SMALL(jne , je , bne , 1 ) // NE
-JMP_SMALL(jbe , ja , bls , 9 ) // LS
-JMP_SMALL(ja , jbe , bhi , 8 ) // HI
-JMP_SMALL(js , jns , bmi , 4 ) // MI
-JMP_SMALL(jns , js , bpl , 5 ) // PL
-JMP_SMALL(jl , jge , blt , 11 ) // LT
-JMP_SMALL(jge , jl , bge , 10 ) // GE
-JMP_SMALL(jle , jg , ble , 13 ) // LE
-JMP_SMALL(jg , jle , bgt , 12 ) // GT
+JMP_SMALL(jmp , jmp , b ) // AL always
+JMP_SMALL(eq , ne , beq ) // EQ
+JMP_SMALL(ne , eq , bne ) // NE
+JMP_SMALL(hs , lo , bhs ) // HS also CS
+JMP_SMALL(lo , hs , blo ) // LO also CC
+JMP_SMALL(mi , pl , bmi ) // MI
+JMP_SMALL(pl , mi , bpl ) // PL
+JMP_SMALL(vs , vc , bvs ) // VS
+JMP_SMALL(vc , vs , bvc ) // VC
+JMP_SMALL(hi , ls , bhi ) // HI
+JMP_SMALL(ls , hi , bls ) // LS
+JMP_SMALL(ge , lt , bge ) // GE
+JMP_SMALL(lt , ge , blt ) // LT
+JMP_SMALL(gt , le , bgt ) // GT
+JMP_SMALL(le , gt , ble ) // LE
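// Note: the rows above follow the ARMv8 condition-code encoding order
// (EQ=0, NE=1, HS=2, LO=3, MI=4, PL=5, VS=6, VC=7, HI=8, LS=9, GE=10, LT=11,
// GT=12, LE=13), matching the old fourth-column condcodes and the condition
// code table in the ARMv8 architecture manual.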
#else
#error Unsupported or unset target architecture
/* Convert the condition to an insCond value */
switch (condition)
{
- case EJ_je : cond = INS_COND_EQ; break;
- case EJ_jne : cond = INS_COND_NE; break;
- case EJ_jae : cond = INS_COND_HS; break;
- case EJ_jb : cond = INS_COND_LO; break;
-
- case EJ_js : cond = INS_COND_MI; break;
- case EJ_jns : cond = INS_COND_PL; break;
- case EJ_ja : cond = INS_COND_HI; break;
- case EJ_jbe : cond = INS_COND_LS; break;
-
- case EJ_jge : cond = INS_COND_GE; break;
- case EJ_jl : cond = INS_COND_LT; break;
- case EJ_jg : cond = INS_COND_GT; break;
- case EJ_jle : cond = INS_COND_LE; break;
+ case EJ_eq : cond = INS_COND_EQ; break;
+ case EJ_ne : cond = INS_COND_NE; break;
+ case EJ_hs : cond = INS_COND_HS; break;
+ case EJ_lo : cond = INS_COND_LO; break;
+
+ case EJ_mi : cond = INS_COND_MI; break;
+ case EJ_pl : cond = INS_COND_PL; break;
+ case EJ_vs : cond = INS_COND_VS; break;
+ case EJ_vc : cond = INS_COND_VC; break;
+
+ case EJ_hi : cond = INS_COND_HI; break;
+ case EJ_ls : cond = INS_COND_LS; break;
+ case EJ_ge : cond = INS_COND_GE; break;
+ case EJ_lt : cond = INS_COND_LT; break;
+
+ case EJ_gt : cond = INS_COND_GT; break;
+ case EJ_le : cond = INS_COND_LE; break;
default: NO_WAY("unexpected condition type"); return;
}
{
EJ_NONE,
-#if defined(_TARGET_XARCH_)
#define JMP_SMALL(en, rev, ins) EJ_##en,
-#elif defined(_TARGET_ARMARCH_)
- #define JMP_SMALL(en, rev, ins, condcode) EJ_##en,
-#endif
#include "emitjmps.h"
EJ_COUNT
inst_RV_IV(INS_cmp, reg, expMask, EA_4BYTE);
// If exponent was all 1's, we need to throw ArithExcep
- genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
+ genJumpToThrowHlpBlk(EJ_eq, SCK_ARITH_EXCPN);
genCodeForTreeFloat_DONE(tree, op1->gtRegNum);
}
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3105
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[operandtypeinlinetype.exe_1907]
RelativePath=CoreMangLib\cti\system\reflection\emit\operandtype\OperandTypeInlineType\OperandTypeInlineType.exe
WorkingDir=CoreMangLib\cti\system\reflection\emit\operandtype\OperandTypeInlineType
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3105
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[_reltailjump_cs.exe_3683]
RelativePath=JIT\Methodical\Boxing\misc\_reltailjump_cs\_reltailjump_cs.exe
WorkingDir=JIT\Methodical\Boxing\misc\_reltailjump_cs
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=RT;EXPECTED_FAIL;NEED_TRIAGE
+Categories=RT;EXPECTED_PASS;ISSUE_3105
[opcodesthrow.exe_1883]
RelativePath=CoreMangLib\cti\system\reflection\emit\opcodes\OpCodesThrow\OpCodesThrow.exe
WorkingDir=CoreMangLib\cti\system\reflection\emit\opcodes\OpCodesThrow
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3105
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[b85317.exe_5669]
RelativePath=JIT\Regression\VS-ia64-JIT\M00\b85317\b85317\b85317.exe
WorkingDir=JIT\Regression\VS-ia64-JIT\M00\b85317\b85317
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3105
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[seq_funcptr_gc_r.exe_3964]
RelativePath=JIT\Methodical\explicit\funcptr\seq_funcptr_gc_r\seq_funcptr_gc_r.exe
WorkingDir=JIT\Methodical\explicit\funcptr\seq_funcptr_gc_r
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=RT;EXPECTED_PASS;ISSUE_3032
+Categories=RT;EXPECTED_PASS;ISSUE_3032;DBG_FAIL;ISSUE_5814
[b14617.exe_5517]
RelativePath=JIT\Regression\CLR-x86-JIT\V1.2-M01\b14617\b14617\b14617.exe
WorkingDir=JIT\Regression\CLR-x86-JIT\V1.2-M01\b14617\b14617
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=RT;EXPECTED_FAIL;NEED_TRIAGE
+Categories=RT;EXPECTED_PASS;ISSUE_3105
[sbyte_cs_d.exe_4376]
RelativePath=JIT\Methodical\MDArray\DataTypes\sbyte_cs_d\sbyte_cs_d.exe
WorkingDir=JIT\Methodical\MDArray\DataTypes\sbyte_cs_d
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3105
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[b57492.exe_5294]
RelativePath=JIT\Regression\CLR-x86-JIT\V1-M12-Beta2\b57492\b57492\b57492.exe
WorkingDir=JIT\Regression\CLR-x86-JIT\V1-M12-Beta2\b57492\b57492
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3105
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[_relexplicit3.exe_3995]
RelativePath=JIT\Methodical\explicit\misc\_relexplicit3\_relexplicit3.exe
WorkingDir=JIT\Methodical\explicit\misc\_relexplicit3
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=RT;EXPECTED_FAIL;NEED_TRIAGE
+Categories=RT;EXPECTED_PASS;ISSUE_3105
[booleaniconvertibletosbyte.exe_387]
RelativePath=CoreMangLib\cti\system\boolean\BooleanIConvertibleToSByte\BooleanIConvertibleToSByte.exe
WorkingDir=CoreMangLib\cti\system\boolean\BooleanIConvertibleToSByte
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=RT;EXPECTED_FAIL;NEED_TRIAGE
+Categories=RT;EXPECTED_PASS;ISSUE_3105
[b70808.exe_5371]
RelativePath=JIT\Regression\CLR-x86-JIT\V1-M12-Beta2\b70808\b70808\b70808.exe
WorkingDir=JIT\Regression\CLR-x86-JIT\V1-M12-Beta2\b70808\b70808
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3105
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[textelementenumeratorelementindex.exe_1217]
RelativePath=CoreMangLib\cti\system\globalization\textelementenumerator\TextElementEnumeratorElementIndex\TextElementEnumeratorElementIndex.exe
WorkingDir=CoreMangLib\cti\system\globalization\textelementenumerator\TextElementEnumeratorElementIndex
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;NEED_TRIAGE
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[b07411.exe_5023]
RelativePath=JIT\Regression\CLR-x86-JIT\V1-M10\b07411\b07411\b07411.exe
WorkingDir=JIT\Regression\CLR-x86-JIT\V1-M10\b07411\b07411
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=JIT;EXPECTED_FAIL;ISSUE_3105
+Categories=JIT;EXPECTED_PASS;ISSUE_3105
[tailcall_av.exe_4593]
RelativePath=JIT\Methodical\tailcall_v4\tailcall_AV\tailcall_AV.exe
WorkingDir=JIT\Methodical\tailcall_v4\tailcall_AV
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=RT;EXPECTED_PASS;ISSUE_3032
+Categories=RT;EXPECTED_PASS;ISSUE_3032;DBG_FAIL;ISSUE_5814
[b565808.exe_5560]
RelativePath=JIT\Regression\CLR-x86-JIT\v2.1\b565808\b565808\b565808.exe
WorkingDir=JIT\Regression\CLR-x86-JIT\v2.1\b565808\b565808
MaxAllowedDurationSeconds=600
HostStyle=Any
Expected=100
-Categories=RT;EXPECTED_FAIL;NEED_TRIAGE
+Categories=RT;EXPECTED_PASS;ISSUE_3105
[threadstartdouble_2.exe_198]
RelativePath=baseservices\threading\paramthreadstart\ThreadStartDouble_2\ThreadStartDouble_2.exe
WorkingDir=baseservices\threading\paramthreadstart\ThreadStartDouble_2