case GT_LE:
case GT_GE:
case GT_GT:
- {
- // TODO-ARM-CQ: Check if we can use the currently set flags.
- // TODO-ARM-CQ: Check for the case where we can simply transfer the carry bit to a register
- // (signed < or >= where targetReg != REG_NA)
-
- GenTreeOp* tree = treeNode->AsOp();
- GenTreePtr op1 = tree->gtOp1->gtEffectiveVal();
- GenTreePtr op2 = tree->gtOp2->gtEffectiveVal();
-
- genConsumeIfReg(op1);
- genConsumeIfReg(op2);
-
- instruction ins = INS_cmp;
- emitAttr cmpAttr;
- if (varTypeIsFloating(op1))
- {
- assert(op1->TypeGet() == op2->TypeGet());
- ins = INS_vcmp;
- cmpAttr = emitTypeSize(op1->TypeGet());
- emit->emitInsBinary(ins, cmpAttr, op1, op2);
- // vmrs with register 0xf has special meaning of transferring flags
- emit->emitIns_R(INS_vmrs, EA_4BYTE, REG_R15);
- }
- else if (varTypeIsLong(op1))
- {
-#ifdef DEBUG
- // The result of an unlowered long compare on a 32-bit target must either be
- // a) materialized into a register, or
- // b) unused.
- //
- // A long compare that has a result that is used but not materialized into a register should
- // have been handled by Lowering::LowerCompare.
-
- LIR::Use use;
- assert((treeNode->gtRegNum != REG_NA) || !LIR::AsRange(compiler->compCurBB).TryGetUse(treeNode, &use));
-#endif
- genCompareLong(treeNode);
- break;
- }
- else
- {
- var_types op1Type = op1->TypeGet();
- var_types op2Type = op2->TypeGet();
- assert(!varTypeIsFloating(op2Type));
- ins = INS_cmp;
- if (op1Type == op2Type)
- {
- cmpAttr = emitTypeSize(op1Type);
- }
- else
- {
- var_types cmpType = TYP_INT;
- bool op1Is64Bit = (varTypeIsLong(op1Type) || op1Type == TYP_REF);
- bool op2Is64Bit = (varTypeIsLong(op2Type) || op2Type == TYP_REF);
- NYI_IF(op1Is64Bit || op2Is64Bit, "Long compare");
- assert(!op1->isUsedFromMemory() || op1Type == op2Type);
- assert(!op2->isUsedFromMemory() || op1Type == op2Type);
- cmpAttr = emitTypeSize(cmpType);
- }
- emit->emitInsBinary(ins, cmpAttr, op1, op2);
- }
-
- // Are we evaluating this into a register?
- if (targetReg != REG_NA)
- {
- genSetRegToCond(targetReg, tree);
- genProduceReg(tree);
- }
- }
- break;
+ genCodeForCompare(treeNode->AsOp());
+ break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode);
}
//------------------------------------------------------------------------
+// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT node.
+//
+// Arguments:
+//    tree - the node
+//
+// Notes:
+//    Emits a cmp/vcmp to set the condition flags and, when the node has been
+//    allocated a register (gtRegNum != REG_NA), materializes the boolean
+//    result into that register via genSetRegToCond.
+//
+void CodeGen::genCodeForCompare(GenTreeOp* tree)
+{
+    // TODO-ARM-CQ: Check if we can use the currently set flags.
+    // TODO-ARM-CQ: Check for the case where we can simply transfer the carry bit to a register
+    // (signed < or >= where targetReg != REG_NA)
+
+    GenTreePtr op1 = tree->gtOp1->gtEffectiveVal();
+    GenTreePtr op2 = tree->gtOp2->gtEffectiveVal();
+
+    if (varTypeIsLong(op1))
+    {
+#ifdef DEBUG
+        // The result of an unlowered long compare on a 32-bit target must either be
+        // a) materialized into a register, or
+        // b) unused.
+        //
+        // A long compare that has a result that is used but not materialized into a register should
+        // have been handled by Lowering::LowerCompare.
+
+        LIR::Use use;
+        assert((tree->gtRegNum != REG_NA) || !LIR::AsRange(compiler->compCurBB).TryGetUse(tree, &use));
+#endif
+        // NOTE(review): operands are deliberately not consumed on this path --
+        // presumably genCompareLong consumes its operands and produces the
+        // result itself; confirm against its definition.
+        genCompareLong(tree);
+    }
+    else
+    {
+        regNumber targetReg = tree->gtRegNum;
+        emitter*  emit      = getEmitter();
+        emitAttr  cmpAttr;
+
+        genConsumeIfReg(op1);
+        genConsumeIfReg(op2);
+
+        if (varTypeIsFloating(op1))
+        {
+            // Floating-point compare: operand types must already match.
+            assert(op1->TypeGet() == op2->TypeGet());
+            instruction ins = INS_vcmp;
+            cmpAttr         = emitTypeSize(op1->TypeGet());
+            emit->emitInsBinary(ins, cmpAttr, op1, op2);
+            // vmrs with register 0xf has special meaning of transferring flags
+            emit->emitIns_R(INS_vmrs, EA_4BYTE, REG_R15);
+        }
+        else
+        {
+            var_types op1Type = op1->TypeGet();
+            var_types op2Type = op2->TypeGet();
+            assert(!varTypeIsFloating(op2Type));
+            instruction ins = INS_cmp;
+            if (op1Type == op2Type)
+            {
+                cmpAttr = emitTypeSize(op1Type);
+            }
+            else
+            {
+                // Operand types differ: fall back to a 32-bit compare.
+                // 64-bit operands are not handled here (NYI below).
+                var_types cmpType    = TYP_INT;
+                bool      op1Is64Bit = (varTypeIsLong(op1Type) || op1Type == TYP_REF);
+                bool      op2Is64Bit = (varTypeIsLong(op2Type) || op2Type == TYP_REF);
+                NYI_IF(op1Is64Bit || op2Is64Bit, "Long compare");
+                assert(!op1->isUsedFromMemory() || op1Type == op2Type);
+                assert(!op2->isUsedFromMemory() || op1Type == op2Type);
+                cmpAttr = emitTypeSize(cmpType);
+            }
+            emit->emitInsBinary(ins, cmpAttr, op1, op2);
+        }
+
+        // Are we evaluating this into a register?
+        if (targetReg != REG_NA)
+        {
+            genSetRegToCond(targetReg, tree);
+            genProduceReg(tree);
+        }
+    }
+}
+
+//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
case GT_LE:
case GT_GE:
case GT_GT:
- {
- // TODO-ARM64-CQ: Check if we can use the currently set flags.
- // TODO-ARM64-CQ: Check for the case where we can simply transfer the carry bit to a register
- // (signed < or >= where targetReg != REG_NA)
-
- GenTreeOp* tree = treeNode->AsOp();
- GenTreePtr op1 = tree->gtOp1;
- GenTreePtr op2 = tree->gtOp2;
- var_types op1Type = op1->TypeGet();
- var_types op2Type = op2->TypeGet();
-
- assert(!op1->isUsedFromMemory());
- assert(!op2->isUsedFromMemory());
-
- genConsumeOperands(tree);
-
- emitAttr cmpSize = EA_UNKNOWN;
-
- if (varTypeIsFloating(op1Type))
- {
- assert(varTypeIsFloating(op2Type));
- assert(!op1->isContained());
- assert(op1Type == op2Type);
- cmpSize = EA_ATTR(genTypeSize(op1Type));
-
- if (op2->IsIntegralConst(0))
- {
- emit->emitIns_R_F(INS_fcmp, cmpSize, op1->gtRegNum, 0.0);
- }
- else
- {
- assert(!op2->isContained());
- emit->emitIns_R_R(INS_fcmp, cmpSize, op1->gtRegNum, op2->gtRegNum);
- }
- }
- else
- {
- assert(!varTypeIsFloating(op2Type));
- // We don't support swapping op1 and op2 to generate cmp reg, imm
- assert(!op1->isContainedIntOrIImmed());
-
- // TODO-ARM64-CQ: the second register argument of a CMP can be sign/zero
- // extended as part of the instruction (using "CMP (extended register)").
- // We should use that if possible, swapping operands
- // (and reversing the condition) if necessary.
- unsigned op1Size = genTypeSize(op1Type);
- unsigned op2Size = genTypeSize(op2Type);
-
- if ((op1Size < 4) || (op1Size < op2Size))
- {
- // We need to sign/zero extend op1 up to 32 or 64 bits.
- instruction ins = ins_Move_Extend(op1Type, true);
- inst_RV_RV(ins, op1->gtRegNum, op1->gtRegNum);
- }
-
- if (!op2->isContainedIntOrIImmed())
- {
- if ((op2Size < 4) || (op2Size < op1Size))
- {
- // We need to sign/zero extend op2 up to 32 or 64 bits.
- instruction ins = ins_Move_Extend(op2Type, true);
- inst_RV_RV(ins, op2->gtRegNum, op2->gtRegNum);
- }
- }
- cmpSize = EA_4BYTE;
- if ((op1Size == EA_8BYTE) || (op2Size == EA_8BYTE))
- {
- cmpSize = EA_8BYTE;
- }
-
- if (op2->isContainedIntOrIImmed())
- {
- GenTreeIntConCommon* intConst = op2->AsIntConCommon();
- emit->emitIns_R_I(INS_cmp, cmpSize, op1->gtRegNum, intConst->IconValue());
- }
- else
- {
- emit->emitIns_R_R(INS_cmp, cmpSize, op1->gtRegNum, op2->gtRegNum);
- }
- }
-
- // Are we evaluating this into a register?
- if (targetReg != REG_NA)
- {
- genSetRegToCond(targetReg, tree);
- genProduceReg(tree);
- }
- }
- break;
+ genCodeForCompare(treeNode->AsOp());
+ break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode);
genProduceReg(treeNode);
}
+//------------------------------------------------------------------------
+// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT node.
+//
+// Arguments:
+//    tree - the node
+//
+// Notes:
+//    Emits an fcmp/cmp to set the condition flags and, when the node has been
+//    allocated a register (gtRegNum != REG_NA), materializes the boolean
+//    result into that register via genSetRegToCond.
+//
+void CodeGen::genCodeForCompare(GenTreeOp* tree)
+{
+    regNumber targetReg = tree->gtRegNum;
+    emitter*  emit      = getEmitter();
+
+    // TODO-ARM64-CQ: Check if we can use the currently set flags.
+    // TODO-ARM64-CQ: Check for the case where we can simply transfer the carry bit to a register
+    // (signed < or >= where targetReg != REG_NA)
+
+    GenTreePtr op1     = tree->gtOp1;
+    GenTreePtr op2     = tree->gtOp2;
+    var_types  op1Type = op1->TypeGet();
+    var_types  op2Type = op2->TypeGet();
+
+    assert(!op1->isUsedFromMemory());
+    assert(!op2->isUsedFromMemory());
+
+    genConsumeOperands(tree);
+
+    emitAttr cmpSize = EA_UNKNOWN;
+
+    if (varTypeIsFloating(op1Type))
+    {
+        assert(varTypeIsFloating(op2Type));
+        assert(!op1->isContained());
+        assert(op1Type == op2Type);
+        cmpSize = EA_ATTR(genTypeSize(op1Type));
+
+        if (op2->IsIntegralConst(0))
+        {
+            // Compare directly against the immediate 0.0 form of fcmp.
+            emit->emitIns_R_F(INS_fcmp, cmpSize, op1->gtRegNum, 0.0);
+        }
+        else
+        {
+            assert(!op2->isContained());
+            emit->emitIns_R_R(INS_fcmp, cmpSize, op1->gtRegNum, op2->gtRegNum);
+        }
+    }
+    else
+    {
+        assert(!varTypeIsFloating(op2Type));
+        // We don't support swapping op1 and op2 to generate cmp reg, imm
+        assert(!op1->isContainedIntOrIImmed());
+
+        // TODO-ARM64-CQ: the second register argument of a CMP can be sign/zero
+        // extended as part of the instruction (using "CMP (extended register)").
+        // We should use that if possible, swapping operands
+        // (and reversing the condition) if necessary.
+        unsigned op1Size = genTypeSize(op1Type);
+        unsigned op2Size = genTypeSize(op2Type);
+
+        if ((op1Size < 4) || (op1Size < op2Size))
+        {
+            // We need to sign/zero extend op1 up to 32 or 64 bits.
+            instruction ins = ins_Move_Extend(op1Type, true);
+            inst_RV_RV(ins, op1->gtRegNum, op1->gtRegNum);
+        }
+
+        if (!op2->isContainedIntOrIImmed())
+        {
+            if ((op2Size < 4) || (op2Size < op1Size))
+            {
+                // We need to sign/zero extend op2 up to 32 or 64 bits.
+                instruction ins = ins_Move_Extend(op2Type, true);
+                inst_RV_RV(ins, op2->gtRegNum, op2->gtRegNum);
+            }
+        }
+
+        // op1Size/op2Size are byte counts from genTypeSize, not emitAttr values;
+        // the previous comparison against EA_8BYTE only worked because EA_8BYTE
+        // happens to equal 8. Compare byte counts directly.
+        cmpSize = ((op1Size == 8) || (op2Size == 8)) ? EA_8BYTE : EA_4BYTE;
+
+        if (op2->isContainedIntOrIImmed())
+        {
+            GenTreeIntConCommon* intConst = op2->AsIntConCommon();
+            emit->emitIns_R_I(INS_cmp, cmpSize, op1->gtRegNum, intConst->IconValue());
+        }
+        else
+        {
+            emit->emitIns_R_R(INS_cmp, cmpSize, op1->gtRegNum, op2->gtRegNum);
+        }
+    }
+
+    // Are we evaluating this into a register?
+    if (targetReg != REG_NA)
+    {
+        genSetRegToCond(targetReg, tree);
+        genProduceReg(tree);
+    }
+}
+
int CodeGenInterface::genSPtoFPdelta()
{
int delta;