// ----- Emulated conditions.
-// On MIPS we use this enum to abstract from conditionnal branch instructions.
+// On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons.
-// Oppposite conditions must be paired as odd/even numbers
+// Opposite conditions must be paired as odd/even numbers
// because 'NegateCondition' function flips LSB to negate condition.
enum Condition {
// Any value < 0 is considered no_condition.
no_overflow = 1,
Uless = 2,
Ugreater_equal = 3,
- equal = 4,
- not_equal = 5,
- Uless_equal = 6,
- Ugreater = 7,
+ Uless_equal = 4,
+ Ugreater = 5,
+ equal = 6,
+ not_equal = 7, // Unordered or Not Equal.
negative = 8,
positive = 9,
parity_even = 10,
less_equal = 14,
greater = 15,
ueq = 16, // Unordered or Equal.
- nue = 17, // Not (Unordered or Equal).
+ ogl = 17, // Ordered and Not Equal.
cc_always = 18,
// Aliases.
hs = Ugreater_equal,
lo = Uless,
al = cc_always,
+ ult = Uless,
+ uge = Ugreater_equal,
+ ule = Uless_equal,
+ ugt = Ugreater,
cc_default = kNoCondition
};
}
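
Why the odd/even pairing matters: the integer-condition negation helper referenced in the comment above reduces to a single XOR of the LSB. A minimal sketch (this mirrors the existing NegateCondition helper; it is not part of this patch):

inline Condition NegateCondition(Condition cc) {
  // Opposite conditions are adjacent odd/even pairs (Uless = 2 /
  // Ugreater_equal = 3, equal = 6 / not_equal = 7, ...), so flipping
  // the low bit flips the condition.
  DCHECK(cc != cc_always);  // cc_always (18) has no paired opposite.
  return static_cast<Condition>(cc ^ 1);
}
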
+inline Condition NegateFpuCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ switch (cc) {
+ case ult:
+ return ge;
+ case ugt:
+ return le;
+ case uge:
+ return lt;
+ case ule:
+ return gt;
+ case lt:
+ return uge;
+ case gt:
+ return ule;
+ case ge:
+ return ult;
+ case le:
+ return ugt;
+ case eq:
+ return ne;
+ case ne:
+ return eq;
+ case ueq:
+ return ogl;
+ case ogl:
+ return ueq;
+ default:
+ return cc;
+ }
+}
+
+
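
The new NegateFpuCondition cannot reuse the LSB trick because IEEE 754 comparisons have a third outcome, unordered: with a NaN operand both lt and ge are false, so the true negation of lt is uge ("unordered or greater-equal"). A standalone C++ check of that semantics (illustration only, not V8 code):

#include <cassert>
#include <cmath>

int main() {
  double nan = std::nan("");
  // With a NaN operand, the ordered comparisons are all false...
  assert(!(nan < 1.0) && !(nan >= 1.0));
  // ...so negating lt as plain ge would leave both branches untaken.
  // The correct negation of lt is "unordered or >=", which is what
  // C++'s !(a < b) computes:
  assert(!(nan < 1.0) == (std::isunordered(nan, 1.0) || nan >= 1.0));
  return 0;
}
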
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cc) {
switch (cc) {
__ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
__ sll(a1, a1, kPointerSizeLog2); // Count to offset.
__ addu(a1, t0, a1); // a1 = one past the last FrameDescription**.
- __ jmp(&outer_loop_header);
+ __ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ lw(a2, MemOperand(t0, 0)); // output_[ix]
__ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
+ __ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop);
__ Subu(a3, a3, Operand(sizeof(uint32_t)));
__ Addu(t2, a2, Operand(a3));
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
+ __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ Branch(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, -i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
// Entry with id == kMaxEntriesBranchReach - 1.
__ bind(&trampoline_jump);
- __ Branch(USE_DELAY_SLOT, &done_special);
+ __ BranchShort(USE_DELAY_SLOT, &done_special);
__ li(at, -1);
for (int i = kMaxEntriesBranchReach; i < count(); i++) {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
+ __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
}
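
Each table entry must occupy exactly table_entry_size_ bytes (presumably branch plus delay-slot li, two instructions), which is why the id is loaded in the exposed delay slot and why DCHECK(is_int16(i)) guards the immediate: li stays a single instruction only for 16-bit values. A sketch of that invariant (is_int16 paraphrases V8's real helper; the entry-size constant is illustrative):

#include <cstdint>

// li(at, imm) expands to one instruction (addiu/ori) only when imm fits
// in 16 bits; a wider immediate needs lui + ori and would break the
// fixed-size layout verified by DCHECK_EQ(table_entry_size_, ...).
inline bool is_int16(int64_t value) {
  return value >= INT16_MIN && value <= INT16_MAX;
}

// One entry: branch (4 bytes) + li in its delay slot (4 bytes).
constexpr int kIllustrativeTableEntrySize = 2 * 4;
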
EmitGoto(left_block);
} else if (left_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
- NegateCondition(condition), src1, src2);
+ NegateFpuCondition(condition), src1, src2);
} else if (right_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
condition, src1, src2);
DCHECK(!info()->IsStub());
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
- EmitBranchF(instr, nue, reg, kDoubleRegZero);
+ EmitBranchF(instr, ogl, reg, kDoubleRegZero);
} else {
DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
DoubleRegister dbl_scratch = double_scratch0();
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false.
- EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
+ EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
} else if (type.IsString()) {
DCHECK(!info()->IsStub());
__ lw(at, FieldMemOperand(reg, String::kLengthOffset));
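
The switch from nue to ogl is what makes NaN behave here: the branch is taken only when the value is ordered and differs from zero, so NaN (unordered) falls through to false, exactly the "Zero and NaN are false" contract in the comments. In portable C++ terms (illustration only):

#include <cassert>
#include <cmath>

int main() {
  double nan = std::nan("");
  // C++ '!=' means "unordered or not equal", so it is true for NaN...
  assert(nan != 0.0);
  // ...whereas ogl ("ordered and not equal") is false for NaN, giving
  // the desired truthiness: zero and NaN are both false.
  bool ordered_not_equal =
      std::isless(nan, 0.0) || std::isgreater(nan, 0.0);
  assert(!ordered_not_equal);
  return 0;
}
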
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
- Label* nan, Condition cc, FPURegister cmp1,
+ Label* nan, Condition cond, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (cc == al) {
- Branch(bd, target);
- return;
- }
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == al) {
+ Branch(bd, target);
+ return;
+ }
- if (IsMipsArchVariant(kMips32r6)) {
- sizeField = sizeField == D ? L : W;
- }
- DCHECK(nan || target);
- // Check for unordered (NaN) cases.
- if (nan) {
- if (!IsMipsArchVariant(kMips32r6)) {
- c(UN, D, cmp1, cmp2);
- bc1t(nan);
- } else {
- // Use kDoubleCompareReg for comparison result. It has to be unavailable
- // to lithium register allocator.
- DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(nan, kDoubleCompareReg);
+ if (IsMipsArchVariant(kMips32r6)) {
+ sizeField = sizeField == D ? L : W;
+ }
+ DCHECK(nan || target);
+ // Check for unordered (NaN) cases.
+ if (nan) {
+      bool long_branch =
+          nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
+ if (!IsMipsArchVariant(kMips32r6)) {
+ if (long_branch) {
+ Label skip;
+ c(UN, D, cmp1, cmp2);
+ bc1f(&skip);
+ nop();
+ Jr(nan, bd);
+ bind(&skip);
+ } else {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ if (bd == PROTECT) {
+ nop();
+ }
+ }
+ } else {
+        // Use kDoubleCompareReg for the comparison result. It has to be
+        // unavailable to the Lithium register allocator.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ if (long_branch) {
+ Label skip;
+ cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(&skip, kDoubleCompareReg);
+ nop();
+ Jr(nan, bd);
+ bind(&skip);
+ } else {
+ cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(nan, kDoubleCompareReg);
+ if (bd == PROTECT) {
+ nop();
+ }
+ }
+ }
+ }
+
+ if (target) {
+      bool long_branch =
+          target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ Condition neg_cond = NegateFpuCondition(cond);
+ BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
+ Jr(target, bd);
+ bind(&skip);
+ } else {
+ BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
+ }
}
}
+}
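
The long_branch paths above exist because a MIPS conditional branch encodes a signed 16-bit offset counted in instruction words, a reach of about +-128 KB from the delay slot; a bound label outside that range, or any unbound label once the trampoline pool has been emitted, must instead be reached by a short branch on the negated condition jumping over an unrestricted Jr. A simplified sketch of the range test that is_near performs (offsets per the MIPS32 ISA; the real helper also accounts for trampoline slack):

#include <cstdint>

// Can `target` be reached by a short I-type branch located at `pc`?
// The 16-bit immediate is a signed word offset relative to the
// instruction after the branch (the delay slot).
inline bool FitsShortBranch(int64_t pc, int64_t target) {
  int64_t word_offset = (target - (pc + 4)) >> 2;
  return word_offset >= INT16_MIN && word_offset <= INT16_MAX;
}
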
+void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
+ Condition cc, FPURegister cmp1,
+ FPURegister cmp2, BranchDelaySlot bd) {
if (!IsMipsArchVariant(kMips32r6)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
c(OLT, sizeField, cmp1, cmp2);
bc1t(target);
break;
+ case ult:
+ c(ULT, sizeField, cmp1, cmp2);
+ bc1t(target);
+ break;
case gt:
c(ULE, sizeField, cmp1, cmp2);
bc1f(target);
break;
+ case ugt:
+ c(OLE, sizeField, cmp1, cmp2);
+ bc1f(target);
+ break;
case ge:
c(ULT, sizeField, cmp1, cmp2);
bc1f(target);
break;
+ case uge:
+ c(OLT, sizeField, cmp1, cmp2);
+ bc1f(target);
+ break;
case le:
c(OLE, sizeField, cmp1, cmp2);
bc1t(target);
break;
+ case ule:
+ c(ULE, sizeField, cmp1, cmp2);
+ bc1t(target);
+ break;
case eq:
c(EQ, sizeField, cmp1, cmp2);
bc1t(target);
c(UEQ, sizeField, cmp1, cmp2);
bc1t(target);
break;
- case ne:
+ case ne: // Unordered or not equal.
c(EQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
- case nue:
+ case ogl:
c(UEQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
}
}
} else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
+ case ult:
+ cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
case gt:
cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
+ case ugt:
+ cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
case ge:
cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
+ case uge:
+ cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
case le:
cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
break;
+ case ule:
+ cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
case eq:
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(target, kDoubleCompareReg);
cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
- case nue:
+ case ogl:
cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(target, kDoubleCompareReg);
break;
}
}
}
-
if (bd == PROTECT) {
nop();
}
Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
+ void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
+ FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT);
+
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
no_overflow = 1,
Uless = 2,
Ugreater_equal = 3,
- equal = 4,
- not_equal = 5,
- Uless_equal = 6,
- Ugreater = 7,
+ Uless_equal = 4,
+ Ugreater = 5,
+ equal = 6,
+ not_equal = 7, // Unordered or Not Equal.
negative = 8,
positive = 9,
parity_even = 10,
less_equal = 14,
greater = 15,
ueq = 16, // Unordered or Equal.
- nue = 17, // Not (Unordered or Equal).
+ ogl = 17, // Ordered and Not Equal.
cc_always = 18,
// Aliases.
hs = Ugreater_equal,
lo = Uless,
al = cc_always,
+ ult = Uless,
+ uge = Ugreater_equal,
+ ule = Uless_equal,
+ ugt = Ugreater,
cc_default = kNoCondition
};
}
+inline Condition NegateFpuCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ switch (cc) {
+ case ult:
+ return ge;
+ case ugt:
+ return le;
+ case uge:
+ return lt;
+ case ule:
+ return gt;
+ case lt:
+ return uge;
+ case gt:
+ return ule;
+ case ge:
+ return ult;
+ case le:
+ return ugt;
+ case eq:
+ return ne;
+ case ne:
+ return eq;
+ case ueq:
+ return ogl;
+ case ogl:
+ return ueq;
+ default:
+ return cc;
+ }
+}
+
+
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cc) {
switch (cc) {
__ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ dsll(a1, a1, kPointerSizeLog2); // Count to offset.
__ daddu(a1, a4, a1); // a1 = one past the last FrameDescription**.
- __ jmp(&outer_loop_header);
+ __ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ ld(a2, MemOperand(a4, 0)); // output_[ix]
__ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
+ __ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop);
__ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
__ Daddu(a6, a2, Operand(a3));
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
+ __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ Branch(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, -i); // In the delay slot.
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
// Entry with id == kMaxEntriesBranchReach - 1.
__ bind(&trampoline_jump);
- __ Branch(USE_DELAY_SLOT, &done_special);
+ __ BranchShort(USE_DELAY_SLOT, &done_special);
__ li(at, -1);
for (int i = kMaxEntriesBranchReach; i < count(); i++) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
- NegateCondition(condition), src1, src2);
+ NegateFpuCondition(condition), src1, src2);
} else if (right_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
condition, src1, src2);
DCHECK(!info()->IsStub());
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
- EmitBranchF(instr, nue, reg, kDoubleRegZero);
+ EmitBranchF(instr, ogl, reg, kDoubleRegZero);
} else {
DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
DoubleRegister dbl_scratch = double_scratch0();
__ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false.
- EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
+ EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
} else if (type.IsString()) {
DCHECK(!info()->IsStub());
__ ld(at, FieldMemOperand(reg, String::kLengthOffset));
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
- Label* nan, Condition cc, FPURegister cmp1,
+ Label* nan, Condition cond, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (cc == al) {
+ if (cond == al) {
Branch(bd, target);
return;
}
if (kArchVariant == kMips64r6) {
sizeField = sizeField == D ? L : W;
}
+
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
+    bool long_branch =
+        nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
if (kArchVariant != kMips64r6) {
- c(UN, D, cmp1, cmp2);
- bc1t(nan);
+ if (long_branch) {
+ Label skip;
+ c(UN, D, cmp1, cmp2);
+ bc1f(&skip);
+ nop();
+ Jr(nan, bd);
+ bind(&skip);
+ } else {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ if (bd == PROTECT) {
+ nop();
+ }
+ }
} else {
-      // Use f31 for comparison result. It has to be unavailable to lithium
-      // register allocator.
+      // Use kDoubleCompareReg for the comparison result. It has to be
+      // unavailable to the Lithium register allocator.
- DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
- cmp(UN, L, f31, cmp1, cmp2);
- bc1nez(nan, f31);
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ if (long_branch) {
+ Label skip;
+ cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(&skip, kDoubleCompareReg);
+ nop();
+ Jr(nan, bd);
+ bind(&skip);
+ } else {
+ cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(nan, kDoubleCompareReg);
+ if (bd == PROTECT) {
+ nop();
+ }
+ }
+ }
+ }
+
+ if (target) {
+    bool long_branch =
+        target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ Condition neg_cond = NegateFpuCondition(cond);
+ BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
+ Jr(target, bd);
+ bind(&skip);
+ } else {
+ BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
}
}
+}
+
+void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
+ Condition cc, FPURegister cmp1,
+ FPURegister cmp2, BranchDelaySlot bd) {
if (kArchVariant != kMips64r6) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
c(OLT, sizeField, cmp1, cmp2);
bc1t(target);
break;
+ case ult:
+ c(ULT, sizeField, cmp1, cmp2);
+ bc1t(target);
+ break;
case gt:
c(ULE, sizeField, cmp1, cmp2);
bc1f(target);
break;
+ case ugt:
+ c(OLE, sizeField, cmp1, cmp2);
+ bc1f(target);
+ break;
case ge:
c(ULT, sizeField, cmp1, cmp2);
bc1f(target);
break;
+ case uge:
+ c(OLT, sizeField, cmp1, cmp2);
+ bc1f(target);
+ break;
case le:
c(OLE, sizeField, cmp1, cmp2);
bc1t(target);
break;
+ case ule:
+ c(ULE, sizeField, cmp1, cmp2);
+ bc1t(target);
+ break;
case eq:
c(EQ, sizeField, cmp1, cmp2);
bc1t(target);
c(UEQ, sizeField, cmp1, cmp2);
bc1t(target);
break;
- case ne:
+ case ne: // Unordered or not equal.
c(EQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
- case nue:
+ case ogl:
c(UEQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
}
}
} else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
-      // Unsigned conditions are treated as their signed counterpart.
- // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode.
- DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
+      // Use kDoubleCompareReg for the comparison result; it is valid in
+      // fp64 (FR = 1) mode.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
switch (cc) {
case lt:
- cmp(OLT, sizeField, f31, cmp1, cmp2);
- bc1nez(target, f31);
+ cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ult:
+ cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
break;
case gt:
- cmp(ULE, sizeField, f31, cmp1, cmp2);
- bc1eqz(target, f31);
+ cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case ugt:
+ cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
break;
case ge:
- cmp(ULT, sizeField, f31, cmp1, cmp2);
- bc1eqz(target, f31);
+ cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case uge:
+ cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
break;
case le:
- cmp(OLE, sizeField, f31, cmp1, cmp2);
- bc1nez(target, f31);
+ cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ule:
+ cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
break;
case eq:
- cmp(EQ, sizeField, f31, cmp1, cmp2);
- bc1nez(target, f31);
+ cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
break;
case ueq:
- cmp(UEQ, sizeField, f31, cmp1, cmp2);
- bc1nez(target, f31);
+ cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
break;
case ne:
- cmp(EQ, sizeField, f31, cmp1, cmp2);
- bc1eqz(target, f31);
+ cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
break;
- case nue:
- cmp(UEQ, sizeField, f31, cmp1, cmp2);
- bc1eqz(target, f31);
+ case ogl:
+ cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
break;
default:
CHECK(0);
Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
+ void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
+ FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT);
+
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,