DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
- // JumpToExternalReference expects s0 to contain the number of arguments
+ // JumpToExternalReference expects a0 to contain the number of arguments
// including the receiver and the extra arguments.
- __ Addu(s0, a0, num_extra_args + 1);
- __ sll(s1, s0, kPointerSizeLog2);
- __ Subu(s1, s1, kPointerSize);
+ __ Addu(a0, a0, num_extra_args + 1);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
- // s0: number of arguments including receiver
- // s1: size of arguments excluding receiver
- // s2: pointer to builtin function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
- // The reason for this is that these arguments would need to be saved anyway
- // so it's faster to set them up directly.
- // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
-
// Compute the argv pointer in a callee-saved register.
+ __ sll(s1, a0, kPointerSizeLog2);
__ Addu(s1, sp, s1);
+ __ Subu(s1, s1, kPointerSize);
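+ // s1 = sp + argc * kPointerSize - kPointerSize: the address of the
+ // highest-addressed argument slot, used as argv below.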
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
// Prepare arguments for C routine.
// a0 = argc
- __ mov(a0, s0);
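+ // Save argc (a0) and the builtin entry point (a1) in callee-saved
+ // registers so they survive the C call; a1 itself is reused for argv
+ // below.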
+ __ mov(s0, a0);
+ __ mov(s2, a1);
// a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
- // ReturnTrueFalse is only implemented for inlined call sites.
- DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
// Fixed register usage throughout the stub:
const Register object = a0; // Object (lhs).
// If there is a call site cache don't look in the global cache, but do the
// real lookup and update the call site cache.
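+ // Result convention: without ReturnTrueFalseObject() the stub returns
+ // Smi 0 for "is an instance" and Smi 1 for "is not"; with it, v0 is
+ // loaded with the true/false heap objects directly.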
- if (!HasCallSiteInlineCheck()) {
+ if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
Label miss;
__ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
__ Branch(&miss, ne, function, Operand(at));
if (!HasCallSiteInlineCheck()) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ }
} else {
// Patch the call site to return true.
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
if (!HasCallSiteInlineCheck()) {
__ li(v0, Operand(Smi::FromInt(1)));
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ }
} else {
// Patch the call site to return false.
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
ne,
scratch,
Operand(isolate()->factory()->null_value()));
- __ li(v0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ } else {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ li(v0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ } else {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
__ IsObjectJSStringType(object, scratch, &slow);
- __ li(v0, Operand(Smi::FromInt(1)));
+ if (ReturnTrueFalseObject()) {
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ } else {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
// Slow-case. Tail call builtin.
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(s0.code(), params);
- output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
- output_frame->SetRegister(s2.code(), handler);
+ output_frame->SetRegister(a0.code(), params);
+ output_frame->SetRegister(a1.code(), handler);
}
}
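+// Three-operand signed divide: pre-r6 MIPS32 only has the two-operand
+// div, which leaves the quotient in the LO special register (read back
+// with mflo); r6 dropped HI/LO here and encodes the destination directly.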
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mflo(res);
+ } else {
+ div(res, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mflo(res);
+ } else {
+ div(res, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (!IsMipsArchVariant(kMips32r6)) {
}
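+// Unsigned remainder: pre-r6 the remainder lives in HI (read back with
+// mfhi), while r6 provides the three-operand modu directly.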
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ modu(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, at);
+ mfhi(rd);
+ } else {
+ modu(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
}
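+// Three-operand unsigned divide, mirroring Div above.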
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, rt.rm());
+ mflo(res);
+ } else {
+ divu(res, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ divu(rs, at);
+ mflo(res);
+ } else {
+ divu(res, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
// Unsigned comparison.
case Ugreater:
if (r2.is(zero_reg)) {
- bgtz(rs, offset);
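+ // For unsigned operands, "greater than zero" means "not zero"; bgtz
+ // is a signed test and fails for values with the sign bit set.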
+ bne(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
bne(scratch, zero_reg, offset);
break;
case Ugreater_equal:
if (r2.is(zero_reg)) {
- bgez(rs, offset);
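+ // Any unsigned value is >= 0, so the branch is unconditional; bgez is
+ // a signed test and falls through when the sign bit is set.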
+ b(offset);
} else {
sltu(scratch, rs, r2);
beq(scratch, zero_reg, offset);
break;
case Uless_equal:
if (r2.is(zero_reg)) {
- b(offset);
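+ // Unsigned <= 0 holds only for zero; the old unconditional branch was
+ // taken for every value.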
+ beq(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
beq(scratch, zero_reg, offset);
// Unsigned comparison.
case Ugreater:
if (rt.imm32_ == 0) {
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
break;
case Ugreater_equal:
if (rt.imm32_ == 0) {
- bgez(rs, offset);
+ b(offset);
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
beq(scratch, zero_reg, offset);
break;
case Uless_equal:
if (rt.imm32_ == 0) {
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
case Ugreater:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
case Ugreater_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ b(offset);
} else {
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
case Uless_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
case Ugreater_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ b(offset);
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
offset = shifted_branch_offset(L, false);
}
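+// Two's-complement addition overflows exactly when both operands have the
+// same sign and the result's sign differs, i.e. when
+// (dst ^ left) & (dst ^ right) is negative. overflow_dst is therefore
+// negative exactly when overflow occurred (see BranchOnOverflow).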
-void MacroAssembler::AdduAndCheckForOverflow(Register dst,
- Register left,
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addiu(dst, left, right.immediate()); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ // Load right into t9: xori only takes a zero-extended uint16
+ // immediate, while addiu sign-extends its int16.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(overflow_dst, dst, t9);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ addiu(dst, left, right.immediate());
+ xor_(overflow_dst, dst, left);
+ // Load right into t9: xori only takes a zero-extended uint16
+ // immediate, while addiu sign-extends its int16.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(scratch, dst, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
+
+
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
}
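+// Two's-complement subtraction overflows exactly when the operands have
+// different signs and the result's sign differs from left's, i.e. when
+// (left ^ dst) & (left ^ right) is negative.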
-void MacroAssembler::SubuAndCheckForOverflow(Register dst,
- Register left,
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
+ const Operand& right,
+ Register overflow_dst,
+ Register scratch) {
+ if (right.is_reg()) {
+ SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ } else {
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addiu(dst, left, -(right.immediate())); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ // Load right into t9: xori only takes a zero-extended uint16
+ // immediate, while addiu sign-extends its int16.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(scratch, scratch, t9); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ addiu(dst, left, -(right.immediate()));
+ xor_(overflow_dst, dst, left);
+ // Load right into t9: xori only takes a zero-extended uint16
+ // immediate, while addiu sign-extends its int16.
+ addiu(t9, zero_reg, right.immediate());
+ xor_(scratch, left, t9);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
DEFINE_INSTRUCTION(Addu);
DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Mul);
+ DEFINE_INSTRUCTION(Div);
+ DEFINE_INSTRUCTION(Divu);
DEFINE_INSTRUCTION(Mod);
+ DEFINE_INSTRUCTION(Modu);
DEFINE_INSTRUCTION(Mulh);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Multu);
Register overflow_dst,
Register scratch = at);
+ void AdduAndCheckForOverflow(Register dst, Register left,
+ const Operand& right, Register overflow_dst,
+ Register scratch = at);
+
void SubuAndCheckForOverflow(Register dst,
Register left,
Register right,
Register overflow_dst,
Register scratch = at);
+ void SubuAndCheckForOverflow(Register dst, Register left,
+ const Operand& right, Register overflow_dst,
+ Register scratch = at);
+
void BranchOnOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
// Runtime calls.
// See comments at the beginning of CEntryStub::Generate.
- inline void PrepareCEntryArgs(int num_args) {
- li(s0, num_args);
- li(s1, (num_args - 1) * kPointerSize);
- }
+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
inline void PrepareCEntryFunction(const ExternalReference& ref) {
- li(s2, Operand(ref));
+ li(a1, Operand(ref));
}
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \