void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
- int InstructionsGeneratedSince(Label* l) {
- return (pc_offset() - l->pos()) / kInstrSize;
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
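+
+ // (SizeOfCodeGeneratedSince also lets the macro assembler assert that
+ // emitted call sequences match their CallSize predictions.)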
// Scope class for postponing trampoline pool generation.
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
__ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(Operand(t9));
+ __ Jump(t9);
// a0: number of arguments
// a1: called object
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
} else {
ParameterCount actual(a0);
__ InvokeFunction(a1, actual, CALL_FUNCTION,
__ mov(scratch1, a0);
ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
__ push(ra);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
// Write Smi from a1 to a1 and a0 in double format.
__ mov(scratch1, a1);
ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(ra);
}
}
__ mov(scratch1, object);
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(ra);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub.GetCode());
__ pop(ra);
}
__ mov(t6, rhs);
ConvertToDoubleStub stub1(a1, a0, t6, t5);
__ push(ra);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
__ pop(ra);
}
__ mov(t6, lhs);
ConvertToDoubleStub stub2(a3, a2, t6, t5);
__ push(ra);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(ra);
// Load rhs to a double in a1, a0.
if (rhs.is(a0)) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- __ CallWithAstId(ic);
+ __ Call(ic);
// Value in v0 is ignored (declarations are statements).
}
}
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ Branch(&next_test, ne, v0, Operand(zero_reg));
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ CallWithAstId(ic, mode);
+ __ Call(ic, mode);
}
__ li(a0, Operand(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ Branch(done);
}
}
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(v0);
} else if (slot->type() == Slot::LOOKUP) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, key->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name in a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
__ pop(a1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- __ CallWithAstId(ic);
+ __ Call(ic);
break;
}
case KEYED_PROPERTY: {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- __ CallWithAstId(ic);
+ __ Call(ic);
break;
}
}
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function scope.
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- __ CallWithAstId(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ lw(a1, GlobalObjectOperand());
__ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
__ Push(v0, a1); // Function, receiver.
isolate()->stub_cache()->ComputeCallInitialize(arg_count,
NOT_IN_LOOP,
mode);
- __ CallWithAstId(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
- __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(v0);
}
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ CallWithAstId(ic);
+ __ Call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL &&
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
-// Arguments macros.
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
-#define COND_ARGS cond, r1, r2
-
-#define REGISTER_TARGET_BODY(Name) \
-void MacroAssembler::Name(Register target, \
- BranchDelaySlot bd) { \
- Name(Operand(target), bd); \
-} \
-void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(Operand(target), COND_ARGS, bd); \
-}
-
-
-#define INT_PTR_TARGET_BODY(Name) \
-void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
- BranchDelaySlot bd) { \
- Name(Operand(target, rmode), bd); \
-} \
-void MacroAssembler::Name(intptr_t target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(Operand(target, rmode), COND_ARGS, bd); \
-}
-
-
-#define BYTE_PTR_TARGET_BODY(Name) \
-void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
-} \
-void MacroAssembler::Name(byte* target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
-}
-
-
-#define CODE_TARGET_BODY(Name) \
-void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
-} \
-void MacroAssembler::Name(Handle<Code> target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
-}
-
-
-REGISTER_TARGET_BODY(Jump)
-REGISTER_TARGET_BODY(Call)
-INT_PTR_TARGET_BODY(Jump)
-INT_PTR_TARGET_BODY(Call)
-BYTE_PTR_TARGET_BODY(Jump)
-BYTE_PTR_TARGET_BODY(Call)
-CODE_TARGET_BODY(Jump)
-CODE_TARGET_BODY(Call)
-
-#undef COND_TYPED_ARGS
-#undef COND_ARGS
-#undef REGISTER_TARGET_BODY
-#undef BYTE_PTR_TARGET_BODY
-#undef CODE_TARGET_BODY
-
-
-void MacroAssembler::Ret(BranchDelaySlot bd) {
- Jump(Operand(ra), bd);
-}
-
-
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
- BranchDelaySlot bd) {
- Jump(Operand(ra), cond, r1, r2, bd);
-}
-
-
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
+void MacroAssembler::Jump(Register target,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
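+ // If cond does not hold, branch over the jr that follows.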
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT)
+ nop();
+}
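+
+// (With the default arguments, Jump(reg) thus emits a single jr plus one
+// protecting delay-slot nop.)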
+
+
+void MacroAssembler::Jump(intptr_t target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ li(t9, Operand(target, rmode));
+ Jump(t9, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Jump(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
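+ // Code-target addresses must go through the Handle<Code> overload below.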
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
+}
+
+
+int MacroAssembler::CallSize(Register target,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ int size = 0;
+
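+ // jalr alone is one instruction; the conditional form adds a branch and
+ // its delay-slot nop.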
+ if (cond == cc_always) {
+ size += 1;
+ } else {
+ size += 3;
+ }
+
+ if (bd == PROTECT)
+ size += 1;
+
+ return size * kInstrSize;
+}
+
+
+// Note: To call gcc-compiled C code on mips, you must call thru t9.
+void MacroAssembler::Call(Register target,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ if (cond == cc_always) {
+ jalr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT)
+ nop();
+
+ ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ int size = CallSize(t9, cond, rs, rt, bd);
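+ // The extra two instructions are the lui/ori pair that li emits to load
+ // the target into t9.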
+ return size + 2 * kInstrSize;
+}
+
+
+void MacroAssembler::Call(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ int32_t target_int = reinterpret_cast<int32_t>(target);
+ // Must record previous source positions before the
+ // li() generates a new code target.
+ positions_recorder()->WriteRecordedPositions();
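+ // Passing 'true' pins li to its fixed two-instruction form, keeping the
+ // call site size in sync with CallSize above.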
+ li(t9, Operand(target_int, rmode), true);
+ Call(t9, cond, rs, rt, bd);
+ ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
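+ // Note: the AST id does not change the size of the call sequence.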
+ return CallSize(reinterpret_cast<Address>(code.location()),
+ rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
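+ // If an AST id is supplied, stash it so it is emitted with the reloc
+ // info for this call site.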
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+ ast_id_for_reloc_info_ = ast_id;
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
+ ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+}
+
+
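+// Ret returns through ra, optionally under a condition.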
+void MacroAssembler::Ret(Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ Jump(ra, cond, rs, rt, bd);
+}
+
+
void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
}
-void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target.is_reg()) {
- jr(target.rm());
- } else {
- if (!MustUseReg(target.rmode_)) {
- j(target.imm32_);
- } else {
- li(t9, target);
- jr(t9);
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::Jump(const Operand& target,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- BRANCH_ARGS_CHECK(cond, rs, rt);
- if (target.is_reg()) {
- if (cond == cc_always) {
- jr(target.rm());
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jr(target.rm());
- }
- } else { // Not register target.
- if (!MustUseReg(target.rmode_)) {
- if (cond == cc_always) {
- j(target.imm32_);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- j(target.imm32_); // Will generate only one instruction.
- }
- } else { // MustUseReg(target).
- li(t9, target);
- if (cond == cc_always) {
- jr(t9);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jr(t9); // Will generate only one instruction.
- }
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- return 4 * kInstrSize;
-}
-
-
-int MacroAssembler::CallSize(Register reg) {
- return 2 * kInstrSize;
-}
-
-
-// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target.is_reg()) {
- jalr(target.rm());
- } else { // !target.is_reg().
- if (!MustUseReg(target.rmode_)) {
- jal(target.imm32_);
- } else { // MustUseReg(target).
- // Must record previous source positions before the
- // li() generates a new code target.
- positions_recorder()->WriteRecordedPositions();
- li(t9, target);
- jalr(t9);
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(const Operand& target,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- BRANCH_ARGS_CHECK(cond, rs, rt);
- if (target.is_reg()) {
- if (cond == cc_always) {
- jalr(target.rm());
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(target.rm());
- }
- } else { // !target.is_reg().
- if (!MustUseReg(target.rmode_)) {
- if (cond == cc_always) {
- jal(target.imm32_);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jal(target.imm32_); // Will generate only one instruction.
- }
- } else { // MustUseReg(target)
- li(t9, target);
- if (cond == cc_always) {
- jalr(t9);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(t9); // Will generate only one instruction.
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
- Condition cond,
- Register r1,
- const Operand& r2) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
- ASSERT(ast_id_for_reloc_info_ == kNoASTId);
- ast_id_for_reloc_info_ = ast_id;
- rmode = RelocInfo::CODE_TARGET_WITH_ID;
- }
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
-}
+
+
+void MacroAssembler::DropAndRet(int drop,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ // This is a workaround to make sure only one branch instruction is
+ // generated. It relies on Drop and Ret not creating branches if
+ // cond == cc_always.
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), r1, r2);
+ }
+
+ Drop(drop);
+ Ret();
+
+ if (cond != cc_always) {
+ bind(&skip);
+ }
+}
Label skip;
if (cond != al) {
- Branch(&skip, NegateCondition(cond), reg, op);
+ Branch(&skip, NegateCondition(cond), reg, op);
}
- if (count > 0) {
- addiu(sp, sp, count * kPointerSize);
- }
+ addiu(sp, sp, count * kPointerSize);
if (cond != al) {
bind(&skip);
}
-void MacroAssembler::DropAndRet(int drop,
- Condition cond,
- Register r1,
- const Operand& r2) {
- // This is a workaround to make sure only one branch instruction is
- // generated. It relies on Drop and Ret not creating branches if
- // cond == cc_always.
- Label skip;
- if (cond != cc_always) {
- Branch(&skip, NegateCondition(cond), r1, r2);
- }
-
- Drop(drop);
- Ret();
-
- if (cond != cc_always) {
- bind(&skip);
- }
-}
-
void MacroAssembler::Swap(Register reg1,
Register reg2,
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ call_wrapper.BeforeCall(CallSize(adaptor));
SetCallKind(t1, call_kind);
- Call(adaptor, RelocInfo::CODE_TARGET);
+ Call(adaptor);
call_wrapper.AfterCall();
jmp(done);
} else {
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
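+ // In the new Call signature the AST id precedes the condition arguments,
+ // so kNoASTId is passed explicitly here.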
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
}
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+ Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
+ kNoASTId, cond, r1, r2);
return result;
}
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
-// Arguments macros.
+ // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
-// Prototypes.
-
-// Prototypes for functions with no target (eg Ret()).
-#define DECLARE_NOTARGET_PROTOTYPE(Name) \
- void Name(BranchDelaySlot bd = PROTECT); \
- void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \
- Name(COND_ARGS, bd); \
- }
-
-// Prototypes for functions with a target.
-
-// Cases when relocation may be needed.
-#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
- void Name(target_type target, \
- RelocInfo::Mode rmode, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- RelocInfo::Mode rmode) { \
- Name(target, rmode, bd); \
- } \
- void Name(target_type target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS) { \
- Name(target, rmode, COND_ARGS, bd); \
- }
-
-// Cases when relocation is not needed.
+ // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
void Name(target_type target, BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target) { \
Name(target, bd); \
} \
void Name(target_type target, COND_TYPED_ARGS, \
BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target, COND_TYPED_ARGS) { \
Name(target, COND_ARGS, bd); \
}
-// Target prototypes.
-
-#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
- DECLARE_NORELOC_PROTOTYPE(Name, Register) \
- DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
- DECLARE_RELOC_PROTOTYPE(Name, byte*) \
- DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
-
#define DECLARE_BRANCH_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+ DECLARE_BRANCH_PROTOTYPES(Branch)
+ DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
-DECLARE_JUMP_CALL_PROTOTYPES(Jump)
-DECLARE_JUMP_CALL_PROTOTYPES(Call)
-
-DECLARE_BRANCH_PROTOTYPES(Branch)
-DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
-
-DECLARE_NOTARGET_PROTOTYPE(Ret)
-
+#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS
-#undef DECLARE_NOTARGET_PROTOTYPE
-#undef DECLARE_NORELOC_PROTOTYPE
-#undef DECLARE_RELOC_PROTOTYPE
-#undef DECLARE_JUMP_CALL_PROTOTYPES
-#undef DECLARE_BRANCH_PROTOTYPES
- void CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
- Condition cond = al,
- Register r1 = zero_reg,
- const Operand& r2 = Operand(zero_reg));
- int CallSize(Register reg);
- int CallSize(Handle<Code> code, RelocInfo::Mode rmode);
+ // Jump, Call, and Ret pseudo instructions implementing inter-working.
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
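+ // For example, "void Call(Register target, COND_ARGS);" declares:
+ //   void Call(Register target, Condition cond = al,
+ //             Register rs = zero_reg,
+ //             const Operand& rt = Operand(zero_reg),
+ //             BranchDelaySlot bd = PROTECT);
+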
+ void Jump(Register target, COND_ARGS);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+ int CallSize(Register target, COND_ARGS);
+ void Call(Register target, COND_ARGS);
+ int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ COND_ARGS);
+ void Call(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ COND_ARGS);
+ void Ret(COND_ARGS);
+ inline void Ret(BranchDelaySlot bd) {
+ Ret(al, zero_reg, Operand(zero_reg), bd);
+ }
+
+#undef COND_ARGS
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Jr(Label* L, BranchDelaySlot bdslot);
void Jalr(Label* L, BranchDelaySlot bdslot);
- void Jump(intptr_t target, RelocInfo::Mode rmode,
- BranchDelaySlot bd = PROTECT);
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
- void Call(intptr_t target, RelocInfo::Mode rmode,
- BranchDelaySlot bd = PROTECT);
- void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
-
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(a0);
__ Addu(a0, a0, code_pointer());
- __ Jump(Operand(a0));
+ __ Jump(a0);
}
__ Call(t9);
__ lw(ra, MemOperand(sp, 0));
__ Addu(sp, sp, Operand(stack_alignment));
- __ Jump(Operand(ra));
+ __ Jump(ra);
}