}
+Condition Assembler::GetCondition(Instr instr) {
+ return Instruction::ConditionField(instr);
+}
+
+
bool Assembler::IsBranch(Instr instr) {
return (instr & (B27 | B25)) == (B27 | B25);
}
}
+Register Assembler::GetRn(Instr instr) {
+ Register reg;
+ reg.code_ = Instruction::RnValue(instr);
+ return reg;
+}
+
+
+Register Assembler::GetRm(Instr instr) {
+ Register reg;
+ reg.code_ = Instruction::RmValue(instr);
+ return reg;
+}
+
+
bool Assembler::IsPush(Instr instr) {
return ((instr & ~kRdMask) == kPushRegPattern);
}
}
+bool Assembler::IsTstImmediate(Instr instr) {
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
+ (I | TST | S);
+}
+
+
+bool Assembler::IsCmpRegister(Instr instr) {
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
+ (CMP | S);
+}
+
+
+bool Assembler::IsCmpImmediate(Instr instr) {
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
+ (I | CMP | S);
+}
+
+
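+// The following two helpers decode the cmp rx, #yyy marker emitted via
+// cmp_raw_immediate; they are used when patching inlined smi checks.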
+Register Assembler::GetCmpImmediateRegister(Instr instr) {
+ ASSERT(IsCmpImmediate(instr));
+ return GetRn(instr);
+}
+
+
+int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
+ ASSERT(IsCmpImmediate(instr));
+ return instr & kOff12Mask;
+}
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
}
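+// Emits cmp <src>, #<raw_immediate> with the raw 12-bit immediate written
+// directly into the instruction; used to record patch-site markers
+// (see JumpPatchSite::EmitPatchInfo).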
+void Assembler::cmp_raw_immediate(
+ Register src, int raw_immediate, Condition cond) {
+ ASSERT(is_uint12(raw_immediate));
+ emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
+}
+
+
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | CMN | S, src1, r0, src2);
}
bool Assembler::IsNop(Instr instr, int type) {
- // Check for mov rx, rx.
+ // Check for mov rx, rx where x = type.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}
void cmp(Register src1, Register src2, Condition cond = al) {
cmp(src1, Operand(src2), cond);
}
+ void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
void cmn(Register src1, const Operand& src2, Condition cond = al);
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
+ static Condition GetCondition(Instr instr);
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsAddRegisterImmediate(Instr instr);
static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
static Register GetRd(Instr instr);
+ static Register GetRn(Instr instr);
+ static Register GetRm(Instr instr);
static bool IsPush(Instr instr);
static bool IsPop(Instr instr);
static bool IsStrRegFpOffset(Instr instr);
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
+ static bool IsTstImmediate(Instr instr);
+ static bool IsCmpRegister(Instr instr);
+ static bool IsCmpImmediate(Instr instr);
+ static Register GetCmpImmediateRegister(Instr instr);
+ static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
// Check if is time to emit a constant pool for pending reloc info entries
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result;
Label not_heap_number;
- Register scratch = r7;
+ Register scratch = r9.is(tos_) ? r7 : r9;
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos_, ip);
// For equality we do not care about the sign of the result.
__ sub(r0, r0, r1, SetCC);
} else {
- __ sub(r1, r1, r0, SetCC);
- // Correct sign of result in case of overflow.
- __ rsb(r1, r1, Operand(0), SetCC, vs);
- __ mov(r0, r1);
+ // Untag before subtracting to avoid handling overflow.
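+    // Untagged 31-bit values cannot overflow a 32-bit subtraction, so the
+    // sign of the result is always correct.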
+ __ SmiUntag(r1);
+ __ sub(r0, r1, SmiUntagOperand(r0));
}
__ Ret();
inline int TypeValue() const { return Bits(27, 25); }
inline int RnValue() const { return Bits(19, 16); }
+ DECLARE_STATIC_ACCESSOR(RnValue);
inline int RdValue() const { return Bits(15, 12); }
DECLARE_STATIC_ACCESSOR(RdValue);
inline int SValue() const { return Bit(20); }
// with register
inline int RmValue() const { return Bits(3, 0); }
+ DECLARE_STATIC_ACCESSOR(RmValue);
inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
inline ShiftOp ShiftField() const {
return static_cast<ShiftOp>(BitField(6, 5));
#define __ ACCESS_MASM(masm_)
+
+// A patch site is a location in the code that can be patched. This class has
+// methods to emit the patchable code and a method, EmitPatchInfo, to record a
+// marker back to the patchable code. This marker is a cmp rx, #yyy
+// instruction, where x * 0x00000fff + yyy (using the raw 12-bit immediate
+// value) is the delta from the pc to the first instruction of the patchable
+// code.
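+// For example (illustrative only), a delta of 5000 instructions would be
+// recorded as cmp r1, #905, since 1 * 0x00000fff + 905 == 5000.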
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+  // When initially emitted, this jump is always taken, skipping the inlined
+  // smi code.
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ cmp(reg, Operand(reg));
+    // Don't use b(al, ...) as that might emit the constant pool right after
+    // the branch. After patching, when the branch is no longer unconditional,
+    // execution can continue into the constant pool.
+ __ b(eq, target); // Always taken before patched.
+ }
+
+  // When initially emitted, this jump is never taken; execution falls through
+  // into the inlined smi code.
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ cmp(reg, Operand(reg));
+ __ b(ne, target); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg;
+ reg.set_code(delta_to_patch_site / kOff12Mask);
+ __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
// Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &slow_case);
+ patch_site.EmitJumpIfNotSmi(r2, &slow_case);
+
__ cmp(r1, r0);
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
- __ bind(&slow_case);
+ __ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(eq, true, flags, r1, r0);
- __ CallStub(&stub);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ EmitCallIC(ic, &patch_site);
+ __ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
}
bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r0, Operand(r1));
- __ JumpIfNotSmi(r2, &slow_case);
+ patch_site.EmitJumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(cond, strict, flags, r1, r0);
- __ CallStub(&stub);
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
}
}
}
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (patch_site != NULL && patch_site->is_bound()) {
+ patch_site->EmitPatchInfo();
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+}
+
+
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
Token::Name(op_));
}
#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address());
+ }
}
void PatchInlinedSmiCode(Address address) {
- // Currently there is no smi inlining in the ARM full code generator.
+ Address cmp_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ if (!Assembler::IsCmpImmediate(instr)) {
+ return;
+ }
+
+  // The delta back from the cmp marker to the start of the inlined smi check
+  // (the patchable cmp rx, rx instruction), measured in instructions.
+ int delta = Assembler::GetCmpImmediateRawImmediate(instr);
+ delta +=
+ Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
+  // If the delta is 0, the instruction is cmp r0, #0, which also signals that
+  // nothing was inlined.
+ if (delta == 0) {
+ return;
+ }
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
+ address, cmp_instruction_address, delta);
+ }
+#endif
+
+ Address patch_address =
+ cmp_instruction_address - delta * Instruction::kInstrSize;
+ Instr instr_at_patch = Assembler::instr_at(patch_address);
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
+ ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+ ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+ Assembler::GetRm(instr_at_patch).code());
+ ASSERT(Assembler::IsBranch(branch_instr));
+ if (Assembler::GetCondition(branch_instr) == eq) {
+ // This is patching a "jump if not smi" site to be active.
+ // Changing
+ // cmp rx, rx
+ // b eq, <target>
+ // to
+ // tst rx, #kSmiTagMask
+ // b ne, <target>
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Assembler::GetRn(instr_at_patch);
+ patcher.masm()->tst(reg, Operand(kSmiTagMask));
+ patcher.EmitCondition(ne);
+ } else {
+ ASSERT(Assembler::GetCondition(branch_instr) == ne);
+ // This is patching a "jump if smi" site to be active.
+ // Changing
+ // cmp rx, rx
+ // b ne, <target>
+ // to
+ // tst rx, #kSmiTagMask
+ // b eq, <target>
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Assembler::GetRn(instr_at_patch);
+ patcher.masm()->tst(reg, Operand(kSmiTagMask));
+ patcher.EmitCondition(eq);
+ }
}
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
+ UseRegisterAtStart(right));
} else if (r.IsDouble()) {
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
+ LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else if (r.IsDouble()) {
ASSERT(instr->left()->representation().IsDouble());
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- __ cmp(ToRegister(left), ToOperand(right));
+ __ cmp(ToRegister(left), ToRegister(right));
}
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
- __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
- __ vmrs(pc);
+ __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to unordered to return false.
__ b(vs, &unordered);
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
- __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
- __ vmrs(pc);
+ __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
__ b(vs, chunk_->GetAssemblyLabel(false_block));
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
- __ cmp(r0, Operand(0));
__ LoadRoot(ToRegister(instr->result()),
Heap::kTrueValueRootIndex,
condition);
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Abort("DoCmpTAndBranch unimplemented.");
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  // The compare stub expects the compare condition and the input operands
+  // reversed for GT and LTE.
+ Condition condition = ComputeCompareCondition(op);
+ if (op == Token::GT || op == Token::LTE) {
+ condition = ReverseCondition(condition);
+ }
+ __ cmp(r0, Operand(0));
+ EmitBranch(true_block, false_block, condition);
}
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
}
-void CodePatcher::Emit(Instr x) {
- masm()->emit(x);
+void CodePatcher::Emit(Instr instr) {
+ masm()->emit(instr);
}
void CodePatcher::Emit(Address addr) {
masm()->emit(reinterpret_cast<Instr>(addr));
}
-#endif // ENABLE_DEBUGGER_SUPPORT
+
+
+void CodePatcher::EmitCondition(Condition cond) {
+ Instr instr = Assembler::instr_at(masm_.pc_);
+ instr = (instr & ~kCondMask) | cond;
+ masm_.emit(instr);
+}
} } // namespace v8::internal
}
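+// Builds an operand that untags a smi value on the fly by shifting the tag
+// bit out (arithmetic shift right by kSmiTagSize).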
+static inline Operand SmiUntagOperand(Register object) {
+ return Operand(object, ASR, kSmiTagSize);
+}
+
+
+
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
const Register roots = { 10 }; // Roots array pointer.
MacroAssembler* masm() { return &masm_; }
// Emit an instruction directly.
- void Emit(Instr x);
+ void Emit(Instr instr);
// Emit an address directly.
void Emit(Address addr);
+  // Emit the condition part of an instruction, leaving the rest of the
+  // current instruction unchanged.
+ void EmitCondition(Condition cond);
+
private:
byte* address_; // The address of the code being patched.
int instructions_; // Number of instructions of the expected patch size.
void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
// Calling an IC stub with a patch site. Passing NULL for patch_site
- // indicates no inlined smi code and emits a nop after the IC call.
+  // or a non-NULL patch_site that is not activated indicates no inlined smi
+  // code and emits a nop after the IC call.
void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
// Set fields in the stack frame. Offsets are the frame pointer relative
class JumpPatchSite BASE_EMBEDDED {
public:
- explicit JumpPatchSite(MacroAssembler* masm)
- : masm_(masm) {
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
#ifdef DEBUG
info_emitted_ = false;
#endif
void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
__ test(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target); // Always taken before patched.
+ EmitJump(not_carry, target); // Always taken before patched.
}
void EmitJumpIfSmi(Register reg, NearLabel* target) {