// Call stub for +1/-1.
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- TypeRecordingBinaryOpStub stub(expr->binary_op(),
- NO_OVERWRITE);
+ TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
ASSERT(result.is(elements));
// Load the result.
- __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+ __ mov(result, FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
// Check for the hole value.
__ cmp(result, Factory::the_hole_value());
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ mov(FieldOperand(elements, offset), value);
} else {
- __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
+ __ mov(FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
value);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register.
- __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+ __ lea(key,
+ FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
__ RecordWrite(elements, key, value);
}
}
return i;
}
}
+
return -1;
}
// One byte opcode for test eax, 0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
+ // One byte opcode for test al, 0xXX.
+ static const byte kTestAlByte = 0xA8;
// ---------------------------------------------------------------------------
// Code generation
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
- __ SmiSub(rax, rax, rdx);
+ __ subq(rax, rdx);
} else {
NearLabel done;
- __ SmiSub(rdx, rdx, rax);
+ __ subq(rdx, rax);
__ j(no_overflow, &done);
// Correct sign of result in case of overflow.
__ SmiNot(rdx, rdx);
#define __ ACCESS_MASM(masm_)
+
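+// Helper that records the location of the patchable inlined smi-check
+// jump so the IC infrastructure can later rewrite it (jc/jnc becomes
+// jz/jnz) once type feedback shows the operands are smis.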
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm)
+ : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
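+ // 'test' always clears the carry flag, so before patching the jc below
+ // is never taken and the jnc always is; after patching they act as
+ // jz/jnz on the smi tag bit.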
+ void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+ __ testb(reg, Immediate(kSmiTagMask));
+ EmitJump(not_carry, target); // Always taken before patching.
+ }
+
+ void EmitJumpIfSmi(Register reg, NearLabel* target) {
+ __ testb(reg, Immediate(kSmiTagMask));
+ EmitJump(carry, target); // Never taken before patching.
+ }
+
+ void EmitPatchInfo() {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_int8(delta_to_patch_site));
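+ // Since the immediate fits in a byte, the assembler emits this as the
+ // one byte 'test al' instruction (0xA8) that HasInlinedSmiCode looks
+ // for; its immediate byte records the delta back to the patch site.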
+ __ testl(rax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+ // jc will be patched to jz, jnc to jnz.
+ void EmitJump(Condition cc, NearLabel* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ ASSERT(cc == carry || cc == not_carry);
+ __ bind(&patch_site_);
+ __ j(cc, target);
+ }
+
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
// formal parameter count expected by the function.
// Perform the comparison as if via '==='.
__ movq(rdx, Operand(rsp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- Label slow_case;
- __ JumpIfNotBothSmi(rdx, rax, &slow_case);
- __ SmiCompare(rdx, rax);
+ NearLabel slow_case;
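+ // Or the operands together: the result has a clear smi tag bit only
+ // if both operands are smis.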
+ __ movq(rcx, rdx);
+ __ or_(rcx, rax);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+
+ __ cmpq(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target()->entry_label());
__ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(equal, true, flags);
- __ CallStub(&stub);
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ EmitCallIC(ic, &patch_site);
+
__ testq(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
// Do a combined smi check of the operands. Left operand is on the
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
- Label done, stub_call, smi_case;
+ NearLabel done, stub_call, smi_case;
__ pop(rdx);
__ movq(rcx, rax);
- Condition smi = masm()->CheckBothSmi(rdx, rax);
- __ j(smi, &smi_case);
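+ // rax was copied to rcx above, so it is free to be clobbered by the
+ // combined smi tag check.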
+ __ or_(rax, rdx);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(rax, &smi_case);
__ bind(&stub_call);
- TypeRecordingBinaryOpStub stub(op, mode);
__ movq(rax, rcx);
- __ CallStub(&stub);
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
__ bind(&smi_case);
}
// Inline smi case if we are in a loop.
- Label stub_call, done;
+ NearLabel stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ SmiAddConstant(rax, rax, Smi::FromInt(1));
__ j(overflow, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &done);
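+ // Never taken before patching, so the first execution always reaches
+ // the stub and records type feedback.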
+ patch_site.EmitJumpIfSmi(rax, &done);
__ bind(&stub_call);
// Call stub. Undo operation first.
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
- __ CallStub(&stub);
-
+ EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
+
// Store the value returned in rax.
switch (assign_type) {
case VARIABLE:
}
bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- Label slow_case;
- __ JumpIfNotBothSmi(rax, rdx, &slow_case);
- __ SmiCompare(rdx, rax);
+ NearLabel slow_case;
+ __ movq(rcx, rdx);
+ __ or_(rcx, rax);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+ __ cmpq(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(cc, strict, flags);
- __ CallStub(&stub);
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
}
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ __ call(ic, RelocInfo::CODE_TARGET);
+ if (patch_site != NULL && patch_site->is_bound()) {
+ patch_site->EmitPatchInfo();
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+}
+
+
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
__ movq(Operand(rbp, frame_offset), value);
}
+static bool HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al instruction,
+ // nothing was inlined.
+ return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
+
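+ // Whether the call site contains inlined smi code determines which
+ // specialized state the IC may transition to.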
+ State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
rewritten = stub.GetCode();
void LCodeGen::DoThrow(LThrow* instr) {
- Abort("Unimplemented: %s", "DoThrow");
+ __ push(ToRegister(instr->InputAt(0)));
+ CallRuntime(Runtime::kThrow, 1, instr);
+
+ if (FLAG_debug_code) {
+ Comment("Unreachable code.");
+ __ int3();
+ }
}
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- Abort("Unimplemented: %s", "DoDeferredStackCheck");
+ __ Pushad();
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ __ Popad();
}
__ cmpl(ToOperand(left), Immediate(value));
}
} else if (right->IsRegister()) {
- __ cmpq(ToRegister(left), ToRegister(right));
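+ // Untagged 32-bit integers only occupy the low 32 bits of the
+ // register, so use 32-bit compares, matching the immediate case above.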
+ __ cmpl(ToRegister(left), ToRegister(right));
} else {
- __ cmpq(ToRegister(left), ToOperand(right));
+ __ cmpl(ToRegister(left), ToOperand(right));
}
}
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->object());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
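+ // The chunk builder only uses a constant key when no write barrier
+ // is needed (see LChunkBuilder::DoStoreKeyedFastElement).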
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ int offset =
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(FieldOperand(elements, offset), value);
+ } else {
+ __ movq(FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ value);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ // Compute address of modified element and store it into key register.
+ __ lea(key, FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ RecordWrite(elements, key, value);
+ }
}
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- Abort("Unimplemented: %s", "DoThrow");
- return NULL;
+ LOperand* value = UseFixed(instr->value(), rax);
+ return MarkAsCall(new LThrow(value), instr);
}
LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
HStoreKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
- return NULL;
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
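+ // RecordWrite clobbers its register inputs, so the operands must be
+ // in temp registers whenever a write barrier is needed.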
+ LOperand* obj = UseTempRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+
+ return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
}