// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const {
    codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
  }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
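
// A SafepointGenerator is handed to the macro assembler as a CallWrapper
// (e.g. around InvokeFunction), so BeforeCall pads the instruction stream
// for lazy-deopt patching and AfterCall records the safepoint immediately
// after the wrapped call instruction.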

#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();
}
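
// The && chain both fixes the phase order (prologue, body, deferred code,
// deopt jump table, safepoint table) and stops code generation at the first
// phase that aborts.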


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartArrayPointer<char> name(
        info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  int length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  // Strict mode functions need to replace the receiver with undefined
  // when called as functions (without an explicit receiver
  // object). rcx is zero for method calls and non-zero for function
  // calls.
  if (!info_->is_classic_mode() || info_->is_native()) {
    Label ok;
    __ testq(rcx, rcx);
    __ j(zero, &ok, Label::kNear);
    // +1 for return address.
    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
    __ movq(Operand(rsp, receiver_offset), kScratchRegister);
    __ bind(&ok);
  }

  __ push(rbp);  // Caller's frame pointer.
  __ movq(rbp, rsp);
  __ push(rsi);  // Callee's context.
  __ push(rdi);  // Callee's JS function.

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
      Label loop;
      __ bind(&loop);
      __ push(kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
    } else {
      __ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On windows, you may not access the stack more than one page below
      // the most recently mapped page. To make the allocated area randomly
      // accessible, we write to each page in turn (the value is irrelevant).
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ movq(Operand(rsp, offset), rax);
      }
#endif
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.
    __ push(rdi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both rax and rsi.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in rsi.
    __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movq(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movq(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}
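
// After the prologue the fixed part of the frame looks like this
// (higher addresses first):
//   rbp + 16 : last incoming argument
//   rbp + 8  : return address
//   rbp + 0  : caller's saved rbp
//   rbp - 8  : context (rsi)
//   rbp - 16 : JS function (rdi)
//   rbp - 24 : first spill slot, followed by the remaining slots.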


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      Comment(";;; Deferred code @%d: %s.",
              code->instruction_index(),
              code->instr()->Mnemonic());
      code->Generate();
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsInteger32();
}


bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsTagged();
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  return value->Number();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  Handle<Object> literal = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return literal;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(rbp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(rbp, -(index - 1) * kPointerSize);
  }
}
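
// For example, spill slot 0 yields Operand(rbp, -24), just below the saved
// context and function, while parameter index -1 yields Operand(rbp, 16),
// just above the return address and the caller's saved rbp.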


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment);
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  if (cc == no_condition) {
    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry) {
      jump_table_.Add(JumpTableEntry(entry));
    }
    __ j(cc, &jump_table_.last().label);
  }
}
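
// Comparing only against the last jump table entry is a heuristic:
// consecutive deopts frequently share a target, and a missed match merely
// adds a redundant entry rather than affecting correctness.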


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register rsi always contains a pointer to the context.
    safepoint.DefinePointerRegister(rsi);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition);
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
723 ASSERT(ToRegister(instr->result()).is(rax));
724 switch (instr->hydrogen()->major_key()) {
725 case CodeStub::RegExpConstructResult: {
726 RegExpConstructResultStub stub;
727 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
730 case CodeStub::RegExpExec: {
732 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
735 case CodeStub::SubString: {
737 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
740 case CodeStub::NumberToString: {
741 NumberToStringStub stub;
742 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
745 case CodeStub::StringAdd: {
746 StringAddStub stub(NO_STRING_ADD_FLAGS);
747 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
750 case CodeStub::StringCompare: {
751 StringCompareStub stub;
752 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
755 case CodeStub::TranscendentalCache: {
756 TranscendentalCacheStub stub(instr->transcendental_type(),
757 TranscendentalCacheStub::TAGGED);
758 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
767 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {


void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->InputAt(0));

    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();

    if (divisor < 0) divisor = -divisor;

    Label positive_dividend, done;
    __ testl(dividend, dividend);
    __ j(not_sign, &positive_dividend, Label::kNear);
    __ negl(dividend);
    __ andl(dividend, Immediate(divisor - 1));
    __ negl(dividend);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ j(not_zero, &done, Label::kNear);
      DeoptimizeIf(no_condition, instr->environment());
    } else {
      __ jmp(&done, Label::kNear);
    }
    __ bind(&positive_dividend);
    __ andl(dividend, Immediate(divisor - 1));
    __ bind(&done);
  } else {
    Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
    Register left_reg = ToRegister(instr->InputAt(0));
    Register right_reg = ToRegister(instr->InputAt(1));
    Register result_reg = ToRegister(instr->result());

    ASSERT(left_reg.is(rax));
    ASSERT(result_reg.is(rdx));
    ASSERT(!right_reg.is(rax));
    ASSERT(!right_reg.is(rdx));

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      __ testl(right_reg, right_reg);
      DeoptimizeIf(zero, instr->environment());
    }

    __ testl(left_reg, left_reg);
    __ j(zero, &remainder_eq_dividend, Label::kNear);
    __ j(sign, &slow, Label::kNear);

    __ testl(right_reg, right_reg);
    __ j(not_sign, &both_positive, Label::kNear);
    // The sign of the divisor doesn't matter.
    __ neg(right_reg);

    __ bind(&both_positive);
    // If the dividend is smaller than the nonnegative
    // divisor, the dividend is the result.
    __ cmpl(left_reg, right_reg);
    __ j(less, &remainder_eq_dividend, Label::kNear);

    // Check if the divisor is a PowerOfTwo integer.
    Register scratch = ToRegister(instr->TempAt(0));
    __ movl(scratch, right_reg);
    __ subl(scratch, Immediate(1));
    __ testl(scratch, right_reg);
    __ j(not_zero, &do_subtraction, Label::kNear);
    __ andl(left_reg, scratch);
    __ jmp(&remainder_eq_dividend, Label::kNear);

    __ bind(&do_subtraction);
    const int kUnfolds = 3;
    // Try a few subtractions of the dividend.
    __ movl(scratch, left_reg);
    for (int i = 0; i < kUnfolds; i++) {
      // Reduce the dividend by the divisor.
      __ subl(left_reg, right_reg);
      // Check if the dividend is less than the divisor.
      __ cmpl(left_reg, right_reg);
      __ j(less, &remainder_eq_dividend, Label::kNear);
    }
    __ movl(left_reg, scratch);

    // Slow case, using idiv instruction.
    __ bind(&slow);
    // Sign extend eax to edx.
    // (We are using only the low 32 bits of the values.)
    __ cdq();

    // Check for (0 % -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      Label done;
      __ testl(left_reg, left_reg);
      __ j(not_sign, &positive_left, Label::kNear);
      __ idivl(right_reg);

      // Test the remainder for 0, because then the result would be -0.
      __ testl(result_reg, result_reg);
      __ j(not_zero, &done, Label::kNear);

      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&positive_left);
      __ idivl(right_reg);
      __ bind(&done);
    } else {
      __ idivl(right_reg);
    }
    __ jmp(&done, Label::kNear);

    __ bind(&remainder_eq_dividend);
    __ movl(result_reg, left_reg);

    __ bind(&done);
  }
}
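
// DoModI thus has three tiers: a compile-time mask when the divisor is a
// known power-of-two constant, a dynamic power-of-two check plus a short run
// of subtractions for small quotients, and the idivl slow path for the rest.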


void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->InputAt(1);
  ASSERT(ToRegister(instr->result()).is(rax));
  ASSERT(ToRegister(instr->InputAt(0)).is(rax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));

  Register left_reg = rax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ testl(left_reg, left_reg);
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ testl(right_reg, right_reg);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to rdx.
  __ cdq();
  __ idivl(right_reg);

  // Deoptimize if remainder is not 0.
  __ testl(rdx, rdx);
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->InputAt(0));
  LOperand* right = instr->InputAt(1);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ movl(kScratchRegister, left);
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Nothing to do.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    __ imull(left, ToOperand(right));
  } else {
    __ imull(left, ToRegister(right));
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ testl(left, left);
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      __ orl(kScratchRegister, ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      __ orl(kScratchRegister, ToRegister(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}
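
// The copy of the left operand stashed in kScratchRegister before the
// multiply is what makes the minus-zero check possible: when the product is
// zero, or-ing the original operands reveals whether either input was
// negative, i.e. whether the exact result would have been -0.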


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        __ xorl(ToRegister(left), Immediate(right_operand));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xorl(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_XOR:
        __ xorl(ToRegister(left), ToRegister(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        } else {
          __ shrl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shll(ToRegister(left), Immediate(shift_count));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
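
// Masking the shift amount with 0x1F matches both the x86 semantics of
// variable shifts and the JavaScript specification, which reduces shift
// counts modulo 32.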


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ subl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    __ subl(ToRegister(left), ToRegister(right));
  } else {
    __ subl(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid to
  // do so if the constant is -0.0.
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register tmp = ToRegister(instr->TempAt(0));
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value();
  if (value->IsSmi()) {
    __ Move(ToRegister(instr->result()), value);
  } else {
    __ LoadHeapObject(ToRegister(instr->result()),
                      Handle<HeapObject>::cast(value));
  }
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));

  // Load map into |result|.
  __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte.
  __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ and_(result, Immediate(Map::kElementsKindMask));
  __ shr(result, Immediate(Map::kElementsKindShift));
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Label done;
  // If the object is a smi return the object.
  __ JumpIfSmi(input, &done, Label::kNear);

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);
  __ movq(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(rax));

#ifdef DEBUG
  __ AbortIfSmi(object);
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  __ Assert(equal, "Trying to get date field from non-date.");
#endif

  if (index->value() == 0) {
    __ movq(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ movq(kScratchRegister, stamp);
      __ cmpq(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movq(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
#ifdef _WIN64
    __ movq(rcx, object);
    __ movq(rdx, index, RelocInfo::NONE);
#else
    __ movq(rdi, object);
    __ movq(rsi, index, RelocInfo::NONE);
#endif
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ bind(&done);
  }
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToRegister(instr->InputAt(0)));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ addl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    __ addl(ToRegister(left), ToRegister(right));
  } else {
    __ addl(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->InputAt(0));
  XMMRegister right = ToDoubleRegister(instr->InputAt(1));
  XMMRegister result = ToDoubleRegister(instr->result());
  // All operations except MOD are computed in-place.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      break;
    case Token::MOD:
      __ PrepareCallCFunction(2);
      __ movaps(xmm0, left);
      ASSERT(right.is(xmm1));
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
      __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
      __ movaps(result, xmm0);
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(rdx));
  ASSERT(ToRegister(instr->InputAt(1)).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}


void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}
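
// EmitBranch exploits block layout: when one successor is the next block to
// be emitted, a single (possibly negated) conditional jump suffices and the
// fall-through edge costs nothing.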


void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    __ testl(reg, reg);
    EmitBranch(true_block, false_block, not_zero);
  } else if (r.IsDouble()) {
    XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
    __ xorps(xmm0, xmm0);
    __ ucomisd(reg, xmm0);
    EmitBranch(true_block, false_block, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(true_block, false_block, equal);
    } else if (type.IsSmi()) {
      __ SmiCompare(reg, Smi::FromInt(0));
      EmitBranch(true_block, false_block, not_equal);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ j(equal, false_label);
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ j(equal, true_label);
        // false -> false.
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ j(equal, false_label);
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ j(equal, false_label);
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Cmp(reg, Smi::FromInt(0));
        __ j(equal, false_label);
        __ JumpIfSmi(reg, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ testb(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }

      const Register map = kScratchRegister;
      if (expected.NeedsMap()) {
        __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ testb(FieldOperand(map, Map::kBitFieldOffset),
                   Immediate(1 << Map::kIsUndetectable));
          __ j(not_zero, false_label);
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
        __ j(above_equal, true_label);
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, true_label);
        __ jmp(false_label);
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ j(not_equal, &not_heap_number, Label::kNear);
        __ xorps(xmm0, xmm0);
        __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
        __ j(zero, false_label);
        __ jmp(true_label);
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    __ jmp(chunk_->GetAssemblyLabel(block));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block =
        EvalComparison(instr->op(), left_val, right_val) ? true_block
                                                         : false_block;
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
    } else {
      int32_t value;
      if (right->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(right));
        __ cmpl(ToRegister(left), Immediate(value));
      } else if (left->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(left));
        if (right->IsRegister()) {
          __ cmpl(ToRegister(right), Immediate(value));
        } else {
          __ cmpl(ToOperand(right), Immediate(value));
        }
        // We transposed the operands. Reverse the condition.
        cc = ReverseCondition(cc);
      } else {
        if (right->IsRegister()) {
          __ cmpl(ToRegister(left), ToRegister(right));
        } else {
          __ cmpl(ToRegister(left), ToOperand(right));
        }
      }
    }
    EmitBranch(true_block, false_block, cc);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ cmpq(left, right);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ cmpq(left, Immediate(instr->hydrogen()->right()));
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // If the expression is known to be untagged or a smi, then it's definitely
  // not null, and it can't be an undetectable object.
  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);
    return;
  }

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
      Heap::kNullValueRootIndex :
      Heap::kUndefinedValueRootIndex;
  __ CompareRoot(reg, nil_value);
  if (instr->kind() == kStrictEquality) {
    EmitBranch(true_block, false_block, equal);
  } else {
    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
        Heap::kUndefinedValueRootIndex :
        Heap::kNullValueRootIndex;
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ j(equal, true_label);
    __ CompareRoot(reg, other_nil_value);
    __ j(equal, true_label);
    __ JumpIfSmi(reg, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = ToRegister(instr->TempAt(0));
    __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, not_zero);
  }
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
                                 Label* is_object) {
  ASSERT(!input.is(kScratchRegister));

  __ JumpIfSmi(input, is_not_object);

  __ CompareRoot(input, Heap::kNullValueRootIndex);
  __ j(equal, is_object);

  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, is_not_object);

  __ movzxbl(kScratchRegister,
             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ j(below, is_not_object);
  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return below_equal;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsObject(reg, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);
  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsString(reg, temp, false_label);

  EmitBranch(true_block, false_block, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Condition is_smi;
  if (instr->InputAt(0)->IsRegister()) {
    Register input = ToRegister(instr->InputAt(0));
    is_smi = masm()->CheckSmi(input);
  } else {
    Operand input = ToOperand(instr->InputAt(0));
    is_smi = masm()->CheckSmi(input);
  }
  EmitBranch(true_block, false_block, is_smi);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  EmitBranch(true_block, false_block, not_zero);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  __ testq(rax, rax);

  EmitBranch(true_block, false_block, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
  ASSERT(String::kHashShift >= kSmiTagSize);
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ testl(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, equal);
}


// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsEqualTo(CStrVector("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ movq(temp, FieldOperand(temp,
                             SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are symbols it is sufficient to use an identity
  // comparison.
  ASSERT(class_name->IsSymbol());
  __ Cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  InstanceofStub stub(InstanceofStub::kNoFlags);
  __ push(ToRegister(instr->InputAt(0)));
  __ push(ToRegister(instr->InputAt(1)));
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  Label true_value, done;
  __ testq(rax, rax);
  __ j(zero, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  // Use a temp register to avoid memory operands with variable lengths.
  Register map = ToRegister(instr->TempAt(0));
  __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<JSGlobalPropertyCell> cache_cell =
      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
  __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
  __ cmpq(map, Operand(kScratchRegister, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Patched to load either true or false.
  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
#ifdef DEBUG
  // Check that the code size between patch label and patch sites is invariant.
  Label end_of_patched_code;
  __ bind(&end_of_patched_code);
  ASSERT(true);
#endif
  __ jmp(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);  // Null is not an instance of anything.
  __ CompareRoot(object, Heap::kNullValueRootIndex);
  __ j(equal, &false_result, Label::kNear);

  // String values are not instances of anything.
  __ JumpIfNotString(object, kScratchRegister, deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);

  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  {
    PushSafepointRegistersScope scope(this);
    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
    InstanceofStub stub(flags);

    __ push(ToRegister(instr->InputAt(0)));
    __ PushHeapObject(instr->function());

    static const int kAdditionalDelta = 10;
    int delta =
        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
    ASSERT(delta >= 0);
    __ push_imm32(delta);

    // We are pushing three values on the stack but recording a
    // safepoint with two arguments because the stub is going to
    // remove the third argument from the stack before jumping
    // to the instanceof builtin on the slow path.
    CallCodeGeneric(stub.GetCode(),
                    RelocInfo::CODE_TARGET,
                    instr,
                    RECORD_SAFEPOINT_WITH_REGISTERS,
                    2);
    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
    ASSERT(instr->HasDeoptimizationEnvironment());
    LEnvironment* env = instr->deoptimization_environment();
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
    // Move the result to a register that survives the end of the
    // PushSafepointRegisterScope.
    __ movq(kScratchRegister, rax);
  }
  __ testq(kScratchRegister, kScratchRegister);
  Label load_false;
  Label done;
  __ j(not_zero, &load_false);
  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
  __ jmp(&done);
  __ bind(&load_false);
  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
  __ bind(&done);
}
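
// Note on the patching protocol above: the pushed delta tells the stub how
// far the call's return address is from the map-check label, so the stub can
// locate and rewrite the two patched hole-value sites at the inlined call
// site. kAdditionalDelta is an assumption baked into this code about the size
// of the instructions between the push and the call; the ASSERT after the
// call verifies that the pushed value still matches the code actually
// generated.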


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  Label true_value, done;
  __ testq(rax, rax);
  __ j(condition, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Preserve the return value on the stack and rely on the runtime
    // call to return the value in the same register.
    __ push(rax);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  __ movq(rsp, rbp);
  __ pop(rbp);
  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ LoadGlobalCell(result, instr->hydrogen()->cell());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We have a temp because CompareRoot might clobber kScratchRegister.
    Register cell = ToRegister(instr->TempAt(0));
    ASSERT(!value.is(cell));
    __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
    __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
    // Store the value.
    __ movq(Operand(cell, 0), value);
  } else {
    // Store the value.
    __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
    __ movq(Operand(kScratchRegister, 0), value);
  }
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movq(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Operand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &skip_assignment);
    }
  }
  __ movq(target, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    int offset = Context::SlotOffset(instr->slot_index());
    Register scratch = ToRegister(instr->TempAt(0));
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              scratch,
                              kDontSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
  } else {
    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
    __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
  }
}


void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name) {
  LookupResult lookup(isolate());
  type->LookupInDescriptors(NULL, *name, &lookup);
  ASSERT(lookup.IsFound() &&
         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
  if (lookup.type() == FIELD) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
      __ movq(result, FieldOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
      __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else {
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    __ LoadHeapObject(result, function);
  }
}


void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());

  int map_count = instr->hydrogen()->types()->length();
  Handle<String> name = instr->hydrogen()->name();

  if (map_count == 0) {
    ASSERT(instr->hydrogen()->need_generic());
    __ Move(rcx, instr->hydrogen()->name());
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    Label done;
    for (int i = 0; i < map_count - 1; ++i) {
      Handle<Map> map = instr->hydrogen()->types()->at(i);
      Label next;
      __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
      __ j(not_equal, &next, Label::kNear);
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ jmp(&done, Label::kNear);
      __ bind(&next);
    }
    Handle<Map> map = instr->hydrogen()->types()->last();
    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
    if (instr->hydrogen()->need_generic()) {
      Label generic;
      __ j(not_equal, &generic, Label::kNear);
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ jmp(&done, Label::kNear);
      __ bind(&generic);
      __ Move(rcx, instr->hydrogen()->name());
      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
      CallCode(ic, RelocInfo::CODE_TARGET, instr);
    } else {
      DeoptimizeIf(not_equal, instr->environment());
      EmitLoadFieldOrConstantFunction(result, object, map, name);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ testb(FieldOperand(result, Map::kBitFieldOffset),
           Immediate(1 << Map::kHasNonInstancePrototype));
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ movq(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ movq(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done, ok, fail;
    __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                   Heap::kFixedArrayMapRootIndex);
    __ j(equal, &done, Label::kNear);
    __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &done, Label::kNear);
    Register temp((result.is(rax)) ? rbx : rax);
    __ push(temp);
    __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
    __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
    __ and_(temp, Immediate(Map::kElementsKindMask));
    __ shr(temp, Immediate(Map::kElementsKindShift));
    __ cmpl(temp, Immediate(FAST_ELEMENTS));
    __ j(equal, &ok, Label::kNear);
    __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ j(less, &fail, Label::kNear);
    __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ j(less_equal, &ok, Label::kNear);
    __ bind(&fail);
    __ Abort("Check for fast or external elements failed");
    __ bind(&ok);
    __ pop(temp);
    __ bind(&done);
  }
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(input,
                               ExternalPixelArray::kExternalPointerOffset));
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register result = ToRegister(instr->result());

  if (instr->index()->IsRegister()) {
    __ subl(length, ToRegister(instr->index()));
  } else {
    __ subl(length, ToOperand(instr->index()));
  }
  DeoptimizeIf(below_equal, instr->environment());

  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them; add one more.
  __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}


void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ movq(result,
          BuildFastArrayOperand(instr->elements(), instr->key(),
                                FAST_ELEMENTS,
                                FixedArray::kHeaderSize - kHeapObjectTag));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadKeyedFastDoubleElement(
    LLoadKeyedFastDoubleElement* instr) {
  XMMRegister result(ToDoubleRegister(instr->result()));

  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
      sizeof(kHoleNanLower32);
  Operand hole_check_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      FAST_DOUBLE_ELEMENTS,
      offset);
  __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
  DeoptimizeIf(equal, instr->environment());

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ movsd(result, double_load_operand);
}
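
// A hole in a FixedDoubleArray is encoded as a NaN with a distinguished bit
// pattern chosen so that it is not produced by ordinary floating-point
// arithmetic. Since its low word is a fixed constant, comparing only the
// upper 32 bits (kHoleNanUpper32) at offset sizeof(kHoleNanLower32) is
// sufficient to detect the hole without loading the whole 64-bit value.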


Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    ElementsKind elements_kind,
    uint32_t offset) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int shift_size = ElementsKindToShiftSize(elements_kind);
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort("array index constant value too big");
    }
    return Operand(elements_pointer_reg,
                   constant_value * (1 << shift_size) + offset);
  } else {
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg, ToRegister(key),
                   scale_factor, offset);
  }
}
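
// Illustrative example: FAST_ELEMENTS holds tagged 8-byte values on x64, so
// its shift size is 3. A constant key k then yields
// Operand(base, k * 8 + offset), while a register key yields a SIB-scaled
// operand with scale factor times_8. Byte-sized external array kinds use
// shift size 0, i.e. scale times_1.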


void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
                                        instr->key(), elements_kind, 0));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    __ movsd(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ movsxbq(result, operand);
        break;
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      case EXTERNAL_PIXEL_ELEMENTS:
        __ movzxbq(result, operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ movsxwq(result, operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movzxwq(result, operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ movsxlq(result, operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ movl(result, operand);
        __ testl(result, result);
        // TODO(danno): we could be more clever here, perhaps having a special
        // version of the stub that detects if the overflow case actually
        // happens, and generate code that returns a double rather than int.
        DeoptimizeIf(negative, instr->environment());
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ONLY_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rax));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  // Check for arguments adapter frame.
  Label done, adapted;
  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adapted, Label::kNear);

  // No arguments adaptor frame.
  __ movq(result, rbp);
  __ jmp(&done, Label::kNear);

  // Arguments adaptor frame present.
  __ bind(&adapted);
  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Result is the frame pointer for the frame if not adapted and for the real
  // frame below the adaptor frame if adapted.
  __ bind(&done);
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  if (instr->InputAt(0)->IsRegister()) {
    __ cmpq(rbp, ToRegister(instr->InputAt(0)));
  } else {
    __ cmpq(rbp, ToOperand(instr->InputAt(0)));
  }
  __ movl(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ SmiToInteger32(result,
                    Operand(result,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ movq(kScratchRegister,
          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ testb(FieldOperand(kScratchRegister,
                        SharedFunctionInfo::kStrictModeByteOffset),
           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Do not transform the receiver to object for builtins.
  __ testb(FieldOperand(kScratchRegister,
                        SharedFunctionInfo::kNativeByteOffset),
           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Normal function. Replace undefined or null with global receiver.
  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
  __ j(equal, &global_object, Label::kNear);
  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  Condition is_smi = __ CheckSmi(receiver);
  DeoptimizeIf(is_smi, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok, Label::kNear);

  __ bind(&global_object);
  // TODO(kmillikin): We have a hydrogen value for the global object. See
  // if it's better to use it than to explicitly fetch it from the context
  // here.
  __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_INDEX));
  __ movq(receiver,
          FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(rax));  // Used for parameter count.
  ASSERT(function.is(rdi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(rax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmpq(length, Immediate(kArgumentsLimit));
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ movq(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ testl(length, length);
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ decl(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(rax);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->InputAt(0);
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ LoadHeapObject(result, instr->hydrogen()->closure());
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  __ movq(result, rsi);
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movq(result,
          Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  __ push(rsi);  // The context is the first argument.
  __ PushHeapObject(instr->hydrogen()->pairs());
  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register result = ToRegister(instr->result());
  __ movq(result, GlobalObjectOperand());
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind) {
  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
      function->shared()->formal_parameter_count() == arity;

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    __ LoadHeapObject(rdi, function);

    // Change context if needed.
    bool change_context =
        (info()->closure()->context() != function->context()) ||
        scope()->contains_with() ||
        (scope()->num_heap_slots() > 0);
    if (change_context) {
      __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
    }

    // Set rax to arguments count if adaption is not needed. Assumes that rax
    // is available to write to at this point.
    if (!function->NeedsArgumentsAdaption()) {
      __ Set(rax, arity);
    }

    // Invoke function.
    __ SetCallKind(rcx, call_kind);
    if (*function == *info()->closure()) {
      __ CallSelf();
    } else {
      __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
    }

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
  } else {
    // We need to adapt arguments.
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
  }

  // Restore context.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  CallKnownFunction(instr->function(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());

  Label done;
  Register tmp = input_reg.is(rax) ? rcx : rax;
  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  Label negative;
  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ testl(tmp, Immediate(HeapNumber::kSignMask));
  __ j(not_zero, &negative);
  __ jmp(&done);

  __ bind(&negative);

  Label allocated, slow;
  __ AllocateHeapNumber(tmp, tmp2, &slow);
  __ jmp(&allocated);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(rax)) {
    __ movq(tmp, rax);
  }

  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ shl(tmp2, Immediate(1));
  __ shr(tmp2, Immediate(1));
  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}
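
// The shl/shr pair above computes the absolute value directly on the
// IEEE-754 bit pattern: shifting the 64-bit value left by one drops the sign
// bit, and the logical shift right moves exponent and mantissa back into
// place with a zero sign. For example, 0xC000000000000000 (-2.0) becomes
// 0x4000000000000000 (2.0).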


void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  __ testl(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive);
  __ negl(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LUnaryMathOperation* instr_;
  };

  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    XMMRegister scratch = xmm0;
    XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andpd(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->InputAt(0));
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    EmitIntegerMathAbs(instr);
    __ Integer32ToSmi(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Label done;

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatures::Scope scope(SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize if minus zero.
      __ movq(output_reg, input_reg);
      __ subq(output_reg, Immediate(1));
      DeoptimizeIf(overflow, instr->environment());
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, xmm_scratch);
    __ cmpl(output_reg, Immediate(0x80000000));
    DeoptimizeIf(equal, instr->environment());
  } else {
    // Deoptimize on negative inputs.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(below, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ testq(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Set(output_reg, 0);
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, input_reg);

    // Overflow is signalled with minint.
    __ cmpl(output_reg, Immediate(0x80000000));
    DeoptimizeIf(equal, instr->environment());
  }
  __ bind(&done);
}


void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  const XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));

  Label done;
  // xmm_scratch = 0.5
  __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
  __ movq(xmm_scratch, kScratchRegister);
  Label below_half;
  __ ucomisd(xmm_scratch, input_reg);
  // If input_reg is NaN, this doesn't jump.
  __ j(above, &below_half, Label::kNear);
  // input = input + 0.5
  // This addition might give a result that isn't correct for
  // rounding, due to loss of precision, but only for a number that's
  // so big that the conversion below will overflow anyway.
  __ addsd(xmm_scratch, input_reg);
  // Compute Math.floor(input).
  // Use truncating instruction (OK because input is positive).
  __ cvttsd2si(output_reg, xmm_scratch);
  // Overflow is signalled with minint.
  __ cmpl(output_reg, Immediate(0x80000000));
  DeoptimizeIf(equal, instr->environment());
  __ jmp(&done);

  __ bind(&below_half);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bailout if negative (including -0).
    __ movq(output_reg, input_reg);
    __ testq(output_reg, output_reg);
    DeoptimizeIf(negative, instr->environment());
  } else {
    // Bailout if below -0.5, otherwise round to (positive) zero, even
    // for negative numbers.
    // xmm_scratch = -0.5
    __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
    __ movq(xmm_scratch, kScratchRegister);
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(below, instr->environment());
  }
  __ xorl(output_reg, output_reg);

  __ bind(&done);
}
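
// The magic constants above are IEEE-754 doubles written out as integers:
// 0x3FE0000000000000 is 0.5 (biased exponent 1022, i.e. 2^-1, zero mantissa)
// and 0xBFE0000000000000 is -0.5 (the same encoding with the sign bit set).
// They go through kScratchRegister because x64 has no instruction that moves
// a 64-bit immediate directly into an XMM register.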


void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  __ sqrtsd(input_reg, input_reg);
}


void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. According to IEEE-754, double-precision
  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
  __ movq(xmm_scratch, kScratchRegister);
  __ ucomisd(xmm_scratch, input_reg);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal. However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}
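
// Adding +0 canonicalizes a -0 base before the square root: IEEE-754 defines
// (-0) + (+0) as +0, so sqrtsd then yields +0 rather than -0, matching the
// expected result of Math.pow(-0, 0.5).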


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.

  // Choose register conforming to calling convention (when bailing out).
#ifdef _WIN64
  Register exponent = rdx;
#else
  Register exponent = rdi;
#endif
  ASSERT(!instr->InputAt(1)->IsRegister() ||
         ToRegister(instr->InputAt(1)).is(exponent));
  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
         ToDoubleRegister(instr->InputAt(1)).is(xmm1));
  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(exponent, &no_deopt);
    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
    DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoRandom(LRandom* instr) {
  class DeferredDoRandom: public LDeferredCode {
   public:
    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LRandom* instr_;
  };

  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);

  // Having marked this instruction as a call we can use any
  // registers.
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));

  // Choose the right register for the first argument depending on
  // calling convention.
#ifdef _WIN64
  ASSERT(ToRegister(instr->InputAt(0)).is(rcx));
  Register global_object = rcx;
#else
  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
  Register global_object = rdi;
#endif

  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == 2 * kSeedSize);

  __ movq(global_object,
          FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
  // rbx: FixedArray of the global context's random seeds

  // Load state[0].
  __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
  // If state[0] == 0, call runtime to initialize seeds.
  __ testl(rax, rax);
  __ j(zero, deferred->entry());
  // Load state[1].
  __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  // Only operate on the lower 32 bit of rax.
  __ movl(rdx, rax);
  __ andl(rdx, Immediate(0xFFFF));
  __ imull(rdx, rdx, Immediate(18273));
  __ shrl(rax, Immediate(16));
  __ addl(rax, rdx);
  // Save state[0].
  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ movl(rdx, rcx);
  __ andl(rdx, Immediate(0xFFFF));
  __ imull(rdx, rdx, Immediate(36969));
  __ shrl(rcx, Immediate(16));
  __ addl(rcx, rdx);
  // Save state[1].
  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  __ shll(rax, Immediate(14));
  __ andl(rcx, Immediate(0x3FFFF));
  __ addl(rax, rcx);

  __ bind(deferred->exit());
  // Convert 32 random bits in rax to 0.(32 random bits) in a double
  // by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
  __ movd(xmm2, rcx);
  __ movd(xmm1, rax);
  __ cvtss2sd(xmm2, xmm2);
  __ xorps(xmm1, xmm2);
  __ subsd(xmm1, xmm2);
}
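
// The seed updates above are two 16-bit multiply-with-carry sequences (the
// scheme usually attributed to Marsaglia): each 32-bit seed word keeps the
// current value in its low half and the previous carry in its high half. The
// final conversion relies on 0x49800000 being 2^20 as a single-precision
// float; XORing 32 random bits into the low mantissa bits of the double 2^20
// produces 2^20 + r/2^32, and subtracting 2^20 leaves a uniform value in
// [0, 1).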


void LCodeGen::DoDeferredRandom(LRandom* instr) {
  __ PrepareCallCFunction(1);
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  // Return value is in rax.
}


void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
  switch (instr->op()) {
    case kMathAbs:
      DoMathAbs(instr);
      break;
    case kMathFloor:
      DoMathFloor(instr);
      break;
    case kMathRound:
      DoMathRound(instr);
      break;
    case kMathSqrt:
      DoMathSqrt(instr);
      break;
    case kMathPowHalf:
      DoMathPowHalf(instr);
      break;
    case kMathLog:
      DoMathLog(instr);
      break;
    case kMathTan:
      DoMathTan(instr);
      break;
    case kMathCos:
      DoMathCos(instr);
      break;
    case kMathSin:
      DoMathSin(instr);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(instr->HasPointerMap());
  ASSERT(instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
  ParameterCount count(instr->arity());
  __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->key()).is(rcx));
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ Move(rcx, instr->name());
  CallCode(ic, mode, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ Move(rcx, instr->name());
  CallCode(ic, mode, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  __ Set(rax, instr->arity());
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  int offset = instr->offset();

  if (!instr->transition().is_null()) {
    __ Move(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
  }

  // Do the store.
  HType type = instr->hydrogen()->value()->type();
  SmiCheck check_needed =
      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (instr->is_in_object()) {
    __ movq(FieldOperand(object, offset), value);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      Register temp = ToRegister(instr->TempAt(0));
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          temp,
                          kDontSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    Register temp = ToRegister(instr->TempAt(0));
    __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
    __ movq(FieldOperand(temp, offset), value);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(temp,
                          offset,
                          value,
                          object,
                          kDontSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->hydrogen()->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    LStoreKeyedSpecializedArrayElement* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
                                        instr->key(), elements_kind, 0));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    XMMRegister value(ToDoubleRegister(instr->value()));
    __ cvtsd2ss(value, value);
    __ movss(operand, value);
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    Register value(ToRegister(instr->value()));
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ movb(operand, value);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movw(operand, value);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ movl(operand, value);
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ONLY_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->length()->IsRegister()) {
    Register reg = ToRegister(instr->length());
    if (FLAG_debug_code) {
      __ AbortIfNotZeroExtended(reg);
    }
    if (instr->index()->IsConstantOperand()) {
      __ cmpq(reg,
              Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
    } else {
      Register reg2 = ToRegister(instr->index());
      if (FLAG_debug_code) {
        __ AbortIfNotZeroExtended(reg2);
      }
      __ cmpq(reg, reg2);
    }
  } else {
    if (instr->index()->IsConstantOperand()) {
      __ cmpq(ToOperand(instr->length()),
              Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
    } else {
      __ cmpq(ToOperand(instr->length()), ToRegister(instr->index()));
    }
  }
  DeoptimizeIf(below_equal, instr->environment());
}


void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->object());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset =
        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
    __ movq(FieldOperand(elements, offset), value);
  } else {
    __ movq(FieldOperand(elements,
                         key,
                         times_pointer_size,
                         FixedArray::kHeaderSize),
            value);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ lea(key, FieldOperand(elements,
                             key,
                             times_pointer_size,
                             FixedArray::kHeaderSize));
    __ RecordWrite(elements,
                   key,
                   value,
                   kDontSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


void LCodeGen::DoStoreKeyedFastDoubleElement(
    LStoreKeyedFastDoubleElement* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());
  Label have_value;

  __ ucomisd(value, value);
  __ j(parity_odd, &have_value);  // NaN.

  __ Set(kScratchRegister, BitCast<uint64_t>(
      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
  __ movq(value, kScratchRegister);

  __ bind(&have_value);
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ movsd(double_store_operand, value);
}
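
// Storing a canonical NaN here guarantees that no NaN written by JavaScript
// code can alias the hole NaN pattern that the keyed-load hole check relies
// on. After ucomisd(value, value), parity_odd means the comparison was
// ordered, i.e. the value is not a NaN and can be stored unchanged.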


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rcx));
  ASSERT(ToRegister(instr->value()).is(rax));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register new_map_reg = ToRegister(instr->new_map_reg());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = from_map->elements_kind();
  ElementsKind to_kind = to_map->elements_kind();

  Label not_applicable;
  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable);
  __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
    __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
    // Write barrier.
    ASSERT_NE(instr->temp_reg(), NULL);
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        ToRegister(instr->temp_reg()), kDontSaveFPRegs);
  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
             to_kind == FAST_DOUBLE_ELEMENTS) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(rdx));
    ASSERT(new_map_reg.is(rbx));
    __ movq(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
             RelocInfo::CODE_TARGET, instr);
  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(rdx));
    ASSERT(new_map_reg.is(rbx));
    __ movq(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
             RelocInfo::CODE_TARGET, instr);
  } else {
    UNREACHABLE();
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  EmitPushTaggedOperand(instr->left());
  EmitPushTaggedOperand(instr->right());
  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Push(Smi::FromInt(const_index));
  } else {
    Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  if (FLAG_debug_code) {
    __ AbortIfNotSmi(rax);
  }
  __ SmiToInteger32(rax, rax);
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
  __ j(above, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ movq(result, FieldOperand(result,
                               char_code, times_pointer_size,
                               FixedArray::kHeaderSize));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoStringLength(LStringLength* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  __ movq(result, FieldOperand(string, String::kLengthOffset));
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  if (input->IsRegister()) {
    __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
  } else {
    __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
  }
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  __ Integer32ToSmi(reg, reg);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->TempAt(0));

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // Ensure that value in rax survives popping registers.
    __ movq(kScratchRegister, rax);
  }
  __ movq(reg, kScratchRegister);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Register input = ToRegister(instr->InputAt(0));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ Integer32ToSmi(input, input);
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Register input = ToRegister(instr->InputAt(0));
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
  }
  __ SmiToInteger32(input, input);
}
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool deoptimize_on_undefined,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env) {
  Label load_smi, done;

  // Smi check.
  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

  // Heap number map check.
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  if (deoptimize_on_undefined) {
    DeoptimizeIf(not_equal, env);
  } else {
    Label heap_number;
    __ j(equal, &heap_number, Label::kNear);

    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(not_equal, env);

    // Convert undefined to NaN. Compute NaN as 0/0.
    __ xorps(result_reg, result_reg);
    __ divsd(result_reg, result_reg);
    __ jmp(&done, Label::kNear);

    __ bind(&heap_number);
  }
  // Heap number to XMM conversion.
  __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  if (deoptimize_on_minus_zero) {
    XMMRegister xmm_scratch = xmm0;
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(xmm_scratch, result_reg);
    __ j(not_equal, &done, Label::kNear);
    __ movmskpd(kScratchRegister, result_reg);
    __ testq(kScratchRegister, Immediate(1));
    DeoptimizeIf(not_zero, env);
  }
  __ jmp(&done, Label::kNear);

  // Smi to XMM conversion
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}


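// Note on the minus-zero check above: ucomisd cannot tell -0.0 from +0.0
// (they compare equal), so once the value is known to equal zero the code
// inspects the sign bit directly. movmskpd packs the sign bits of the two
// double lanes into bits 0 and 1 of the destination; bit 0 set means the
// low lane, i.e. the loaded value, is negative zero.

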
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Label done, heap_number;
  Register input_reg = ToRegister(instr->InputAt(0));

  // Heap number map check.
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);

  if (instr->truncating()) {
    __ j(equal, &heap_number, Label::kNear);
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, 0);
    __ jmp(&done, Label::kNear);

    __ bind(&heap_number);

    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2siq(input_reg, xmm0);
    __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
    __ cmpq(input_reg, kScratchRegister);
    DeoptimizeIf(equal, instr->environment());
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(not_equal, instr->environment());

    XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, xmm0);
    __ cvtlsi2sd(xmm_temp, input_reg);
    __ ucomisd(xmm0, xmm_temp);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ testl(input_reg, input_reg);
      __ j(not_zero, &done);
      __ movmskpd(input_reg, xmm0);
      __ andl(input_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
    }
  }
  __ bind(&done);
}


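// The 0x8000000000000000 comparison above relies on cvttsd2siq writing the
// "integer indefinite" value (INT64_MIN) whenever the source is NaN or out
// of signed 64-bit range, so a single cmpq catches every failed truncation.
// Equivalent C++ sketch (hypothetical, for illustration only):
//
//   int64_t result = static_cast<int64_t>(input);  // cvttsd2siq
//   if (result == INT64_MIN) Deoptimize();         // conversion failed

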
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);
  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
  __ JumpIfNotSmi(input_reg, deferred->entry());
  __ SmiToInteger32(input_reg, input_reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment());
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    __ cvttsd2siq(result_reg, input_reg);
    __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
    __ cmpq(result_reg, kScratchRegister);
    DeoptimizeIf(equal, instr->environment());
  } else {
    __ cvttsd2si(result_reg, input_reg);
    __ cvtlsi2sd(xmm0, result_reg);
    __ ucomisd(xmm0, input_reg);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      // The integer converted back is equal to the original. We
      // only have to test if we got -0 as an input.
      __ testl(result_reg, result_reg);
      __ j(not_zero, &done, Label::kNear);
      __ movmskpd(result_reg, input_reg);
      // Bit 0 contains the sign of the double in input_reg.
      // If input was positive, we are ok and return 0, otherwise
      // deoptimize.
      __ andl(result_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ bind(&done);
    }
  }
}


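// The non-truncating path converts back with cvtlsi2sd and compares with
// ucomisd to prove the conversion was exact: any rounding makes the
// round-tripped value differ (not_equal), and a NaN input leaves the
// comparison unordered, which x86 reports through the parity flag
// (parity_even).

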
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));

  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


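// The IsPowerOf2(mask) fast path works because the tag is then either 0 or
// equal to the mask itself, so (type & mask) == tag collapses to a single
// testb: for example, with mask == 0x10 and tag == 0 the code deoptimizes
// when the bit is set (not_zero), and with tag == 0x10 it deoptimizes when
// the bit is clear (zero).

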
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  Register reg = ToRegister(instr->value());
  Handle<JSFunction> target = instr->hydrogen()->target();
  if (isolate()->heap()->InNewSpace(*target)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(target);
    __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
    __ cmpq(reg, Operand(kScratchRegister, 0));
  } else {
    __ Cmp(reg, target);
  }
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoCheckMapCommon(Register reg,
                                Handle<Map> map,
                                CompareMapMode mode,
                                LEnvironment* env) {
  Label success;
  __ CompareMap(reg, map, &success, mode);
  DeoptimizeIf(not_equal, env);
  __ bind(&success);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  Label success;
  SmallMapList* map_set = instr->hydrogen()->map_set();
  for (int i = 0; i < map_set->length() - 1; i++) {
    Handle<Map> map = map_set->at(i);
    __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
    __ j(equal, &success);
  }
  Handle<Map> map = map_set->last();
  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  Register temp_reg = ToRegister(instr->TempAt(0));
  __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  Register temp_reg = ToRegister(instr->TempAt(0));
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->TempAt(1));
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ movq(input_reg, Immediate(0));
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


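// Clamping thus handles the three tagged cases separately: a smi is
// untagged and clamped as an integer, a heap number is clamped as a double
// (with rounding), and undefined becomes 0; any other input deoptimizes.
// This matches the clamped-to-[0, 255] conversion used for pixel-array
// stores.

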
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->TempAt(0));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  __ LoadHeapObject(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    __ LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}


void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
  class DeferredAllocateObject: public LDeferredCode {
   public:
    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocateObject* instr_;
  };

  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->TempAt(0));
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();
  ASSERT(initial_map->pre_allocated_property_fields() +
         initial_map->unused_property_fields() -
         initial_map->inobject_properties() == 0);

  // Allocate memory for the object. The initial map might change when
  // the constructor's prototype changes, but instance size and property
  // counts remain unchanged (if slack tracking finished).
  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
  __ AllocateInNewSpace(instance_size,
                        result,
                        no_reg,
                        scratch,
                        deferred->entry(),
                        TAG_OBJECT);

  __ bind(deferred->exit());
  if (FLAG_debug_code) {
    Label is_in_new_space;
    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
    __ Abort("Allocated object is not in new-space");
    __ bind(&is_in_new_space);
  }

  // Load the initial map.
  Register map = scratch;
  __ LoadHeapObject(scratch, constructor);
  __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));

  if (FLAG_debug_code) {
    __ AbortIfSmi(map);
    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
            Immediate(instance_size >> kPointerSizeLog2));
    __ Assert(equal, "Unexpected instance size");
    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
            Immediate(initial_map->pre_allocated_property_fields()));
    __ Assert(equal, "Unexpected pre-allocated property fields count");
    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
            Immediate(initial_map->unused_property_fields()));
    __ Assert(equal, "Unexpected unused property fields count");
    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
            Immediate(initial_map->inobject_properties()));
    __ Assert(equal, "Unexpected in-object property fields count");
  }

  // Initialize map and fields of the newly allocated object.
  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
  __ movq(FieldOperand(result, JSObject::kMapOffset), map);
  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
  __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
  __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
  if (initial_map->inobject_properties() != 0) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
    for (int i = 0; i < initial_map->inobject_properties(); i++) {
      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
      __ movq(FieldOperand(result, property_offset), scratch);
    }
  }
}


void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
  Register result = ToRegister(instr->result());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(Smi::FromInt(instance_size));
  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  Heap* heap = isolate()->heap();
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate_elements_kind();

  // Deopt if the array literal boilerplate ElementsKind is of a type different
  // than the expected one. The check isn't necessary if the boilerplate has
  // already been converted to FAST_ELEMENTS.
  if (boilerplate_elements_kind != FAST_ELEMENTS) {
    __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
    __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
    // Load the map's "bit field 2".
    __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
    // Retrieve elements_kind from bit field 2.
    __ and_(rbx, Immediate(Map::kElementsKindMask));
    __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
                           Map::kElementsKindShift));
    DeoptimizeIf(not_equal, instr->environment());
  }

  // Set up the parameters to the stub/runtime call.
  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  // Boilerplate already exists, constant elements are never accessed.
  // Pass an empty fixed array.
  __ Push(Handle<FixedArray>(heap->empty_fixed_array()));

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
                            Register result,
                            Register source,
                            int* offset) {
  ASSERT(!source.is(rcx));
  ASSERT(!result.is(rcx));

  // Only elements backing stores for non-COW arrays need to be copied.
  Handle<FixedArrayBase> elements(object->elements());
  bool has_elements = elements->length() > 0 &&
      elements->map() != isolate()->heap()->fixed_cow_array_map();

  // Increase the offset so that subsequent objects end up right after
  // this object and its backing store.
  int object_offset = *offset;
  int object_size = object->map()->instance_size();
  int elements_offset = *offset + object_size;
  int elements_size = has_elements ? elements->Size() : 0;
  *offset += object_size + elements_size;

  // Copy object header.
  ASSERT(object->properties()->length() == 0);
  int inobject_properties = object->map()->inobject_properties();
  int header_size = object_size - inobject_properties * kPointerSize;
  for (int i = 0; i < header_size; i += kPointerSize) {
    if (has_elements && i == JSObject::kElementsOffset) {
      __ lea(rcx, Operand(result, elements_offset));
    } else {
      __ movq(rcx, FieldOperand(source, i));
    }
    __ movq(FieldOperand(result, object_offset + i), rcx);
  }

  // Copy in-object properties.
  for (int i = 0; i < inobject_properties; i++) {
    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
    if (value->IsJSObject()) {
      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
      __ lea(rcx, Operand(result, *offset));
      __ movq(FieldOperand(result, total_offset), rcx);
      __ LoadHeapObject(source, value_object);
      EmitDeepCopy(value_object, result, source, offset);
    } else if (value->IsHeapObject()) {
      __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
      __ movq(FieldOperand(result, total_offset), rcx);
    } else {
      __ movq(rcx, value, RelocInfo::NONE);
      __ movq(FieldOperand(result, total_offset), rcx);
    }
  }

  if (has_elements) {
    // Copy elements backing store header.
    __ LoadHeapObject(source, elements);
    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
      __ movq(rcx, FieldOperand(source, i));
      __ movq(FieldOperand(result, elements_offset + i), rcx);
    }

    // Copy elements backing store content.
    int elements_length = elements->length();
    if (elements->IsFixedDoubleArray()) {
      Handle<FixedDoubleArray> double_array =
          Handle<FixedDoubleArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int64_t value = double_array->get_representation(i);
        int total_offset =
            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
        __ movq(rcx, value, RelocInfo::NONE);
        __ movq(FieldOperand(result, total_offset), rcx);
      }
    } else if (elements->IsFixedArray()) {
      for (int i = 0; i < elements_length; i++) {
        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
        Handle<Object> value = JSObject::GetElement(object, i);
        if (value->IsJSObject()) {
          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
          __ lea(rcx, Operand(result, *offset));
          __ movq(FieldOperand(result, total_offset), rcx);
          __ LoadHeapObject(source, value_object);
          EmitDeepCopy(value_object, result, source, offset);
        } else if (value->IsHeapObject()) {
          __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
          __ movq(FieldOperand(result, total_offset), rcx);
        } else {
          __ movq(rcx, value, RelocInfo::NONE);
          __ movq(FieldOperand(result, total_offset), rcx);
        }
      }
    } else {
      UNREACHABLE();
    }
  }
}


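// EmitDeepCopy flattens the whole literal into one contiguous allocation:
// *offset is the cursor within that block, so each object is followed
// immediately by its elements backing store, and nested objects are
// appended after their parent. Pointers between the copies are computed
// with lea relative to |result| rather than taken from the boilerplate,
// which is why the clone needs no relocation or runtime call.

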
void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
  int size = instr->hydrogen()->total_size();

  // Allocate all objects that are part of the literal in one big
  // allocation. This avoids multiple limit checks.
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);

  __ bind(&allocated);
  int offset = 0;
  __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
  EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
  ASSERT_EQ(size, offset);
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  Handle<FixedArray> constant_properties =
      instr->hydrogen()->constant_properties();

  // Set up the parameters to the stub/runtime call.
  __ PushHeapObject(literals);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(constant_properties);
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  flags |= instr->hydrogen()->has_function()
      ? ObjectLiteral::kHasFunction
      : ObjectLiteral::kNoFlags;
  __ Push(Smi::FromInt(flags));

  // Pick the right runtime function or stub to call.
  int properties_count = constant_properties->length() / 2;
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else if (flags != ObjectLiteral::kFastElements ||
      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  } else {
    FastCloneShallowObjectStub stub(properties_count);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(rax));
  __ push(rax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // rdi = JS function.
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ movq(rbx, FieldOperand(rcx, literal_offset));
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function
  // Result will be in rax.
  __ push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ movq(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movq(rdx, FieldOperand(rbx, i));
    __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movq(FieldOperand(rax, i), rdx);
    __ movq(FieldOperand(rax, i + kPointerSize), rcx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movq(FieldOperand(rax, size - kPointerSize), rdx);
  }
}


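// The unrolled loop above moves two pointers per iteration; since |size|
// is a compile-time constant, the single trailing movq covers the odd
// pointer when size / kPointerSize is odd, so every field of the
// boilerplate regexp is copied exactly once.

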
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(shared_info->language_mode());
    __ Push(shared_info);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(rsi);
    __ Push(shared_info);
    __ PushRoot(pretenure ?
                Heap::kTrueValueRootIndex :
                Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->InputAt(0);
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition =
      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
  if (final_branch_condition != no_condition) {
    EmitBranch(true_block, false_block, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_symbol())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ j(equal, true_label);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label);
  }

  return final_branch_condition;
}


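// For an unrecognized type literal the code falls through to the
// unconditional jump to false_label and returns no_condition; the caller
// (DoTypeofIsAndBranch) checks for no_condition and skips emitting a
// branch in that case.

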
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);
  }
}


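// Example: if the previous lazy-deopt site ended at pc 40, the patch area
// is 13 bytes, and the current pc is 45, then 40 + 13 - 45 = 8 bytes of
// Nop padding are emitted so that patching the earlier site cannot
// overwrite the code that follows. (The 13-byte figure is illustrative;
// the real value comes from Deoptimizer::patch_size().)

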
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  EmitPushTaggedOperand(obj);
  EmitPushTaggedOperand(key);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  // Create safepoint generator that will also ensure enough space in the
  // reloc info for patching in deoptimization (since this is invoking a
  // builtin).
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ Push(Smi::FromInt(strict_mode_flag()));
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoIn(LIn* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  EmitPushTaggedOperand(key);
  EmitPushTaggedOperand(obj);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);
    StackCheckStub stub;
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


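// Stack checks thus come in two flavors: at function entry the check calls
// StackCheckStub inline (execution continues straight through, so no
// deferred code is needed), while a backwards branch routes the slow path
// through DeferredStackCheck so that the loop back-edge stays compact.

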
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpq(rax, null_value);
  DeoptimizeIf(equal, instr->environment());

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


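// Runtime::kGetPropertyNamesFast returns either a map, meaning the
// receiver's enum cache can be used, or a fixed array of property names.
// Comparing the result's map against the meta map (the map of all maps)
// distinguishes the two cases; a fixed-array result deoptimizes here so
// that only the cacheable fast path stays in optimized code.

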
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  __ LoadInstanceDescriptors(map, result);
  __ movq(result,
          FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
  __ movq(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpq(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  Label out_of_object, done;
  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object);
  __ movq(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  __ movq(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


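// The index register encodes the property's location: a non-negative value
// selects an in-object field at a fixed offset from the start of the
// object, while a negative value selects slot -(index + 1) of the
// out-of-object properties array, hence the negation and the kPointerSize
// bias in the final load.

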
#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64