// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"

#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/code-stubs.h"
#include "src/stub-cache.h"
#include "src/hydrogen-osr.h"

namespace v8 {
namespace internal {

class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}

void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered caller double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered caller double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

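// Note: SaveCallerDoubles() and RestoreCallerDoubles() walk the same
// BitVector in the same order, so each allocated double register is paired
// with the same 'count * kDoubleSize' stack slot on both paths.
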
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r1: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ ldr(r2, MemOperand(sp, receiver_offset));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);

      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));

      __ str(r2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ sub(sp, sp, Operand(slots * kPointerSize));
      __ push(r0);
      __ push(r1);
      __ add(r0, sp, Operand(slots * kPointerSize));
      __ mov(r1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ sub(r0, r0, Operand(kPointerSize));
      __ str(r1, MemOperand(r0, 2 * kPointerSize));
      __ cmp(r0, sp);
      __ b(ne, &loop);
      __ pop(r1);
      __ pop(r0);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r1);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r0 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ mov(cp, r0);
    __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ str(r0, target);
        // Update the write barrier. This clobbers r3 and r0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp,
              target.offset(),
              r0,
              r3,
              GetLinkRegisterState(),
              kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ sub(sp, sp, Operand(slots * kPointerSize));
}

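// On OSR entry the unoptimized frame is already on the stack, so the
// optimized code only needs to grow it by the difference in spill slots.
// For example, if the optimized code needs 10 slots and the unoptimized
// frame already provides 6, only 4 * kPointerSize bytes are reserved here.
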
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ PushFixedFrame();
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(ip);
        __ PopFixedFrame();
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}

bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32-bit data word after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (deopt_jump_table_.length() > 0) {
    Label needs_frame;
    Comment(";;; -------------------- Jump table --------------------");
    Label table_start;
    __ bind(&table_start);
    for (int i = 0; i < deopt_jump_table_.length(); i++) {
      __ bind(&deopt_jump_table_[i].label);
      Address entry = deopt_jump_table_[i].address;
      Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
      if (id == Deoptimizer::kNotDeoptimizationEntry) {
        Comment(";;; jump table entry %d.", i);
      } else {
        Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
      }
      if (deopt_jump_table_[i].needs_frame) {
        ASSERT(!info()->saves_caller_doubles());
        __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
        if (needs_frame.is_bound()) {
          __ b(&needs_frame);
        } else {
          __ bind(&needs_frame);
          __ PushFixedFrame();
          // This variant of deopt can only be used with stubs. Since we don't
          // have a function pointer to install in the stack frame that we're
          // building, install a special marker there instead.
          ASSERT(info()->IsStub());
          __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
          __ push(scratch0());
          __ add(fp, sp,
                 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
          __ mov(lr, Operand(pc), LeaveCC, al);
          __ mov(pc, ip);
        }
      } else {
        if (info()->saves_caller_doubles()) {
          ASSERT(info()->IsStub());
          RestoreCallerDoubles();
        }
        __ mov(lr, Operand(pc), LeaveCC, al);
        __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
      }
      masm()->CheckConstPool(false, false);
    }

    // Force constant pool emission at the end of the deopt jump table to make
    // sure that no constant pools are emitted after.
    masm()->CheckConstPool(true, false);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

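// The 'mov lr, pc' / 'mov pc, <entry>' pairs above are the classic ARM call
// idiom: reading pc yields the address of the current instruction plus 8,
// so lr ends up pointing at the instruction after the following 'mov pc',
// which is exactly the return address the deoptimizer entry expects.
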
bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
  return DwVfpRegister::FromAllocationIndex(index);
}

Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}

Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}

DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}

DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                               SwVfpRegister flt_scratch,
                                               DwVfpRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}

bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}

int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}

int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}

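// On 32-bit ARM a Smi is the payload shifted left by one with a 0 tag bit,
// so the reinterpret_cast above yields the raw word directly: e.g.
// Smi::FromInt(3) becomes the bit pattern 0x00000006.
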
Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}

Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}

static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize;
}

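// Without an eager frame, argument slots are addressed relative to sp.
// Argument slot indices are negative, so e.g. index -1 maps to offset 0
// and index -2 maps to offset kPointerSize.
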
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

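// The values of a dematerialized object are not stored inline in the
// environment's first translation_size slots; they are appended after them,
// which is why the recursion above reads from 'translation_size +
// dematerialized_index' and bumps the running index by object_length.
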
int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
  int size = masm()->CallSize(code, mode);
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    size += Assembler::kInstrSize;  // extra nop() added in CallCodeGeneric.
  }
  return size;
}

void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        TargetAddressStorageMode storage_mode) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}

void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               TargetAddressStorageMode storage_mode) {
  ASSERT(instr != NULL);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}

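// The nop after BINARY_OP_IC / COMPARE_IC calls acts as a marker: the IC
// patching code inspects the instruction following the call to decide
// whether inlined smi code is present (see the comment in CallCodeSize), so
// the nop must not be separated from the call by a constant pool, hence the
// BlockConstPoolScope above.
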
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    // Store the condition on the stack if necessary.
    if (condition != al) {
      __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
      __ mov(scratch, Operand(1), LeaveCC, condition);
      __ push(scratch);
    }

    __ push(r1);
    __ mov(scratch, Operand(count));
    __ ldr(r1, MemOperand(scratch));
    __ sub(r1, r1, Operand(1), SetCC);
    __ movw(r1, FLAG_deopt_every_n_times, eq);
    __ str(r1, MemOperand(scratch));
    __ pop(r1);

    if (condition != al) {
      // Clean up the stack before the deoptimizer call.
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);

    // 'Restore' the condition in a slightly hacky way. (It would be better
    // to use 'msr' and 'mrs' instructions here, but they are not supported by
    // our ARM simulator).
    if (condition != al) {
      condition = ne;
      __ cmp(scratch, Operand::Zero());
    }
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", condition);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ b(condition, &deopt_jump_table_.last().label);
  }
}

void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, environment, bailout_type);
}

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
    // Register pp always contains a pointer to the constant pool.
    safepoint.DefinePointerRegister(pp, zone());
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}

void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmp(dividend, Operand::Zero());
    __ b(pl, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ rsb(dividend, dividend, Operand::Zero());
    __ and_(dividend, dividend, Operand(mask));
    __ rsb(dividend, dividend, Operand::Zero(), SetCC);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment());
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, dividend, Operand(mask));
  __ bind(&done);
}

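// Example: for divisor -4 the mask is 3. With dividend -7 the code above
// negates to 7, masks to 3, and negates again to -3, i.e. the remainder
// keeps the sign of the dividend, matching JavaScript's % semantics.
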
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr->environment());
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ smull(result, ip, result, ip);
  __ sub(result, dividend, result, SetCC);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ b(ne, &remainder_not_zero);
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr->environment());
    __ bind(&remainder_not_zero);
  }
}

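// TruncatingDiv computes the truncated quotient dividend / |divisor| via a
// magic-number multiply; the smull/sub pair then reconstructs the remainder
// as dividend - quotient * |divisor|, which again carries the dividend's
// sign. Only the low 32 bits of the product are needed here.
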
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Label done;

    // Check for x % 0, sdiv might signal an exception. We have to deopt in
    // this case because we can't return a NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
    // want. We have to deopt if we care about -0, because we can't return
    // that.
    if (hmod->CheckFlag(HValue::kCanOverflow)) {
      Label no_overflow_possible;
      __ cmp(left_reg, Operand(kMinInt));
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      } else {
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
        __ jmp(&done);
      }
      __ bind(&no_overflow_possible);
    }

    // For 'r3 = r1 % r2' we can have the following ARM code:
    //   sdiv r3, r1, r2
    //   mls r3, r3, r2, r1

    __ sdiv(result_reg, left_reg, right_reg);
    __ Mls(result_reg, result_reg, right_reg, left_reg);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr->environment());
    }
    __ bind(&done);

  } else {
    // General case, without any SDIV support.
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    ASSERT(!scratch.is(left_reg));
    ASSERT(!scratch.is(right_reg));
    ASSERT(!scratch.is(result_reg));
    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
    ASSERT(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    ASSERT(!quotient.is(dividend));
    ASSERT(!quotient.is(divisor));

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return
    // a NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    __ Move(result_reg, left_reg);
    // Load the arguments in VFP registers. The divisor value is preloaded
    // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne) The last comment seems to be wrong nowadays.
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    // We do not care about the sign of the divisor. Note that we still handle
    // the kMinInt % -1 case correctly, though.
    __ vabs(divisor, divisor);
    // Compute the quotient and round it to a 32bit integer.
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    // Compute the remainder in result.
    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr->environment());
    }
    __ bind(&done);
  }
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
  ASSERT(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment());
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ tst(dividend, Operand(mask));
    DeoptimizeIf(ne, instr->environment());
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ rsb(result, dividend, Operand(0));
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ mov(result, dividend);
  } else if (shift == 1) {
    __ add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    __ mov(result, Operand(dividend, ASR, 31));
    __ add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ rsb(result, result, Operand(0));
}

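// The add-before-shift sequence rounds the signed division towards zero:
// for a negative dividend it adds 2^shift - 1 (extracted from the sign bits
// via LSR) before the arithmetic shift. Example with divisor 4 (shift 2):
// dividend -7 becomes -7 + 3 = -4, and -4 >> 2 = -1, which is -7 / 4
// truncated towards zero.
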
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(ip, Operand(divisor));
    __ smull(scratch0(), ip, result, ip);
    __ sub(scratch0(), scratch0(), dividend, SetCC);
    DeoptimizeIf(ne, instr->environment());
  }
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
      // Do the test only if it hadn't been done above.
      __ cmp(divisor, Operand::Zero());
    }
    __ b(pl, &positive);
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
    __ bind(&positive);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      (!CpuFeatures::IsSupported(SUDIV) ||
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
    // We don't need to check for overflow when truncating with sdiv
    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
    __ cmp(dividend, Operand(kMinInt));
    __ cmp(divisor, Operand(-1), eq);
    DeoptimizeIf(eq, instr->environment());
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, dividend, divisor);
  } else {
    DoubleRegister vleft = ToDoubleRegister(instr->temp());
    DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), dividend);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), divisor);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Compute remainder and deopt if it's not zero.
    Register remainder = scratch0();
    __ Mls(remainder, result, divisor, dividend);
    __ cmp(remainder, Operand::Zero());
    DeoptimizeIf(ne, instr->environment());
  }
}

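// Note the conditional 'cmp divisor, #-1, eq' above: the second compare only
// executes if the first one set eq, so a final eq condition means both
// dividend == kMinInt and divisor == -1, the single overflowing case.
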
void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DwVfpRegister addend = ToDoubleRegister(instr->addend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(addend.is(ToDoubleRegister(instr->result())));

  __ vmla(addend, multiplier, multiplicand);
}

void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(minuend.is(ToDoubleRegister(instr->result())));

  __ vmls(minuend, multiplier, multiplicand);
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and
  // we can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ rsb(result, dividend, Operand::Zero(), SetCC);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr->environment());
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr->environment());
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ mov(result, Operand(result, ASR, shift));
    return;
  }

  __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
  __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
}

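// The final pair of conditional moves handles the dividend == kMinInt case:
// the earlier 'rsb ... SetCC' sets the overflow flag (vs) exactly when the
// negation wrapped, in which case the correct quotient kMinInt / divisor is
// materialized directly; otherwise (vc) the negated value is shifted.
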
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ rsb(result, result, Operand::Zero());
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  ASSERT(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ cmp(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());
  __ sub(result, result, Operand(1));
  __ bind(&done);
}

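// The adjustment path computes floor(a / b) as trunc((a + s) / b) - 1, where
// s = 1 for b > 0 and s = -1 for b < 0; this is valid exactly when the
// quotient is negative. Example with divisor 4: dividend -7 becomes -6,
// trunc(-6 / 4) = -1, minus 1 gives floor(-7 / 4) = -2; dividend -8 becomes
// -7, trunc(-7 / 4) = -1, minus 1 gives -2 = floor(-8 / 4).
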
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register left = ToRegister(instr->dividend());
  Register right = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
      // Do the test only if it hadn't been done above.
      __ cmp(right, Operand::Zero());
    }
    __ b(pl, &positive);
    __ cmp(left, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
    __ bind(&positive);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      (!CpuFeatures::IsSupported(SUDIV) ||
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
    // We don't need to check for overflow when truncating with sdiv
    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
    __ cmp(left, Operand(kMinInt));
    __ cmp(right, Operand(-1), eq);
    DeoptimizeIf(eq, instr->environment());
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, left, right);
  } else {
    DoubleRegister vleft = ToDoubleRegister(instr->temp());
    DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), left);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), right);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());
  }

  // We performed a truncating division. Correct the result if necessary.
  Label done;
  Register remainder = scratch0();
  __ Mls(remainder, result, right, left);
  __ cmp(remainder, Operand::Zero());
  __ b(eq, &done);
  __ eor(remainder, remainder, Operand(right));
  __ add(result, result, Operand(remainder, ASR, 31));
  __ bind(&done);
}

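// The tail turns the truncating quotient into a flooring one: if the
// remainder is non-zero and its sign differs from the divisor's (detected by
// the eor setting the sign bit), 'remainder ASR 31' is -1 and the quotient
// is decremented. Example: -7 / 2 gives sdiv -3 with remainder -1;
// -1 ^ 2 is negative, so the result becomes -4 = floor(-3.5).
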
void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If constant is negative and left is zero, the result should be -0.
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ rsb(result, left, Operand::Zero(), SetCC);
          DeoptimizeIf(vs, instr->environment());
        } else {
          __ rsb(result, left, Operand::Zero());
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          __ cmp(left, Operand::Zero());
          DeoptimizeIf(mi, instr->environment());
        }
        __ mov(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (IsPowerOf2(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ mov(result, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ add(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ rsb(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);
        }
    }

  } else {
    ASSERT(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      Register scratch = scratch0();
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ smull(result, scratch, result, right);
      } else {
        __ smull(result, scratch, left, right);
      }
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr->environment());
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mul(result, result, right);
      } else {
        __ mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ teq(left, Operand(right));
      __ b(pl, &done);
      // Bail out if the result is minus zero.
      __ cmp(result, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
      __ bind(&done);
    }
  }
}

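// The smull overflow check relies on the 64-bit product: the multiplication
// fits in 32 bits exactly when the high word equals the sign extension of
// the low word, i.e. scratch == result ASR 31. The minus-zero check uses
// 'teq left, right': the sign bit of left ^ right is set only when the
// operands have opposite signs, the only way a zero product can stand
// for -0.
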
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      break;
    case Token::BIT_OR:
      __ orr(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ mvn(result, Operand(left));
      } else {
        __ eor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        __ mov(result, Operand(left, ROR, scratch));
        break;
      case Token::SAR:
        __ mov(result, Operand(left, ASR, scratch));
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          __ mov(result, Operand(left, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr->environment());
        } else {
          __ mov(result, Operand(left, LSR, scratch));
        }
        break;
      case Token::SHL:
        __ mov(result, Operand(left, LSL, scratch));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ROR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ASR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSR, shift_count));
        } else {
          if (instr->can_deopt()) {
            __ tst(left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment());
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ mov(result, Operand(left, LSL, shift_count - 1));
              __ SmiTag(result, result, SetCC);
            } else {
              __ SmiTag(result, left, SetCC);
            }
            DeoptimizeIf(vs, instr->environment());
          } else {
            __ mov(result, Operand(left, LSL, shift_count));
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

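// JavaScript masks shift counts to five bits, which the and_ with 0x1F and
// the static '& 0x1F' above implement. The extra deopts cover the two cases
// a plain machine shift cannot represent: an unsigned (SHR) result with the
// top bit set does not fit in a signed 32-bit value, and a smi-tagged SHL
// can overflow the 31-bit smi payload, which SmiTag's SetCC detects.
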
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}

void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}

void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Vmov(result, v, scratch0());
}

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}

void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(r0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  __ SmiTst(object);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
  DeoptimizeIf(ne, instr->environment());

  if (index->value() == 0) {
    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand(stamp));
      __ ldr(scratch, MemOperand(scratch));
      __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ cmp(scratch, scratch0());
      __ b(ne, &runtime);
      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                     kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(r1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


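// Returns the MemOperand addressing the character at |index| within the
// sequential string |string|, scaling the index by the character width
// implied by |encoding|. Clobbers scratch0() when the index is in a register.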
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  ASSERT(!scratch.is(string));
  ASSERT(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ add(scratch, string, Operand(ToRegister(index)));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ and_(scratch, scratch,
            Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                            ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ ldrb(result, operand);
  } else {
    __ ldrh(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ strb(value, operand);
  } else {
    __ strh(value, operand);
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}


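// Emits Math.min/Math.max. Integer inputs are handled with a compare and two
// conditional moves; double inputs need extra care so that min(-0, +0) is -0,
// max(-0, +0) is +0, and any NaN operand produces a NaN result.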
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
    Register left_reg = ToRegister(left);
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        ? ToOperand(right)
        : Operand(EmitLoadRegister(right, ip));
    Register result_reg = ToRegister(instr->result());
    __ cmp(left_reg, right_op);
    __ Move(result_reg, left_reg, condition);
    __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    DwVfpRegister left_reg = ToDoubleRegister(left);
    DwVfpRegister right_reg = ToDoubleRegister(right);
    DwVfpRegister result_reg = ToDoubleRegister(instr->result());
    Label result_is_nan, return_left, return_right, check_zero, done;
    __ VFPCompareAndSetFlags(left_reg, right_reg);
    if (operation == HMathMinMax::kMathMin) {
      __ b(mi, &return_left);
      __ b(gt, &return_right);
    } else {
      __ b(mi, &return_right);
      __ b(gt, &return_left);
    }
    __ b(vs, &result_is_nan);
    // Left equals right => check for -0.
    __ VFPCompareAndSetFlags(left_reg, 0.0);
    if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
      __ b(ne, &done);  // left == right != 0.
    } else {
      __ b(ne, &return_left);  // left == right != 0.
    }
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // We could use a single 'vorr' instruction here if we had NEON support.
      __ vneg(left_reg, left_reg);
      __ vsub(result_reg, left_reg, right_reg);
      __ vneg(result_reg, result_reg);
    } else {
      // Since we operate on +0 and/or -0, vadd and vand have the same effect;
      // the decision for vadd is easy because vand is a NEON instruction.
      __ vadd(result_reg, left_reg, right_reg);
    }
    __ b(&done);

    __ bind(&result_is_nan);
    __ vadd(result_reg, left_reg, right_reg);
    __ b(&done);

    __ bind(&return_right);
    __ Move(result_reg, right_reg);
    if (!left_reg.is(result_reg)) {
      __ b(&done);
    }

    __ bind(&return_left);
    __ Move(result_reg, left_reg);

    __ bind(&done);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DwVfpRegister left = ToDoubleRegister(instr->left());
  DwVfpRegister right = ToDoubleRegister(instr->right());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ vadd(result, left, right);
      break;
    case Token::SUB:
      __ vsub(result, left, right);
      break;
    case Token::MUL:
      __ vmul(result, left, right);
      break;
    case Token::DIV:
      __ vdiv(result, left, right);
      break;
    case Token::MOD: {
      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(result);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r1));
  ASSERT(ToRegister(instr->right()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


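// Emits a conditional branch to the true/false destinations of |instr|,
// omitting a jump whenever the next emitted block is one of the destinations.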
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
    __ b(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
  int false_block = instr->FalseDestination(chunk_);
  __ b(condition, chunk_->GetAssemblyLabel(false_block));
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}


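// Emits the truthiness test for LBranch. Fast paths exist for values whose
// representation or HType is statically known; generic tagged values are
// tested one expected ToBoolean type at a time, and anything outside the
// recorded set triggers a deoptimization.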
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ cmp(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    DwVfpRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    __ VFPCompareAndSetFlags(reg, 0.0);
    __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
    EmitBranch(instr, ne);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      __ cmp(reg, Operand::Zero());
      EmitBranch(instr, ne);
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, al);
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      DwVfpRegister dbl_scratch = double_scratch0();
      __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
      __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
      EmitBranch(instr, ne);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
      __ cmp(ip, Operand::Zero());
      EmitBranch(instr, ne);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ b(eq, instr->TrueLabel(chunk_));
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ cmp(reg, Operand::Zero());
        __ b(eq, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg);
        DeoptimizeIf(eq, instr->environment());
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
          __ tst(ip, Operand(1 << Map::kIsUndetectable));
          __ b(ne, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
        __ b(ge, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
        __ b(ge, &not_string);
        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
        __ cmp(ip, Operand::Zero());
        __ b(ne, instr->TrueLabel(chunk_));
        __ b(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
        __ b(eq, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DwVfpRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ b(ne, &not_heap_number);
        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
        __ cmp(r0, r0, vs);  // NaN -> false.
        __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false.
        __ b(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr->environment());
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right operands as doubles and load the
      // resulting flags into the normal status register.
      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
      // If a NaN is involved, i.e. the result is unordered (V set),
      // jump to false block label.
      __ b(vs, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
        } else {
          __ cmp(ToRegister(left), Operand(value));
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
        } else {
          __ cmp(ToRegister(right), Operand(value));
        }
        // We commuted the operands, so commute the condition.
        cond = CommuteCondition(cond);
      } else {
        __ cmp(ToRegister(left), ToRegister(right));
      }
    }
    EmitBranch(instr, cond);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  __ cmp(left, Operand(right));
  EmitBranch(instr, eq);
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ mov(ip, Operand(factory()->the_hole_value()));
    __ cmp(input_reg, ip);
    EmitBranch(instr, eq);
    return;
  }

  DwVfpRegister input_reg = ToDoubleRegister(instr->object());
  __ VFPCompareAndSetFlags(input_reg, input_reg);
  EmitFalseBranch(instr, vc);

  Register scratch = scratch0();
  __ VmovHigh(scratch, input_reg);
  __ cmp(scratch, Operand(kHoleNanUpper32));
  EmitBranch(instr, eq);
}


void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    DwVfpRegister value = ToDoubleRegister(instr->value());
    __ VFPCompareAndSetFlags(value, 0.0);
    EmitFalseBranch(instr, ne);
    __ VmovHigh(scratch, value);
    __ cmp(scratch, Operand(0x80000000));
  } else {
    Register value = ToRegister(instr->value());
    __ CheckMap(value,
                scratch,
                Heap::kHeapNumberMapRootIndex,
                instr->FalseLabel(chunk()),
                DO_SMI_CHECK);
    __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
    __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
    __ cmp(scratch, Operand(0x80000000));
    __ cmp(ip, Operand(0x00000000), eq);
  }
  EmitBranch(instr, eq);
}


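// Tests whether |input| is a non-callable JS object. Jumps to |is_not_object|
// for smis and undetectable objects, to |is_object| for null, and otherwise
// falls through with the flags set for a final 'le' instance-type check.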
Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Label* is_not_object,
                                 Label* is_object) {
  Register temp2 = scratch0();
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ cmp(input, temp2);
  __ b(eq, is_object);

  // Load map.
  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
  __ b(ne, is_not_object);

  // Load instance type and check that it is in object type range.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ b(lt, is_not_object);
  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  Condition true_cond =
      EmitIsObject(reg, temp1,
          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  __ SmiTst(input_reg);
  EmitBranch(instr, eq);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ tst(temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals no smi code inlined.
  __ cmp(r0, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq);
}


// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
    __ b(lt, is_false);
    __ b(eq, is_true);
    __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
    __ b(eq, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
    __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ b(gt, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ b(ne, is_true);
  } else {
    __ b(ne, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, Operand(class_name));
  // End with the answer in flags.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
      class_name, input, temp, temp2);

  EmitBranch(instr, eq);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ cmp(temp, Operand(instr->map()));
  EmitBranch(instr, eq);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r0));  // Object is in r0.
  ASSERT(ToRegister(instr->right()).is(r1));  // Function is in r1.

  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  __ cmp(r0, Operand::Zero());
  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}


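// LInstanceOfKnownGlobal uses a patchable inlined cache: the map of the
// object and the result of the last instanceof check are embedded at the
// call site as patched constants, and a deferred stub call handles misses.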
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  {
    // Block constant pool emission to ensure the positions of instructions are
    // as expected by the patcher. See InstanceofStub::Generate().
    Assembler::BlockConstPoolScope block_const_pool(masm());
    __ bind(deferred->map_check());  // Label for calculating code patching.
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch with
    // the cached map.
    PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
    __ cmp(map, Operand(ip));
    __ b(ne, &cache_miss);
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch
    // with true or false.
    __ mov(result, Operand(factory()->the_hole_value()));
  }
  __ b(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(object, Operand(ip));
  __ b(eq, &false_result);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp);
  __ b(is_string, &false_result);

  // Go to the deferred code.
  __ b(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(isolate(), flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());

  __ Move(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 4;
  // Make sure that the code size is predictable, since we use specific
  // constant offsets in the code to find embedded values.
  PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  __ BlockConstPoolFor(kAdditionalDelta);
  // r5 is used to communicate the offset to the location of the map check.
  __ mov(r5, Operand(delta * kPointerSize));
  // The mov above can generate one or two instructions. The delta was computed
  // for two instructions, so we need to pad here in case of one instruction.
  if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
    ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
    __ nop();
  }
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value (r0) into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals no smi code inlined.
  __ cmp(r0, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kTrueValueRootIndex,
              condition);
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kFalseValueRootIndex,
              NegateCondition(condition));
}


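// Emits the return sequence: optional exit tracing, restoring of the caller's
// double registers, frame teardown, and popping of the (possibly dynamic)
// argument count before jumping to the return address.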
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r0. We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(r0);
    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ add(sp, sp, Operand(sp_delta));
    }
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
  }

  __ Jump(lr);

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r2, Operand(instr->name()));
  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ mov(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload (CompareRoot might clobber ip).
    Register payload = ToRegister(instr->temp());
    __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
    __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment());
  }

  // Store the value.
  __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, target);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(scratch, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      __ b(ne, &skip_assignment);
    }
  }

  __ str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetLinkRegisterState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    __ Load(result, operand, access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vldr(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  MemOperand operand = FieldMemOperand(object, offset);
  __ Load(result, operand, access.representation());
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  DeoptimizeIf(ne, instr->environment());

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  __ ldr(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(result, ip);
  DeoptimizeIf(eq, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
  __ b(ne, &done);

  // Get the prototype from the initial map.
  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  __ bind(&non_instance);
  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them; add one more.
  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ ldr(result, MemOperand(arguments, index * kPointerSize));
    } else {
      Register index = ToRegister(instr->index());
      __ rsb(result, index, Operand(const_length + 1));
      __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
    }
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
    if (loc != 0) {
      __ sub(result, length, Operand(loc));
      __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
    } else {
      __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    __ sub(result, length, index);
    __ add(result, result, Operand(1));
    __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
  }
}


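// Deferred allocation of a heap object for a SIMD128 value. Called with a
// runtime function id selecting the concrete allocator; the result is left
// untagged so the caller can fill in the payload before tagging it.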
void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
                                         Runtime::FunctionId id) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ sub(r0, r0, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(r0, reg);
}


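// Loads a SIMD128 element from a typed array by allocating a boxed SIMD128
// object (via the deferred path above) and copying the value's payload word
// by word into the object's backing FixedTypedArray.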
template<class T>
void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
  class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
   public:
    DeferredSIMD128ToTagged(LCodeGen* codegen, LInstruction* instr,
                            Runtime::FunctionId id)
        : LDeferredCode(codegen), instr_(instr), id_(id) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LInstruction* instr_;
    Runtime::FunctionId id_;
  };

  // Allocate a SIMD128 object on the heap.
  Register reg = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());
  Register scratch = scratch0();

  DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
      this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
  __ jmp(deferred->entry());
  __ bind(deferred->exit());

  // Copy the SIMD128 value from the external array to the heap object.
  STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int base_offset = instr->base_offset();
  Operand operand = key_is_constant
      ? Operand(constant_key << element_size_shift)
      : Operand(key, LSL, shift_size);

  __ add(scratch, external_pointer, operand);

  // Load the inner FixedTypedArray.
  __ ldr(temp2, MemOperand(reg, T::kValueOffset));

  for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
    __ ldr(temp, MemOperand(scratch, base_offset + offset));
    __ str(temp,
           MemOperand(temp2,
               FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
  }

  // Now that we have finished with the object's real address, tag it.
  __ add(reg, reg, Operand(kHeapObjectTag));
}


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    DwVfpRegister result = ToDoubleRegister(instr->result());
    Operand operand = key_is_constant
        ? Operand(constant_key << element_size_shift)
        : Operand(key, LSL, shift_size);
    __ add(scratch0(), external_pointer, operand);
    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ vldr(double_scratch0().low(), scratch0(), base_offset);
      __ vcvt_f64_f32(result, double_scratch0().low());
    } else {  // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS.
      __ vldr(result, scratch0(), base_offset);
    }
  } else if (IsFloat32x4ElementsKind(elements_kind)) {
    DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
  } else if (IsFloat64x2ElementsKind(elements_kind)) {
    DoLoadKeyedSIMD128ExternalArray<Float64x2>(instr);
  } else if (IsInt32x4ElementsKind(elements_kind)) {
    DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size, base_offset);
    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ ldrsb(result, mem_operand);
        break;
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ ldrb(result, mem_operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ ldrsh(result, mem_operand);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ ldrh(result, mem_operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ ldr(result, mem_operand);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ ldr(result, mem_operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ cmp(result, Operand(0x80000000));
          DeoptimizeIf(cs, instr->environment());
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32x4_ELEMENTS:
      case FLOAT64x2_ELEMENTS:
      case INT32x4_ELEMENTS:
      case EXTERNAL_FLOAT32x4_ELEMENTS:
      case EXTERNAL_FLOAT64x2_ELEMENTS:
      case EXTERNAL_INT32x4_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DwVfpRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset = instr->base_offset();
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key * kDoubleSize;
  }
  __ add(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, scratch, Operand(key, LSL, shift_size));
  }

  __ vldr(result, scratch, 0);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
    __ cmp(scratch, Operand(kHoleNanUpper32));
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
  }
  __ ldr(result, MemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ SmiTst(result);
      DeoptimizeIf(ne, instr->environment());
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      __ cmp(result, scratch);
      DeoptimizeIf(eq, instr->environment());
    }
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


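// Computes the MemOperand for a keyed access with the given base register,
// element size and base offset, materializing the address in scratch0()
// when the addressing mode cannot encode the offset directly.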
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int base_offset) {
  if (key_is_constant) {
    return MemOperand(base, (constant_key << element_size) + base_offset);
  }

  if (base_offset == 0) {
    if (shift_size >= 0) {
      return MemOperand(base, key, LSL, shift_size);
    } else {
      ASSERT_EQ(-1, shift_size);
      return MemOperand(base, key, LSR, 1);
    }
  }

  if (shift_size >= 0) {
    __ add(scratch0(), base, Operand(key, LSL, shift_size));
    return MemOperand(scratch0(), base_offset);
  } else {
    ASSERT_EQ(-1, shift_size);
    __ add(scratch0(), base, Operand(key, ASR, 1));
    return MemOperand(scratch0(), base_offset);
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->key()).is(r0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ sub(result, sp, Operand(2 * kPointerSize));
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ mov(result, fp, LeaveCC, ne);
    __ mov(result, scratch, LeaveCC, eq);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ cmp(fp, elem);
  __ mov(result, Operand(scope()->num_parameters()));
  __ b(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(result,
         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ ldr(scratch,
           FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ ldr(scratch,
           FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
    int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
    __ tst(scratch, Operand(mask));
    __ b(ne, &result_in_receiver);

    // Do not transform the receiver to object for builtins.
    __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
    __ b(ne, &result_in_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
  DeoptimizeIf(lt, instr->environment());

  __ b(&result_in_receiver);
  __ bind(&global_object);
  __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ ldr(result,
         ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ ldr(result,
         FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));

  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    __ b(&result_ok);
    __ bind(&result_in_receiver);
    __ mov(result, receiver);
    __ bind(&result_ok);
  }
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(r0));  // Used for parameter count.
  ASSERT(function.is(r1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(r0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, Operand(kArgumentsLimit));
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ mov(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ add(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ cmp(length, Operand::Zero());
  __ b(eq, &invoke);
  __ bind(&loop);
  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
  __ push(scratch);
  __ sub(length, length, Operand(1), SetCC);
  __ b(ne, &loop);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is r0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, ip);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    ASSERT(result.is(cp));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ push(cp);  // The context is the first argument.
  __ Move(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ push(scratch0());
  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 R1State r1_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (r1_state == R1_UNINITIALIZED) {
      __ Move(r1, function);
    }

    // Change context.
    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

    // Set r0 to arguments count if adaption is not needed. Assumes that r0
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ mov(r0, Operand(arity));
    }

    // Invoke function.
    __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
    __ Call(ip);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  ASSERT(instr->context() != NULL);
  ASSERT(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  DeoptimizeIf(ne, instr->environment());

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ tst(exponent, Operand(HeapNumber::kSignMask));
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ b(eq, &done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r1) ? r0 : r1;
    Register tmp2 = input.is(r2) ? r0 : r2;
    Register tmp3 = input.is(r3) ? r0 : r3;
    Register tmp4 = input.is(r4) ? r0 : r4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ cmp(input, Operand::Zero());
  __ Move(result, input, pl);
  // We can make rsb conditional because the previous cmp instruction
  // will clear the V (overflow) flag and rsb won't set this flag
  // if input is positive.
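  // (Worked sketch: for input == -5 the mi-conditional rsb runs and yields
  // 0 - (-5) == 5 with V clear; for input == kMinInt the subtraction wraps
  // back to 0x80000000 and sets V, triggering the deopt below, because
  // abs(kMinInt) is not representable in 32 bits.)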
  __ rsb(result, input, Operand::Zero(), SetCC, mi);
  // Deoptimize on overflow.
  DeoptimizeIf(vs, instr->environment());
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DwVfpRegister input = ToDoubleRegister(instr->value());
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vabs(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
  DeoptimizeIf(al, instr->environment());

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmp(result, Operand::Zero());
    __ b(ne, &done);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr->environment());
  }
  __ bind(&done);
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DwVfpRegister input_plus_dot_five = double_scratch1;
  Register input_high = scratch0();
  DwVfpRegister dot_five = double_scratch0();
  Label convert, done;

  __ Vmov(dot_five, 0.5, scratch0());
  __ vabs(double_scratch1, input);
  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
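  // (Sketch of the general case: the convert path below computes
  // floor(input + 0.5), so 2.5 rounds to 3 and -2.5 rounds to -2, matching
  // Math.round's tie-breaking toward +Infinity. The [-0.5, -0] range only
  // deoptimizes when kBailoutOnMinusZero is set, since its true result, -0,
  // is not representable as an integer.)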
  __ b(hi, &convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ VmovHigh(input_high, input);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr->environment());  // [-0.5, -0].
  }
  __ VFPCompareAndSetFlags(input, dot_five);
  __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
  // flag kBailoutOnMinusZero.
  __ mov(result, Operand::Zero(), LeaveCC, ne);
  __ b(&done);

  __ bind(&convert);
  __ vadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
                   &done, &done);
  DeoptimizeIf(al, instr->environment());
  __ bind(&done);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  __ vsqrt(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister temp = double_scratch0();

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
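  // (Sketch: the eq-conditional vneg below turns the matched -Infinity into
  // +Infinity, satisfying the pow() rule where a plain vsqrt would produce
  // NaN. Adding +0 afterwards also maps -0 to +0, so Math.pow(-0, 0.5)
  // returns +0 rather than IEEE sqrt(-0) == -0.)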
  Label done;
  __ vmov(temp, -V8_INFINITY, scratch0());
  __ VFPCompareAndSetFlags(input, temp);
  __ vneg(result, temp, eq);
  __ b(&done, eq);

  // Add +0 to convert -0 to +0.
  __ vadd(result, input, kDoubleRegZero);
  __ vsqrt(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(r2));
  ASSERT(ToDoubleRegister(instr->left()).is(d0));
  ASSERT(ToDoubleRegister(instr->result()).is(d2));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(r2, &no_deopt);
    __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r6, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DwVfpRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
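  // (Sketch: ARM clz counts leading zero bits, e.g. clz(1) == 31 and
  // clz(0) == 32, which matches Math.clz32's required semantics exactly.)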
  __ clz(result, input);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      R1_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    PlatformCallInterfaceDescriptor* call_descriptor =
        instr->descriptor()->platform_specific_descriptor();
    __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
            call_descriptor->storage_mode());
  } else {
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Call(target);
  }
  generator.AfterCall();
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(r0, Operand(instr->arity()));
  }

  // Change context.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Load the code entry address.
  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  __ Call(ip);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  // No cell in r2 for construct type feedback in optimized code.
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need to create a holey array;
      // look at the first argument.
      __ ldr(r5, MemOperand(sp, 0));
      __ cmp(r5, Operand::Zero());
      __ b(eq, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(code_object,
         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ add(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ add(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  ASSERT(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->has_transition());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    DwVfpRegister value = ToDoubleRegister(instr->value());
    __ vstr(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ mov(scratch, Operand(transition));
    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch,
                           temp,
                           GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  } else {
    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand(instr->index());
    Register length = ToRegister(instr->length());
    __ cmp(length, index);
    cc = CommuteCondition(cc);
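    // (Sketch: the cmp operands are swapped in this constant-index case, so
    // the condition commutes with them; e.g. a deopt on "index hs length"
    // becomes a deopt on "length ls index".)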
  } else {
    Register index = ToRegister(instr->index());
    Operand length = ToOperand(instr->length());
    __ cmp(index, length);
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(cc), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr->environment());
  }
}


template<class T>
void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
  ASSERT(instr->value()->IsRegister());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());
  Register input_reg = ToRegister(instr->value());
  __ SmiTst(input_reg);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(input_reg, temp, no_reg, T::kInstanceType);
  DeoptimizeIf(ne, instr->environment());

  STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int base_offset = instr->base_offset();
  Register address = scratch0();
  if (key_is_constant) {
    if (constant_key != 0) {
      __ add(address, external_pointer,
             Operand(constant_key << element_size_shift));
    } else {
      address = external_pointer;
    }
  } else {
    __ add(address, external_pointer, Operand(key, LSL, shift_size));
  }

  // Load the inner FixedTypedArray.
  __ ldr(temp2, MemOperand(input_reg, T::kValueOffset - kHeapObjectTag));

  for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
    __ ldr(temp, MemOperand(temp2,
        FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
    __ str(temp, MemOperand(address, base_offset + offset));
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    DwVfpRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ add(address, external_pointer,
               Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ add(address, external_pointer, Operand(key, LSL, shift_size));
    }
    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ vcvt_f32_f64(double_scratch0().low(), value);
      __ vstr(double_scratch0().low(), address, base_offset);
    } else {  // Storing doubles, not floats.
      __ vstr(value, address, base_offset);
    }
  } else if (IsFloat32x4ElementsKind(elements_kind)) {
    DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
  } else if (IsFloat64x2ElementsKind(elements_kind)) {
    DoStoreKeyedSIMD128ExternalArray<Float64x2>(instr);
  } else if (IsInt32x4ElementsKind(elements_kind)) {
    DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        base_offset);
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ strb(value, mem_operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ strh(value, mem_operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ str(value, mem_operand);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32x4_ELEMENTS:
      case FLOAT64x2_ELEMENTS:
      case INT32x4_ELEMENTS:
      case EXTERNAL_FLOAT32x4_ELEMENTS:
      case EXTERNAL_FLOAT64x2_ELEMENTS:
      case EXTERNAL_INT32x4_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DwVfpRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DwVfpRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int base_offset = instr->base_offset();

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ add(scratch, elements,
           Operand((constant_key << element_size_shift) + base_offset));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, elements, Operand(base_offset));
    __ add(scratch, scratch,
           Operand(ToRegister(instr->key()), LSL, shift_size));
  }

  if (instr->NeedsCanonicalization()) {
    // Force a canonical NaN.
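    // (Sketch of why: the incoming double may carry an arbitrary NaN
    // payload, and FixedDoubleArray encodes the hole as one particular NaN
    // bit pattern; canonicalizing below guarantees a stored NaN can never
    // alias the hole marker.)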
    if (masm()->emit_debug_code()) {
      __ vmrs(ip);
      __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
      __ Assert(ne, kDefaultNaNModeNotSet);
    }
    __ VFPCanonicalizeNaN(double_scratch, value);
    __ vstr(double_scratch, scratch, 0);
  } else {
    __ vstr(value, scratch, 0);
  }
}


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
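    // (Sketch: with 31-bit smis a smi key already equals index << 1, so
    // PointerOffsetFromSmiKey below scales it by just one more bit to reach
    // the kPointerSize stride, versus the LSL #2 used for untagged keys.)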
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
  }
  __ str(value, MemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ add(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetLinkRegisterState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external, fast double, then fast (tagged) elements.
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r2));
  ASSERT(ToRegister(instr->key()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  Handle<Code> ic = instr->strict_mode() == STRICT
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(from_map));
  __ b(ne, &not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(cp));
    ASSERT(object_reg.is(r0));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ Move(r1, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(eq, instr->environment());
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r1));
  ASSERT(ToRegister(instr->right()).is(r0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ mov(scratch, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
                          instr->context());
  __ AssertSmi(r0);
  __ SmiUntag(r0);
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
  __ b(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ b(eq, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  SwVfpRegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ldr(scratch, ToMemOperand(input));
    __ vmov(single_scratch, scratch);
  } else {
    __ vmov(single_scratch, ToRegister(input));
  }
  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  SwVfpRegister flt_scratch = double_scratch0().low();
  __ vmov(flt_scratch, ToRegister(input));
  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
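  // (Tagging sketch: on ARM, SmiTag is "add dst, src, src", i.e. dst ==
  // src << 1 with V set on signed overflow, so any value outside
  // [-2^30, 2^30 - 1] takes the vs branch below and is boxed as a heap
  // number instead.)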
  __ SmiTag(dst, src, SetCC);
  __ b(vs, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmp(input, Operand(Smi::kMaxValue));
  __ b(hi, deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  LowDwVfpRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ eor(src, src, Operand(0x80000000));
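      // (Recovery sketch: for an original value of 2^30, the tagging add
      // wrapped to 0x80000000; SmiUntag (asr #1) gives 0xC0000000, and the
      // eor above flips bit 31 to restore the original 0x40000000.)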
    }
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
  } else {
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, Operand::Zero());

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kHiddenAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ sub(r0, r0, Operand(kHeapObjectTag));
    __ StoreToSafepointRegisterSlot(r0, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
  __ add(dst, dst, Operand(kHeapObjectTag));
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
  // Now that we have finished with the object's real address, tag it.
  __ add(reg, reg, Operand(kHeapObjectTag));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kHiddenAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ sub(r0, r0, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(r0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ tst(input, Operand(0xc0000000));
    DeoptimizeIf(ne, instr->environment());
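    // (Range sketch: a smi stores a 31-bit signed value, so an unsigned
    // input fits only below 2^30; the tst deoptimizes when bit 30 or 31 is
    // set, e.g. for 0x40000000 == 2^30.)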
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTag(output, input, SetCC);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ SmiTag(output, input);
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, SmiUntag will set the carry flag.
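    // (Sketch: SmiUntag is an arithmetic shift right by one with SetCC, so
    // the shifted-out low bit lands in the carry flag: 0 for smis, 1 for
    // heap object pointers, hence the cs deopt below.)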
    __ SmiUntag(result, input, SetCC);
    DeoptimizeIf(cs, instr->environment());
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DwVfpRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Register scratch = scratch0();
  SwVfpRegister flt_scratch = double_scratch0().low();
  ASSERT(!result_reg.is(double_scratch0()));
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(scratch, Operand(ip));
    if (can_convert_undefined_to_nan) {
      __ b(ne, &convert);
    } else {
      DeoptimizeIf(ne, env);
    }
    // Load heap number.
    __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
    if (deoptimize_on_minus_zero) {
      __ VmovLow(scratch, result_reg);
      __ cmp(scratch, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch, result_reg);
      __ cmp(scratch, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(eq, env);
    }
    __ jmp(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, Operand(ip));
      DeoptimizeIf(ne, env);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
      __ jmp(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ vmov(flt_scratch, scratch);
  __ vcvt_f64_s32(result_reg, flt_scratch);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  LowDwVfpRegister double_scratch = double_scratch0();
  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input was optimistically untagged; revert it.
  // The carry flag is set when we reach this deferred code as we just executed
  // SmiUntag(heap_object, SetCC).
  STATIC_ASSERT(kHeapObjectTag == 1);
  __ adc(scratch2, input_reg, Operand(input_reg));
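  // (Reconstruction sketch: input_reg now holds ptr >> 1 and the shifted-out
  // tag bit sits in the carry flag, so adc computes input + input + 1, which
  // re-creates the original tagged pointer in scratch2.)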

  // Heap number map check.
  __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, Operand(ip));

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ b(ne, &no_heap_number);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_bools);
    __ mov(input_reg, Operand::Zero());
    __ b(&done);

    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_false);
    __ mov(input_reg, Operand(1));
    __ b(&done);

    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ mov(input_reg, Operand::Zero());
    __ b(&done);
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment());

    __ sub(ip, scratch2, Operand(kHeapObjectTag));
    __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
    DeoptimizeIf(ne, instr->environment());

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(input_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_scratch2);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(input_reg, SetCC);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ b(cs, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DwVfpRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, SetCC);
  DeoptimizeIf(vs, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input));
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input));
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment());
    } else {
      DeoptimizeIf(lo, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmp(scratch, Operand(last));
        DeoptimizeIf(hi, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
    } else {
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr->environment());
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ push(object);
    __ mov(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r0, scratch0());
  }
  __ tst(scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment());
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ b(eq, &success);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ b(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr->environment());
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number.
  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(factory()->heap_number_map()));
  __ b(eq, &heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr->environment());
  __ mov(result_reg, Operand::Zero());
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ jmp(&done);

  // smi
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ VmovHigh(result_reg, value_reg);
  } else {
    __ VmovLow(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
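  // (Bit-layout sketch: hi/lo form an IEEE-754 double, e.g. hi == 0x3FF00000
  // with lo == 0 reconstructs 1.0, and hi == 0xC0000000 with lo == 0
  // reconstructs -2.0.)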
  __ VmovHigh(result_reg, hi_reg);
  __ VmovLow(result_reg, lo_reg);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    // Overwrite the new object with one-pointer fillers so the heap stays
    // iterable while the object is still uninitialized.
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(scratch, Operand(size));
    } else {
      scratch = ToRegister(instr->size());
    }
    __ sub(scratch, scratch, Operand(kPointerSize));
    __ sub(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ str(scratch2, MemOperand(result, scratch));
    __ sub(scratch, scratch, Operand(kPointerSize));
    __ cmp(scratch, Operand(0));
    __ b(ge, &loop);
    __ add(result, result, Operand(kHeapObjectTag));
  }
}
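

// Slow path for DoAllocate: call into the runtime to allocate in the
// requested space. The size (as a smi) and the encoded allocation flags are
// passed as arguments; the runtime result replaces the placeholder stored
// in the safepoint register slot.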
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ Push(Smi::FromInt(size));
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}
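

// Materialize a regexp literal. The boilerplate JSRegExp is cached in the
// function's literals array; it is created through the runtime on first use
// and shallow-copied into a fresh object on every evaluation.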
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r6 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2-5 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r6, instr->hydrogen()->literals());
  __ ldr(r1, FieldMemOperand(r6, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create regexp literal using runtime function.
  // Result will be in r0.
  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r4, Operand(instr->hydrogen()->pattern()));
  __ mov(r3, Operand(instr->hydrogen()->flags()));
  __ Push(r6, r5, r4, r3);
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(),
                            instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}
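

// Emit the type checks for a comparison like typeof x == "number" without
// materializing the result string. Each literal gets a dedicated check;
// undetectable objects (e.g. document.all) report "undefined". Returns
// kNoCondition when the literal can never match, in which case an
// unconditional jump to the false label has already been emitted.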
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof &&
             String::Equals(type_name, factory->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ b(eq, true_label);
    __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ b(eq, true_label);
    }
    __ CheckObjectTypeRange(input,
                            map,
                            FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            false_label);
    // Check for undetectable objects => false.
    __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}
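

// Branch on whether the current frame was entered via a construct call.
// The check inspects the caller frame's marker, skipping an arguments
// adaptor frame if one is present.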
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
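

// Lazy deoptimization patches a call over the code emitted after the last
// lazy-bailout point. Pad with nops (with the constant pool blocked) so
// that the patch site cannot overlap the code that follows it.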
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for duration of padding.
      Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
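

// Stack checks come in two flavors: at function entry the StackCheck
// builtin is called directly when sp is below the stack limit, while at
// backwards branches the check jumps to deferred code that calls the
// runtime stack guard.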
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
    PredictableCodeSizeScope predictable(masm(),
        CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}
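

// Prepare the receiver in r0 for a fast for-in loop: deoptimize for
// undefined, null, smis and proxies, then try the map's enum cache and
// fall back to the runtime to collect the property names.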
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr->environment());

  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr->environment());

  __ SmiTst(r0);
  DeoptimizeIf(eq, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime must return a map (holding the enum cache); anything else
  // means slow-mode properties, so deoptimize.
  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr->environment());

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ Push(object);
  __ Push(index);
  __ mov(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r0, result);
}
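

// Load a field given its encoded index (a smi): bit 0 set means the field
// holds a mutable heap number and is loaded through the deferred runtime
// path; after shifting that bit out, a non-negative index selects an
// in-object field and a negative index a slot in the properties array.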
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ tst(index, Operand(Smi::FromInt(1)));
  __ b(ne, deferred->entry());
  __ mov(index, Operand(index, ASR, 1));

  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}

} }  // namespace v8::internal