1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/arm/lithium-codegen-arm.h"
8 #include "src/arm/lithium-gap-resolver-arm.h"
9 #include "src/base/bits.h"
10 #include "src/code-factory.h"
11 #include "src/code-stubs.h"
12 #include "src/hydrogen-osr.h"
13 #include "src/ic/ic.h"
14 #include "src/ic/stub-cache.h"
20 class SafepointGenerator FINAL : public CallWrapper {
22 SafepointGenerator(LCodeGen* codegen,
23 LPointerMap* pointers,
24 Safepoint::DeoptMode mode)
28 virtual ~SafepointGenerator() {}
30 virtual void BeforeCall(int call_size) const OVERRIDE {}
32 virtual void AfterCall() const OVERRIDE {
33 codegen_->RecordSafepoint(pointers_, deopt_mode_);
38 LPointerMap* pointers_;
39 Safepoint::DeoptMode deopt_mode_;
45 bool LCodeGen::GenerateCode() {
46 LPhase phase("Z_Code generation", chunk());
50 // Open a frame scope to indicate that there is a frame on the stack. The
51 // NONE indicates that the scope shouldn't actually generate code to set up
52 // the frame (that is done in GeneratePrologue).
53 FrameScope frame_scope(masm_, StackFrame::NONE);
55 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
56 GenerateJumpTable() && GenerateSafepointTable();
60 void LCodeGen::FinishCode(Handle<Code> code) {
62 code->set_stack_slots(GetStackSlotCount());
63 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
64 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
65 PopulateDeoptimizationData(code);
69 void LCodeGen::SaveCallerDoubles() {
70 DCHECK(info()->saves_caller_doubles());
71 DCHECK(NeedsEagerFrame());
72 Comment(";;; Save clobbered callee double registers");
74 BitVector* doubles = chunk()->allocated_double_registers();
75 BitVector::Iterator save_iterator(doubles);
76 while (!save_iterator.Done()) {
77 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
78 MemOperand(sp, count * kDoubleSize));
79 save_iterator.Advance();
85 void LCodeGen::RestoreCallerDoubles() {
86 DCHECK(info()->saves_caller_doubles());
87 DCHECK(NeedsEagerFrame());
88 Comment(";;; Restore clobbered callee double registers");
89 BitVector* doubles = chunk()->allocated_double_registers();
90 BitVector::Iterator save_iterator(doubles);
92 while (!save_iterator.Done()) {
93 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
94 MemOperand(sp, count * kDoubleSize));
95 save_iterator.Advance();
101 bool LCodeGen::GeneratePrologue() {
102 DCHECK(is_generating());
104 if (info()->IsOptimizing()) {
105 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
108 if (strlen(FLAG_stop_at) > 0 &&
109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
114 // r1: Callee's JS function.
115 // cp: Callee's context.
116 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
117 // fp: Caller's frame pointer.
120 // Sloppy mode functions and builtins need to replace the receiver with the
121 // global proxy when called as functions (without an explicit receiver
123 if (info_->this_has_uses() &&
124 info_->strict_mode() == SLOPPY &&
125 !info_->is_native()) {
127 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
128 __ ldr(r2, MemOperand(sp, receiver_offset));
129 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
132 __ ldr(r2, GlobalObjectOperand());
133 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
135 __ str(r2, MemOperand(sp, receiver_offset));
141 info()->set_prologue_offset(masm_->pc_offset());
142 if (NeedsEagerFrame()) {
143 if (info()->IsStub()) {
146 __ Prologue(info()->IsCodePreAgingActive());
148 frame_is_built_ = true;
149 info_->AddNoFrameRange(0, masm_->pc_offset());
152 // Reserve space for the stack slots needed by the code.
153 int slots = GetStackSlotCount();
155 if (FLAG_debug_code) {
156 __ sub(sp, sp, Operand(slots * kPointerSize));
159 __ add(r0, sp, Operand(slots * kPointerSize));
160 __ mov(r1, Operand(kSlotsZapValue));
163 __ sub(r0, r0, Operand(kPointerSize));
164 __ str(r1, MemOperand(r0, 2 * kPointerSize));
170 __ sub(sp, sp, Operand(slots * kPointerSize));
174 if (info()->saves_caller_doubles()) {
178 // Possibly allocate a local context.
179 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
180 if (heap_slots > 0) {
181 Comment(";;; Allocate local context");
182 bool need_write_barrier = true;
183 // Argument to NewContext is the function, which is in r1.
184 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
185 FastNewContextStub stub(isolate(), heap_slots);
187 // Result of FastNewContextStub is always in new space.
188 need_write_barrier = false;
191 __ CallRuntime(Runtime::kNewFunctionContext, 1);
193 RecordSafepoint(Safepoint::kNoLazyDeopt);
194 // Context is returned in both r0 and cp. It replaces the context
195 // passed to us. It's saved in the stack and kept live in cp.
197 __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
198 // Copy any necessary parameters into the context.
199 int num_parameters = scope()->num_parameters();
200 for (int i = 0; i < num_parameters; i++) {
201 Variable* var = scope()->parameter(i);
202 if (var->IsContextSlot()) {
203 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
204 (num_parameters - 1 - i) * kPointerSize;
205 // Load parameter from stack.
206 __ ldr(r0, MemOperand(fp, parameter_offset));
207 // Store it in the context.
208 MemOperand target = ContextOperand(cp, var->index());
210 // Update the write barrier. This clobbers r3 and r0.
211 if (need_write_barrier) {
212 __ RecordWriteContextSlot(
217 GetLinkRegisterState(),
219 } else if (FLAG_debug_code) {
221 __ JumpIfInNewSpace(cp, r0, &done);
222 __ Abort(kExpectedNewSpaceObject);
227 Comment(";;; End allocate local context");
231 if (FLAG_trace && info()->IsOptimizing()) {
232 // We have not executed any compiled code yet, so cp still holds the
234 __ CallRuntime(Runtime::kTraceEnter, 0);
236 return !is_aborted();
240 void LCodeGen::GenerateOsrPrologue() {
241 // Generate the OSR entry prologue at the first unknown OSR value, or if there
242 // are none, at the OSR entrypoint instruction.
243 if (osr_pc_offset_ >= 0) return;
245 osr_pc_offset_ = masm()->pc_offset();
247 // Adjust the frame size, subsuming the unoptimized frame into the
249 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
251 __ sub(sp, sp, Operand(slots * kPointerSize));
255 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
256 if (instr->IsCall()) {
257 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
259 if (!instr->IsLazyBailout() && !instr->IsGap()) {
260 safepoints_.BumpLastLazySafepointIndex();
265 bool LCodeGen::GenerateDeferredCode() {
266 DCHECK(is_generating());
267 if (deferred_.length() > 0) {
268 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
269 LDeferredCode* code = deferred_[i];
272 instructions_->at(code->instruction_index())->hydrogen_value();
273 RecordAndWritePosition(
274 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
276 Comment(";;; <@%d,#%d> "
277 "-------------------- Deferred %s --------------------",
278 code->instruction_index(),
279 code->instr()->hydrogen_value()->id(),
280 code->instr()->Mnemonic());
281 __ bind(code->entry());
282 if (NeedsDeferredFrame()) {
283 Comment(";;; Build frame");
284 DCHECK(!frame_is_built_);
285 DCHECK(info()->IsStub());
286 frame_is_built_ = true;
288 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
290 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
291 Comment(";;; Deferred code");
294 if (NeedsDeferredFrame()) {
295 Comment(";;; Destroy frame");
296 DCHECK(frame_is_built_);
299 frame_is_built_ = false;
301 __ jmp(code->exit());
305 // Force constant pool emission at the end of the deferred code to make
306 // sure that no constant pools are emitted after.
307 masm()->CheckConstPool(true, false);
309 return !is_aborted();
313 bool LCodeGen::GenerateJumpTable() {
314 // Check that the jump table is accessible from everywhere in the function
315 // code, i.e. that offsets to the table can be encoded in the 24bit signed
316 // immediate of a branch instruction.
317 // To simplify we consider the code size from the first instruction to the
318 // end of the jump table. We also don't consider the pc load delta.
319 // Each entry in the jump table generates one instruction and inlines one
320 // 32bit data after it.
321 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
322 jump_table_.length() * 7)) {
323 Abort(kGeneratedCodeIsTooLarge);
326 if (jump_table_.length() > 0) {
327 Label needs_frame, call_deopt_entry;
329 Comment(";;; -------------------- Jump table --------------------");
330 Address base = jump_table_[0].address;
332 Register entry_offset = scratch0();
334 int length = jump_table_.length();
335 for (int i = 0; i < length; i++) {
336 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
337 __ bind(&table_entry->label);
339 DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
340 Address entry = table_entry->address;
341 DeoptComment(table_entry->reason);
343 // Second-level deopt table entries are contiguous and small, so instead
344 // of loading the full, absolute address of each one, load an immediate
345 // offset which will be added to the base address later.
346 __ mov(entry_offset, Operand(entry - base));
348 if (table_entry->needs_frame) {
349 DCHECK(!info()->saves_caller_doubles());
350 if (needs_frame.is_bound()) {
353 __ bind(&needs_frame);
354 Comment(";;; call deopt with frame");
356 // This variant of deopt can only be used with stubs. Since we don't
357 // have a function pointer to install in the stack frame that we're
358 // building, install a special marker there instead.
359 DCHECK(info()->IsStub());
360 __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
363 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
364 __ bind(&call_deopt_entry);
365 // Add the base address to the offset previously loaded in
367 __ add(entry_offset, entry_offset,
368 Operand(ExternalReference::ForDeoptEntry(base)));
369 __ blx(entry_offset);
372 masm()->CheckConstPool(false, false);
374 // The last entry can fall through into `call_deopt_entry`, avoiding a
376 bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
378 if (need_branch) __ b(&call_deopt_entry);
380 masm()->CheckConstPool(false, !need_branch);
384 if (!call_deopt_entry.is_bound()) {
385 Comment(";;; call deopt");
386 __ bind(&call_deopt_entry);
388 if (info()->saves_caller_doubles()) {
389 DCHECK(info()->IsStub());
390 RestoreCallerDoubles();
393 // Add the base address to the offset previously loaded in entry_offset.
394 __ add(entry_offset, entry_offset,
395 Operand(ExternalReference::ForDeoptEntry(base)));
396 __ blx(entry_offset);
400 // Force constant pool emission at the end of the deopt jump table to make
401 // sure that no constant pools are emitted after.
402 masm()->CheckConstPool(true, false);
404 // The deoptimization jump table is the last part of the instruction
405 // sequence. Mark the generated code as done unless we bailed out.
406 if (!is_aborted()) status_ = DONE;
407 return !is_aborted();
411 bool LCodeGen::GenerateSafepointTable() {
413 safepoints_.Emit(masm(), GetStackSlotCount());
414 return !is_aborted();
418 Register LCodeGen::ToRegister(int index) const {
419 return Register::FromAllocationIndex(index);
423 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
424 return DwVfpRegister::FromAllocationIndex(index);
428 Register LCodeGen::ToRegister(LOperand* op) const {
429 DCHECK(op->IsRegister());
430 return ToRegister(op->index());
434 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
435 if (op->IsRegister()) {
436 return ToRegister(op->index());
437 } else if (op->IsConstantOperand()) {
438 LConstantOperand* const_op = LConstantOperand::cast(op);
439 HConstant* constant = chunk_->LookupConstant(const_op);
440 Handle<Object> literal = constant->handle(isolate());
441 Representation r = chunk_->LookupLiteralRepresentation(const_op);
442 if (r.IsInteger32()) {
443 DCHECK(literal->IsNumber());
444 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
445 } else if (r.IsDouble()) {
446 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
448 DCHECK(r.IsSmiOrTagged());
449 __ Move(scratch, literal);
452 } else if (op->IsStackSlot()) {
453 __ ldr(scratch, ToMemOperand(op));
461 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
462 DCHECK(op->IsDoubleRegister());
463 return ToDoubleRegister(op->index());
467 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
468 SwVfpRegister flt_scratch,
469 DwVfpRegister dbl_scratch) {
470 if (op->IsDoubleRegister()) {
471 return ToDoubleRegister(op->index());
472 } else if (op->IsConstantOperand()) {
473 LConstantOperand* const_op = LConstantOperand::cast(op);
474 HConstant* constant = chunk_->LookupConstant(const_op);
475 Handle<Object> literal = constant->handle(isolate());
476 Representation r = chunk_->LookupLiteralRepresentation(const_op);
477 if (r.IsInteger32()) {
478 DCHECK(literal->IsNumber());
479 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
480 __ vmov(flt_scratch, ip);
481 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
483 } else if (r.IsDouble()) {
484 Abort(kUnsupportedDoubleImmediate);
485 } else if (r.IsTagged()) {
486 Abort(kUnsupportedTaggedImmediate);
488 } else if (op->IsStackSlot()) {
489 // TODO(regis): Why is vldr not taking a MemOperand?
490 // __ vldr(dbl_scratch, ToMemOperand(op));
491 MemOperand mem_op = ToMemOperand(op);
492 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
500 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
501 HConstant* constant = chunk_->LookupConstant(op);
502 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
503 return constant->handle(isolate());
507 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
508 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
512 bool LCodeGen::IsSmi(LConstantOperand* op) const {
513 return chunk_->LookupLiteralRepresentation(op).IsSmi();
517 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
518 return ToRepresentation(op, Representation::Integer32());
522 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
523 const Representation& r) const {
524 HConstant* constant = chunk_->LookupConstant(op);
525 int32_t value = constant->Integer32Value();
526 if (r.IsInteger32()) return value;
527 DCHECK(r.IsSmiOrTagged());
528 return reinterpret_cast<int32_t>(Smi::FromInt(value));
532 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
533 HConstant* constant = chunk_->LookupConstant(op);
534 return Smi::FromInt(constant->Integer32Value());
538 double LCodeGen::ToDouble(LConstantOperand* op) const {
539 HConstant* constant = chunk_->LookupConstant(op);
540 DCHECK(constant->HasDoubleValue());
541 return constant->DoubleValue();
545 Operand LCodeGen::ToOperand(LOperand* op) {
546 if (op->IsConstantOperand()) {
547 LConstantOperand* const_op = LConstantOperand::cast(op);
548 HConstant* constant = chunk()->LookupConstant(const_op);
549 Representation r = chunk_->LookupLiteralRepresentation(const_op);
551 DCHECK(constant->HasSmiValue());
552 return Operand(Smi::FromInt(constant->Integer32Value()));
553 } else if (r.IsInteger32()) {
554 DCHECK(constant->HasInteger32Value());
555 return Operand(constant->Integer32Value());
556 } else if (r.IsDouble()) {
557 Abort(kToOperandUnsupportedDoubleImmediate);
559 DCHECK(r.IsTagged());
560 return Operand(constant->handle(isolate()));
561 } else if (op->IsRegister()) {
562 return Operand(ToRegister(op));
563 } else if (op->IsDoubleRegister()) {
564 Abort(kToOperandIsDoubleRegisterUnimplemented);
565 return Operand::Zero();
567 // Stack slots not implemented, use ToMemOperand instead.
569 return Operand::Zero();
573 static int ArgumentsOffsetWithoutFrame(int index) {
575 return -(index + 1) * kPointerSize;
579 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
580 DCHECK(!op->IsRegister());
581 DCHECK(!op->IsDoubleRegister());
582 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
583 if (NeedsEagerFrame()) {
584 return MemOperand(fp, StackSlotOffset(op->index()));
586 // Retrieve parameter without eager stack-frame relative to the
588 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
593 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
594 DCHECK(op->IsDoubleStackSlot());
595 if (NeedsEagerFrame()) {
596 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
598 // Retrieve parameter without eager stack-frame relative to the
601 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
606 void LCodeGen::WriteTranslation(LEnvironment* environment,
607 Translation* translation) {
608 if (environment == NULL) return;
610 // The translation includes one command per value in the environment.
611 int translation_size = environment->translation_size();
612 // The output frame height does not include the parameters.
613 int height = translation_size - environment->parameter_count();
615 WriteTranslation(environment->outer(), translation);
616 bool has_closure_id = !info()->closure().is_null() &&
617 !info()->closure().is_identical_to(environment->closure());
618 int closure_id = has_closure_id
619 ? DefineDeoptimizationLiteral(environment->closure())
620 : Translation::kSelfLiteralId;
622 switch (environment->frame_type()) {
624 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
627 translation->BeginConstructStubFrame(closure_id, translation_size);
630 DCHECK(translation_size == 1);
632 translation->BeginGetterStubFrame(closure_id);
635 DCHECK(translation_size == 2);
637 translation->BeginSetterStubFrame(closure_id);
640 translation->BeginCompiledStubFrame();
642 case ARGUMENTS_ADAPTOR:
643 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
647 int object_index = 0;
648 int dematerialized_index = 0;
649 for (int i = 0; i < translation_size; ++i) {
650 LOperand* value = environment->values()->at(i);
651 AddToTranslation(environment,
654 environment->HasTaggedValueAt(i),
655 environment->HasUint32ValueAt(i),
657 &dematerialized_index);
662 void LCodeGen::AddToTranslation(LEnvironment* environment,
663 Translation* translation,
667 int* object_index_pointer,
668 int* dematerialized_index_pointer) {
669 if (op == LEnvironment::materialization_marker()) {
670 int object_index = (*object_index_pointer)++;
671 if (environment->ObjectIsDuplicateAt(object_index)) {
672 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
673 translation->DuplicateObject(dupe_of);
676 int object_length = environment->ObjectLengthAt(object_index);
677 if (environment->ObjectIsArgumentsAt(object_index)) {
678 translation->BeginArgumentsObject(object_length);
680 translation->BeginCapturedObject(object_length);
682 int dematerialized_index = *dematerialized_index_pointer;
683 int env_offset = environment->translation_size() + dematerialized_index;
684 *dematerialized_index_pointer += object_length;
685 for (int i = 0; i < object_length; ++i) {
686 LOperand* value = environment->values()->at(env_offset + i);
687 AddToTranslation(environment,
690 environment->HasTaggedValueAt(env_offset + i),
691 environment->HasUint32ValueAt(env_offset + i),
692 object_index_pointer,
693 dematerialized_index_pointer);
698 if (op->IsStackSlot()) {
700 translation->StoreStackSlot(op->index());
701 } else if (is_uint32) {
702 translation->StoreUint32StackSlot(op->index());
704 translation->StoreInt32StackSlot(op->index());
706 } else if (op->IsDoubleStackSlot()) {
707 translation->StoreDoubleStackSlot(op->index());
708 } else if (op->IsRegister()) {
709 Register reg = ToRegister(op);
711 translation->StoreRegister(reg);
712 } else if (is_uint32) {
713 translation->StoreUint32Register(reg);
715 translation->StoreInt32Register(reg);
717 } else if (op->IsDoubleRegister()) {
718 DoubleRegister reg = ToDoubleRegister(op);
719 translation->StoreDoubleRegister(reg);
720 } else if (op->IsConstantOperand()) {
721 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
722 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
723 translation->StoreLiteral(src_index);
730 int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
731 int size = masm()->CallSize(code, mode);
732 if (code->kind() == Code::BINARY_OP_IC ||
733 code->kind() == Code::COMPARE_IC) {
734 size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
740 void LCodeGen::CallCode(Handle<Code> code,
741 RelocInfo::Mode mode,
743 TargetAddressStorageMode storage_mode) {
744 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
748 void LCodeGen::CallCodeGeneric(Handle<Code> code,
749 RelocInfo::Mode mode,
751 SafepointMode safepoint_mode,
752 TargetAddressStorageMode storage_mode) {
753 DCHECK(instr != NULL);
754 // Block literal pool emission to ensure nop indicating no inlined smi code
755 // is in the correct position.
756 Assembler::BlockConstPoolScope block_const_pool(masm());
757 __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
758 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
760 // Signal that we don't inline smi code before these stubs in the
761 // optimizing code generator.
762 if (code->kind() == Code::BINARY_OP_IC ||
763 code->kind() == Code::COMPARE_IC) {
769 void LCodeGen::CallRuntime(const Runtime::Function* function,
772 SaveFPRegsMode save_doubles) {
773 DCHECK(instr != NULL);
775 __ CallRuntime(function, num_arguments, save_doubles);
777 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
781 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
782 if (context->IsRegister()) {
783 __ Move(cp, ToRegister(context));
784 } else if (context->IsStackSlot()) {
785 __ ldr(cp, ToMemOperand(context));
786 } else if (context->IsConstantOperand()) {
787 HConstant* constant =
788 chunk_->LookupConstant(LConstantOperand::cast(context));
789 __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
796 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
800 LoadContextFromDeferred(context);
801 __ CallRuntimeSaveDoubles(id);
802 RecordSafepointWithRegisters(
803 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
807 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
808 Safepoint::DeoptMode mode) {
809 environment->set_has_been_used();
810 if (!environment->HasBeenRegistered()) {
811 // Physical stack frame layout:
812 // -x ............. -4 0 ..................................... y
813 // [incoming arguments] [spill slots] [pushed outgoing arguments]
815 // Layout of the environment:
816 // 0 ..................................................... size-1
817 // [parameters] [locals] [expression stack including arguments]
819 // Layout of the translation:
820 // 0 ........................................................ size - 1 + 4
821 // [expression stack including arguments] [locals] [4 words] [parameters]
822 // |>------------ translation_size ------------<|
825 int jsframe_count = 0;
826 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
828 if (e->frame_type() == JS_FUNCTION) {
832 Translation translation(&translations_, frame_count, jsframe_count, zone());
833 WriteTranslation(environment, &translation);
834 int deoptimization_index = deoptimizations_.length();
835 int pc_offset = masm()->pc_offset();
836 environment->Register(deoptimization_index,
838 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
839 deoptimizations_.Add(environment, zone());
844 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
846 Deoptimizer::BailoutType bailout_type) {
847 LEnvironment* environment = instr->environment();
848 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
849 DCHECK(environment->HasBeenRegistered());
850 int id = environment->deoptimization_index();
851 DCHECK(info()->IsOptimizing() || info()->IsStub());
853 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
855 Abort(kBailoutWasNotPrepared);
859 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
860 Register scratch = scratch0();
861 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
863 // Store the condition on the stack if necessary
864 if (condition != al) {
865 __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
866 __ mov(scratch, Operand(1), LeaveCC, condition);
871 __ mov(scratch, Operand(count));
872 __ ldr(r1, MemOperand(scratch));
873 __ sub(r1, r1, Operand(1), SetCC);
874 __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
875 __ str(r1, MemOperand(scratch));
878 if (condition != al) {
879 // Clean up the stack before the deoptimizer call
883 __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
885 // 'Restore' the condition in a slightly hacky way. (It would be better
886 // to use 'msr' and 'mrs' instructions here, but they are not supported by
887 // our ARM simulator).
888 if (condition != al) {
890 __ cmp(scratch, Operand::Zero());
894 if (info()->ShouldTrapOnDeopt()) {
895 __ stop("trap_on_deopt", condition);
898 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
899 instr->Mnemonic(), detail);
900 DCHECK(info()->IsStub() || frame_is_built_);
901 // Go through jump table if we need to handle condition, build frame, or
902 // restore caller doubles.
903 if (condition == al && frame_is_built_ &&
904 !info()->saves_caller_doubles()) {
905 DeoptComment(reason);
906 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
908 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
910 // We often have several deopts to the same entry, reuse the last
911 // jump entry if this is the case.
912 if (jump_table_.is_empty() ||
913 !table_entry.IsEquivalentTo(jump_table_.last())) {
914 jump_table_.Add(table_entry, zone());
916 __ b(condition, &jump_table_.last().label);
921 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
922 const char* detail) {
923 Deoptimizer::BailoutType bailout_type = info()->IsStub()
925 : Deoptimizer::EAGER;
926 DeoptimizeIf(condition, instr, detail, bailout_type);
930 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
931 int length = deoptimizations_.length();
932 if (length == 0) return;
933 Handle<DeoptimizationInputData> data =
934 DeoptimizationInputData::New(isolate(), length, TENURED);
936 Handle<ByteArray> translations =
937 translations_.CreateByteArray(isolate()->factory());
938 data->SetTranslationByteArray(*translations);
939 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
940 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
941 if (info_->IsOptimizing()) {
942 // Reference to shared function info does not change between phases.
943 AllowDeferredHandleDereference allow_handle_dereference;
944 data->SetSharedFunctionInfo(*info_->shared_info());
946 data->SetSharedFunctionInfo(Smi::FromInt(0));
949 Handle<FixedArray> literals =
950 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
951 { AllowDeferredHandleDereference copy_handles;
952 for (int i = 0; i < deoptimization_literals_.length(); i++) {
953 literals->set(i, *deoptimization_literals_[i]);
955 data->SetLiteralArray(*literals);
958 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
959 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
961 // Populate the deoptimization entries.
962 for (int i = 0; i < length; i++) {
963 LEnvironment* env = deoptimizations_[i];
964 data->SetAstId(i, env->ast_id());
965 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
966 data->SetArgumentsStackHeight(i,
967 Smi::FromInt(env->arguments_stack_height()));
968 data->SetPc(i, Smi::FromInt(env->pc_offset()));
970 code->set_deoptimization_data(*data);
974 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
975 int result = deoptimization_literals_.length();
976 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
977 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
979 deoptimization_literals_.Add(literal, zone());
984 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
985 DCHECK(deoptimization_literals_.length() == 0);
987 const ZoneList<Handle<JSFunction> >* inlined_closures =
988 chunk()->inlined_closures();
990 for (int i = 0, length = inlined_closures->length();
993 DefineDeoptimizationLiteral(inlined_closures->at(i));
996 inlined_function_count_ = deoptimization_literals_.length();
1000 void LCodeGen::RecordSafepointWithLazyDeopt(
1001 LInstruction* instr, SafepointMode safepoint_mode) {
1002 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
1003 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
1005 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
1006 RecordSafepointWithRegisters(
1007 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
1012 void LCodeGen::RecordSafepoint(
1013 LPointerMap* pointers,
1014 Safepoint::Kind kind,
1016 Safepoint::DeoptMode deopt_mode) {
1017 DCHECK(expected_safepoint_kind_ == kind);
1019 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1020 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
1021 kind, arguments, deopt_mode);
1022 for (int i = 0; i < operands->length(); i++) {
1023 LOperand* pointer = operands->at(i);
1024 if (pointer->IsStackSlot()) {
1025 safepoint.DefinePointerSlot(pointer->index(), zone());
1026 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1027 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1030 if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
1031 // Register pp always contains a pointer to the constant pool.
1032 safepoint.DefinePointerRegister(pp, zone());
1037 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1038 Safepoint::DeoptMode deopt_mode) {
1039 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
1043 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
1044 LPointerMap empty_pointers(zone());
1045 RecordSafepoint(&empty_pointers, deopt_mode);
1049 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1051 Safepoint::DeoptMode deopt_mode) {
1053 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
1057 void LCodeGen::RecordAndWritePosition(int position) {
1058 if (position == RelocInfo::kNoPosition) return;
1059 masm()->positions_recorder()->RecordPosition(position);
1060 masm()->positions_recorder()->WriteRecordedPositions();
1064 static const char* LabelType(LLabel* label) {
1065 if (label->is_loop_header()) return " (loop header)";
1066 if (label->is_osr_entry()) return " (OSR entry)";
1071 void LCodeGen::DoLabel(LLabel* label) {
1072 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1073 current_instruction_,
1074 label->hydrogen_value()->id(),
1077 __ bind(label->label());
1078 current_block_ = label->block_id();
1083 void LCodeGen::DoParallelMove(LParallelMove* move) {
1084 resolver_.Resolve(move);
1088 void LCodeGen::DoGap(LGap* gap) {
1089 for (int i = LGap::FIRST_INNER_POSITION;
1090 i <= LGap::LAST_INNER_POSITION;
1092 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1093 LParallelMove* move = gap->GetParallelMove(inner_pos);
1094 if (move != NULL) DoParallelMove(move);
1099 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1104 void LCodeGen::DoParameter(LParameter* instr) {
1109 void LCodeGen::DoCallStub(LCallStub* instr) {
1110 DCHECK(ToRegister(instr->context()).is(cp));
1111 DCHECK(ToRegister(instr->result()).is(r0));
1112 switch (instr->hydrogen()->major_key()) {
1113 case CodeStub::RegExpExec: {
1114 RegExpExecStub stub(isolate());
1115 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1118 case CodeStub::SubString: {
1119 SubStringStub stub(isolate());
1120 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1123 case CodeStub::StringCompare: {
1124 StringCompareStub stub(isolate());
1125 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1134 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1135 GenerateOsrPrologue();
1139 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1140 Register dividend = ToRegister(instr->dividend());
1141 int32_t divisor = instr->divisor();
1142 DCHECK(dividend.is(ToRegister(instr->result())));
1144 // Theoretically, a variation of the branch-free code for integer division by
1145 // a power of 2 (calculating the remainder via an additional multiplication
1146 // (which gets simplified to an 'and') and subtraction) should be faster, and
1147 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1148 // indicate that positive dividends are heavily favored, so the branching
1149 // version performs better.
1150 HMod* hmod = instr->hydrogen();
1151 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1152 Label dividend_is_not_negative, done;
1153 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1154 __ cmp(dividend, Operand::Zero());
1155 __ b(pl, ÷nd_is_not_negative);
1156 // Note that this is correct even for kMinInt operands.
1157 __ rsb(dividend, dividend, Operand::Zero());
1158 __ and_(dividend, dividend, Operand(mask));
1159 __ rsb(dividend, dividend, Operand::Zero(), SetCC);
1160 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1161 DeoptimizeIf(eq, instr);
1166 __ bind(÷nd_is_not_negative);
1167 __ and_(dividend, dividend, Operand(mask));
1172 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1173 Register dividend = ToRegister(instr->dividend());
1174 int32_t divisor = instr->divisor();
1175 Register result = ToRegister(instr->result());
1176 DCHECK(!dividend.is(result));
1179 DeoptimizeIf(al, instr);
1183 __ TruncatingDiv(result, dividend, Abs(divisor));
1184 __ mov(ip, Operand(Abs(divisor)));
1185 __ smull(result, ip, result, ip);
1186 __ sub(result, dividend, result, SetCC);
1188 // Check for negative zero.
1189 HMod* hmod = instr->hydrogen();
1190 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1191 Label remainder_not_zero;
1192 __ b(ne, &remainder_not_zero);
1193 __ cmp(dividend, Operand::Zero());
1194 DeoptimizeIf(lt, instr);
1195 __ bind(&remainder_not_zero);
1200 void LCodeGen::DoModI(LModI* instr) {
1201 HMod* hmod = instr->hydrogen();
1202 if (CpuFeatures::IsSupported(SUDIV)) {
1203 CpuFeatureScope scope(masm(), SUDIV);
1205 Register left_reg = ToRegister(instr->left());
1206 Register right_reg = ToRegister(instr->right());
1207 Register result_reg = ToRegister(instr->result());
1210 // Check for x % 0, sdiv might signal an exception. We have to deopt in this
1211 // case because we can't return a NaN.
1212 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1213 __ cmp(right_reg, Operand::Zero());
1214 DeoptimizeIf(eq, instr);
1217 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
1218 // want. We have to deopt if we care about -0, because we can't return that.
1219 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1220 Label no_overflow_possible;
1221 __ cmp(left_reg, Operand(kMinInt));
1222 __ b(ne, &no_overflow_possible);
1223 __ cmp(right_reg, Operand(-1));
1224 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1225 DeoptimizeIf(eq, instr);
1227 __ b(ne, &no_overflow_possible);
1228 __ mov(result_reg, Operand::Zero());
1231 __ bind(&no_overflow_possible);
1234 // For 'r3 = r1 % r2' we can have the following ARM code:
1236 // mls r3, r3, r2, r1
1238 __ sdiv(result_reg, left_reg, right_reg);
1239 __ Mls(result_reg, result_reg, right_reg, left_reg);
1241 // If we care about -0, test if the dividend is <0 and the result is 0.
1242 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1243 __ cmp(result_reg, Operand::Zero());
1245 __ cmp(left_reg, Operand::Zero());
1246 DeoptimizeIf(lt, instr);
1251 // General case, without any SDIV support.
1252 Register left_reg = ToRegister(instr->left());
1253 Register right_reg = ToRegister(instr->right());
1254 Register result_reg = ToRegister(instr->result());
1255 Register scratch = scratch0();
1256 DCHECK(!scratch.is(left_reg));
1257 DCHECK(!scratch.is(right_reg));
1258 DCHECK(!scratch.is(result_reg));
1259 DwVfpRegister dividend = ToDoubleRegister(instr->temp());
1260 DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
1261 DCHECK(!divisor.is(dividend));
1262 LowDwVfpRegister quotient = double_scratch0();
1263 DCHECK(!quotient.is(dividend));
1264 DCHECK(!quotient.is(divisor));
1267 // Check for x % 0, we have to deopt in this case because we can't return a
1269 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1270 __ cmp(right_reg, Operand::Zero());
1271 DeoptimizeIf(eq, instr);
1274 __ Move(result_reg, left_reg);
1275 // Load the arguments in VFP registers. The divisor value is preloaded
1276 // before. Be careful that 'right_reg' is only live on entry.
1277 // TODO(svenpanne) The last comments seems to be wrong nowadays.
1278 __ vmov(double_scratch0().low(), left_reg);
1279 __ vcvt_f64_s32(dividend, double_scratch0().low());
1280 __ vmov(double_scratch0().low(), right_reg);
1281 __ vcvt_f64_s32(divisor, double_scratch0().low());
1283 // We do not care about the sign of the divisor. Note that we still handle
1284 // the kMinInt % -1 case correctly, though.
1285 __ vabs(divisor, divisor);
1286 // Compute the quotient and round it to a 32bit integer.
1287 __ vdiv(quotient, dividend, divisor);
1288 __ vcvt_s32_f64(quotient.low(), quotient);
1289 __ vcvt_f64_s32(quotient, quotient.low());
1291 // Compute the remainder in result.
1292 __ vmul(double_scratch0(), divisor, quotient);
1293 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
1294 __ vmov(scratch, double_scratch0().low());
1295 __ sub(result_reg, left_reg, scratch, SetCC);
1297 // If we care about -0, test if the dividend is <0 and the result is 0.
1298 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1300 __ cmp(left_reg, Operand::Zero());
1301 DeoptimizeIf(mi, instr);
1308 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1309 Register dividend = ToRegister(instr->dividend());
1310 int32_t divisor = instr->divisor();
1311 Register result = ToRegister(instr->result());
1312 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1313 DCHECK(!result.is(dividend));
1315 // Check for (0 / -x) that will produce negative zero.
1316 HDiv* hdiv = instr->hydrogen();
1317 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1318 __ cmp(dividend, Operand::Zero());
1319 DeoptimizeIf(eq, instr);
1321 // Check for (kMinInt / -1).
1322 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1323 __ cmp(dividend, Operand(kMinInt));
1324 DeoptimizeIf(eq, instr);
1326 // Deoptimize if remainder will not be 0.
1327 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1328 divisor != 1 && divisor != -1) {
1329 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1330 __ tst(dividend, Operand(mask));
1331 DeoptimizeIf(ne, instr);
1334 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1335 __ rsb(result, dividend, Operand(0));
1338 int32_t shift = WhichPowerOf2Abs(divisor);
1340 __ mov(result, dividend);
1341 } else if (shift == 1) {
1342 __ add(result, dividend, Operand(dividend, LSR, 31));
1344 __ mov(result, Operand(dividend, ASR, 31));
1345 __ add(result, dividend, Operand(result, LSR, 32 - shift));
1347 if (shift > 0) __ mov(result, Operand(result, ASR, shift));
1348 if (divisor < 0) __ rsb(result, result, Operand(0));
1352 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1353 Register dividend = ToRegister(instr->dividend());
1354 int32_t divisor = instr->divisor();
1355 Register result = ToRegister(instr->result());
1356 DCHECK(!dividend.is(result));
1359 DeoptimizeIf(al, instr);
1363 // Check for (0 / -x) that will produce negative zero.
1364 HDiv* hdiv = instr->hydrogen();
1365 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1366 __ cmp(dividend, Operand::Zero());
1367 DeoptimizeIf(eq, instr);
1370 __ TruncatingDiv(result, dividend, Abs(divisor));
1371 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1373 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1374 __ mov(ip, Operand(divisor));
1375 __ smull(scratch0(), ip, result, ip);
1376 __ sub(scratch0(), scratch0(), dividend, SetCC);
1377 DeoptimizeIf(ne, instr);
1382 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1383 void LCodeGen::DoDivI(LDivI* instr) {
1384 HBinaryOperation* hdiv = instr->hydrogen();
1385 Register dividend = ToRegister(instr->dividend());
1386 Register divisor = ToRegister(instr->divisor());
1387 Register result = ToRegister(instr->result());
1390 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1391 __ cmp(divisor, Operand::Zero());
1392 DeoptimizeIf(eq, instr);
1395 // Check for (0 / -x) that will produce negative zero.
1396 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1398 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1399 // Do the test only if it hadn't be done above.
1400 __ cmp(divisor, Operand::Zero());
1402 __ b(pl, &positive);
1403 __ cmp(dividend, Operand::Zero());
1404 DeoptimizeIf(eq, instr);
1408 // Check for (kMinInt / -1).
1409 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1410 (!CpuFeatures::IsSupported(SUDIV) ||
1411 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1412 // We don't need to check for overflow when truncating with sdiv
1413 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1414 __ cmp(dividend, Operand(kMinInt));
1415 __ cmp(divisor, Operand(-1), eq);
1416 DeoptimizeIf(eq, instr);
1419 if (CpuFeatures::IsSupported(SUDIV)) {
1420 CpuFeatureScope scope(masm(), SUDIV);
1421 __ sdiv(result, dividend, divisor);
1423 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1424 DoubleRegister vright = double_scratch0();
1425 __ vmov(double_scratch0().low(), dividend);
1426 __ vcvt_f64_s32(vleft, double_scratch0().low());
1427 __ vmov(double_scratch0().low(), divisor);
1428 __ vcvt_f64_s32(vright, double_scratch0().low());
1429 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1430 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1431 __ vmov(result, double_scratch0().low());
1434 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1435 // Compute remainder and deopt if it's not zero.
1436 Register remainder = scratch0();
1437 __ Mls(remainder, result, divisor, dividend);
1438 __ cmp(remainder, Operand::Zero());
1439 DeoptimizeIf(ne, instr);
1444 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1445 DwVfpRegister addend = ToDoubleRegister(instr->addend());
1446 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1447 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1449 // This is computed in-place.
1450 DCHECK(addend.is(ToDoubleRegister(instr->result())));
1452 __ vmla(addend, multiplier, multiplicand);
1456 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1457 DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1458 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1459 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1461 // This is computed in-place.
1462 DCHECK(minuend.is(ToDoubleRegister(instr->result())));
1464 __ vmls(minuend, multiplier, multiplicand);
1468 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1469 Register dividend = ToRegister(instr->dividend());
1470 Register result = ToRegister(instr->result());
1471 int32_t divisor = instr->divisor();
1473 // If the divisor is 1, return the dividend.
1475 __ Move(result, dividend);
1479 // If the divisor is positive, things are easy: There can be no deopts and we
1480 // can simply do an arithmetic right shift.
1481 int32_t shift = WhichPowerOf2Abs(divisor);
1483 __ mov(result, Operand(dividend, ASR, shift));
1487 // If the divisor is negative, we have to negate and handle edge cases.
1488 __ rsb(result, dividend, Operand::Zero(), SetCC);
1489 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1490 DeoptimizeIf(eq, instr);
1493 // Dividing by -1 is basically negation, unless we overflow.
1494 if (divisor == -1) {
1495 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1496 DeoptimizeIf(vs, instr);
1501 // If the negation could not overflow, simply shifting is OK.
1502 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1503 __ mov(result, Operand(result, ASR, shift));
1507 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
1508 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
1512 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1513 Register dividend = ToRegister(instr->dividend());
1514 int32_t divisor = instr->divisor();
1515 Register result = ToRegister(instr->result());
1516 DCHECK(!dividend.is(result));
1519 DeoptimizeIf(al, instr);
1523 // Check for (0 / -x) that will produce negative zero.
1524 HMathFloorOfDiv* hdiv = instr->hydrogen();
1525 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1526 __ cmp(dividend, Operand::Zero());
1527 DeoptimizeIf(eq, instr);
1530 // Easy case: We need no dynamic check for the dividend and the flooring
1531 // division is the same as the truncating division.
1532 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1533 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1534 __ TruncatingDiv(result, dividend, Abs(divisor));
1535 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1539 // In the general case we may need to adjust before and after the truncating
1540 // division to get a flooring division.
1541 Register temp = ToRegister(instr->temp());
1542 DCHECK(!temp.is(dividend) && !temp.is(result));
1543 Label needs_adjustment, done;
1544 __ cmp(dividend, Operand::Zero());
1545 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1546 __ TruncatingDiv(result, dividend, Abs(divisor));
1547 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1549 __ bind(&needs_adjustment);
1550 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1551 __ TruncatingDiv(result, temp, Abs(divisor));
1552 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1553 __ sub(result, result, Operand(1));
1558 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1559 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1560 HBinaryOperation* hdiv = instr->hydrogen();
1561 Register left = ToRegister(instr->dividend());
1562 Register right = ToRegister(instr->divisor());
1563 Register result = ToRegister(instr->result());
1566 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1567 __ cmp(right, Operand::Zero());
1568 DeoptimizeIf(eq, instr);
1571 // Check for (0 / -x) that will produce negative zero.
1572 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1574 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1575 // Do the test only if it hadn't be done above.
1576 __ cmp(right, Operand::Zero());
1578 __ b(pl, &positive);
1579 __ cmp(left, Operand::Zero());
1580 DeoptimizeIf(eq, instr);
1584 // Check for (kMinInt / -1).
1585 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1586 (!CpuFeatures::IsSupported(SUDIV) ||
1587 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1588 // We don't need to check for overflow when truncating with sdiv
1589 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1590 __ cmp(left, Operand(kMinInt));
1591 __ cmp(right, Operand(-1), eq);
1592 DeoptimizeIf(eq, instr);
1595 if (CpuFeatures::IsSupported(SUDIV)) {
1596 CpuFeatureScope scope(masm(), SUDIV);
1597 __ sdiv(result, left, right);
1599 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1600 DoubleRegister vright = double_scratch0();
1601 __ vmov(double_scratch0().low(), left);
1602 __ vcvt_f64_s32(vleft, double_scratch0().low());
1603 __ vmov(double_scratch0().low(), right);
1604 __ vcvt_f64_s32(vright, double_scratch0().low());
1605 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1606 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1607 __ vmov(result, double_scratch0().low());
1611 Register remainder = scratch0();
1612 __ Mls(remainder, result, right, left);
1613 __ cmp(remainder, Operand::Zero());
1615 __ eor(remainder, remainder, Operand(right));
1616 __ add(result, result, Operand(remainder, ASR, 31));
1621 void LCodeGen::DoMulI(LMulI* instr) {
1622 Register result = ToRegister(instr->result());
1623 // Note that result may alias left.
1624 Register left = ToRegister(instr->left());
1625 LOperand* right_op = instr->right();
1627 bool bailout_on_minus_zero =
1628 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1629 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1631 if (right_op->IsConstantOperand()) {
1632 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1634 if (bailout_on_minus_zero && (constant < 0)) {
1635 // The case of a null constant will be handled separately.
1636 // If constant is negative and left is null, the result should be -0.
1637 __ cmp(left, Operand::Zero());
1638 DeoptimizeIf(eq, instr);
1644 __ rsb(result, left, Operand::Zero(), SetCC);
1645 DeoptimizeIf(vs, instr);
1647 __ rsb(result, left, Operand::Zero());
1651 if (bailout_on_minus_zero) {
1652 // If left is strictly negative and the constant is null, the
1653 // result is -0. Deoptimize if required, otherwise return 0.
1654 __ cmp(left, Operand::Zero());
1655 DeoptimizeIf(mi, instr);
1657 __ mov(result, Operand::Zero());
1660 __ Move(result, left);
1663 // Multiplying by powers of two and powers of two plus or minus
1664 // one can be done faster with shifted operands.
1665 // For other constants we emit standard code.
1666 int32_t mask = constant >> 31;
1667 uint32_t constant_abs = (constant + mask) ^ mask;
1669 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1670 int32_t shift = WhichPowerOf2(constant_abs);
1671 __ mov(result, Operand(left, LSL, shift));
1672 // Correct the sign of the result is the constant is negative.
1673 if (constant < 0) __ rsb(result, result, Operand::Zero());
1674 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1675 int32_t shift = WhichPowerOf2(constant_abs - 1);
1676 __ add(result, left, Operand(left, LSL, shift));
1677 // Correct the sign of the result is the constant is negative.
1678 if (constant < 0) __ rsb(result, result, Operand::Zero());
1679 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1680 int32_t shift = WhichPowerOf2(constant_abs + 1);
1681 __ rsb(result, left, Operand(left, LSL, shift));
1682 // Correct the sign of the result is the constant is negative.
1683 if (constant < 0) __ rsb(result, result, Operand::Zero());
1685 // Generate standard code.
1686 __ mov(ip, Operand(constant));
1687 __ mul(result, left, ip);
1692 DCHECK(right_op->IsRegister());
1693 Register right = ToRegister(right_op);
1696 Register scratch = scratch0();
1697 // scratch:result = left * right.
1698 if (instr->hydrogen()->representation().IsSmi()) {
1699 __ SmiUntag(result, left);
1700 __ smull(result, scratch, result, right);
1702 __ smull(result, scratch, left, right);
1704 __ cmp(scratch, Operand(result, ASR, 31));
1705 DeoptimizeIf(ne, instr);
1707 if (instr->hydrogen()->representation().IsSmi()) {
1708 __ SmiUntag(result, left);
1709 __ mul(result, result, right);
1711 __ mul(result, left, right);
1715 if (bailout_on_minus_zero) {
1717 __ teq(left, Operand(right));
1719 // Bail out if the result is minus zero.
1720 __ cmp(result, Operand::Zero());
1721 DeoptimizeIf(eq, instr);
1728 void LCodeGen::DoBitI(LBitI* instr) {
1729 LOperand* left_op = instr->left();
1730 LOperand* right_op = instr->right();
1731 DCHECK(left_op->IsRegister());
1732 Register left = ToRegister(left_op);
1733 Register result = ToRegister(instr->result());
1734 Operand right(no_reg);
1736 if (right_op->IsStackSlot()) {
1737 right = Operand(EmitLoadRegister(right_op, ip));
1739 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1740 right = ToOperand(right_op);
1743 switch (instr->op()) {
1744 case Token::BIT_AND:
1745 __ and_(result, left, right);
1748 __ orr(result, left, right);
1750 case Token::BIT_XOR:
1751 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1752 __ mvn(result, Operand(left));
1754 __ eor(result, left, right);
1764 void LCodeGen::DoShiftI(LShiftI* instr) {
1765 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1766 // result may alias either of them.
1767 LOperand* right_op = instr->right();
1768 Register left = ToRegister(instr->left());
1769 Register result = ToRegister(instr->result());
1770 Register scratch = scratch0();
1771 if (right_op->IsRegister()) {
1772 // Mask the right_op operand.
1773 __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1774 switch (instr->op()) {
1776 __ mov(result, Operand(left, ROR, scratch));
1779 __ mov(result, Operand(left, ASR, scratch));
1782 if (instr->can_deopt()) {
1783 __ mov(result, Operand(left, LSR, scratch), SetCC);
1784 DeoptimizeIf(mi, instr);
1786 __ mov(result, Operand(left, LSR, scratch));
1790 __ mov(result, Operand(left, LSL, scratch));
1797 // Mask the right_op operand.
1798 int value = ToInteger32(LConstantOperand::cast(right_op));
1799 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1800 switch (instr->op()) {
1802 if (shift_count != 0) {
1803 __ mov(result, Operand(left, ROR, shift_count));
1805 __ Move(result, left);
1809 if (shift_count != 0) {
1810 __ mov(result, Operand(left, ASR, shift_count));
1812 __ Move(result, left);
1816 if (shift_count != 0) {
1817 __ mov(result, Operand(left, LSR, shift_count));
1819 if (instr->can_deopt()) {
1820 __ tst(left, Operand(0x80000000));
1821 DeoptimizeIf(ne, instr);
1823 __ Move(result, left);
1827 if (shift_count != 0) {
1828 if (instr->hydrogen_value()->representation().IsSmi() &&
1829 instr->can_deopt()) {
1830 if (shift_count != 1) {
1831 __ mov(result, Operand(left, LSL, shift_count - 1));
1832 __ SmiTag(result, result, SetCC);
1834 __ SmiTag(result, left, SetCC);
1836 DeoptimizeIf(vs, instr);
1838 __ mov(result, Operand(left, LSL, shift_count));
1841 __ Move(result, left);
1852 void LCodeGen::DoSubI(LSubI* instr) {
1853 LOperand* left = instr->left();
1854 LOperand* right = instr->right();
1855 LOperand* result = instr->result();
1856 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1857 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1859 if (right->IsStackSlot()) {
1860 Register right_reg = EmitLoadRegister(right, ip);
1861 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1863 DCHECK(right->IsRegister() || right->IsConstantOperand());
1864 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1868 DeoptimizeIf(vs, instr);
1873 void LCodeGen::DoRSubI(LRSubI* instr) {
1874 LOperand* left = instr->left();
1875 LOperand* right = instr->right();
1876 LOperand* result = instr->result();
1877 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1878 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1880 if (right->IsStackSlot()) {
1881 Register right_reg = EmitLoadRegister(right, ip);
1882 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1884 DCHECK(right->IsRegister() || right->IsConstantOperand());
1885 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1889 DeoptimizeIf(vs, instr);
1894 void LCodeGen::DoConstantI(LConstantI* instr) {
1895 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1899 void LCodeGen::DoConstantS(LConstantS* instr) {
1900 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1904 void LCodeGen::DoConstantD(LConstantD* instr) {
1905 DCHECK(instr->result()->IsDoubleRegister());
1906 DwVfpRegister result = ToDoubleRegister(instr->result());
1907 double v = instr->value();
1908 __ Vmov(result, v, scratch0());
1912 void LCodeGen::DoConstantE(LConstantE* instr) {
1913 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1917 void LCodeGen::DoConstantT(LConstantT* instr) {
1918 Handle<Object> object = instr->value(isolate());
1919 AllowDeferredHandleDereference smi_check;
1920 __ Move(ToRegister(instr->result()), object);
1924 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1925 Register result = ToRegister(instr->result());
1926 Register map = ToRegister(instr->value());
1927 __ EnumLength(result, map);
1931 void LCodeGen::DoDateField(LDateField* instr) {
1932 Register object = ToRegister(instr->date());
1933 Register result = ToRegister(instr->result());
1934 Register scratch = ToRegister(instr->temp());
1935 Smi* index = instr->index();
1936 Label runtime, done;
1937 DCHECK(object.is(result));
1938 DCHECK(object.is(r0));
1939 DCHECK(!scratch.is(scratch0()));
1940 DCHECK(!scratch.is(object));
1943 DeoptimizeIf(eq, instr);
1944 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1945 DeoptimizeIf(ne, instr);
1947 if (index->value() == 0) {
1948 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1950 if (index->value() < JSDate::kFirstUncachedField) {
1951 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1952 __ mov(scratch, Operand(stamp));
1953 __ ldr(scratch, MemOperand(scratch));
1954 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1955 __ cmp(scratch, scratch0());
1957 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1958 kPointerSize * index->value()));
1962 __ PrepareCallCFunction(2, scratch);
1963 __ mov(r1, Operand(index));
1964 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1970 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1972 String::Encoding encoding) {
1973 if (index->IsConstantOperand()) {
1974 int offset = ToInteger32(LConstantOperand::cast(index));
1975 if (encoding == String::TWO_BYTE_ENCODING) {
1976 offset *= kUC16Size;
1978 STATIC_ASSERT(kCharSize == 1);
1979 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1981 Register scratch = scratch0();
1982 DCHECK(!scratch.is(string));
1983 DCHECK(!scratch.is(ToRegister(index)));
1984 if (encoding == String::ONE_BYTE_ENCODING) {
1985 __ add(scratch, string, Operand(ToRegister(index)));
1987 STATIC_ASSERT(kUC16Size == 2);
1988 __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
1990 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1994 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1995 String::Encoding encoding = instr->hydrogen()->encoding();
1996 Register string = ToRegister(instr->string());
1997 Register result = ToRegister(instr->result());
1999 if (FLAG_debug_code) {
2000 Register scratch = scratch0();
2001 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
2002 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2004 __ and_(scratch, scratch,
2005 Operand(kStringRepresentationMask | kStringEncodingMask));
2006 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2007 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2008 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
2009 ? one_byte_seq_type : two_byte_seq_type));
2010 __ Check(eq, kUnexpectedStringType);
2013 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2014 if (encoding == String::ONE_BYTE_ENCODING) {
2015 __ ldrb(result, operand);
2017 __ ldrh(result, operand);
2022 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2023 String::Encoding encoding = instr->hydrogen()->encoding();
2024 Register string = ToRegister(instr->string());
2025 Register value = ToRegister(instr->value());
2027 if (FLAG_debug_code) {
2028 Register index = ToRegister(instr->index());
2029 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2030 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2032 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2033 ? one_byte_seq_type : two_byte_seq_type;
2034 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2037 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2038 if (encoding == String::ONE_BYTE_ENCODING) {
2039 __ strb(value, operand);
2041 __ strh(value, operand);
2046 void LCodeGen::DoAddI(LAddI* instr) {
2047 LOperand* left = instr->left();
2048 LOperand* right = instr->right();
2049 LOperand* result = instr->result();
2050 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2051 SBit set_cond = can_overflow ? SetCC : LeaveCC;
2053 if (right->IsStackSlot()) {
2054 Register right_reg = EmitLoadRegister(right, ip);
2055 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
2057 DCHECK(right->IsRegister() || right->IsConstantOperand());
2058 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
2062 DeoptimizeIf(vs, instr);
2067 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2068 LOperand* left = instr->left();
2069 LOperand* right = instr->right();
2070 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2071 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2072 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
2073 Register left_reg = ToRegister(left);
2074 Operand right_op = (right->IsRegister() || right->IsConstantOperand())
2076 : Operand(EmitLoadRegister(right, ip));
2077 Register result_reg = ToRegister(instr->result());
2078 __ cmp(left_reg, right_op);
2079 __ Move(result_reg, left_reg, condition);
2080 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
2082 DCHECK(instr->hydrogen()->representation().IsDouble());
2083 DwVfpRegister left_reg = ToDoubleRegister(left);
2084 DwVfpRegister right_reg = ToDoubleRegister(right);
2085 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
2086 Label result_is_nan, return_left, return_right, check_zero, done;
2087 __ VFPCompareAndSetFlags(left_reg, right_reg);
2088 if (operation == HMathMinMax::kMathMin) {
2089 __ b(mi, &return_left);
2090 __ b(gt, &return_right);
2092 __ b(mi, &return_right);
2093 __ b(gt, &return_left);
2095 __ b(vs, &result_is_nan);
2096 // Left equals right => check for -0.
2097 __ VFPCompareAndSetFlags(left_reg, 0.0);
2098 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2099 __ b(ne, &done); // left == right != 0.
2101 __ b(ne, &return_left); // left == right != 0.
2103 // At this point, both left and right are either 0 or -0.
2104 if (operation == HMathMinMax::kMathMin) {
2105 // We could use a single 'vorr' instruction here if we had NEON support.
2106 __ vneg(left_reg, left_reg);
2107 __ vsub(result_reg, left_reg, right_reg);
2108 __ vneg(result_reg, result_reg);
2110 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2111 // the decision for vadd is easy because vand is a NEON instruction.
2112 __ vadd(result_reg, left_reg, right_reg);
2116 __ bind(&result_is_nan);
2117 __ vadd(result_reg, left_reg, right_reg);
2120 __ bind(&return_right);
2121 __ Move(result_reg, right_reg);
2122 if (!left_reg.is(result_reg)) {
2126 __ bind(&return_left);
2127 __ Move(result_reg, left_reg);
2134 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2135 DwVfpRegister left = ToDoubleRegister(instr->left());
2136 DwVfpRegister right = ToDoubleRegister(instr->right());
2137 DwVfpRegister result = ToDoubleRegister(instr->result());
2138 switch (instr->op()) {
2140 __ vadd(result, left, right);
2143 __ vsub(result, left, right);
2146 __ vmul(result, left, right);
2149 __ vdiv(result, left, right);
2152 __ PrepareCallCFunction(0, 2, scratch0());
2153 __ MovToFloatParameters(left, right);
2155 ExternalReference::mod_two_doubles_operation(isolate()),
2157 // Move the result in the double result register.
2158 __ MovFromFloatResult(result);
2168 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2169 DCHECK(ToRegister(instr->context()).is(cp));
2170 DCHECK(ToRegister(instr->left()).is(r1));
2171 DCHECK(ToRegister(instr->right()).is(r0));
2172 DCHECK(ToRegister(instr->result()).is(r0));
2175 CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2176 // Block literal pool emission to ensure nop indicating no inlined smi code
2177 // is in the correct position.
2178 Assembler::BlockConstPoolScope block_const_pool(masm());
2179 CallCode(code, RelocInfo::CODE_TARGET, instr);
2183 template<class InstrType>
2184 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
2185 int left_block = instr->TrueDestination(chunk_);
2186 int right_block = instr->FalseDestination(chunk_);
2188 int next_block = GetNextEmittedBlock();
2190 if (right_block == left_block || condition == al) {
2191 EmitGoto(left_block);
2192 } else if (left_block == next_block) {
2193 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
2194 } else if (right_block == next_block) {
2195 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2197 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2198 __ b(chunk_->GetAssemblyLabel(right_block));
2203 template<class InstrType>
2204 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
2205 int false_block = instr->FalseDestination(chunk_);
2206 __ b(condition, chunk_->GetAssemblyLabel(false_block));
2210 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2215 void LCodeGen::DoBranch(LBranch* instr) {
2216 Representation r = instr->hydrogen()->value()->representation();
2217 if (r.IsInteger32() || r.IsSmi()) {
2218 DCHECK(!info()->IsStub());
2219 Register reg = ToRegister(instr->value());
2220 __ cmp(reg, Operand::Zero());
2221 EmitBranch(instr, ne);
2222 } else if (r.IsDouble()) {
2223 DCHECK(!info()->IsStub());
2224 DwVfpRegister reg = ToDoubleRegister(instr->value());
2225 // Test the double value. Zero and NaN are false.
2226 __ VFPCompareAndSetFlags(reg, 0.0);
2227 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2228 EmitBranch(instr, ne);
2230 DCHECK(r.IsTagged());
2231 Register reg = ToRegister(instr->value());
2232 HType type = instr->hydrogen()->value()->type();
2233 if (type.IsBoolean()) {
2234 DCHECK(!info()->IsStub());
2235 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2236 EmitBranch(instr, eq);
2237 } else if (type.IsSmi()) {
2238 DCHECK(!info()->IsStub());
2239 __ cmp(reg, Operand::Zero());
2240 EmitBranch(instr, ne);
2241 } else if (type.IsJSArray()) {
2242 DCHECK(!info()->IsStub());
2243 EmitBranch(instr, al);
2244 } else if (type.IsHeapNumber()) {
2245 DCHECK(!info()->IsStub());
2246 DwVfpRegister dbl_scratch = double_scratch0();
2247 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2248 // Test the double value. Zero and NaN are false.
2249 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2250 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN)
2251 EmitBranch(instr, ne);
2252 } else if (type.IsString()) {
2253 DCHECK(!info()->IsStub());
2254 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2255 __ cmp(ip, Operand::Zero());
2256 EmitBranch(instr, ne);
2258 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2259 // Avoid deopts in the case where we've never executed this path before.
2260 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2262 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2263 // undefined -> false.
2264 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2265 __ b(eq, instr->FalseLabel(chunk_));
2267 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2268 // Boolean -> its value.
2269 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2270 __ b(eq, instr->TrueLabel(chunk_));
2271 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2272 __ b(eq, instr->FalseLabel(chunk_));
2274 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2276 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2277 __ b(eq, instr->FalseLabel(chunk_));
2280 if (expected.Contains(ToBooleanStub::SMI)) {
2281 // Smis: 0 -> false, all other -> true.
2282 __ cmp(reg, Operand::Zero());
2283 __ b(eq, instr->FalseLabel(chunk_));
2284 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2285 } else if (expected.NeedsMap()) {
2286 // If we need a map later and have a Smi -> deopt.
2288 DeoptimizeIf(eq, instr);
2291 const Register map = scratch0();
2292 if (expected.NeedsMap()) {
2293 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2295 if (expected.CanBeUndetectable()) {
2296 // Undetectable -> false.
2297 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2298 __ tst(ip, Operand(1 << Map::kIsUndetectable));
2299 __ b(ne, instr->FalseLabel(chunk_));
2303 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2304 // spec object -> true.
2305 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2306 __ b(ge, instr->TrueLabel(chunk_));
2309 if (expected.Contains(ToBooleanStub::STRING)) {
2310 // String value -> false iff empty.
2312 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2313 __ b(ge, ¬_string);
2314 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2315 __ cmp(ip, Operand::Zero());
2316 __ b(ne, instr->TrueLabel(chunk_));
2317 __ b(instr->FalseLabel(chunk_));
2318 __ bind(¬_string);
2321 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2322 // Symbol value -> true.
2323 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2324 __ b(eq, instr->TrueLabel(chunk_));
2327 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2328 // heap number -> false iff +0, -0, or NaN.
2329 DwVfpRegister dbl_scratch = double_scratch0();
2330 Label not_heap_number;
2331 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2332 __ b(ne, ¬_heap_number);
2333 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2334 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2335 __ cmp(r0, r0, vs); // NaN -> false.
2336 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
2337 __ b(instr->TrueLabel(chunk_));
2338 __ bind(¬_heap_number);
2341 if (!expected.IsGeneric()) {
2342 // We've seen something for the first time -> deopt.
2343 // This can only happen if we are not generic already.
2344 DeoptimizeIf(al, instr);
2351 void LCodeGen::EmitGoto(int block) {
2352 if (!IsNextEmittedBlock(block)) {
2353 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2358 void LCodeGen::DoGoto(LGoto* instr) {
2359 EmitGoto(instr->block_id());
2363 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2364 Condition cond = kNoCondition;
2367 case Token::EQ_STRICT:
2371 case Token::NE_STRICT:
2375 cond = is_unsigned ? lo : lt;
2378 cond = is_unsigned ? hi : gt;
2381 cond = is_unsigned ? ls : le;
2384 cond = is_unsigned ? hs : ge;
2387 case Token::INSTANCEOF:
2395 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2396 LOperand* left = instr->left();
2397 LOperand* right = instr->right();
2399 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2400 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2401 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2403 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2404 // We can statically evaluate the comparison.
2405 double left_val = ToDouble(LConstantOperand::cast(left));
2406 double right_val = ToDouble(LConstantOperand::cast(right));
2407 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2408 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2409 EmitGoto(next_block);
2411 if (instr->is_double()) {
2412 // Compare left and right operands as doubles and load the
2413 // resulting flags into the normal status register.
2414 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2415 // If a NaN is involved, i.e. the result is unordered (V set),
2416 // jump to false block label.
2417 __ b(vs, instr->FalseLabel(chunk_));
2419 if (right->IsConstantOperand()) {
2420 int32_t value = ToInteger32(LConstantOperand::cast(right));
2421 if (instr->hydrogen_value()->representation().IsSmi()) {
2422 __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
2424 __ cmp(ToRegister(left), Operand(value));
2426 } else if (left->IsConstantOperand()) {
2427 int32_t value = ToInteger32(LConstantOperand::cast(left));
2428 if (instr->hydrogen_value()->representation().IsSmi()) {
2429 __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
2431 __ cmp(ToRegister(right), Operand(value));
2433 // We commuted the operands, so commute the condition.
2434 cond = CommuteCondition(cond);
2436 __ cmp(ToRegister(left), ToRegister(right));
2439 EmitBranch(instr, cond);
2444 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2445 Register left = ToRegister(instr->left());
2446 Register right = ToRegister(instr->right());
2448 __ cmp(left, Operand(right));
2449 EmitBranch(instr, eq);
2453 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2454 if (instr->hydrogen()->representation().IsTagged()) {
2455 Register input_reg = ToRegister(instr->object());
2456 __ mov(ip, Operand(factory()->the_hole_value()));
2457 __ cmp(input_reg, ip);
2458 EmitBranch(instr, eq);
2462 DwVfpRegister input_reg = ToDoubleRegister(instr->object());
2463 __ VFPCompareAndSetFlags(input_reg, input_reg);
2464 EmitFalseBranch(instr, vc);
2466 Register scratch = scratch0();
2467 __ VmovHigh(scratch, input_reg);
2468 __ cmp(scratch, Operand(kHoleNanUpper32));
2469 EmitBranch(instr, eq);
2473 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2474 Representation rep = instr->hydrogen()->value()->representation();
2475 DCHECK(!rep.IsInteger32());
2476 Register scratch = ToRegister(instr->temp());
2478 if (rep.IsDouble()) {
2479 DwVfpRegister value = ToDoubleRegister(instr->value());
2480 __ VFPCompareAndSetFlags(value, 0.0);
2481 EmitFalseBranch(instr, ne);
2482 __ VmovHigh(scratch, value);
2483 __ cmp(scratch, Operand(0x80000000));
2485 Register value = ToRegister(instr->value());
2488 Heap::kHeapNumberMapRootIndex,
2489 instr->FalseLabel(chunk()),
2491 __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2492 __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2493 __ cmp(scratch, Operand(0x80000000));
2494 __ cmp(ip, Operand(0x00000000), eq);
2496 EmitBranch(instr, eq);
2500 Condition LCodeGen::EmitIsObject(Register input,
2502 Label* is_not_object,
2504 Register temp2 = scratch0();
2505 __ JumpIfSmi(input, is_not_object);
2507 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2508 __ cmp(input, temp2);
2509 __ b(eq, is_object);
2512 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2513 // Undetectable objects behave like undefined.
2514 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2515 __ tst(temp2, Operand(1 << Map::kIsUndetectable));
2516 __ b(ne, is_not_object);
2518 // Load instance type and check that it is in object type range.
2519 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2520 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2521 __ b(lt, is_not_object);
2522 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2527 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2528 Register reg = ToRegister(instr->value());
2529 Register temp1 = ToRegister(instr->temp());
2531 Condition true_cond =
2532 EmitIsObject(reg, temp1,
2533 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2535 EmitBranch(instr, true_cond);
2539 Condition LCodeGen::EmitIsString(Register input,
2541 Label* is_not_string,
2542 SmiCheck check_needed = INLINE_SMI_CHECK) {
2543 if (check_needed == INLINE_SMI_CHECK) {
2544 __ JumpIfSmi(input, is_not_string);
2546 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2552 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2553 Register reg = ToRegister(instr->value());
2554 Register temp1 = ToRegister(instr->temp());
2556 SmiCheck check_needed =
2557 instr->hydrogen()->value()->type().IsHeapObject()
2558 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2559 Condition true_cond =
2560 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2562 EmitBranch(instr, true_cond);
2566 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2567 Register input_reg = EmitLoadRegister(instr->value(), ip);
2568 __ SmiTst(input_reg);
2569 EmitBranch(instr, eq);
2573 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2574 Register input = ToRegister(instr->value());
2575 Register temp = ToRegister(instr->temp());
2577 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2578 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2580 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2581 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2582 __ tst(temp, Operand(1 << Map::kIsUndetectable));
2583 EmitBranch(instr, ne);
2587 static Condition ComputeCompareCondition(Token::Value op) {
2589 case Token::EQ_STRICT:
2602 return kNoCondition;
2607 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2608 DCHECK(ToRegister(instr->context()).is(cp));
2609 Token::Value op = instr->op();
2611 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2612 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2613 // This instruction also signals no smi code inlined.
2614 __ cmp(r0, Operand::Zero());
2616 Condition condition = ComputeCompareCondition(op);
2618 EmitBranch(instr, condition);
2622 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2623 InstanceType from = instr->from();
2624 InstanceType to = instr->to();
2625 if (from == FIRST_TYPE) return to;
2626 DCHECK(from == to || to == LAST_TYPE);
2631 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2632 InstanceType from = instr->from();
2633 InstanceType to = instr->to();
2634 if (from == to) return eq;
2635 if (to == LAST_TYPE) return hs;
2636 if (from == FIRST_TYPE) return ls;
2642 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2643 Register scratch = scratch0();
2644 Register input = ToRegister(instr->value());
2646 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2647 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2650 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2651 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2655 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2656 Register input = ToRegister(instr->value());
2657 Register result = ToRegister(instr->result());
2659 __ AssertString(input);
2661 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2662 __ IndexFromHash(result, result);
2666 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2667 LHasCachedArrayIndexAndBranch* instr) {
2668 Register input = ToRegister(instr->value());
2669 Register scratch = scratch0();
2672 FieldMemOperand(input, String::kHashFieldOffset));
2673 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2674 EmitBranch(instr, eq);
2678 // Branches to a label or falls through with the answer in flags. Trashes
2679 // the temp registers, but not the input.
2680 void LCodeGen::EmitClassOfTest(Label* is_true,
2682 Handle<String>class_name,
2686 DCHECK(!input.is(temp));
2687 DCHECK(!input.is(temp2));
2688 DCHECK(!temp.is(temp2));
2690 __ JumpIfSmi(input, is_false);
2692 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2693 // Assuming the following assertions, we can use the same compares to test
2694 // for both being a function type and being in the object type range.
2695 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2696 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2697 FIRST_SPEC_OBJECT_TYPE + 1);
2698 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2699 LAST_SPEC_OBJECT_TYPE - 1);
2700 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2701 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2704 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2707 // Faster code path to avoid two compares: subtract lower bound from the
2708 // actual type and do a signed compare with the width of the type range.
2709 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2710 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2711 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2712 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2713 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2717 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2718 // Check if the constructor in the map is a function.
2719 __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2721 // Objects with a non-function constructor have class 'Object'.
2722 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2723 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
2729 // temp now contains the constructor function. Grab the
2730 // instance class name from there.
2731 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2732 __ ldr(temp, FieldMemOperand(temp,
2733 SharedFunctionInfo::kInstanceClassNameOffset));
2734 // The class name we are testing against is internalized since it's a literal.
2735 // The name in the constructor is internalized because of the way the context
2736 // is booted. This routine isn't expected to work for random API-created
2737 // classes and it doesn't have to because you can't access it with natives
2738 // syntax. Since both sides are internalized it is sufficient to use an
2739 // identity comparison.
2740 __ cmp(temp, Operand(class_name));
2741 // End with the answer in flags.
2745 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2746 Register input = ToRegister(instr->value());
2747 Register temp = scratch0();
2748 Register temp2 = ToRegister(instr->temp());
2749 Handle<String> class_name = instr->hydrogen()->class_name();
2751 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2752 class_name, input, temp, temp2);
2754 EmitBranch(instr, eq);
2758 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2759 Register reg = ToRegister(instr->value());
2760 Register temp = ToRegister(instr->temp());
2762 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2763 __ cmp(temp, Operand(instr->map()));
2764 EmitBranch(instr, eq);
2768 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2769 DCHECK(ToRegister(instr->context()).is(cp));
2770 DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0.
2771 DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1.
2773 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2774 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2776 __ cmp(r0, Operand::Zero());
2777 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2778 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2782 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2783 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2785 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2786 LInstanceOfKnownGlobal* instr)
2787 : LDeferredCode(codegen), instr_(instr) { }
2788 virtual void Generate() OVERRIDE {
2789 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
2792 virtual LInstruction* instr() OVERRIDE { return instr_; }
2793 Label* map_check() { return &map_check_; }
2794 Label* load_bool() { return &load_bool_; }
2797 LInstanceOfKnownGlobal* instr_;
2802 DeferredInstanceOfKnownGlobal* deferred;
2803 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2805 Label done, false_result;
2806 Register object = ToRegister(instr->value());
2807 Register temp = ToRegister(instr->temp());
2808 Register result = ToRegister(instr->result());
2810 // A Smi is not instance of anything.
2811 __ JumpIfSmi(object, &false_result);
2813 // This is the inlined call site instanceof cache. The two occurences of the
2814 // hole value will be patched to the last map/result pair generated by the
2817 Register map = temp;
2818 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2820 // Block constant pool emission to ensure the positions of instructions are
2821 // as expected by the patcher. See InstanceofStub::Generate().
2822 Assembler::BlockConstPoolScope block_const_pool(masm());
2823 __ bind(deferred->map_check()); // Label for calculating code patching.
2824 // We use Factory::the_hole_value() on purpose instead of loading from the
2825 // root array to force relocation to be able to later patch with
2827 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2828 __ mov(ip, Operand(Handle<Object>(cell)));
2829 __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
2830 __ cmp(map, Operand(ip));
2831 __ b(ne, &cache_miss);
2832 __ bind(deferred->load_bool()); // Label for calculating code patching.
2833 // We use Factory::the_hole_value() on purpose instead of loading from the
2834 // root array to force relocation to be able to later patch
2835 // with true or false.
2836 __ mov(result, Operand(factory()->the_hole_value()));
2840 // The inlined call site cache did not match. Check null and string before
2841 // calling the deferred code.
2842 __ bind(&cache_miss);
2843 // Null is not instance of anything.
2844 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2845 __ cmp(object, Operand(ip));
2846 __ b(eq, &false_result);
2848 // String values is not instance of anything.
2849 Condition is_string = masm_->IsObjectStringType(object, temp);
2850 __ b(is_string, &false_result);
2852 // Go to the deferred code.
2853 __ b(deferred->entry());
2855 __ bind(&false_result);
2856 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2858 // Here result has either true or false. Deferred code also produces true or
2860 __ bind(deferred->exit());
2865 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2868 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2869 flags = static_cast<InstanceofStub::Flags>(
2870 flags | InstanceofStub::kArgsInRegisters);
2871 flags = static_cast<InstanceofStub::Flags>(
2872 flags | InstanceofStub::kCallSiteInlineCheck);
2873 flags = static_cast<InstanceofStub::Flags>(
2874 flags | InstanceofStub::kReturnTrueFalseObject);
2875 InstanceofStub stub(isolate(), flags);
2877 PushSafepointRegistersScope scope(this);
2878 LoadContextFromDeferred(instr->context());
2880 __ Move(InstanceofStub::right(), instr->function());
2882 int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
2883 int additional_delta = (call_size / Assembler::kInstrSize) + 4;
2884 // Make sure that code size is predicable, since we use specific constants
2885 // offsets in the code to find embedded values..
2886 PredictableCodeSizeScope predictable(
2887 masm_, (additional_delta + 1) * Assembler::kInstrSize);
2888 // Make sure we don't emit any additional entries in the constant pool before
2889 // the call to ensure that the CallCodeSize() calculated the correct number of
2890 // instructions for the constant pool load.
2892 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
2893 int map_check_delta =
2894 masm_->InstructionsGeneratedSince(map_check) + additional_delta;
2895 int bool_load_delta =
2896 masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
2897 Label before_push_delta;
2898 __ bind(&before_push_delta);
2899 __ BlockConstPoolFor(additional_delta);
2900 // r5 is used to communicate the offset to the location of the map check.
2901 __ mov(r5, Operand(map_check_delta * kPointerSize));
2902 // r6 is used to communicate the offset to the location of the bool load.
2903 __ mov(r6, Operand(bool_load_delta * kPointerSize));
2904 // The mov above can generate one or two instructions. The delta was
2905 // computed for two instructions, so we need to pad here in case of one
2907 while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
2911 CallCodeGeneric(stub.GetCode(),
2912 RelocInfo::CODE_TARGET,
2914 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2915 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2916 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2917 // Put the result value (r0) into the result register slot and
2918 // restore all registers.
2919 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
2923 void LCodeGen::DoCmpT(LCmpT* instr) {
2924 DCHECK(ToRegister(instr->context()).is(cp));
2925 Token::Value op = instr->op();
2927 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2928 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2929 // This instruction also signals no smi code inlined.
2930 __ cmp(r0, Operand::Zero());
2932 Condition condition = ComputeCompareCondition(op);
2933 __ LoadRoot(ToRegister(instr->result()),
2934 Heap::kTrueValueRootIndex,
2936 __ LoadRoot(ToRegister(instr->result()),
2937 Heap::kFalseValueRootIndex,
2938 NegateCondition(condition));
2942 void LCodeGen::DoReturn(LReturn* instr) {
2943 if (FLAG_trace && info()->IsOptimizing()) {
2944 // Push the return value on the stack as the parameter.
2945 // Runtime::TraceExit returns its parameter in r0. We're leaving the code
2946 // managed by the register allocator and tearing down the frame, it's
2947 // safe to write to the context register.
2949 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2950 __ CallRuntime(Runtime::kTraceExit, 1);
2952 if (info()->saves_caller_doubles()) {
2953 RestoreCallerDoubles();
2955 int no_frame_start = -1;
2956 if (NeedsEagerFrame()) {
2957 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2959 { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
2960 if (instr->has_constant_parameter_count()) {
2961 int parameter_count = ToInteger32(instr->constant_parameter_count());
2962 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2963 if (sp_delta != 0) {
2964 __ add(sp, sp, Operand(sp_delta));
2967 Register reg = ToRegister(instr->parameter_count());
2968 // The argument count parameter is a smi
2970 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2975 if (no_frame_start != -1) {
2976 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2982 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2983 Register result = ToRegister(instr->result());
2984 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2985 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
2986 if (instr->hydrogen()->RequiresHoleCheck()) {
2987 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2989 DeoptimizeIf(eq, instr);
2995 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2996 DCHECK(FLAG_vector_ics);
2997 Register vector = ToRegister(instr->temp_vector());
2998 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
2999 __ Move(vector, instr->hydrogen()->feedback_vector());
3000 // No need to allocate this register.
3001 DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
3002 __ mov(VectorLoadICDescriptor::SlotRegister(),
3003 Operand(Smi::FromInt(instr->hydrogen()->slot())));
3007 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3008 DCHECK(ToRegister(instr->context()).is(cp));
3009 DCHECK(ToRegister(instr->global_object())
3010 .is(LoadDescriptor::ReceiverRegister()));
3011 DCHECK(ToRegister(instr->result()).is(r0));
3013 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3014 if (FLAG_vector_ics) {
3015 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3017 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3018 Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
3019 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3023 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3024 Register value = ToRegister(instr->value());
3025 Register cell = scratch0();
3028 __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
3030 // If the cell we are storing to contains the hole it could have
3031 // been deleted from the property dictionary. In that case, we need
3032 // to update the property details in the property dictionary to mark
3033 // it as no longer deleted.
3034 if (instr->hydrogen()->RequiresHoleCheck()) {
3035 // We use a temp to check the payload (CompareRoot might clobber ip).
3036 Register payload = ToRegister(instr->temp());
3037 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
3038 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
3039 DeoptimizeIf(eq, instr);
3043 __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
3044 // Cells are always rescanned, so no write barrier here.
3048 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3049 Register context = ToRegister(instr->context());
3050 Register result = ToRegister(instr->result());
3051 __ ldr(result, ContextOperand(context, instr->slot_index()));
3052 if (instr->hydrogen()->RequiresHoleCheck()) {
3053 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3055 if (instr->hydrogen()->DeoptimizesOnHole()) {
3056 DeoptimizeIf(eq, instr);
3058 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
3064 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3065 Register context = ToRegister(instr->context());
3066 Register value = ToRegister(instr->value());
3067 Register scratch = scratch0();
3068 MemOperand target = ContextOperand(context, instr->slot_index());
3070 Label skip_assignment;
3072 if (instr->hydrogen()->RequiresHoleCheck()) {
3073 __ ldr(scratch, target);
3074 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3075 __ cmp(scratch, ip);
3076 if (instr->hydrogen()->DeoptimizesOnHole()) {
3077 DeoptimizeIf(eq, instr);
3079 __ b(ne, &skip_assignment);
3083 __ str(value, target);
3084 if (instr->hydrogen()->NeedsWriteBarrier()) {
3085 SmiCheck check_needed =
3086 instr->hydrogen()->value()->type().IsHeapObject()
3087 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3088 __ RecordWriteContextSlot(context,
3092 GetLinkRegisterState(),
3094 EMIT_REMEMBERED_SET,
3098 __ bind(&skip_assignment);
3102 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3103 HObjectAccess access = instr->hydrogen()->access();
3104 int offset = access.offset();
3105 Register object = ToRegister(instr->object());
3107 if (access.IsExternalMemory()) {
3108 Register result = ToRegister(instr->result());
3109 MemOperand operand = MemOperand(object, offset);
3110 __ Load(result, operand, access.representation());
3114 if (instr->hydrogen()->representation().IsDouble()) {
3115 DwVfpRegister result = ToDoubleRegister(instr->result());
3116 __ vldr(result, FieldMemOperand(object, offset));
3120 Register result = ToRegister(instr->result());
3121 if (!access.IsInobject()) {
3122 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3125 MemOperand operand = FieldMemOperand(object, offset);
3126 __ Load(result, operand, access.representation());
3130 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3131 DCHECK(ToRegister(instr->context()).is(cp));
3132 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3133 DCHECK(ToRegister(instr->result()).is(r0));
3135 // Name is always in r2.
3136 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3137 if (FLAG_vector_ics) {
3138 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3140 Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3141 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3145 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3146 Register scratch = scratch0();
3147 Register function = ToRegister(instr->function());
3148 Register result = ToRegister(instr->result());
3150 // Get the prototype or initial map from the function.
3152 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3154 // Check that the function has a prototype or an initial map.
3155 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3157 DeoptimizeIf(eq, instr);
3159 // If the function does not have an initial map, we're done.
3161 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3164 // Get the prototype from the initial map.
3165 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3172 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3173 Register result = ToRegister(instr->result());
3174 __ LoadRoot(result, instr->index());
3178 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3179 Register arguments = ToRegister(instr->arguments());
3180 Register result = ToRegister(instr->result());
3181 // There are two words between the frame pointer and the last argument.
3182 // Subtracting from length accounts for one of them add one more.
3183 if (instr->length()->IsConstantOperand()) {
3184 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3185 if (instr->index()->IsConstantOperand()) {
3186 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3187 int index = (const_length - const_index) + 1;
3188 __ ldr(result, MemOperand(arguments, index * kPointerSize));
3190 Register index = ToRegister(instr->index());
3191 __ rsb(result, index, Operand(const_length + 1));
3192 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3194 } else if (instr->index()->IsConstantOperand()) {
3195 Register length = ToRegister(instr->length());
3196 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3197 int loc = const_index - 1;
3199 __ sub(result, length, Operand(loc));
3200 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3202 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3205 Register length = ToRegister(instr->length());
3206 Register index = ToRegister(instr->index());
3207 __ sub(result, length, index);
3208 __ add(result, result, Operand(1));
3209 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3214 void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
3215 Runtime::FunctionId id) {
3216 // TODO(3095996): Get rid of this. For now, we need to make the
3217 // result register contain a valid pointer because it is already
3218 // contained in the register pointer map.
3219 Register reg = ToRegister(instr->result());
3220 __ mov(reg, Operand::Zero());
3222 PushSafepointRegistersScope scope(this);
3223 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3224 __ CallRuntimeSaveDoubles(id);
3225 RecordSafepointWithRegisters(
3226 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
3227 __ sub(r0, r0, Operand(kHeapObjectTag));
3228 __ StoreToSafepointRegisterSlot(r0, reg);
3233 void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
3234 class DeferredSIMD128ToTagged FINAL : public LDeferredCode {
3236 DeferredSIMD128ToTagged(LCodeGen* codegen, LInstruction* instr,
3237 Runtime::FunctionId id)
3238 : LDeferredCode(codegen), instr_(instr), id_(id) { }
3239 virtual void Generate() OVERRIDE {
3240 codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
3242 virtual LInstruction* instr() OVERRIDE { return instr_; }
3244 LInstruction* instr_;
3245 Runtime::FunctionId id_;
3248 // Allocate a SIMD128 object on the heap.
3249 Register reg = ToRegister(instr->result());
3250 Register temp = ToRegister(instr->temp());
3251 Register temp2 = ToRegister(instr->temp2());
3252 Register scratch = scratch0();
3254 DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
3255 this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
3256 __ jmp(deferred->entry());
3257 __ bind(deferred->exit());
3259 // Copy the SIMD128 value from the external array to the heap object.
3260 STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
3261 Register external_pointer = ToRegister(instr->elements());
3262 Register key = no_reg;
3263 ElementsKind elements_kind = instr->elements_kind();
3264 bool key_is_constant = instr->key()->IsConstantOperand();
3265 int constant_key = 0;
3266 if (key_is_constant) {
3267 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3268 if (constant_key & 0xF0000000) {
3269 Abort(kArrayIndexConstantValueTooBig);
3272 key = ToRegister(instr->key());
3274 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3275 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3276 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3277 int base_offset = instr->base_offset();
3278 Operand operand = key_is_constant
3279 ? Operand(constant_key << element_size_shift)
3280 : Operand(key, LSL, shift_size);
3282 __ add(scratch, external_pointer, operand);
3284 // Load the inner FixedTypedArray.
3285 __ ldr(temp2, MemOperand(reg, T::kValueOffset));
3287 for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
3288 __ ldr(temp, MemOperand(scratch, base_offset + offset));
3293 FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
3296 // Now that we have finished with the object's real address tag it
3297 __ add(reg, reg, Operand(kHeapObjectTag));
3301 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3302 Register external_pointer = ToRegister(instr->elements());
3303 Register key = no_reg;
3304 ElementsKind elements_kind = instr->elements_kind();
3305 bool key_is_constant = instr->key()->IsConstantOperand();
3306 int constant_key = 0;
3307 if (key_is_constant) {
3308 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3309 if (constant_key & 0xF0000000) {
3310 Abort(kArrayIndexConstantValueTooBig);
3313 key = ToRegister(instr->key());
3315 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3316 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3317 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3318 int base_offset = instr->base_offset();
3320 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3321 elements_kind == FLOAT32_ELEMENTS ||
3322 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3323 elements_kind == FLOAT64_ELEMENTS) {
3324 int base_offset = instr->base_offset();
3325 DwVfpRegister result = ToDoubleRegister(instr->result());
3326 Operand operand = key_is_constant
3327 ? Operand(constant_key << element_size_shift)
3328 : Operand(key, LSL, shift_size);
3329 __ add(scratch0(), external_pointer, operand);
3330 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3331 elements_kind == FLOAT32_ELEMENTS) {
3332 __ vldr(double_scratch0().low(), scratch0(), base_offset);
3333 __ vcvt_f64_f32(result, double_scratch0().low());
3334 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3335 __ vldr(result, scratch0(), base_offset);
3337 } else if (IsFloat32x4ElementsKind(elements_kind)) {
3338 DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
3339 } else if (IsFloat64x2ElementsKind(elements_kind)) {
3340 DoLoadKeyedSIMD128ExternalArray<Float64x2>(instr);
3341 } else if (IsInt32x4ElementsKind(elements_kind)) {
3342 DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
3344 Register result = ToRegister(instr->result());
3345 MemOperand mem_operand = PrepareKeyedOperand(
3346 key, external_pointer, key_is_constant, constant_key,
3347 element_size_shift, shift_size, base_offset);
3348 switch (elements_kind) {
3349 case EXTERNAL_INT8_ELEMENTS:
3351 __ ldrsb(result, mem_operand);
3353 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3354 case EXTERNAL_UINT8_ELEMENTS:
3355 case UINT8_ELEMENTS:
3356 case UINT8_CLAMPED_ELEMENTS:
3357 __ ldrb(result, mem_operand);
3359 case EXTERNAL_INT16_ELEMENTS:
3360 case INT16_ELEMENTS:
3361 __ ldrsh(result, mem_operand);
3363 case EXTERNAL_UINT16_ELEMENTS:
3364 case UINT16_ELEMENTS:
3365 __ ldrh(result, mem_operand);
3367 case EXTERNAL_INT32_ELEMENTS:
3368 case INT32_ELEMENTS:
3369 __ ldr(result, mem_operand);
3371 case EXTERNAL_UINT32_ELEMENTS:
3372 case UINT32_ELEMENTS:
3373 __ ldr(result, mem_operand);
3374 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3375 __ cmp(result, Operand(0x80000000));
3376 DeoptimizeIf(cs, instr);
3379 case FLOAT32_ELEMENTS:
3380 case FLOAT64_ELEMENTS:
3381 case EXTERNAL_FLOAT32_ELEMENTS:
3382 case EXTERNAL_FLOAT64_ELEMENTS:
3383 case FLOAT32x4_ELEMENTS:
3384 case FLOAT64x2_ELEMENTS:
3385 case INT32x4_ELEMENTS:
3386 case EXTERNAL_FLOAT32x4_ELEMENTS:
3387 case EXTERNAL_FLOAT64x2_ELEMENTS:
3388 case EXTERNAL_INT32x4_ELEMENTS:
3389 case FAST_HOLEY_DOUBLE_ELEMENTS:
3390 case FAST_HOLEY_ELEMENTS:
3391 case FAST_HOLEY_SMI_ELEMENTS:
3392 case FAST_DOUBLE_ELEMENTS:
3394 case FAST_SMI_ELEMENTS:
3395 case DICTIONARY_ELEMENTS:
3396 case SLOPPY_ARGUMENTS_ELEMENTS:
3404 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3405 Register elements = ToRegister(instr->elements());
3406 bool key_is_constant = instr->key()->IsConstantOperand();
3407 Register key = no_reg;
3408 DwVfpRegister result = ToDoubleRegister(instr->result());
3409 Register scratch = scratch0();
3411 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3413 int base_offset = instr->base_offset();
3414 if (key_is_constant) {
3415 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3416 if (constant_key & 0xF0000000) {
3417 Abort(kArrayIndexConstantValueTooBig);
3419 base_offset += constant_key * kDoubleSize;
3421 __ add(scratch, elements, Operand(base_offset));
3423 if (!key_is_constant) {
3424 key = ToRegister(instr->key());
3425 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3426 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3427 __ add(scratch, scratch, Operand(key, LSL, shift_size));
3430 __ vldr(result, scratch, 0);
3432 if (instr->hydrogen()->RequiresHoleCheck()) {
3433 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3434 __ cmp(scratch, Operand(kHoleNanUpper32));
3435 DeoptimizeIf(eq, instr);
3440 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3441 Register elements = ToRegister(instr->elements());
3442 Register result = ToRegister(instr->result());
3443 Register scratch = scratch0();
3444 Register store_base = scratch;
3445 int offset = instr->base_offset();
3447 if (instr->key()->IsConstantOperand()) {
3448 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3449 offset += ToInteger32(const_operand) * kPointerSize;
3450 store_base = elements;
3452 Register key = ToRegister(instr->key());
3453 // Even though the HLoadKeyed instruction forces the input
3454 // representation for the key to be an integer, the input gets replaced
3455 // during bound check elimination with the index argument to the bounds
3456 // check, which can be tagged, so that case must be handled here, too.
3457 if (instr->hydrogen()->key()->representation().IsSmi()) {
3458 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
3460 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3463 __ ldr(result, MemOperand(store_base, offset));
3465 // Check for the hole value.
3466 if (instr->hydrogen()->RequiresHoleCheck()) {
3467 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3469 DeoptimizeIf(ne, instr);
3471 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3472 __ cmp(result, scratch);
3473 DeoptimizeIf(eq, instr);
3479 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3480 if (instr->is_typed_elements()) {
3481 DoLoadKeyedExternalArray(instr);
3482 } else if (instr->hydrogen()->representation().IsDouble()) {
3483 DoLoadKeyedFixedDoubleArray(instr);
3485 DoLoadKeyedFixedArray(instr);
3490 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3492 bool key_is_constant,
3497 if (key_is_constant) {
3498 return MemOperand(base, (constant_key << element_size) + base_offset);
3501 if (base_offset == 0) {
3502 if (shift_size >= 0) {
3503 return MemOperand(base, key, LSL, shift_size);
3505 DCHECK_EQ(-1, shift_size);
3506 return MemOperand(base, key, LSR, 1);
3510 if (shift_size >= 0) {
3511 __ add(scratch0(), base, Operand(key, LSL, shift_size));
3512 return MemOperand(scratch0(), base_offset);
3514 DCHECK_EQ(-1, shift_size);
3515 __ add(scratch0(), base, Operand(key, ASR, 1));
3516 return MemOperand(scratch0(), base_offset);
3521 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3522 DCHECK(ToRegister(instr->context()).is(cp));
3523 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3524 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3526 if (FLAG_vector_ics) {
3527 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3530 Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3531 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3535 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3536 Register scratch = scratch0();
3537 Register result = ToRegister(instr->result());
3539 if (instr->hydrogen()->from_inlined()) {
3540 __ sub(result, sp, Operand(2 * kPointerSize));
3542 // Check if the calling frame is an arguments adaptor frame.
3543 Label done, adapted;
3544 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3545 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3546 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3548 // Result is the frame pointer for the frame if not adapted and for the real
3549 // frame below the adaptor frame if adapted.
3550 __ mov(result, fp, LeaveCC, ne);
3551 __ mov(result, scratch, LeaveCC, eq);
3556 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3557 Register elem = ToRegister(instr->elements());
3558 Register result = ToRegister(instr->result());
3562 // If no arguments adaptor frame the number of arguments is fixed.
3564 __ mov(result, Operand(scope()->num_parameters()));
3567 // Arguments adaptor frame present. Get argument length from there.
3568 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3570 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3571 __ SmiUntag(result);
3573 // Argument length is in result register.
3578 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3579 Register receiver = ToRegister(instr->receiver());
3580 Register function = ToRegister(instr->function());
3581 Register result = ToRegister(instr->result());
3582 Register scratch = scratch0();
3584 // If the receiver is null or undefined, we have to pass the global
3585 // object as a receiver to normal functions. Values have to be
3586 // passed unchanged to builtins and strict-mode functions.
3587 Label global_object, result_in_receiver;
3589 if (!instr->hydrogen()->known_function()) {
3590 // Do not transform the receiver to object for strict mode
3593 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3595 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3596 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3597 __ tst(scratch, Operand(mask));
3598 __ b(ne, &result_in_receiver);
3600 // Do not transform the receiver to object for builtins.
3601 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3602 __ b(ne, &result_in_receiver);
3605 // Normal function. Replace undefined or null with global receiver.
3606 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3607 __ cmp(receiver, scratch);
3608 __ b(eq, &global_object);
3609 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3610 __ cmp(receiver, scratch);
3611 __ b(eq, &global_object);
3613 // Deoptimize if the receiver is not a JS object.
3614 __ SmiTst(receiver);
3615 DeoptimizeIf(eq, instr);
3616 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3617 DeoptimizeIf(lt, instr);
3619 __ b(&result_in_receiver);
3620 __ bind(&global_object);
3621 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
3623 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3624 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3626 if (result.is(receiver)) {
3627 __ bind(&result_in_receiver);
3631 __ bind(&result_in_receiver);
3632 __ mov(result, receiver);
3633 __ bind(&result_ok);
3638 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3639 Register receiver = ToRegister(instr->receiver());
3640 Register function = ToRegister(instr->function());
3641 Register length = ToRegister(instr->length());
3642 Register elements = ToRegister(instr->elements());
3643 Register scratch = scratch0();
3644 DCHECK(receiver.is(r0)); // Used for parameter count.
3645 DCHECK(function.is(r1)); // Required by InvokeFunction.
3646 DCHECK(ToRegister(instr->result()).is(r0));
3648 // Copy the arguments to this function possibly from the
3649 // adaptor frame below it.
3650 const uint32_t kArgumentsLimit = 1 * KB;
3651 __ cmp(length, Operand(kArgumentsLimit));
3652 DeoptimizeIf(hi, instr);
3654 // Push the receiver and use the register to keep the original
3655 // number of arguments.
3657 __ mov(receiver, length);
3658 // The arguments are at a one pointer size offset from elements.
3659 __ add(elements, elements, Operand(1 * kPointerSize));
3661 // Loop through the arguments pushing them onto the execution
3664 // length is a small non-negative integer, due to the test above.
3665 __ cmp(length, Operand::Zero());
3668 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3670 __ sub(length, length, Operand(1), SetCC);
3674 DCHECK(instr->HasPointerMap());
3675 LPointerMap* pointers = instr->pointer_map();
3676 SafepointGenerator safepoint_generator(
3677 this, pointers, Safepoint::kLazyDeopt);
3678 // The number of arguments is stored in receiver which is r0, as expected
3679 // by InvokeFunction.
3680 ParameterCount actual(receiver);
3681 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3685 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3686 LOperand* argument = instr->value();
3687 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3688 Abort(kDoPushArgumentNotImplementedForDoubleType);
3690 Register argument_reg = EmitLoadRegister(argument, ip);
3691 __ push(argument_reg);
3696 void LCodeGen::DoDrop(LDrop* instr) {
3697 __ Drop(instr->count());
3701 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3702 Register result = ToRegister(instr->result());
3703 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3707 void LCodeGen::DoContext(LContext* instr) {
3708 // If there is a non-return use, the context must be moved to a register.
3709 Register result = ToRegister(instr->result());
3710 if (info()->IsOptimizing()) {
3711 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3713 // If there is no frame, the context must be in cp.
3714 DCHECK(result.is(cp));
3719 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3720 DCHECK(ToRegister(instr->context()).is(cp));
3721 __ push(cp); // The context is the first argument.
3722 __ Move(scratch0(), instr->hydrogen()->pairs());
3723 __ push(scratch0());
3724 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3725 __ push(scratch0());
3726 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3730 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3731 int formal_parameter_count,
3733 LInstruction* instr,
3735 bool dont_adapt_arguments =
3736 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3737 bool can_invoke_directly =
3738 dont_adapt_arguments || formal_parameter_count == arity;
3740 LPointerMap* pointers = instr->pointer_map();
3742 if (can_invoke_directly) {
3743 if (r1_state == R1_UNINITIALIZED) {
3744 __ Move(r1, function);
3748 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
3750 // Set r0 to arguments count if adaption is not needed. Assumes that r0
3751 // is available to write to at this point.
3752 if (dont_adapt_arguments) {
3753 __ mov(r0, Operand(arity));
3757 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
3760 // Set up deoptimization.
3761 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3763 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3764 ParameterCount count(arity);
3765 ParameterCount expected(formal_parameter_count);
3766 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3771 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3772 DCHECK(instr->context() != NULL);
3773 DCHECK(ToRegister(instr->context()).is(cp));
3774 Register input = ToRegister(instr->value());
3775 Register result = ToRegister(instr->result());
3776 Register scratch = scratch0();
3778 // Deoptimize if not a heap number.
3779 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3780 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3781 __ cmp(scratch, Operand(ip));
3782 DeoptimizeIf(ne, instr);
3785 Register exponent = scratch0();
3787 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3788 // Check the sign of the argument. If the argument is positive, just
3790 __ tst(exponent, Operand(HeapNumber::kSignMask));
3791 // Move the input to the result if necessary.
3792 __ Move(result, input);
3795 // Input is negative. Reverse its sign.
3796 // Preserve the value of all registers.
3798 PushSafepointRegistersScope scope(this);
3800 // Registers were saved at the safepoint, so we can use
3801 // many scratch registers.
3802 Register tmp1 = input.is(r1) ? r0 : r1;
3803 Register tmp2 = input.is(r2) ? r0 : r2;
3804 Register tmp3 = input.is(r3) ? r0 : r3;
3805 Register tmp4 = input.is(r4) ? r0 : r4;
3807 // exponent: floating point exponent value.
3809 Label allocated, slow;
3810 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3811 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3814 // Slow case: Call the runtime system to do the number allocation.
3817 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3819 // Set the pointer to the new heap number in tmp.
3820 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3821 // Restore input_reg after call to runtime.
3822 __ LoadFromSafepointRegisterSlot(input, input);
3823 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3825 __ bind(&allocated);
3826 // exponent: floating point exponent value.
3827 // tmp1: allocated heap number.
3828 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3829 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3830 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3831 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3833 __ StoreToSafepointRegisterSlot(tmp1, result);
3840 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3841 Register input = ToRegister(instr->value());
3842 Register result = ToRegister(instr->result());
3843 __ cmp(input, Operand::Zero());
3844 __ Move(result, input, pl);
3845 // We can make rsb conditional because the previous cmp instruction
3846 // will clear the V (overflow) flag and rsb won't set this flag
3847 // if input is positive.
3848 __ rsb(result, input, Operand::Zero(), SetCC, mi);
3849 // Deoptimize on overflow.
3850 DeoptimizeIf(vs, instr);
3854 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3855 // Class for deferred case.
3856 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3858 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3859 : LDeferredCode(codegen), instr_(instr) { }
3860 virtual void Generate() OVERRIDE {
3861 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3863 virtual LInstruction* instr() OVERRIDE { return instr_; }
3868 Representation r = instr->hydrogen()->value()->representation();
3870 DwVfpRegister input = ToDoubleRegister(instr->value());
3871 DwVfpRegister result = ToDoubleRegister(instr->result());
3872 __ vabs(result, input);
3873 } else if (r.IsSmiOrInteger32()) {
3874 EmitIntegerMathAbs(instr);
3876 // Representation is tagged.
3877 DeferredMathAbsTaggedHeapNumber* deferred =
3878 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3879 Register input = ToRegister(instr->value());
3881 __ JumpIfNotSmi(input, deferred->entry());
3882 // If smi, handle it directly.
3883 EmitIntegerMathAbs(instr);
3884 __ bind(deferred->exit());
3889 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3890 DwVfpRegister input = ToDoubleRegister(instr->value());
3891 Register result = ToRegister(instr->result());
3892 Register input_high = scratch0();
3895 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
3896 DeoptimizeIf(al, instr);
3899 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3901 __ cmp(result, Operand::Zero());
3903 __ cmp(input_high, Operand::Zero());
3904 DeoptimizeIf(mi, instr);
3910 void LCodeGen::DoMathRound(LMathRound* instr) {
3911 DwVfpRegister input = ToDoubleRegister(instr->value());
3912 Register result = ToRegister(instr->result());
3913 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3914 DwVfpRegister input_plus_dot_five = double_scratch1;
3915 Register input_high = scratch0();
3916 DwVfpRegister dot_five = double_scratch0();
3917 Label convert, done;
3919 __ Vmov(dot_five, 0.5, scratch0());
3920 __ vabs(double_scratch1, input);
3921 __ VFPCompareAndSetFlags(double_scratch1, dot_five);
3922 // If input is in [-0.5, -0], the result is -0.
3923 // If input is in [+0, +0.5[, the result is +0.
3924 // If the input is +0.5, the result is 1.
3925 __ b(hi, &convert); // Out of [-0.5, +0.5].
3926 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3927 __ VmovHigh(input_high, input);
3928 __ cmp(input_high, Operand::Zero());
3929 DeoptimizeIf(mi, instr); // [-0.5, -0].
3931 __ VFPCompareAndSetFlags(input, dot_five);
3932 __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
3933 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3934 // flag kBailoutOnMinusZero.
3935 __ mov(result, Operand::Zero(), LeaveCC, ne);
3939 __ vadd(input_plus_dot_five, input, dot_five);
3940 // Reuse dot_five (double_scratch0) as we no longer need this value.
3941 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
3943 DeoptimizeIf(al, instr);
3948 void LCodeGen::DoMathFround(LMathFround* instr) {
3949 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
3950 DwVfpRegister output_reg = ToDoubleRegister(instr->result());
3951 LowDwVfpRegister scratch = double_scratch0();
3952 __ vcvt_f32_f64(scratch.low(), input_reg);
3953 __ vcvt_f64_f32(output_reg, scratch.low());
3957 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3958 DwVfpRegister input = ToDoubleRegister(instr->value());
3959 DwVfpRegister result = ToDoubleRegister(instr->result());
3960 __ vsqrt(result, input);
3964 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3965 DwVfpRegister input = ToDoubleRegister(instr->value());
3966 DwVfpRegister result = ToDoubleRegister(instr->result());
3967 DwVfpRegister temp = double_scratch0();
3969 // Note that according to ECMA-262 15.8.2.13:
3970 // Math.pow(-Infinity, 0.5) == Infinity
3971 // Math.sqrt(-Infinity) == NaN
3973 __ vmov(temp, -V8_INFINITY, scratch0());
3974 __ VFPCompareAndSetFlags(input, temp);
3975 __ vneg(result, temp, eq);
3978 // Add +0 to convert -0 to +0.
3979 __ vadd(result, input, kDoubleRegZero);
3980 __ vsqrt(result, result);
3985 void LCodeGen::DoPower(LPower* instr) {
3986 Representation exponent_type = instr->hydrogen()->right()->representation();
3987 // Having marked this as a call, we can use any registers.
3988 // Just make sure that the input/output registers are the expected ones.
3989 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3990 DCHECK(!instr->right()->IsDoubleRegister() ||
3991 ToDoubleRegister(instr->right()).is(d1));
3992 DCHECK(!instr->right()->IsRegister() ||
3993 ToRegister(instr->right()).is(tagged_exponent));
3994 DCHECK(ToDoubleRegister(instr->left()).is(d0));
3995 DCHECK(ToDoubleRegister(instr->result()).is(d2));
3997 if (exponent_type.IsSmi()) {
3998 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4000 } else if (exponent_type.IsTagged()) {
4002 __ JumpIfSmi(tagged_exponent, &no_deopt);
4003 DCHECK(!r6.is(tagged_exponent));
4004 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
4005 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4006 __ cmp(r6, Operand(ip));
4007 DeoptimizeIf(ne, instr);
4009 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4011 } else if (exponent_type.IsInteger32()) {
4012 MathPowStub stub(isolate(), MathPowStub::INTEGER);
4015 DCHECK(exponent_type.IsDouble());
4016 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
4022 void LCodeGen::DoMathExp(LMathExp* instr) {
4023 DwVfpRegister input = ToDoubleRegister(instr->value());
4024 DwVfpRegister result = ToDoubleRegister(instr->result());
4025 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
4026 DwVfpRegister double_scratch2 = double_scratch0();
4027 Register temp1 = ToRegister(instr->temp1());
4028 Register temp2 = ToRegister(instr->temp2());
4030 MathExpGenerator::EmitMathExp(
4031 masm(), input, result, double_scratch1, double_scratch2,
4032 temp1, temp2, scratch0());
4036 void LCodeGen::DoMathLog(LMathLog* instr) {
4037 __ PrepareCallCFunction(0, 1, scratch0());
4038 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
4039 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
4041 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
4045 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4046 Register input = ToRegister(instr->value());
4047 Register result = ToRegister(instr->result());
4048 __ clz(result, input);
4052 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4053 DCHECK(ToRegister(instr->context()).is(cp));
4054 DCHECK(ToRegister(instr->function()).is(r1));
4055 DCHECK(instr->HasPointerMap());
4057 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4058 if (known_function.is_null()) {
4059 LPointerMap* pointers = instr->pointer_map();
4060 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4061 ParameterCount count(instr->arity());
4062 __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
4064 CallKnownFunction(known_function,
4065 instr->hydrogen()->formal_parameter_count(),
4068 R1_CONTAINS_TARGET);
4073 void LCodeGen::DoTailCallThroughMegamorphicCache(
4074 LTailCallThroughMegamorphicCache* instr) {
4075 Register receiver = ToRegister(instr->receiver());
4076 Register name = ToRegister(instr->name());
4077 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
4078 DCHECK(name.is(LoadDescriptor::NameRegister()));
4079 DCHECK(receiver.is(r1));
4080 DCHECK(name.is(r2));
4082 Register scratch = r3;
4083 Register extra = r4;
4084 Register extra2 = r5;
4085 Register extra3 = r6;
4087 // Important for the tail-call.
4088 bool must_teardown_frame = NeedsEagerFrame();
4090 // The probe will tail call to a handler if found.
4091 isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
4092 must_teardown_frame, receiver, name,
4093 scratch, extra, extra2, extra3);
4095 // Tail call to miss if we ended up here.
4096 if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
4097 LoadIC::GenerateMiss(masm());
4101 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
4102 DCHECK(ToRegister(instr->result()).is(r0));
4104 LPointerMap* pointers = instr->pointer_map();
4105 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4107 if (instr->target()->IsConstantOperand()) {
4108 LConstantOperand* target = LConstantOperand::cast(instr->target());
4109 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
4110 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
4111 PlatformInterfaceDescriptor* call_descriptor =
4112 instr->descriptor().platform_specific_descriptor();
4113 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
4114 call_descriptor->storage_mode());
4116 DCHECK(instr->target()->IsRegister());
4117 Register target = ToRegister(instr->target());
4118 generator.BeforeCall(__ CallSize(target));
4119 // Make sure we don't emit any additional entries in the constant pool
4120 // before the call to ensure that the CallCodeSize() calculated the correct
4121 // number of instructions for the constant pool load.
4123 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
4124 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4128 generator.AfterCall();
4132 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4133 DCHECK(ToRegister(instr->function()).is(r1));
4134 DCHECK(ToRegister(instr->result()).is(r0));
4136 if (instr->hydrogen()->pass_argument_count()) {
4137 __ mov(r0, Operand(instr->arity()));
4141 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
4143 // Load the code entry address
4144 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
4147 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4151 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4152 DCHECK(ToRegister(instr->context()).is(cp));
4153 DCHECK(ToRegister(instr->function()).is(r1));
4154 DCHECK(ToRegister(instr->result()).is(r0));
4156 int arity = instr->arity();
4157 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4158 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4162 void LCodeGen::DoCallNew(LCallNew* instr) {
4163 DCHECK(ToRegister(instr->context()).is(cp));
4164 DCHECK(ToRegister(instr->constructor()).is(r1));
4165 DCHECK(ToRegister(instr->result()).is(r0));
4167 __ mov(r0, Operand(instr->arity()));
4168 // No cell in r2 for construct type feedback in optimized code
4169 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4170 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4171 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4175 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4176 DCHECK(ToRegister(instr->context()).is(cp));
4177 DCHECK(ToRegister(instr->constructor()).is(r1));
4178 DCHECK(ToRegister(instr->result()).is(r0));
4180 __ mov(r0, Operand(instr->arity()));
4181 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4182 ElementsKind kind = instr->hydrogen()->elements_kind();
4183 AllocationSiteOverrideMode override_mode =
4184 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4185 ? DISABLE_ALLOCATION_SITES
4188 if (instr->arity() == 0) {
4189 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4190 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4191 } else if (instr->arity() == 1) {
4193 if (IsFastPackedElementsKind(kind)) {
4195 // We might need a change here
4196 // look at the first argument
4197 __ ldr(r5, MemOperand(sp, 0));
4198 __ cmp(r5, Operand::Zero());
4199 __ b(eq, &packed_case);
4201 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4202 ArraySingleArgumentConstructorStub stub(isolate(),
4205 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4207 __ bind(&packed_case);
4210 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4211 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4214 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4215 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4220 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4221 CallRuntime(instr->function(), instr->arity(), instr);
4225 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4226 Register function = ToRegister(instr->function());
4227 Register code_object = ToRegister(instr->code_object());
4228 __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
4230 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4234 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4235 Register result = ToRegister(instr->result());
4236 Register base = ToRegister(instr->base_object());
4237 if (instr->offset()->IsConstantOperand()) {
4238 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4239 __ add(result, base, Operand(ToInteger32(offset)));
4241 Register offset = ToRegister(instr->offset());
4242 __ add(result, base, offset);
4247 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4248 Representation representation = instr->representation();
4250 Register object = ToRegister(instr->object());
4251 Register scratch = scratch0();
4252 HObjectAccess access = instr->hydrogen()->access();
4253 int offset = access.offset();
4255 if (access.IsExternalMemory()) {
4256 Register value = ToRegister(instr->value());
4257 MemOperand operand = MemOperand(object, offset);
4258 __ Store(value, operand, representation);
4262 __ AssertNotSmi(object);
4264 DCHECK(!representation.IsSmi() ||
4265 !instr->value()->IsConstantOperand() ||
4266 IsSmi(LConstantOperand::cast(instr->value())));
4267 if (representation.IsDouble()) {
4268 DCHECK(access.IsInobject());
4269 DCHECK(!instr->hydrogen()->has_transition());
4270 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4271 DwVfpRegister value = ToDoubleRegister(instr->value());
4272 __ vstr(value, FieldMemOperand(object, offset));
4276 if (instr->hydrogen()->has_transition()) {
4277 Handle<Map> transition = instr->hydrogen()->transition_map();
4278 AddDeprecationDependency(transition);
4279 __ mov(scratch, Operand(transition));
4280 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4281 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4282 Register temp = ToRegister(instr->temp());
4283 // Update the write barrier for the map field.
4284 __ RecordWriteForMap(object,
4287 GetLinkRegisterState(),
4293 Register value = ToRegister(instr->value());
4294 if (access.IsInobject()) {
4295 MemOperand operand = FieldMemOperand(object, offset);
4296 __ Store(value, operand, representation);
4297 if (instr->hydrogen()->NeedsWriteBarrier()) {
4298 // Update the write barrier for the object for in-object properties.
4299 __ RecordWriteField(object,
4303 GetLinkRegisterState(),
4305 EMIT_REMEMBERED_SET,
4306 instr->hydrogen()->SmiCheckForWriteBarrier(),
4307 instr->hydrogen()->PointersToHereCheckForValue());
4310 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4311 MemOperand operand = FieldMemOperand(scratch, offset);
4312 __ Store(value, operand, representation);
4313 if (instr->hydrogen()->NeedsWriteBarrier()) {
4314 // Update the write barrier for the properties array.
4315 // object is used as a scratch register.
4316 __ RecordWriteField(scratch,
4320 GetLinkRegisterState(),
4322 EMIT_REMEMBERED_SET,
4323 instr->hydrogen()->SmiCheckForWriteBarrier(),
4324 instr->hydrogen()->PointersToHereCheckForValue());
4330 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4331 DCHECK(ToRegister(instr->context()).is(cp));
4332 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4333 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4335 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
4336 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4337 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4341 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4342 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
4343 if (instr->index()->IsConstantOperand()) {
4344 Operand index = ToOperand(instr->index());
4345 Register length = ToRegister(instr->length());
4346 __ cmp(length, index);
4347 cc = CommuteCondition(cc);
4349 Register index = ToRegister(instr->index());
4350 Operand length = ToOperand(instr->length());
4351 __ cmp(index, length);
4353 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4355 __ b(NegateCondition(cc), &done);
4356 __ stop("eliminated bounds check failed");
4359 DeoptimizeIf(cc, instr);
4365 void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
4366 DCHECK(instr->value()->IsRegister());
4367 Register temp = ToRegister(instr->temp());
4368 Register temp2 = ToRegister(instr->temp2());
4369 Register input_reg = ToRegister(instr->value());
4370 __ SmiTst(input_reg);
4371 DeoptimizeIf(eq, instr);
4372 __ CompareObjectType(input_reg, temp, no_reg, T::kInstanceType);
4373 DeoptimizeIf(ne, instr);
4375 STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
4376 Register external_pointer = ToRegister(instr->elements());
4377 Register key = no_reg;
4378 ElementsKind elements_kind = instr->elements_kind();
4379 bool key_is_constant = instr->key()->IsConstantOperand();
4380 int constant_key = 0;
4381 if (key_is_constant) {
4382 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4383 if (constant_key & 0xF0000000) {
4384 Abort(kArrayIndexConstantValueTooBig);
4387 key = ToRegister(instr->key());
4389 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4390 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4391 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4392 int base_offset = instr->base_offset();
4393 Register address = scratch0();
4394 if (key_is_constant) {
4395 if (constant_key != 0) {
4396 __ add(address, external_pointer,
4397 Operand(constant_key << element_size_shift));
4399 address = external_pointer;
4402 __ add(address, external_pointer, Operand(key, LSL, shift_size));
4405 // Load the inner FixedTypedArray.
4406 __ ldr(temp2, MemOperand(input_reg, T::kValueOffset - kHeapObjectTag));
4408 for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
4409 __ ldr(temp, MemOperand(temp2,
4410 FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
4411 __ str(temp, MemOperand(address, base_offset + offset));
4416 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4417 Register external_pointer = ToRegister(instr->elements());
4418 Register key = no_reg;
4419 ElementsKind elements_kind = instr->elements_kind();
4420 bool key_is_constant = instr->key()->IsConstantOperand();
4421 int constant_key = 0;
4422 if (key_is_constant) {
4423 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4424 if (constant_key & 0xF0000000) {
4425 Abort(kArrayIndexConstantValueTooBig);
4428 key = ToRegister(instr->key());
4430 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4431 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4432 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4433 int base_offset = instr->base_offset();
4435 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4436 elements_kind == FLOAT32_ELEMENTS ||
4437 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4438 elements_kind == FLOAT64_ELEMENTS) {
4439 Register address = scratch0();
4440 DwVfpRegister value(ToDoubleRegister(instr->value()));
4441 if (key_is_constant) {
4442 if (constant_key != 0) {
4443 __ add(address, external_pointer,
4444 Operand(constant_key << element_size_shift));
4446 address = external_pointer;
4449 __ add(address, external_pointer, Operand(key, LSL, shift_size));
4451 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4452 elements_kind == FLOAT32_ELEMENTS) {
4453 __ vcvt_f32_f64(double_scratch0().low(), value);
4454 __ vstr(double_scratch0().low(), address, base_offset);
4455 } else { // Storing doubles, not floats.
4456 __ vstr(value, address, base_offset);
4458 } else if (IsFloat32x4ElementsKind(elements_kind)) {
4459 DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
4460 } else if (IsFloat64x2ElementsKind(elements_kind)) {
4461 DoStoreKeyedSIMD128ExternalArray<Float64x2>(instr);
4462 } else if (IsInt32x4ElementsKind(elements_kind)) {
4463 DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
4465 Register value(ToRegister(instr->value()));
4466 MemOperand mem_operand = PrepareKeyedOperand(
4467 key, external_pointer, key_is_constant, constant_key,
4468 element_size_shift, shift_size,
4470 switch (elements_kind) {
4471 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4472 case EXTERNAL_INT8_ELEMENTS:
4473 case EXTERNAL_UINT8_ELEMENTS:
4474 case UINT8_ELEMENTS:
4475 case UINT8_CLAMPED_ELEMENTS:
4477 __ strb(value, mem_operand);
4479 case EXTERNAL_INT16_ELEMENTS:
4480 case EXTERNAL_UINT16_ELEMENTS:
4481 case INT16_ELEMENTS:
4482 case UINT16_ELEMENTS:
4483 __ strh(value, mem_operand);
4485 case EXTERNAL_INT32_ELEMENTS:
4486 case EXTERNAL_UINT32_ELEMENTS:
4487 case INT32_ELEMENTS:
4488 case UINT32_ELEMENTS:
4489 __ str(value, mem_operand);
4491 case FLOAT32_ELEMENTS:
4492 case FLOAT64_ELEMENTS:
4493 case EXTERNAL_FLOAT32_ELEMENTS:
4494 case EXTERNAL_FLOAT64_ELEMENTS:
4495 case FLOAT32x4_ELEMENTS:
4496 case FLOAT64x2_ELEMENTS:
4497 case INT32x4_ELEMENTS:
4498 case EXTERNAL_FLOAT32x4_ELEMENTS:
4499 case EXTERNAL_FLOAT64x2_ELEMENTS:
4500 case EXTERNAL_INT32x4_ELEMENTS:
4501 case FAST_DOUBLE_ELEMENTS:
4503 case FAST_SMI_ELEMENTS:
4504 case FAST_HOLEY_DOUBLE_ELEMENTS:
4505 case FAST_HOLEY_ELEMENTS:
4506 case FAST_HOLEY_SMI_ELEMENTS:
4507 case DICTIONARY_ELEMENTS:
4508 case SLOPPY_ARGUMENTS_ELEMENTS:
4516 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4517 DwVfpRegister value = ToDoubleRegister(instr->value());
4518 Register elements = ToRegister(instr->elements());
4519 Register scratch = scratch0();
4520 DwVfpRegister double_scratch = double_scratch0();
4521 bool key_is_constant = instr->key()->IsConstantOperand();
4522 int base_offset = instr->base_offset();
4524 // Calculate the effective address of the slot in the array to store the
4526 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4527 if (key_is_constant) {
4528 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4529 if (constant_key & 0xF0000000) {
4530 Abort(kArrayIndexConstantValueTooBig);
4532 __ add(scratch, elements,
4533 Operand((constant_key << element_size_shift) + base_offset));
4535 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4536 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4537 __ add(scratch, elements, Operand(base_offset));
4538 __ add(scratch, scratch,
4539 Operand(ToRegister(instr->key()), LSL, shift_size));
4542 if (instr->NeedsCanonicalization()) {
4543 // Force a canonical NaN.
4544 if (masm()->emit_debug_code()) {
4546 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
4547 __ Assert(ne, kDefaultNaNModeNotSet);
4549 __ VFPCanonicalizeNaN(double_scratch, value);
4550 __ vstr(double_scratch, scratch, 0);
4552 __ vstr(value, scratch, 0);
4557 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4558 Register value = ToRegister(instr->value());
4559 Register elements = ToRegister(instr->elements());
4560 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4562 Register scratch = scratch0();
4563 Register store_base = scratch;
4564 int offset = instr->base_offset();
4567 if (instr->key()->IsConstantOperand()) {
4568 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4569 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4570 offset += ToInteger32(const_operand) * kPointerSize;
4571 store_base = elements;
4573 // Even though the HLoadKeyed instruction forces the input
4574 // representation for the key to be an integer, the input gets replaced
4575 // during bound check elimination with the index argument to the bounds
4576 // check, which can be tagged, so that case must be handled here, too.
4577 if (instr->hydrogen()->key()->representation().IsSmi()) {
4578 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
4580 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4583 __ str(value, MemOperand(store_base, offset));
4585 if (instr->hydrogen()->NeedsWriteBarrier()) {
4586 SmiCheck check_needed =
4587 instr->hydrogen()->value()->type().IsHeapObject()
4588 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4589 // Compute address of modified element and store it into key register.
4590 __ add(key, store_base, Operand(offset));
4591 __ RecordWrite(elements,
4594 GetLinkRegisterState(),
4596 EMIT_REMEMBERED_SET,
4598 instr->hydrogen()->PointersToHereCheckForValue());
4603 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4604 // By cases: external, fast double
4605 if (instr->is_typed_elements()) {
4606 DoStoreKeyedExternalArray(instr);
4607 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4608 DoStoreKeyedFixedDoubleArray(instr);
4610 DoStoreKeyedFixedArray(instr);
4615 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4616 DCHECK(ToRegister(instr->context()).is(cp));
4617 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4618 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4619 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4622 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4623 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4627 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4628 Register object_reg = ToRegister(instr->object());
4629 Register scratch = scratch0();
4631 Handle<Map> from_map = instr->original_map();
4632 Handle<Map> to_map = instr->transitioned_map();
4633 ElementsKind from_kind = instr->from_kind();
4634 ElementsKind to_kind = instr->to_kind();
4636 Label not_applicable;
4637 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4638 __ cmp(scratch, Operand(from_map));
4639 __ b(ne, ¬_applicable);
4641 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4642 Register new_map_reg = ToRegister(instr->new_map_temp());
4643 __ mov(new_map_reg, Operand(to_map));
4644 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4646 __ RecordWriteForMap(object_reg,
4649 GetLinkRegisterState(),
4652 DCHECK(ToRegister(instr->context()).is(cp));
4653 DCHECK(object_reg.is(r0));
4654 PushSafepointRegistersScope scope(this);
4655 __ Move(r1, to_map);
4656 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4657 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4659 RecordSafepointWithRegisters(
4660 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4662 __ bind(¬_applicable);
4666 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4667 Register object = ToRegister(instr->object());
4668 Register temp = ToRegister(instr->temp());
4669 Label no_memento_found;
4670 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4671 DeoptimizeIf(eq, instr);
4672 __ bind(&no_memento_found);
4676 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4677 DCHECK(ToRegister(instr->context()).is(cp));
4678 DCHECK(ToRegister(instr->left()).is(r1));
4679 DCHECK(ToRegister(instr->right()).is(r0));
4680 StringAddStub stub(isolate(),
4681 instr->hydrogen()->flags(),
4682 instr->hydrogen()->pretenure_flag());
4683 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4687 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4688 class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4690 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4691 : LDeferredCode(codegen), instr_(instr) { }
4692 virtual void Generate() OVERRIDE {
4693 codegen()->DoDeferredStringCharCodeAt(instr_);
4695 virtual LInstruction* instr() OVERRIDE { return instr_; }
4697 LStringCharCodeAt* instr_;
4700 DeferredStringCharCodeAt* deferred =
4701 new(zone()) DeferredStringCharCodeAt(this, instr);
4703 StringCharLoadGenerator::Generate(masm(),
4704 ToRegister(instr->string()),
4705 ToRegister(instr->index()),
4706 ToRegister(instr->result()),
4708 __ bind(deferred->exit());
4712 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4713 Register string = ToRegister(instr->string());
4714 Register result = ToRegister(instr->result());
4715 Register scratch = scratch0();
4717 // TODO(3095996): Get rid of this. For now, we need to make the
4718 // result register contain a valid pointer because it is already
4719 // contained in the register pointer map.
4720 __ mov(result, Operand::Zero());
4722 PushSafepointRegistersScope scope(this);
4724 // Push the index as a smi. This is safe because of the checks in
4725 // DoStringCharCodeAt above.
4726 if (instr->index()->IsConstantOperand()) {
4727 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4728 __ mov(scratch, Operand(Smi::FromInt(const_index)));
4731 Register index = ToRegister(instr->index());
4735 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4739 __ StoreToSafepointRegisterSlot(r0, result);
4743 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4744 class DeferredStringCharFromCode FINAL : public LDeferredCode {
4746 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4747 : LDeferredCode(codegen), instr_(instr) { }
4748 virtual void Generate() OVERRIDE {
4749 codegen()->DoDeferredStringCharFromCode(instr_);
4751 virtual LInstruction* instr() OVERRIDE { return instr_; }
4753 LStringCharFromCode* instr_;
4756 DeferredStringCharFromCode* deferred =
4757 new(zone()) DeferredStringCharFromCode(this, instr);
4759 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4760 Register char_code = ToRegister(instr->char_code());
4761 Register result = ToRegister(instr->result());
4762 DCHECK(!char_code.is(result));
4764 __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
4765 __ b(hi, deferred->entry());
4766 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4767 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4768 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4769 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4771 __ b(eq, deferred->entry());
4772 __ bind(deferred->exit());
4776 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4777 Register char_code = ToRegister(instr->char_code());
4778 Register result = ToRegister(instr->result());
4780 // TODO(3095996): Get rid of this. For now, we need to make the
4781 // result register contain a valid pointer because it is already
4782 // contained in the register pointer map.
4783 __ mov(result, Operand::Zero());
4785 PushSafepointRegistersScope scope(this);
4786 __ SmiTag(char_code);
4788 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4789 __ StoreToSafepointRegisterSlot(r0, result);
4793 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4794 LOperand* input = instr->value();
4795 DCHECK(input->IsRegister() || input->IsStackSlot());
4796 LOperand* output = instr->result();
4797 DCHECK(output->IsDoubleRegister());
4798 SwVfpRegister single_scratch = double_scratch0().low();
4799 if (input->IsStackSlot()) {
4800 Register scratch = scratch0();
4801 __ ldr(scratch, ToMemOperand(input));
4802 __ vmov(single_scratch, scratch);
4804 __ vmov(single_scratch, ToRegister(input));
4806 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4810 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4811 LOperand* input = instr->value();
4812 LOperand* output = instr->result();
4814 SwVfpRegister flt_scratch = double_scratch0().low();
4815 __ vmov(flt_scratch, ToRegister(input));
4816 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4820 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4821 class DeferredNumberTagI FINAL : public LDeferredCode {
4823 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4824 : LDeferredCode(codegen), instr_(instr) { }
4825 virtual void Generate() OVERRIDE {
4826 codegen()->DoDeferredNumberTagIU(instr_,
4832 virtual LInstruction* instr() OVERRIDE { return instr_; }
4834 LNumberTagI* instr_;
4837 Register src = ToRegister(instr->value());
4838 Register dst = ToRegister(instr->result());
4840 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4841 __ SmiTag(dst, src, SetCC);
4842 __ b(vs, deferred->entry());
4843 __ bind(deferred->exit());
4847 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4848 class DeferredNumberTagU FINAL : public LDeferredCode {
4850 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4851 : LDeferredCode(codegen), instr_(instr) { }
4852 virtual void Generate() OVERRIDE {
4853 codegen()->DoDeferredNumberTagIU(instr_,
4859 virtual LInstruction* instr() OVERRIDE { return instr_; }
4861 LNumberTagU* instr_;
4864 Register input = ToRegister(instr->value());
4865 Register result = ToRegister(instr->result());
4867 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4868 __ cmp(input, Operand(Smi::kMaxValue));
4869 __ b(hi, deferred->entry());
4870 __ SmiTag(result, input);
4871 __ bind(deferred->exit());
4875 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4879 IntegerSignedness signedness) {
4881 Register src = ToRegister(value);
4882 Register dst = ToRegister(instr->result());
4883 Register tmp1 = scratch0();
4884 Register tmp2 = ToRegister(temp1);
4885 Register tmp3 = ToRegister(temp2);
4886 LowDwVfpRegister dbl_scratch = double_scratch0();
4888 if (signedness == SIGNED_INT32) {
4889 // There was overflow, so bits 30 and 31 of the original integer
4890 // disagree. Try to allocate a heap number in new space and store
4891 // the value in there. If that fails, call the runtime system.
4893 __ SmiUntag(src, dst);
4894 __ eor(src, src, Operand(0x80000000));
4896 __ vmov(dbl_scratch.low(), src);
4897 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4899 __ vmov(dbl_scratch.low(), src);
4900 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4903 if (FLAG_inline_new) {
4904 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4905 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4909 // Slow case: Call the runtime system to do the number allocation.
4912 // TODO(3095996): Put a valid pointer value in the stack slot where the
4913 // result register is stored, as this register is in the pointer map, but
4914 // contains an integer value.
4915 __ mov(dst, Operand::Zero());
4917 // Preserve the value of all registers.
4918 PushSafepointRegistersScope scope(this);
4920 // NumberTagI and NumberTagD use the context from the frame, rather than
4921 // the environment's HContext or HInlinedContext value.
4922 // They only call Runtime::kAllocateHeapNumber.
4923 // The corresponding HChange instructions are added in a phase that does
4924 // not have easy access to the local context.
4925 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4926 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4927 RecordSafepointWithRegisters(
4928 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4929 __ sub(r0, r0, Operand(kHeapObjectTag));
4930 __ StoreToSafepointRegisterSlot(r0, dst);
4933 // Done. Put the value in dbl_scratch into the value of the allocated heap
4936 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4937 __ add(dst, dst, Operand(kHeapObjectTag));
4941 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4942 class DeferredNumberTagD FINAL : public LDeferredCode {
4944 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4945 : LDeferredCode(codegen), instr_(instr) { }
4946 virtual void Generate() OVERRIDE {
4947 codegen()->DoDeferredNumberTagD(instr_);
4949 virtual LInstruction* instr() OVERRIDE { return instr_; }
4951 LNumberTagD* instr_;
4954 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4955 Register scratch = scratch0();
4956 Register reg = ToRegister(instr->result());
4957 Register temp1 = ToRegister(instr->temp());
4958 Register temp2 = ToRegister(instr->temp2());
4960 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4961 if (FLAG_inline_new) {
4962 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4963 // We want the untagged address first for performance
4964 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4967 __ jmp(deferred->entry());
4969 __ bind(deferred->exit());
4970 __ vstr(input_reg, reg, HeapNumber::kValueOffset);
4971 // Now that we have finished with the object's real address tag it
4972 __ add(reg, reg, Operand(kHeapObjectTag));
4976 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4977 // TODO(3095996): Get rid of this. For now, we need to make the
4978 // result register contain a valid pointer because it is already
4979 // contained in the register pointer map.
4980 Register reg = ToRegister(instr->result());
4981 __ mov(reg, Operand::Zero());
4983 PushSafepointRegistersScope scope(this);
4984 // NumberTagI and NumberTagD use the context from the frame, rather than
4985 // the environment's HContext or HInlinedContext value.
4986 // They only call Runtime::kAllocateHeapNumber.
4987 // The corresponding HChange instructions are added in a phase that does
4988 // not have easy access to the local context.
4989 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4990 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4991 RecordSafepointWithRegisters(
4992 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4993 __ sub(r0, r0, Operand(kHeapObjectTag));
4994 __ StoreToSafepointRegisterSlot(r0, reg);
4998 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4999 HChange* hchange = instr->hydrogen();
5000 Register input = ToRegister(instr->value());
5001 Register output = ToRegister(instr->result());
5002 if (hchange->CheckFlag(HValue::kCanOverflow) &&
5003 hchange->value()->CheckFlag(HValue::kUint32)) {
5004 __ tst(input, Operand(0xc0000000));
5005 DeoptimizeIf(ne, instr);
5007 if (hchange->CheckFlag(HValue::kCanOverflow) &&
5008 !hchange->value()->CheckFlag(HValue::kUint32)) {
5009 __ SmiTag(output, input, SetCC);
5010 DeoptimizeIf(vs, instr);
5012 __ SmiTag(output, input);
5017 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5018 Register input = ToRegister(instr->value());
5019 Register result = ToRegister(instr->result());
5020 if (instr->needs_check()) {
5021 STATIC_ASSERT(kHeapObjectTag == 1);
5022 // If the input is a HeapObject, SmiUntag will set the carry flag.
5023 __ SmiUntag(result, input, SetCC);
5024 DeoptimizeIf(cs, instr);
5026 __ SmiUntag(result, input);
5031 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
5032 DwVfpRegister result_reg,
5033 NumberUntagDMode mode) {
5034 bool can_convert_undefined_to_nan =
5035 instr->hydrogen()->can_convert_undefined_to_nan();
5036 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
5038 Register scratch = scratch0();
5039 SwVfpRegister flt_scratch = double_scratch0().low();
5040 DCHECK(!result_reg.is(double_scratch0()));
5041 Label convert, load_smi, done;
5042 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5044 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
5045 // Heap number map check.
5046 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5047 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5048 __ cmp(scratch, Operand(ip));
5049 if (can_convert_undefined_to_nan) {
5052 DeoptimizeIf(ne, instr);
5055 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
5056 if (deoptimize_on_minus_zero) {
5057 __ VmovLow(scratch, result_reg);
5058 __ cmp(scratch, Operand::Zero());
5060 __ VmovHigh(scratch, result_reg);
5061 __ cmp(scratch, Operand(HeapNumber::kSignMask));
5062 DeoptimizeIf(eq, instr);
5065 if (can_convert_undefined_to_nan) {
5067 // Convert undefined (and hole) to NaN.
5068 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5069 __ cmp(input_reg, Operand(ip));
5070 DeoptimizeIf(ne, instr);
5071 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
5072 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
5076 __ SmiUntag(scratch, input_reg);
5077 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
5079 // Smi to double register conversion
5081 // scratch: untagged value of input_reg
5082 __ vmov(flt_scratch, scratch);
5083 __ vcvt_f64_s32(result_reg, flt_scratch);
5088 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
5089 Register input_reg = ToRegister(instr->value());
5090 Register scratch1 = scratch0();
5091 Register scratch2 = ToRegister(instr->temp());
5092 LowDwVfpRegister double_scratch = double_scratch0();
5093 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
5095 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
5096 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
5100 // The input was optimistically untagged; revert it.
5101 // The carry flag is set when we reach this deferred code as we just executed
5102 // SmiUntag(heap_object, SetCC)
5103 STATIC_ASSERT(kHeapObjectTag == 1);
5104 __ adc(scratch2, input_reg, Operand(input_reg));
5106 // Heap number map check.
5107 __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
5108 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5109 __ cmp(scratch1, Operand(ip));
5111 if (instr->truncating()) {
5112 // Performs a truncating conversion of a floating point number as used by
5113 // the JS bitwise operations.
5114 Label no_heap_number, check_bools, check_false;
5115 __ b(ne, &no_heap_number);
5116 __ TruncateHeapNumberToI(input_reg, scratch2);
5119 // Check for Oddballs. Undefined/False is converted to zero and True to one
5120 // for truncating conversions.
5121 __ bind(&no_heap_number);
5122 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5123 __ cmp(scratch2, Operand(ip));
5124 __ b(ne, &check_bools);
5125 __ mov(input_reg, Operand::Zero());
5128 __ bind(&check_bools);
5129 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
5130 __ cmp(scratch2, Operand(ip));
5131 __ b(ne, &check_false);
5132 __ mov(input_reg, Operand(1));
5135 __ bind(&check_false);
5136 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
5137 __ cmp(scratch2, Operand(ip));
5138 DeoptimizeIf(ne, instr, "cannot truncate");
5139 __ mov(input_reg, Operand::Zero());
5141 DeoptimizeIf(ne, instr, "not a heap number");
5143 __ sub(ip, scratch2, Operand(kHeapObjectTag));
5144 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
5145 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
5146 DeoptimizeIf(ne, instr, "lost precision or NaN");
5148 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5149 __ cmp(input_reg, Operand::Zero());
5151 __ VmovHigh(scratch1, double_scratch2);
5152 __ tst(scratch1, Operand(HeapNumber::kSignMask));
5153 DeoptimizeIf(ne, instr, "minus zero");
5160 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5161 class DeferredTaggedToI FINAL : public LDeferredCode {
5163 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5164 : LDeferredCode(codegen), instr_(instr) { }
5165 virtual void Generate() OVERRIDE {
5166 codegen()->DoDeferredTaggedToI(instr_);
5168 virtual LInstruction* instr() OVERRIDE { return instr_; }
5173 LOperand* input = instr->value();
5174 DCHECK(input->IsRegister());
5175 DCHECK(input->Equals(instr->result()));
5177 Register input_reg = ToRegister(input);
5179 if (instr->hydrogen()->value()->representation().IsSmi()) {
5180 __ SmiUntag(input_reg);
5182 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5184 // Optimistically untag the input.
5185 // If the input is a HeapObject, SmiUntag will set the carry flag.
5186 __ SmiUntag(input_reg, SetCC);
5187 // Branch to deferred code if the input was tagged.
5188 // The deferred code will take care of restoring the tag.
5189 __ b(cs, deferred->entry());
5190 __ bind(deferred->exit());
5195 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5196 LOperand* input = instr->value();
5197 DCHECK(input->IsRegister());
5198 LOperand* result = instr->result();
5199 DCHECK(result->IsDoubleRegister());
5201 Register input_reg = ToRegister(input);
5202 DwVfpRegister result_reg = ToDoubleRegister(result);
5204 HValue* value = instr->hydrogen()->value();
5205 NumberUntagDMode mode = value->representation().IsSmi()
5206 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5208 EmitNumberUntagD(instr, input_reg, result_reg, mode);
5212 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5213 Register result_reg = ToRegister(instr->result());
5214 Register scratch1 = scratch0();
5215 DwVfpRegister double_input = ToDoubleRegister(instr->value());
5216 LowDwVfpRegister double_scratch = double_scratch0();
5218 if (instr->truncating()) {
5219 __ TruncateDoubleToI(result_reg, double_input);
5221 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
5222 // Deoptimize if the input wasn't a int32 (inside a double).
5223 DeoptimizeIf(ne, instr);
5224 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5226 __ cmp(result_reg, Operand::Zero());
5228 __ VmovHigh(scratch1, double_input);
5229 __ tst(scratch1, Operand(HeapNumber::kSignMask));
5230 DeoptimizeIf(ne, instr);
5237 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5238 Register result_reg = ToRegister(instr->result());
5239 Register scratch1 = scratch0();
5240 DwVfpRegister double_input = ToDoubleRegister(instr->value());
5241 LowDwVfpRegister double_scratch = double_scratch0();
5243 if (instr->truncating()) {
5244 __ TruncateDoubleToI(result_reg, double_input);
5246 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
5247 // Deoptimize if the input wasn't a int32 (inside a double).
5248 DeoptimizeIf(ne, instr);
5249 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5251 __ cmp(result_reg, Operand::Zero());
5253 __ VmovHigh(scratch1, double_input);
5254 __ tst(scratch1, Operand(HeapNumber::kSignMask));
5255 DeoptimizeIf(ne, instr);
5259 __ SmiTag(result_reg, SetCC);
5260 DeoptimizeIf(vs, instr);
5264 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5265 LOperand* input = instr->value();
5266 __ SmiTst(ToRegister(input));
5267 DeoptimizeIf(ne, instr);
5271 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5272 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5273 LOperand* input = instr->value();
5274 __ SmiTst(ToRegister(input));
5275 DeoptimizeIf(eq, instr);
5280 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5281 Register input = ToRegister(instr->value());
5282 Register scratch = scratch0();
5284 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5285 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5287 if (instr->hydrogen()->is_interval_check()) {
5290 instr->hydrogen()->GetCheckInterval(&first, &last);
5292 __ cmp(scratch, Operand(first));
5294 // If there is only one type in the interval check for equality.
5295 if (first == last) {
5296 DeoptimizeIf(ne, instr);
5298 DeoptimizeIf(lo, instr);
5299 // Omit check for the last type.
5300 if (last != LAST_TYPE) {
5301 __ cmp(scratch, Operand(last));
5302 DeoptimizeIf(hi, instr);
5308 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5310 if (base::bits::IsPowerOfTwo32(mask)) {
5311 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5312 __ tst(scratch, Operand(mask));
5313 DeoptimizeIf(tag == 0 ? ne : eq, instr);
5315 __ and_(scratch, scratch, Operand(mask));
5316 __ cmp(scratch, Operand(tag));
5317 DeoptimizeIf(ne, instr);
5323 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5324 Register reg = ToRegister(instr->value());
5325 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5326 AllowDeferredHandleDereference smi_check;
5327 if (isolate()->heap()->InNewSpace(*object)) {
5328 Register reg = ToRegister(instr->value());
5329 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5330 __ mov(ip, Operand(Handle<Object>(cell)));
5331 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
5334 __ cmp(reg, Operand(object));
5336 DeoptimizeIf(ne, instr);
5340 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5342 PushSafepointRegistersScope scope(this);
5344 __ mov(cp, Operand::Zero());
5345 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5346 RecordSafepointWithRegisters(
5347 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5348 __ StoreToSafepointRegisterSlot(r0, scratch0());
5350 __ tst(scratch0(), Operand(kSmiTagMask));
5351 DeoptimizeIf(eq, instr);
5355 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5356 class DeferredCheckMaps FINAL : public LDeferredCode {
5358 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5359 : LDeferredCode(codegen), instr_(instr), object_(object) {
5360 SetExit(check_maps());
5362 virtual void Generate() OVERRIDE {
5363 codegen()->DoDeferredInstanceMigration(instr_, object_);
5365 Label* check_maps() { return &check_maps_; }
5366 virtual LInstruction* instr() OVERRIDE { return instr_; }
5373 if (instr->hydrogen()->IsStabilityCheck()) {
5374 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5375 for (int i = 0; i < maps->size(); ++i) {
5376 AddStabilityDependency(maps->at(i).handle());
5381 Register map_reg = scratch0();
5383 LOperand* input = instr->value();
5384 DCHECK(input->IsRegister());
5385 Register reg = ToRegister(input);
5387 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5389 DeferredCheckMaps* deferred = NULL;
5390 if (instr->hydrogen()->HasMigrationTarget()) {
5391 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5392 __ bind(deferred->check_maps());
5395 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5397 for (int i = 0; i < maps->size() - 1; i++) {
5398 Handle<Map> map = maps->at(i).handle();
5399 __ CompareMap(map_reg, map, &success);
5403 Handle<Map> map = maps->at(maps->size() - 1).handle();
5404 __ CompareMap(map_reg, map, &success);
5405 if (instr->hydrogen()->HasMigrationTarget()) {
5406 __ b(ne, deferred->entry());
5408 DeoptimizeIf(ne, instr);
5415 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5416 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
5417 Register result_reg = ToRegister(instr->result());
5418 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5422 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5423 Register unclamped_reg = ToRegister(instr->unclamped());
5424 Register result_reg = ToRegister(instr->result());
5425 __ ClampUint8(result_reg, unclamped_reg);
5429 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5430 Register scratch = scratch0();
5431 Register input_reg = ToRegister(instr->unclamped());
5432 Register result_reg = ToRegister(instr->result());
5433 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
5434 Label is_smi, done, heap_number;
5436 // Both smi and heap number cases are handled.
5437 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5439 // Check for heap number
5440 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5441 __ cmp(scratch, Operand(factory()->heap_number_map()));
5442 __ b(eq, &heap_number);
5444 // Check for undefined. Undefined is converted to zero for clamping
5446 __ cmp(input_reg, Operand(factory()->undefined_value()));
5447 DeoptimizeIf(ne, instr);
5448 __ mov(result_reg, Operand::Zero());
5452 __ bind(&heap_number);
5453 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5454 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5459 __ ClampUint8(result_reg, result_reg);
5465 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5466 DwVfpRegister value_reg = ToDoubleRegister(instr->value());
5467 Register result_reg = ToRegister(instr->result());
5468 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5469 __ VmovHigh(result_reg, value_reg);
5471 __ VmovLow(result_reg, value_reg);
5476 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5477 Register hi_reg = ToRegister(instr->hi());
5478 Register lo_reg = ToRegister(instr->lo());
5479 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
5480 __ VmovHigh(result_reg, hi_reg);
5481 __ VmovLow(result_reg, lo_reg);
5485 void LCodeGen::DoAllocate(LAllocate* instr) {
5486 class DeferredAllocate FINAL : public LDeferredCode {
5488 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5489 : LDeferredCode(codegen), instr_(instr) { }
5490 virtual void Generate() OVERRIDE {
5491 codegen()->DoDeferredAllocate(instr_);
5493 virtual LInstruction* instr() OVERRIDE { return instr_; }
5498 DeferredAllocate* deferred =
5499 new(zone()) DeferredAllocate(this, instr);
5501 Register result = ToRegister(instr->result());
5502 Register scratch = ToRegister(instr->temp1());
5503 Register scratch2 = ToRegister(instr->temp2());
5505 // Allocate memory for the object.
5506 AllocationFlags flags = TAG_OBJECT;
5507 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5508 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5510 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5511 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5512 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5513 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5514 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5515 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5516 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5519 if (instr->size()->IsConstantOperand()) {
5520 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5521 if (size <= Page::kMaxRegularHeapObjectSize) {
5522 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5524 __ jmp(deferred->entry());
5527 Register size = ToRegister(instr->size());
5528 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5531 __ bind(deferred->exit());
5533 if (instr->hydrogen()->MustPrefillWithFiller()) {
5534 STATIC_ASSERT(kHeapObjectTag == 1);
5535 if (instr->size()->IsConstantOperand()) {
5536 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5537 __ mov(scratch, Operand(size - kHeapObjectTag));
5539 __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5541 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5544 __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
5545 __ str(scratch2, MemOperand(result, scratch));
5551 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5552 Register result = ToRegister(instr->result());
5554 // TODO(3095996): Get rid of this. For now, we need to make the
5555 // result register contain a valid pointer because it is already
5556 // contained in the register pointer map.
5557 __ mov(result, Operand(Smi::FromInt(0)));
5559 PushSafepointRegistersScope scope(this);
5560 if (instr->size()->IsRegister()) {
5561 Register size = ToRegister(instr->size());
5562 DCHECK(!size.is(result));
5566 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5567 if (size >= 0 && size <= Smi::kMaxValue) {
5568 __ Push(Smi::FromInt(size));
5570 // We should never get here at runtime => abort
5571 __ stop("invalid allocation size");
5576 int flags = AllocateDoubleAlignFlag::encode(
5577 instr->hydrogen()->MustAllocateDoubleAligned());
5578 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5579 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5580 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5581 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5582 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5583 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5584 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5586 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5588 __ Push(Smi::FromInt(flags));
5590 CallRuntimeFromDeferred(
5591 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5592 __ StoreToSafepointRegisterSlot(r0, result);
5596 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5597 DCHECK(ToRegister(instr->value()).is(r0));
5599 CallRuntime(Runtime::kToFastProperties, 1, instr);
5603 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5604 DCHECK(ToRegister(instr->context()).is(cp));
5606 // Registers will be used as follows:
5607 // r6 = literals array.
5608 // r1 = regexp literal.
5609 // r0 = regexp literal clone.
5610 // r2-5 are used as temporaries.
5611 int literal_offset =
5612 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5613 __ Move(r6, instr->hydrogen()->literals());
5614 __ ldr(r1, FieldMemOperand(r6, literal_offset));
5615 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5617 __ b(ne, &materialized);
5619 // Create regexp literal using runtime function
5620 // Result will be in r0.
5621 __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5622 __ mov(r4, Operand(instr->hydrogen()->pattern()));
5623 __ mov(r3, Operand(instr->hydrogen()->flags()));
5624 __ Push(r6, r5, r4, r3);
5625 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5628 __ bind(&materialized);
5629 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5630 Label allocated, runtime_allocate;
5632 __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
5635 __ bind(&runtime_allocate);
5636 __ mov(r0, Operand(Smi::FromInt(size)));
5638 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5641 __ bind(&allocated);
5642 // Copy the content into the newly allocated memory.
5643 __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
5647 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5648 DCHECK(ToRegister(instr->context()).is(cp));
5649 // Use the fast case closure allocation code that allocates in new
5650 // space for nested functions that don't need literals cloning.
5651 bool pretenure = instr->hydrogen()->pretenure();
5652 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5653 FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5654 instr->hydrogen()->kind());
5655 __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5656 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5658 __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5659 __ mov(r1, Operand(pretenure ? factory()->true_value()
5660 : factory()->false_value()));
5661 __ Push(cp, r2, r1);
5662 CallRuntime(Runtime::kNewClosure, 3, instr);
5667 void LCodeGen::DoTypeof(LTypeof* instr) {
5668 Register input = ToRegister(instr->value());
5670 CallRuntime(Runtime::kTypeof, 1, instr);
5674 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5675 Register input = ToRegister(instr->value());
5677 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5678 instr->FalseLabel(chunk_),
5680 instr->type_literal());
5681 if (final_branch_condition != kNoCondition) {
5682 EmitBranch(instr, final_branch_condition);
5687 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5690 Handle<String> type_name) {
5691 Condition final_branch_condition = kNoCondition;
5692 Register scratch = scratch0();
5693 Factory* factory = isolate()->factory();
5694 if (String::Equals(type_name, factory->number_string())) {
5695 __ JumpIfSmi(input, true_label);
5696 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5697 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5698 final_branch_condition = eq;
5700 } else if (String::Equals(type_name, factory->string_string())) {
5701 __ JumpIfSmi(input, false_label);
5702 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5703 __ b(ge, false_label);
5704 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5705 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5706 final_branch_condition = eq;
5708 } else if (String::Equals(type_name, factory->symbol_string())) {
5709 __ JumpIfSmi(input, false_label);
5710 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5711 final_branch_condition = eq;
5713 } else if (String::Equals(type_name, factory->boolean_string())) {
5714 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5715 __ b(eq, true_label);
5716 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5717 final_branch_condition = eq;
5719 } else if (String::Equals(type_name, factory->undefined_string())) {
5720 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5721 __ b(eq, true_label);
5722 __ JumpIfSmi(input, false_label);
5723 // Check for undetectable objects => true.
5724 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5725 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5726 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5727 final_branch_condition = ne;
5729 } else if (String::Equals(type_name, factory->function_string())) {
5730 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5731 Register type_reg = scratch;
5732 __ JumpIfSmi(input, false_label);
5733 __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
5734 __ b(eq, true_label);
5735 __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
5736 final_branch_condition = eq;
5738 } else if (String::Equals(type_name, factory->object_string())) {
5739 Register map = scratch;
5740 __ JumpIfSmi(input, false_label);
5741 __ CompareRoot(input, Heap::kNullValueRootIndex);
5742 __ b(eq, true_label);
5743 __ CheckObjectTypeRange(input,
5745 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5746 LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
5748 // Check for undetectable objects => false.
5749 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5750 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5751 final_branch_condition = eq;
5757 return final_branch_condition;
5761 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5762 Register temp1 = ToRegister(instr->temp());
5764 EmitIsConstructCall(temp1, scratch0());
5765 EmitBranch(instr, eq);
5769 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5770 DCHECK(!temp1.is(temp2));
5771 // Get the frame pointer for the calling frame.
5772 __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5774 // Skip the arguments adaptor frame if it exists.
5775 __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5776 __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5777 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);
5779 // Check the marker in the calling frame.
5780 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5781 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5785 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5786 if (!info()->IsStub()) {
5787 // Ensure that we have enough space after the previous lazy-bailout
5788 // instruction for patching the code here.
5789 int current_pc = masm()->pc_offset();
5790 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5791 // Block literal pool emission for duration of padding.
5792 Assembler::BlockConstPoolScope block_const_pool(masm());
5793 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5794 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5795 while (padding_size > 0) {
5797 padding_size -= Assembler::kInstrSize;
5801 last_lazy_deopt_pc_ = masm()->pc_offset();
5805 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5806 last_lazy_deopt_pc_ = masm()->pc_offset();
5807 DCHECK(instr->HasEnvironment());
5808 LEnvironment* env = instr->environment();
5809 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5810 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5814 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5815 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5816 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5817 // needed return address), even though the implementation of LAZY and EAGER is
5818 // now identical. When LAZY is eventually completely folded into EAGER, remove
5819 // the special case below.
5820 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5821 type = Deoptimizer::LAZY;
5824 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5828 void LCodeGen::DoDummy(LDummy* instr) {
5829 // Nothing to see here, move on!
5833 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5834 // Nothing to see here, move on!
5838 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5839 PushSafepointRegistersScope scope(this);
5840 LoadContextFromDeferred(instr->context());
5841 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5842 RecordSafepointWithLazyDeopt(
5843 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5844 DCHECK(instr->HasEnvironment());
5845 LEnvironment* env = instr->environment();
5846 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5850 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5851 class DeferredStackCheck FINAL : public LDeferredCode {
5853 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5854 : LDeferredCode(codegen), instr_(instr) { }
5855 virtual void Generate() OVERRIDE {
5856 codegen()->DoDeferredStackCheck(instr_);
5858 virtual LInstruction* instr() OVERRIDE { return instr_; }
5860 LStackCheck* instr_;
5863 DCHECK(instr->HasEnvironment());
5864 LEnvironment* env = instr->environment();
5865 // There is no LLazyBailout instruction for stack-checks. We have to
5866 // prepare for lazy deoptimization explicitly here.
5867 if (instr->hydrogen()->is_function_entry()) {
5868 // Perform stack overflow check.
5870 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5871 __ cmp(sp, Operand(ip));
5873 Handle<Code> stack_check = isolate()->builtins()->StackCheck();
5874 PredictableCodeSizeScope predictable(masm(),
5875 CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
5876 DCHECK(instr->context()->IsRegister());
5877 DCHECK(ToRegister(instr->context()).is(cp));
5878 CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
5881 DCHECK(instr->hydrogen()->is_backwards_branch());
5882 // Perform stack overflow check if this goto needs it before jumping.
5883 DeferredStackCheck* deferred_stack_check =
5884 new(zone()) DeferredStackCheck(this, instr);
5885 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5886 __ cmp(sp, Operand(ip));
5887 __ b(lo, deferred_stack_check->entry());
5888 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5889 __ bind(instr->done_label());
5890 deferred_stack_check->SetExit(instr->done_label());
5891 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5892 // Don't record a deoptimization index for the safepoint here.
5893 // This will be done explicitly when emitting call and the safepoint in
5894 // the deferred code.
5899 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5900 // This is a pseudo-instruction that ensures that the environment here is
5901 // properly registered for deoptimization and records the assembler's PC
5903 LEnvironment* environment = instr->environment();
5905 // If the environment were already registered, we would have no way of
5906 // backpatching it with the spill slot operands.
5907 DCHECK(!environment->HasBeenRegistered());
5908 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5910 GenerateOsrPrologue();
5914 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5915 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5917 DeoptimizeIf(eq, instr);
5919 Register null_value = r5;
5920 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5921 __ cmp(r0, null_value);
5922 DeoptimizeIf(eq, instr);
5925 DeoptimizeIf(eq, instr);
5927 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5928 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
5929 DeoptimizeIf(le, instr);
5931 Label use_cache, call_runtime;
5932 __ CheckEnumCache(null_value, &call_runtime);
5934 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5937 // Get the set of properties to enumerate.
5938 __ bind(&call_runtime);
5940 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5942 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
5943 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5945 DeoptimizeIf(ne, instr);
5946 __ bind(&use_cache);
5950 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5951 Register map = ToRegister(instr->map());
5952 Register result = ToRegister(instr->result());
5953 Label load_cache, done;
5954 __ EnumLength(result, map);
5955 __ cmp(result, Operand(Smi::FromInt(0)));
5956 __ b(ne, &load_cache);
5957 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5960 __ bind(&load_cache);
5961 __ LoadInstanceDescriptors(map, result);
5963 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5965 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5966 __ cmp(result, Operand::Zero());
5967 DeoptimizeIf(eq, instr);
5973 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5974 Register object = ToRegister(instr->value());
5975 Register map = ToRegister(instr->map());
5976 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5977 __ cmp(map, scratch0());
5978 DeoptimizeIf(ne, instr);
5982 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5986 PushSafepointRegistersScope scope(this);
5989 __ mov(cp, Operand::Zero());
5990 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5991 RecordSafepointWithRegisters(
5992 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5993 __ StoreToSafepointRegisterSlot(r0, result);
5997 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5998 class DeferredLoadMutableDouble FINAL : public LDeferredCode {
6000 DeferredLoadMutableDouble(LCodeGen* codegen,
6001 LLoadFieldByIndex* instr,
6005 : LDeferredCode(codegen),
6011 virtual void Generate() OVERRIDE {
6012 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
6014 virtual LInstruction* instr() OVERRIDE { return instr_; }
6016 LLoadFieldByIndex* instr_;
6022 Register object = ToRegister(instr->object());
6023 Register index = ToRegister(instr->index());
6024 Register result = ToRegister(instr->result());
6025 Register scratch = scratch0();
6027 DeferredLoadMutableDouble* deferred;
6028 deferred = new(zone()) DeferredLoadMutableDouble(
6029 this, instr, result, object, index);
6031 Label out_of_object, done;
6033 __ tst(index, Operand(Smi::FromInt(1)));
6034 __ b(ne, deferred->entry());
6035 __ mov(index, Operand(index, ASR, 1));
6037 __ cmp(index, Operand::Zero());
6038 __ b(lt, &out_of_object);
6040 __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
6041 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
6045 __ bind(&out_of_object);
6046 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
6047 // Index is equal to negated out of object property index plus 1.
6048 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
6049 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
6050 __ ldr(result, FieldMemOperand(scratch,
6051 FixedArray::kHeaderSize - kPointerSize));
6052 __ bind(deferred->exit());
6057 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
6058 Register context = ToRegister(instr->context());
6059 __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
6063 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
6064 Handle<ScopeInfo> scope_info = instr->scope_info();
6065 __ Push(scope_info);
6066 __ push(ToRegister(instr->function()));
6067 CallRuntime(Runtime::kPushBlockContext, 2, instr);
6068 RecordSafepoint(Safepoint::kNoLazyDeopt);
6074 } } // namespace v8::internal