1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/arm/lithium-codegen-arm.h"
8 #include "src/arm/lithium-gap-resolver-arm.h"
9 #include "src/base/bits.h"
10 #include "src/code-factory.h"
11 #include "src/code-stubs.h"
12 #include "src/cpu-profiler.h"
13 #include "src/hydrogen-osr.h"
14 #include "src/ic/ic.h"
15 #include "src/ic/stub-cache.h"
21 class SafepointGenerator final : public CallWrapper {
23 SafepointGenerator(LCodeGen* codegen,
24 LPointerMap* pointers,
25 Safepoint::DeoptMode mode)
29 virtual ~SafepointGenerator() {}
31 void BeforeCall(int call_size) const override {}
33 void AfterCall() const override {
34 codegen_->RecordSafepoint(pointers_, deopt_mode_);
39 LPointerMap* pointers_;
40 Safepoint::DeoptMode deopt_mode_;
46 bool LCodeGen::GenerateCode() {
47 LPhase phase("Z_Code generation", chunk());
51 // Open a frame scope to indicate that there is a frame on the stack. The
52 // NONE indicates that the scope shouldn't actually generate code to set up
53 // the frame (that is done in GeneratePrologue).
54 FrameScope frame_scope(masm_, StackFrame::NONE);
56 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
57 GenerateJumpTable() && GenerateSafepointTable();
61 void LCodeGen::FinishCode(Handle<Code> code) {
63 code->set_stack_slots(GetStackSlotCount());
64 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
65 PopulateDeoptimizationData(code);
69 void LCodeGen::SaveCallerDoubles() {
70 DCHECK(info()->saves_caller_doubles());
71 DCHECK(NeedsEagerFrame());
72 Comment(";;; Save clobbered callee double registers");
74 BitVector* doubles = chunk()->allocated_double_registers();
75 BitVector::Iterator save_iterator(doubles);
76 while (!save_iterator.Done()) {
77 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
78 MemOperand(sp, count * kDoubleSize));
79 save_iterator.Advance();
85 void LCodeGen::RestoreCallerDoubles() {
86 DCHECK(info()->saves_caller_doubles());
87 DCHECK(NeedsEagerFrame());
88 Comment(";;; Restore clobbered callee double registers");
89 BitVector* doubles = chunk()->allocated_double_registers();
90 BitVector::Iterator save_iterator(doubles);
92 while (!save_iterator.Done()) {
93 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
94 MemOperand(sp, count * kDoubleSize));
95 save_iterator.Advance();
101 bool LCodeGen::GeneratePrologue() {
102 DCHECK(is_generating());
104 if (info()->IsOptimizing()) {
105 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
108 if (strlen(FLAG_stop_at) > 0 &&
109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
114 // r1: Callee's JS function.
115 // cp: Callee's context.
116 // pp: Callee's constant pool pointer (if enabled)
117 // fp: Caller's frame pointer.
120 // Sloppy mode functions and builtins need to replace the receiver with the
121 // global proxy when called as functions (without an explicit receiver object).
123 if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
124 !info_->is_native() && info_->scope()->has_this_declaration()) {
126 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
127 __ ldr(r2, MemOperand(sp, receiver_offset));
128 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
131 __ ldr(r2, GlobalObjectOperand());
132 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
134 __ str(r2, MemOperand(sp, receiver_offset));
140 info()->set_prologue_offset(masm_->pc_offset());
141 if (NeedsEagerFrame()) {
142 if (info()->IsStub()) {
145 __ Prologue(info()->IsCodePreAgingActive());
147 frame_is_built_ = true;
148 info_->AddNoFrameRange(0, masm_->pc_offset());
151 // Reserve space for the stack slots needed by the code.
152 int slots = GetStackSlotCount();
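// In debug builds the freshly allocated slots are filled with kSlotsZapValue
// below, so reads from uninitialized spill slots are easy to spot.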
154 if (FLAG_debug_code) {
155 __ sub(sp, sp, Operand(slots * kPointerSize));
158 __ add(r0, sp, Operand(slots * kPointerSize));
159 __ mov(r1, Operand(kSlotsZapValue));
162 __ sub(r0, r0, Operand(kPointerSize));
163 __ str(r1, MemOperand(r0, 2 * kPointerSize));
169 __ sub(sp, sp, Operand(slots * kPointerSize));
173 if (info()->saves_caller_doubles()) {
177 // Possibly allocate a local context.
178 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
179 if (heap_slots > 0) {
180 Comment(";;; Allocate local context");
181 bool need_write_barrier = true;
182 // Argument to NewContext is the function, which is in r1.
183 DCHECK(!info()->scope()->is_script_scope());
184 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
185 FastNewContextStub stub(isolate(), heap_slots);
187 // Result of FastNewContextStub is always in new space.
188 need_write_barrier = false;
191 __ CallRuntime(Runtime::kNewFunctionContext, 1);
193 RecordSafepoint(Safepoint::kNoLazyDeopt);
194 // Context is returned in both r0 and cp. It replaces the context
195 // passed to us. It's saved on the stack and kept live in cp.
197 __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
198 // Copy any necessary parameters into the context.
199 int num_parameters = scope()->num_parameters();
200 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
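// Index -1 denotes the receiver; real parameters start at index 0.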
201 for (int i = first_parameter; i < num_parameters; i++) {
202 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
203 if (var->IsContextSlot()) {
204 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
205 (num_parameters - 1 - i) * kPointerSize;
206 // Load parameter from stack.
207 __ ldr(r0, MemOperand(fp, parameter_offset));
208 // Store it in the context.
209 MemOperand target = ContextOperand(cp, var->index());
211 // Update the write barrier. This clobbers r3 and r0.
212 if (need_write_barrier) {
213 __ RecordWriteContextSlot(
218 GetLinkRegisterState(),
220 } else if (FLAG_debug_code) {
222 __ JumpIfInNewSpace(cp, r0, &done);
223 __ Abort(kExpectedNewSpaceObject);
228 Comment(";;; End allocate local context");
232 if (FLAG_trace && info()->IsOptimizing()) {
233 // We have not executed any compiled code yet, so cp still holds the
234 // incoming context.
235 __ CallRuntime(Runtime::kTraceEnter, 0);
237 return !is_aborted();
241 void LCodeGen::GenerateOsrPrologue() {
242 // Generate the OSR entry prologue at the first unknown OSR value, or if there
243 // are none, at the OSR entrypoint instruction.
244 if (osr_pc_offset_ >= 0) return;
246 osr_pc_offset_ = masm()->pc_offset();
248 // Adjust the frame size, subsuming the unoptimized frame into the
249 // optimized frame.
250 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
252 __ sub(sp, sp, Operand(slots * kPointerSize));
256 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
257 if (instr->IsCall()) {
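// Calls are potential lazy-deopt points; make sure there is enough room
// after the previous one for the deoptimizer to patch in a call
// (Deoptimizer::patch_size() bytes).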
258 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
260 if (!instr->IsLazyBailout() && !instr->IsGap()) {
261 safepoints_.BumpLastLazySafepointIndex();
266 bool LCodeGen::GenerateDeferredCode() {
267 DCHECK(is_generating());
268 if (deferred_.length() > 0) {
269 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
270 LDeferredCode* code = deferred_[i];
273 instructions_->at(code->instruction_index())->hydrogen_value();
274 RecordAndWritePosition(
275 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
277 Comment(";;; <@%d,#%d> "
278 "-------------------- Deferred %s --------------------",
279 code->instruction_index(),
280 code->instr()->hydrogen_value()->id(),
281 code->instr()->Mnemonic());
282 __ bind(code->entry());
283 if (NeedsDeferredFrame()) {
284 Comment(";;; Build frame");
285 DCHECK(!frame_is_built_);
286 DCHECK(info()->IsStub());
287 frame_is_built_ = true;
289 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
291 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
292 Comment(";;; Deferred code");
295 if (NeedsDeferredFrame()) {
296 Comment(";;; Destroy frame");
297 DCHECK(frame_is_built_);
300 frame_is_built_ = false;
302 __ jmp(code->exit());
306 // Force constant pool emission at the end of the deferred code to make
307 // sure that no constant pools are emitted after.
308 masm()->CheckConstPool(true, false);
310 return !is_aborted();
314 bool LCodeGen::GenerateJumpTable() {
315 // Check that the jump table is accessible from everywhere in the function
316 // code, i.e. that offsets to the table can be encoded in the 24bit signed
317 // immediate of a branch instruction.
318 // To simplify we consider the code size from the first instruction to the
319 // end of the jump table. We also don't consider the pc load delta.
320 // Each entry in the jump table generates one instruction and inlines one
321 // 32bit data after it.
322 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
323 jump_table_.length() * 7)) {
324 Abort(kGeneratedCodeIsTooLarge);
327 if (jump_table_.length() > 0) {
328 Label needs_frame, call_deopt_entry;
330 Comment(";;; -------------------- Jump table --------------------");
331 Address base = jump_table_[0].address;
333 Register entry_offset = scratch0();
335 int length = jump_table_.length();
336 for (int i = 0; i < length; i++) {
337 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
338 __ bind(&table_entry->label);
340 DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
341 Address entry = table_entry->address;
342 DeoptComment(table_entry->deopt_info);
344 // Second-level deopt table entries are contiguous and small, so instead
345 // of loading the full, absolute address of each one, load an immediate
346 // offset which will be added to the base address later.
347 __ mov(entry_offset, Operand(entry - base));
349 if (table_entry->needs_frame) {
350 DCHECK(!info()->saves_caller_doubles());
351 Comment(";;; call deopt with frame");
355 __ bl(&call_deopt_entry);
357 info()->LogDeoptCallPosition(masm()->pc_offset(),
358 table_entry->deopt_info.inlining_id);
359 masm()->CheckConstPool(false, false);
362 if (needs_frame.is_linked()) {
363 __ bind(&needs_frame);
364 // This variant of deopt can only be used with stubs. Since we don't
365 // have a function pointer to install in the stack frame that we're
366 // building, install a special marker there instead.
367 DCHECK(info()->IsStub());
368 __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
370 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
373 Comment(";;; call deopt");
374 __ bind(&call_deopt_entry);
376 if (info()->saves_caller_doubles()) {
377 DCHECK(info()->IsStub());
378 RestoreCallerDoubles();
381 // Add the base address to the offset previously loaded in entry_offset.
382 __ add(entry_offset, entry_offset,
383 Operand(ExternalReference::ForDeoptEntry(base)));
387 // Force constant pool emission at the end of the deopt jump table to make
388 // sure that no constant pools are emitted after.
389 masm()->CheckConstPool(true, false);
391 // The deoptimization jump table is the last part of the instruction
392 // sequence. Mark the generated code as done unless we bailed out.
393 if (!is_aborted()) status_ = DONE;
394 return !is_aborted();
398 bool LCodeGen::GenerateSafepointTable() {
400 safepoints_.Emit(masm(), GetStackSlotCount());
401 return !is_aborted();
405 Register LCodeGen::ToRegister(int index) const {
406 return Register::FromAllocationIndex(index);
410 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
411 return DwVfpRegister::FromAllocationIndex(index);
415 Register LCodeGen::ToRegister(LOperand* op) const {
416 DCHECK(op->IsRegister());
417 return ToRegister(op->index());
421 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
422 if (op->IsRegister()) {
423 return ToRegister(op->index());
424 } else if (op->IsConstantOperand()) {
425 LConstantOperand* const_op = LConstantOperand::cast(op);
426 HConstant* constant = chunk_->LookupConstant(const_op);
427 Handle<Object> literal = constant->handle(isolate());
428 Representation r = chunk_->LookupLiteralRepresentation(const_op);
429 if (r.IsInteger32()) {
430 DCHECK(literal->IsNumber());
431 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
432 } else if (r.IsDouble()) {
433 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
435 DCHECK(r.IsSmiOrTagged());
436 __ Move(scratch, literal);
439 } else if (op->IsStackSlot()) {
440 __ ldr(scratch, ToMemOperand(op));
448 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
449 DCHECK(op->IsDoubleRegister());
450 return ToDoubleRegister(op->index());
454 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
455 SwVfpRegister flt_scratch,
456 DwVfpRegister dbl_scratch) {
457 if (op->IsDoubleRegister()) {
458 return ToDoubleRegister(op->index());
459 } else if (op->IsConstantOperand()) {
460 LConstantOperand* const_op = LConstantOperand::cast(op);
461 HConstant* constant = chunk_->LookupConstant(const_op);
462 Handle<Object> literal = constant->handle(isolate());
463 Representation r = chunk_->LookupLiteralRepresentation(const_op);
464 if (r.IsInteger32()) {
465 DCHECK(literal->IsNumber());
466 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
467 __ vmov(flt_scratch, ip);
468 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
470 } else if (r.IsDouble()) {
471 Abort(kUnsupportedDoubleImmediate);
472 } else if (r.IsTagged()) {
473 Abort(kUnsupportedTaggedImmediate);
475 } else if (op->IsStackSlot()) {
476 // TODO(regis): Why is vldr not taking a MemOperand?
477 // __ vldr(dbl_scratch, ToMemOperand(op));
478 MemOperand mem_op = ToMemOperand(op);
479 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
487 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
488 HConstant* constant = chunk_->LookupConstant(op);
489 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
490 return constant->handle(isolate());
494 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
495 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
499 bool LCodeGen::IsSmi(LConstantOperand* op) const {
500 return chunk_->LookupLiteralRepresentation(op).IsSmi();
504 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
505 return ToRepresentation(op, Representation::Integer32());
509 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
510 const Representation& r) const {
511 HConstant* constant = chunk_->LookupConstant(op);
512 int32_t value = constant->Integer32Value();
513 if (r.IsInteger32()) return value;
514 DCHECK(r.IsSmiOrTagged());
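// Smi (and tagged small-integer) constants are returned as the raw bit
// pattern of the tagged Smi.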
515 return reinterpret_cast<int32_t>(Smi::FromInt(value));
519 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
520 HConstant* constant = chunk_->LookupConstant(op);
521 return Smi::FromInt(constant->Integer32Value());
525 double LCodeGen::ToDouble(LConstantOperand* op) const {
526 HConstant* constant = chunk_->LookupConstant(op);
527 DCHECK(constant->HasDoubleValue());
528 return constant->DoubleValue();
532 Operand LCodeGen::ToOperand(LOperand* op) {
533 if (op->IsConstantOperand()) {
534 LConstantOperand* const_op = LConstantOperand::cast(op);
535 HConstant* constant = chunk()->LookupConstant(const_op);
536 Representation r = chunk_->LookupLiteralRepresentation(const_op);
538 DCHECK(constant->HasSmiValue());
539 return Operand(Smi::FromInt(constant->Integer32Value()));
540 } else if (r.IsInteger32()) {
541 DCHECK(constant->HasInteger32Value());
542 return Operand(constant->Integer32Value());
543 } else if (r.IsDouble()) {
544 Abort(kToOperandUnsupportedDoubleImmediate);
546 DCHECK(r.IsTagged());
547 return Operand(constant->handle(isolate()));
548 } else if (op->IsRegister()) {
549 return Operand(ToRegister(op));
550 } else if (op->IsDoubleRegister()) {
551 Abort(kToOperandIsDoubleRegisterUnimplemented);
552 return Operand::Zero();
554 // Stack slots not implemented, use ToMemOperand instead.
556 return Operand::Zero();
560 static int ArgumentsOffsetWithoutFrame(int index) {
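// Argument slots have negative indices: index -1 maps to sp + 0, index -2 to
// sp + kPointerSize, and so on.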
562 return -(index + 1) * kPointerSize;
566 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
567 DCHECK(!op->IsRegister());
568 DCHECK(!op->IsDoubleRegister());
569 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
570 if (NeedsEagerFrame()) {
571 return MemOperand(fp, StackSlotOffset(op->index()));
573 // Without an eager frame, the slot is addressed relative to the
574 // stack pointer.
575 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
580 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
581 DCHECK(op->IsDoubleStackSlot());
582 if (NeedsEagerFrame()) {
583 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
585 // Without an eager frame, the slot is addressed relative to the
586 // stack pointer.
587 return MemOperand(
588 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
593 void LCodeGen::WriteTranslation(LEnvironment* environment,
594 Translation* translation) {
595 if (environment == NULL) return;
597 // The translation includes one command per value in the environment.
598 int translation_size = environment->translation_size();
599 // The output frame height does not include the parameters.
600 int height = translation_size - environment->parameter_count();
602 WriteTranslation(environment->outer(), translation);
603 bool has_closure_id = !info()->closure().is_null() &&
604 !info()->closure().is_identical_to(environment->closure());
605 int closure_id = has_closure_id
606 ? DefineDeoptimizationLiteral(environment->closure())
607 : Translation::kSelfLiteralId;
609 switch (environment->frame_type()) {
611 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
614 translation->BeginConstructStubFrame(closure_id, translation_size);
617 DCHECK(translation_size == 1);
619 translation->BeginGetterStubFrame(closure_id);
622 DCHECK(translation_size == 2);
624 translation->BeginSetterStubFrame(closure_id);
627 translation->BeginCompiledStubFrame(translation_size);
629 case ARGUMENTS_ADAPTOR:
630 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
634 int object_index = 0;
635 int dematerialized_index = 0;
636 for (int i = 0; i < translation_size; ++i) {
637 LOperand* value = environment->values()->at(i);
638 AddToTranslation(environment,
641 environment->HasTaggedValueAt(i),
642 environment->HasUint32ValueAt(i),
644 &dematerialized_index);
649 void LCodeGen::AddToTranslation(LEnvironment* environment,
650 Translation* translation,
654 int* object_index_pointer,
655 int* dematerialized_index_pointer) {
656 if (op == LEnvironment::materialization_marker()) {
657 int object_index = (*object_index_pointer)++;
658 if (environment->ObjectIsDuplicateAt(object_index)) {
659 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
660 translation->DuplicateObject(dupe_of);
663 int object_length = environment->ObjectLengthAt(object_index);
664 if (environment->ObjectIsArgumentsAt(object_index)) {
665 translation->BeginArgumentsObject(object_length);
667 translation->BeginCapturedObject(object_length);
669 int dematerialized_index = *dematerialized_index_pointer;
670 int env_offset = environment->translation_size() + dematerialized_index;
671 *dematerialized_index_pointer += object_length;
672 for (int i = 0; i < object_length; ++i) {
673 LOperand* value = environment->values()->at(env_offset + i);
674 AddToTranslation(environment,
677 environment->HasTaggedValueAt(env_offset + i),
678 environment->HasUint32ValueAt(env_offset + i),
679 object_index_pointer,
680 dematerialized_index_pointer);
685 if (op->IsStackSlot()) {
687 translation->StoreStackSlot(op->index());
688 } else if (is_uint32) {
689 translation->StoreUint32StackSlot(op->index());
691 translation->StoreInt32StackSlot(op->index());
693 } else if (op->IsDoubleStackSlot()) {
694 translation->StoreDoubleStackSlot(op->index());
695 } else if (op->IsRegister()) {
696 Register reg = ToRegister(op);
698 translation->StoreRegister(reg);
699 } else if (is_uint32) {
700 translation->StoreUint32Register(reg);
702 translation->StoreInt32Register(reg);
704 } else if (op->IsDoubleRegister()) {
705 DoubleRegister reg = ToDoubleRegister(op);
706 translation->StoreDoubleRegister(reg);
707 } else if (op->IsConstantOperand()) {
708 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
709 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
710 translation->StoreLiteral(src_index);
717 int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
718 int size = masm()->CallSize(code, mode);
719 if (code->kind() == Code::BINARY_OP_IC ||
720 code->kind() == Code::COMPARE_IC) {
721 size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
727 void LCodeGen::CallCode(Handle<Code> code,
728 RelocInfo::Mode mode,
730 TargetAddressStorageMode storage_mode) {
731 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
735 void LCodeGen::CallCodeGeneric(Handle<Code> code,
736 RelocInfo::Mode mode,
738 SafepointMode safepoint_mode,
739 TargetAddressStorageMode storage_mode) {
740 DCHECK(instr != NULL);
741 // Block literal pool emission to ensure nop indicating no inlined smi code
742 // is in the correct position.
743 Assembler::BlockConstPoolScope block_const_pool(masm());
744 __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
745 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
747 // Signal that we don't inline smi code before these stubs in the
748 // optimizing code generator.
749 if (code->kind() == Code::BINARY_OP_IC ||
750 code->kind() == Code::COMPARE_IC) {
756 void LCodeGen::CallRuntime(const Runtime::Function* function,
759 SaveFPRegsMode save_doubles) {
760 DCHECK(instr != NULL);
762 __ CallRuntime(function, num_arguments, save_doubles);
764 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
768 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
769 if (context->IsRegister()) {
770 __ Move(cp, ToRegister(context));
771 } else if (context->IsStackSlot()) {
772 __ ldr(cp, ToMemOperand(context));
773 } else if (context->IsConstantOperand()) {
774 HConstant* constant =
775 chunk_->LookupConstant(LConstantOperand::cast(context));
776 __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
783 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
787 LoadContextFromDeferred(context);
788 __ CallRuntimeSaveDoubles(id);
789 RecordSafepointWithRegisters(
790 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
794 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
795 Safepoint::DeoptMode mode) {
796 environment->set_has_been_used();
797 if (!environment->HasBeenRegistered()) {
798 // Physical stack frame layout:
799 // -x ............. -4 0 ..................................... y
800 // [incoming arguments] [spill slots] [pushed outgoing arguments]
802 // Layout of the environment:
803 // 0 ..................................................... size-1
804 // [parameters] [locals] [expression stack including arguments]
806 // Layout of the translation:
807 // 0 ........................................................ size - 1 + 4
808 // [expression stack including arguments] [locals] [4 words] [parameters]
809 // |>------------ translation_size ------------<|
812 int jsframe_count = 0;
813 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
815 if (e->frame_type() == JS_FUNCTION) {
819 Translation translation(&translations_, frame_count, jsframe_count, zone());
820 WriteTranslation(environment, &translation);
821 int deoptimization_index = deoptimizations_.length();
822 int pc_offset = masm()->pc_offset();
823 environment->Register(deoptimization_index,
825 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
826 deoptimizations_.Add(environment, zone());
831 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
832 Deoptimizer::DeoptReason deopt_reason,
833 Deoptimizer::BailoutType bailout_type) {
834 LEnvironment* environment = instr->environment();
835 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
836 DCHECK(environment->HasBeenRegistered());
837 int id = environment->deoptimization_index();
838 DCHECK(info()->IsOptimizing() || info()->IsStub());
840 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
842 Abort(kBailoutWasNotPrepared);
846 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
847 Register scratch = scratch0();
848 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
850 // Store the condition on the stack if necessary
851 if (condition != al) {
852 __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
853 __ mov(scratch, Operand(1), LeaveCC, condition);
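// Decrement the stress counter; when it reaches zero, reload it from
// FLAG_deopt_every_n_times and force the deopt call below (eq condition).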
858 __ mov(scratch, Operand(count));
859 __ ldr(r1, MemOperand(scratch));
860 __ sub(r1, r1, Operand(1), SetCC);
861 __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
862 __ str(r1, MemOperand(scratch));
865 if (condition != al) {
866 // Clean up the stack before the deoptimizer call
870 __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
872 // 'Restore' the condition in a slightly hacky way. (It would be better
873 // to use 'msr' and 'mrs' instructions here, but they are not supported by
874 // our ARM simulator).
875 if (condition != al) {
877 __ cmp(scratch, Operand::Zero());
881 if (info()->ShouldTrapOnDeopt()) {
882 __ stop("trap_on_deopt", condition);
885 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
887 DCHECK(info()->IsStub() || frame_is_built_);
888 // Go through the jump table if we need to handle a condition, build a
889 // frame, or restore caller doubles.
890 if (condition == al && frame_is_built_ &&
891 !info()->saves_caller_doubles()) {
892 DeoptComment(deopt_info);
893 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
894 info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
896 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
898 // We often have several deopts to the same entry; reuse the last
899 // jump-table entry if this is the case.
900 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
901 jump_table_.is_empty() ||
902 !table_entry.IsEquivalentTo(jump_table_.last())) {
903 jump_table_.Add(table_entry, zone());
905 __ b(condition, &jump_table_.last().label);
910 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
911 Deoptimizer::DeoptReason deopt_reason) {
912 Deoptimizer::BailoutType bailout_type = info()->IsStub()
914 : Deoptimizer::EAGER;
915 DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
919 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
920 int length = deoptimizations_.length();
921 if (length == 0) return;
922 Handle<DeoptimizationInputData> data =
923 DeoptimizationInputData::New(isolate(), length, TENURED);
925 Handle<ByteArray> translations =
926 translations_.CreateByteArray(isolate()->factory());
927 data->SetTranslationByteArray(*translations);
928 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
929 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
930 if (info_->IsOptimizing()) {
931 // Reference to shared function info does not change between phases.
932 AllowDeferredHandleDereference allow_handle_dereference;
933 data->SetSharedFunctionInfo(*info_->shared_info());
935 data->SetSharedFunctionInfo(Smi::FromInt(0));
937 data->SetWeakCellCache(Smi::FromInt(0));
939 Handle<FixedArray> literals =
940 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
941 { AllowDeferredHandleDereference copy_handles;
942 for (int i = 0; i < deoptimization_literals_.length(); i++) {
943 literals->set(i, *deoptimization_literals_[i]);
945 data->SetLiteralArray(*literals);
948 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
949 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
951 // Populate the deoptimization entries.
952 for (int i = 0; i < length; i++) {
953 LEnvironment* env = deoptimizations_[i];
954 data->SetAstId(i, env->ast_id());
955 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
956 data->SetArgumentsStackHeight(i,
957 Smi::FromInt(env->arguments_stack_height()));
958 data->SetPc(i, Smi::FromInt(env->pc_offset()));
960 code->set_deoptimization_data(*data);
964 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
965 int result = deoptimization_literals_.length();
966 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
967 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
969 deoptimization_literals_.Add(literal, zone());
974 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
975 DCHECK_EQ(0, deoptimization_literals_.length());
976 for (auto function : chunk()->inlined_functions()) {
977 DefineDeoptimizationLiteral(function);
979 inlined_function_count_ = deoptimization_literals_.length();
983 void LCodeGen::RecordSafepointWithLazyDeopt(
984 LInstruction* instr, SafepointMode safepoint_mode) {
985 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
986 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
988 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
989 RecordSafepointWithRegisters(
990 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
995 void LCodeGen::RecordSafepoint(
996 LPointerMap* pointers,
997 Safepoint::Kind kind,
999 Safepoint::DeoptMode deopt_mode) {
1000 DCHECK(expected_safepoint_kind_ == kind);
1002 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1003 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
1004 kind, arguments, deopt_mode);
1005 for (int i = 0; i < operands->length(); i++) {
1006 LOperand* pointer = operands->at(i);
1007 if (pointer->IsStackSlot()) {
1008 safepoint.DefinePointerSlot(pointer->index(), zone());
1009 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1010 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1016 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1017 Safepoint::DeoptMode deopt_mode) {
1018 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
1022 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
1023 LPointerMap empty_pointers(zone());
1024 RecordSafepoint(&empty_pointers, deopt_mode);
1028 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1030 Safepoint::DeoptMode deopt_mode) {
1032 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
1036 void LCodeGen::RecordAndWritePosition(int position) {
1037 if (position == RelocInfo::kNoPosition) return;
1038 masm()->positions_recorder()->RecordPosition(position);
1039 masm()->positions_recorder()->WriteRecordedPositions();
1043 static const char* LabelType(LLabel* label) {
1044 if (label->is_loop_header()) return " (loop header)";
1045 if (label->is_osr_entry()) return " (OSR entry)";
1050 void LCodeGen::DoLabel(LLabel* label) {
1051 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1052 current_instruction_,
1053 label->hydrogen_value()->id(),
1056 __ bind(label->label());
1057 current_block_ = label->block_id();
1062 void LCodeGen::DoParallelMove(LParallelMove* move) {
1063 resolver_.Resolve(move);
1067 void LCodeGen::DoGap(LGap* gap) {
1068 for (int i = LGap::FIRST_INNER_POSITION;
1069 i <= LGap::LAST_INNER_POSITION;
1071 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1072 LParallelMove* move = gap->GetParallelMove(inner_pos);
1073 if (move != NULL) DoParallelMove(move);
1078 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1083 void LCodeGen::DoParameter(LParameter* instr) {
1088 void LCodeGen::DoCallStub(LCallStub* instr) {
1089 DCHECK(ToRegister(instr->context()).is(cp));
1090 DCHECK(ToRegister(instr->result()).is(r0));
1091 switch (instr->hydrogen()->major_key()) {
1092 case CodeStub::RegExpExec: {
1093 RegExpExecStub stub(isolate());
1094 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1097 case CodeStub::SubString: {
1098 SubStringStub stub(isolate());
1099 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1102 case CodeStub::StringCompare: {
1103 StringCompareStub stub(isolate());
1104 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1113 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1114 GenerateOsrPrologue();
1118 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1119 Register dividend = ToRegister(instr->dividend());
1120 int32_t divisor = instr->divisor();
1121 DCHECK(dividend.is(ToRegister(instr->result())));
1123 // Theoretically, a variation of the branch-free code for integer division by
1124 // a power of 2 (calculating the remainder via an additional multiplication
1125 // (which gets simplified to an 'and') and subtraction) should be faster, and
1126 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1127 // indicate that positive dividends are heavily favored, so the branching
1128 // version performs better.
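// Example: divisor = 8 gives mask = 7; for dividend = -13 the code negates
// to 13, masks to 5 and negates back, yielding -5 (the result of -13 % 8).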
1129 HMod* hmod = instr->hydrogen();
1130 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1131 Label dividend_is_not_negative, done;
1132 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1133 __ cmp(dividend, Operand::Zero());
1134 __ b(pl, ÷nd_is_not_negative);
1135 // Note that this is correct even for kMinInt operands.
1136 __ rsb(dividend, dividend, Operand::Zero());
1137 __ and_(dividend, dividend, Operand(mask));
1138 __ rsb(dividend, dividend, Operand::Zero(), SetCC);
1139 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1140 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1145 __ bind(÷nd_is_not_negative);
1146 __ and_(dividend, dividend, Operand(mask));
1151 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1152 Register dividend = ToRegister(instr->dividend());
1153 int32_t divisor = instr->divisor();
1154 Register result = ToRegister(instr->result());
1155 DCHECK(!dividend.is(result));
1158 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
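// Compute result = dividend - (dividend / |divisor|) * |divisor|; a
// truncating remainder is not affected by the sign of the divisor.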
1162 __ TruncatingDiv(result, dividend, Abs(divisor));
1163 __ mov(ip, Operand(Abs(divisor)));
1164 __ smull(result, ip, result, ip);
1165 __ sub(result, dividend, result, SetCC);
1167 // Check for negative zero.
1168 HMod* hmod = instr->hydrogen();
1169 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1170 Label remainder_not_zero;
1171 __ b(ne, &remainder_not_zero);
1172 __ cmp(dividend, Operand::Zero());
1173 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1174 __ bind(&remainder_not_zero);
1179 void LCodeGen::DoModI(LModI* instr) {
1180 HMod* hmod = instr->hydrogen();
1181 if (CpuFeatures::IsSupported(SUDIV)) {
1182 CpuFeatureScope scope(masm(), SUDIV);
1184 Register left_reg = ToRegister(instr->left());
1185 Register right_reg = ToRegister(instr->right());
1186 Register result_reg = ToRegister(instr->result());
1189 // Check for x % 0, sdiv might signal an exception. We have to deopt in this
1190 // case because we can't return a NaN.
1191 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1192 __ cmp(right_reg, Operand::Zero());
1193 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1196 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
1197 // want. We have to deopt if we care about -0, because we can't return that.
1198 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1199 Label no_overflow_possible;
1200 __ cmp(left_reg, Operand(kMinInt));
1201 __ b(ne, &no_overflow_possible);
1202 __ cmp(right_reg, Operand(-1));
1203 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1204 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1206 __ b(ne, &no_overflow_possible);
1207 __ mov(result_reg, Operand::Zero());
1210 __ bind(&no_overflow_possible);
1213 // For 'r3 = r1 % r2' we can have the following ARM code:
1214 //   sdiv r3, r1, r2
1215 //   mls  r3, r3, r2, r1
1217 __ sdiv(result_reg, left_reg, right_reg);
1218 __ Mls(result_reg, result_reg, right_reg, left_reg);
1220 // If we care about -0, test if the dividend is <0 and the result is 0.
1221 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1222 __ cmp(result_reg, Operand::Zero());
1224 __ cmp(left_reg, Operand::Zero());
1225 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1230 // General case, without any SDIV support.
1231 Register left_reg = ToRegister(instr->left());
1232 Register right_reg = ToRegister(instr->right());
1233 Register result_reg = ToRegister(instr->result());
1234 Register scratch = scratch0();
1235 DCHECK(!scratch.is(left_reg));
1236 DCHECK(!scratch.is(right_reg));
1237 DCHECK(!scratch.is(result_reg));
1238 DwVfpRegister dividend = ToDoubleRegister(instr->temp());
1239 DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
1240 DCHECK(!divisor.is(dividend));
1241 LowDwVfpRegister quotient = double_scratch0();
1242 DCHECK(!quotient.is(dividend));
1243 DCHECK(!quotient.is(divisor));
1246 // Check for x % 0, we have to deopt in this case because we can't return a
1248 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1249 __ cmp(right_reg, Operand::Zero());
1250 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1253 __ Move(result_reg, left_reg);
1254 // Load the arguments in VFP registers. The divisor value is preloaded
1255 // before. Be careful that 'right_reg' is only live on entry.
1256 // TODO(svenpanne) The last comment seems to be wrong nowadays.
1257 __ vmov(double_scratch0().low(), left_reg);
1258 __ vcvt_f64_s32(dividend, double_scratch0().low());
1259 __ vmov(double_scratch0().low(), right_reg);
1260 __ vcvt_f64_s32(divisor, double_scratch0().low());
1262 // We do not care about the sign of the divisor. Note that we still handle
1263 // the kMinInt % -1 case correctly, though.
1264 __ vabs(divisor, divisor);
1265 // Compute the quotient and round it to a 32bit integer.
1266 __ vdiv(quotient, dividend, divisor);
1267 __ vcvt_s32_f64(quotient.low(), quotient);
1268 __ vcvt_f64_s32(quotient, quotient.low());
1270 // Compute the remainder in result.
1271 __ vmul(double_scratch0(), divisor, quotient);
1272 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
1273 __ vmov(scratch, double_scratch0().low());
1274 __ sub(result_reg, left_reg, scratch, SetCC);
1276 // If we care about -0, test if the dividend is <0 and the result is 0.
1277 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1279 __ cmp(left_reg, Operand::Zero());
1280 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
1287 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1288 Register dividend = ToRegister(instr->dividend());
1289 int32_t divisor = instr->divisor();
1290 Register result = ToRegister(instr->result());
1291 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1292 DCHECK(!result.is(dividend));
1294 // Check for (0 / -x) that will produce negative zero.
1295 HDiv* hdiv = instr->hydrogen();
1296 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1297 __ cmp(dividend, Operand::Zero());
1298 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1300 // Check for (kMinInt / -1).
1301 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1302 __ cmp(dividend, Operand(kMinInt));
1303 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1305 // Deoptimize if remainder will not be 0.
1306 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1307 divisor != 1 && divisor != -1) {
1308 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1309 __ tst(dividend, Operand(mask));
1310 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1313 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1314 __ rsb(result, dividend, Operand(0));
1317 int32_t shift = WhichPowerOf2Abs(divisor);
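// For negative dividends, add 2^shift - 1 before the arithmetic shift so
// that the division truncates towards zero rather than towards -infinity.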
1319 __ mov(result, dividend);
1320 } else if (shift == 1) {
1321 __ add(result, dividend, Operand(dividend, LSR, 31));
1323 __ mov(result, Operand(dividend, ASR, 31));
1324 __ add(result, dividend, Operand(result, LSR, 32 - shift));
1326 if (shift > 0) __ mov(result, Operand(result, ASR, shift));
1327 if (divisor < 0) __ rsb(result, result, Operand(0));
1331 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1332 Register dividend = ToRegister(instr->dividend());
1333 int32_t divisor = instr->divisor();
1334 Register result = ToRegister(instr->result());
1335 DCHECK(!dividend.is(result));
1338 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1342 // Check for (0 / -x) that will produce negative zero.
1343 HDiv* hdiv = instr->hydrogen();
1344 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1345 __ cmp(dividend, Operand::Zero());
1346 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1349 __ TruncatingDiv(result, dividend, Abs(divisor));
1350 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1352 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1353 __ mov(ip, Operand(divisor));
1354 __ smull(scratch0(), ip, result, ip);
1355 __ sub(scratch0(), scratch0(), dividend, SetCC);
1356 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1361 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1362 void LCodeGen::DoDivI(LDivI* instr) {
1363 HBinaryOperation* hdiv = instr->hydrogen();
1364 Register dividend = ToRegister(instr->dividend());
1365 Register divisor = ToRegister(instr->divisor());
1366 Register result = ToRegister(instr->result());
1369 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1370 __ cmp(divisor, Operand::Zero());
1371 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1374 // Check for (0 / -x) that will produce negative zero.
1375 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1377 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1378 // Do the test only if it hasn't been done above.
1379 __ cmp(divisor, Operand::Zero());
1381 __ b(pl, &positive);
1382 __ cmp(dividend, Operand::Zero());
1383 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1387 // Check for (kMinInt / -1).
1388 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1389 (!CpuFeatures::IsSupported(SUDIV) ||
1390 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1391 // We don't need to check for overflow when truncating with sdiv
1392 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
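// The second cmp only executes when the first set eq, so the deopt below
// fires exactly for dividend == kMinInt && divisor == -1.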
1393 __ cmp(dividend, Operand(kMinInt));
1394 __ cmp(divisor, Operand(-1), eq);
1395 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1398 if (CpuFeatures::IsSupported(SUDIV)) {
1399 CpuFeatureScope scope(masm(), SUDIV);
1400 __ sdiv(result, dividend, divisor);
1402 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1403 DoubleRegister vright = double_scratch0();
1404 __ vmov(double_scratch0().low(), dividend);
1405 __ vcvt_f64_s32(vleft, double_scratch0().low());
1406 __ vmov(double_scratch0().low(), divisor);
1407 __ vcvt_f64_s32(vright, double_scratch0().low());
1408 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1409 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1410 __ vmov(result, double_scratch0().low());
1413 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1414 // Compute remainder and deopt if it's not zero.
1415 Register remainder = scratch0();
1416 __ Mls(remainder, result, divisor, dividend);
1417 __ cmp(remainder, Operand::Zero());
1418 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1423 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1424 DwVfpRegister addend = ToDoubleRegister(instr->addend());
1425 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1426 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1428 // This is computed in-place.
1429 DCHECK(addend.is(ToDoubleRegister(instr->result())));
1431 __ vmla(addend, multiplier, multiplicand);
1435 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1436 DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1437 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1438 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1440 // This is computed in-place.
1441 DCHECK(minuend.is(ToDoubleRegister(instr->result())));
1443 __ vmls(minuend, multiplier, multiplicand);
1447 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1448 Register dividend = ToRegister(instr->dividend());
1449 Register result = ToRegister(instr->result());
1450 int32_t divisor = instr->divisor();
1452 // If the divisor is 1, return the dividend.
1454 __ Move(result, dividend);
1458 // If the divisor is positive, things are easy: There can be no deopts and we
1459 // can simply do an arithmetic right shift.
1460 int32_t shift = WhichPowerOf2Abs(divisor);
1462 __ mov(result, Operand(dividend, ASR, shift));
1466 // If the divisor is negative, we have to negate and handle edge cases.
1467 __ rsb(result, dividend, Operand::Zero(), SetCC);
1468 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1469 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1472 // Dividing by -1 is basically negation, unless we overflow.
1473 if (divisor == -1) {
1474 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1475 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1480 // If the negation could not overflow, simply shifting is OK.
1481 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1482 __ mov(result, Operand(result, ASR, shift));
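// Negating kMinInt above set the overflow flag: on overflow (vs) use the
// precomputed kMinInt / divisor, otherwise (vc) shift the negated dividend.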
1486 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
1487 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
1491 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1492 Register dividend = ToRegister(instr->dividend());
1493 int32_t divisor = instr->divisor();
1494 Register result = ToRegister(instr->result());
1495 DCHECK(!dividend.is(result));
1498 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1502 // Check for (0 / -x) that will produce negative zero.
1503 HMathFloorOfDiv* hdiv = instr->hydrogen();
1504 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1505 __ cmp(dividend, Operand::Zero());
1506 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1509 // Easy case: We need no dynamic check for the dividend and the flooring
1510 // division is the same as the truncating division.
1511 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1512 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1513 __ TruncatingDiv(result, dividend, Abs(divisor));
1514 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1518 // In the general case we may need to adjust before and after the truncating
1519 // division to get a flooring division.
1520 Register temp = ToRegister(instr->temp());
1521 DCHECK(!temp.is(dividend) && !temp.is(result));
1522 Label needs_adjustment, done;
1523 __ cmp(dividend, Operand::Zero());
1524 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1525 __ TruncatingDiv(result, dividend, Abs(divisor));
1526 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1528 __ bind(&needs_adjustment);
1529 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1530 __ TruncatingDiv(result, temp, Abs(divisor));
1531 if (divisor < 0) __ rsb(result, result, Operand::Zero());
1532 __ sub(result, result, Operand(1));
1537 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1538 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1539 HBinaryOperation* hdiv = instr->hydrogen();
1540 Register left = ToRegister(instr->dividend());
1541 Register right = ToRegister(instr->divisor());
1542 Register result = ToRegister(instr->result());
1545 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1546 __ cmp(right, Operand::Zero());
1547 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1550 // Check for (0 / -x) that will produce negative zero.
1551 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1553 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1554 // Do the test only if it hasn't been done above.
1555 __ cmp(right, Operand::Zero());
1557 __ b(pl, &positive);
1558 __ cmp(left, Operand::Zero());
1559 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1563 // Check for (kMinInt / -1).
1564 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1565 (!CpuFeatures::IsSupported(SUDIV) ||
1566 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1567 // We don't need to check for overflow when truncating with sdiv
1568 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1569 __ cmp(left, Operand(kMinInt));
1570 __ cmp(right, Operand(-1), eq);
1571 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1574 if (CpuFeatures::IsSupported(SUDIV)) {
1575 CpuFeatureScope scope(masm(), SUDIV);
1576 __ sdiv(result, left, right);
1578 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1579 DoubleRegister vright = double_scratch0();
1580 __ vmov(double_scratch0().low(), left);
1581 __ vcvt_f64_s32(vleft, double_scratch0().low());
1582 __ vmov(double_scratch0().low(), right);
1583 __ vcvt_f64_s32(vright, double_scratch0().low());
1584 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1585 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1586 __ vmov(result, double_scratch0().low());
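// Turn the truncating quotient into a flooring one: if the remainder is
// non-zero and its sign differs from the divisor's, the exact quotient is
// negative and non-integral, so 'remainder ASR 31' subtracts one from result.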
1590 Register remainder = scratch0();
1591 __ Mls(remainder, result, right, left);
1592 __ cmp(remainder, Operand::Zero());
1594 __ eor(remainder, remainder, Operand(right));
1595 __ add(result, result, Operand(remainder, ASR, 31));
1600 void LCodeGen::DoMulI(LMulI* instr) {
1601 Register result = ToRegister(instr->result());
1602 // Note that result may alias left.
1603 Register left = ToRegister(instr->left());
1604 LOperand* right_op = instr->right();
1606 bool bailout_on_minus_zero =
1607 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1608 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1610 if (right_op->IsConstantOperand()) {
1611 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1613 if (bailout_on_minus_zero && (constant < 0)) {
1614 // The case of a zero constant is handled separately below.
1615 // If the constant is negative and left is zero, the result should be -0.
1616 __ cmp(left, Operand::Zero());
1617 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1623 __ rsb(result, left, Operand::Zero(), SetCC);
1624 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1626 __ rsb(result, left, Operand::Zero());
1630 if (bailout_on_minus_zero) {
1631 // If left is strictly negative and the constant is zero, the
1632 // result is -0. Deoptimize if required, otherwise return 0.
1633 __ cmp(left, Operand::Zero());
1634 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
1636 __ mov(result, Operand::Zero());
1639 __ Move(result, left);
1642 // Multiplying by powers of two and powers of two plus or minus
1643 // one can be done faster with shifted operands.
1644 // For other constants we emit standard code.
1645 int32_t mask = constant >> 31;
1646 uint32_t constant_abs = (constant + mask) ^ mask;
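// Branch-free absolute value: e.g. constant = -6 gives mask = -1 and
// constant_abs = (-6 + -1) ^ -1 = 6; for constant >= 0 the value is unchanged.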
1648 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1649 int32_t shift = WhichPowerOf2(constant_abs);
1650 __ mov(result, Operand(left, LSL, shift));
1651 // Correct the sign of the result if the constant is negative.
1652 if (constant < 0) __ rsb(result, result, Operand::Zero());
1653 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1654 int32_t shift = WhichPowerOf2(constant_abs - 1);
1655 __ add(result, left, Operand(left, LSL, shift));
1656 // Correct the sign of the result if the constant is negative.
1657 if (constant < 0) __ rsb(result, result, Operand::Zero());
1658 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1659 int32_t shift = WhichPowerOf2(constant_abs + 1);
1660 __ rsb(result, left, Operand(left, LSL, shift));
1661 // Correct the sign of the result if the constant is negative.
1662 if (constant < 0) __ rsb(result, result, Operand::Zero());
1664 // Generate standard code.
1665 __ mov(ip, Operand(constant));
1666 __ mul(result, left, ip);
1671 DCHECK(right_op->IsRegister());
1672 Register right = ToRegister(right_op);
1675 Register scratch = scratch0();
1676 // scratch:result = left * right.
1677 if (instr->hydrogen()->representation().IsSmi()) {
1678 __ SmiUntag(result, left);
1679 __ smull(result, scratch, result, right);
1681 __ smull(result, scratch, left, right);
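// 'scratch' holds the high 32 bits of the product; the multiplication
// overflowed a 32-bit result iff they differ from the sign extension of the
// low word in 'result'.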
1683 __ cmp(scratch, Operand(result, ASR, 31));
1684 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1686 if (instr->hydrogen()->representation().IsSmi()) {
1687 __ SmiUntag(result, left);
1688 __ mul(result, result, right);
1690 __ mul(result, left, right);
1694 if (bailout_on_minus_zero) {
1696 __ teq(left, Operand(right));
1698 // Bail out if the result is minus zero.
1699 __ cmp(result, Operand::Zero());
1700 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1707 void LCodeGen::DoBitI(LBitI* instr) {
1708 LOperand* left_op = instr->left();
1709 LOperand* right_op = instr->right();
1710 DCHECK(left_op->IsRegister());
1711 Register left = ToRegister(left_op);
1712 Register result = ToRegister(instr->result());
1713 Operand right(no_reg);
1715 if (right_op->IsStackSlot()) {
1716 right = Operand(EmitLoadRegister(right_op, ip));
1718 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1719 right = ToOperand(right_op);
1722 switch (instr->op()) {
1723 case Token::BIT_AND:
1724 __ and_(result, left, right);
1727 __ orr(result, left, right);
1729 case Token::BIT_XOR:
1730 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1731 __ mvn(result, Operand(left));
1733 __ eor(result, left, right);
1743 void LCodeGen::DoShiftI(LShiftI* instr) {
1744 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1745 // result may alias either of them.
1746 LOperand* right_op = instr->right();
1747 Register left = ToRegister(instr->left());
1748 Register result = ToRegister(instr->result());
1749 Register scratch = scratch0();
1750 if (right_op->IsRegister()) {
1751 // Mask the right_op operand.
1752 __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1753 switch (instr->op()) {
1755 __ mov(result, Operand(left, ROR, scratch));
1758 __ mov(result, Operand(left, ASR, scratch));
1761 if (instr->can_deopt()) {
1762 __ mov(result, Operand(left, LSR, scratch), SetCC);
1763 DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
1765 __ mov(result, Operand(left, LSR, scratch));
1769 __ mov(result, Operand(left, LSL, scratch));
1776 // Mask the right_op operand.
1777 int value = ToInteger32(LConstantOperand::cast(right_op));
1778 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1779 switch (instr->op()) {
1781 if (shift_count != 0) {
1782 __ mov(result, Operand(left, ROR, shift_count));
1784 __ Move(result, left);
1788 if (shift_count != 0) {
1789 __ mov(result, Operand(left, ASR, shift_count));
1791 __ Move(result, left);
1795 if (shift_count != 0) {
1796 __ mov(result, Operand(left, LSR, shift_count));
1798 if (instr->can_deopt()) {
1799 __ tst(left, Operand(0x80000000));
1800 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
1802 __ Move(result, left);
1806 if (shift_count != 0) {
1807 if (instr->hydrogen_value()->representation().IsSmi() &&
1808 instr->can_deopt()) {
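// SmiTag with SetCC adds the value to itself (a checked shift left by one),
// so shift by shift_count - 1 first and let SmiTag supply the final bit while
// setting the overflow flag for the deopt below.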
1809 if (shift_count != 1) {
1810 __ mov(result, Operand(left, LSL, shift_count - 1));
1811 __ SmiTag(result, result, SetCC);
1813 __ SmiTag(result, left, SetCC);
1815 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1817 __ mov(result, Operand(left, LSL, shift_count));
1820 __ Move(result, left);
1831 void LCodeGen::DoSubI(LSubI* instr) {
1832 LOperand* left = instr->left();
1833 LOperand* right = instr->right();
1834 LOperand* result = instr->result();
1835 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1836 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1838 if (right->IsStackSlot()) {
1839 Register right_reg = EmitLoadRegister(right, ip);
1840 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1842 DCHECK(right->IsRegister() || right->IsConstantOperand());
1843 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1847 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1852 void LCodeGen::DoRSubI(LRSubI* instr) {
1853 LOperand* left = instr->left();
1854 LOperand* right = instr->right();
1855 LOperand* result = instr->result();
1856 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1857 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1859 if (right->IsStackSlot()) {
1860 Register right_reg = EmitLoadRegister(right, ip);
1861 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1863 DCHECK(right->IsRegister() || right->IsConstantOperand());
1864 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1868 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1873 void LCodeGen::DoConstantI(LConstantI* instr) {
1874 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1878 void LCodeGen::DoConstantS(LConstantS* instr) {
1879 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1883 void LCodeGen::DoConstantD(LConstantD* instr) {
1884 DCHECK(instr->result()->IsDoubleRegister());
1885 DwVfpRegister result = ToDoubleRegister(instr->result());
1886 #if V8_HOST_ARCH_IA32
1887 // Work around x87 sNaN -> qNaN breakage in simulator builds.
1889 uint64_t bits = instr->bits();
1890 if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
1891 V8_UINT64_C(0x7FF0000000000000)) {
1892 uint32_t lo = static_cast<uint32_t>(bits);
1893 uint32_t hi = static_cast<uint32_t>(bits >> 32);
1894 __ mov(ip, Operand(lo));
1895 __ mov(scratch0(), Operand(hi));
1896 __ vmov(result, ip, scratch0());
1900 double v = instr->value();
1901 __ Vmov(result, v, scratch0());
1905 void LCodeGen::DoConstantE(LConstantE* instr) {
1906 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1910 void LCodeGen::DoConstantT(LConstantT* instr) {
1911 Handle<Object> object = instr->value(isolate());
1912 AllowDeferredHandleDereference smi_check;
1913 __ Move(ToRegister(instr->result()), object);
1917 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1918 Register result = ToRegister(instr->result());
1919 Register map = ToRegister(instr->value());
1920 __ EnumLength(result, map);
1924 void LCodeGen::DoDateField(LDateField* instr) {
1925 Register object = ToRegister(instr->date());
1926 Register result = ToRegister(instr->result());
1927 Register scratch = ToRegister(instr->temp());
1928 Smi* index = instr->index();
1929 DCHECK(object.is(result));
1930 DCHECK(object.is(r0));
1931 DCHECK(!scratch.is(scratch0()));
1932 DCHECK(!scratch.is(object));
1934 if (index->value() == 0) {
1935 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1937 Label runtime, done;
1938 if (index->value() < JSDate::kFirstUncachedField) {
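// Cached date fields are only valid while the JSDate's cache stamp matches
// the isolate-wide date cache stamp; otherwise fall through to the runtime.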
1939 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1940 __ mov(scratch, Operand(stamp));
1941 __ ldr(scratch, MemOperand(scratch));
1942 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1943 __ cmp(scratch, scratch0());
1945 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1946 kPointerSize * index->value()));
1950 __ PrepareCallCFunction(2, scratch);
1951 __ mov(r1, Operand(index));
1952 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1958 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1960 String::Encoding encoding) {
1961 if (index->IsConstantOperand()) {
1962 int offset = ToInteger32(LConstantOperand::cast(index));
1963 if (encoding == String::TWO_BYTE_ENCODING) {
1964 offset *= kUC16Size;
1966 STATIC_ASSERT(kCharSize == 1);
1967 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1969 Register scratch = scratch0();
1970 DCHECK(!scratch.is(string));
1971 DCHECK(!scratch.is(ToRegister(index)));
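// For a variable index, add the (possibly scaled) index to the string pointer
// and address the character relative to the sequential string header.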
1972 if (encoding == String::ONE_BYTE_ENCODING) {
1973 __ add(scratch, string, Operand(ToRegister(index)));
1975 STATIC_ASSERT(kUC16Size == 2);
1976 __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
1978 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1982 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1983 String::Encoding encoding = instr->hydrogen()->encoding();
1984 Register string = ToRegister(instr->string());
1985 Register result = ToRegister(instr->result());
1987 if (FLAG_debug_code) {
1988 Register scratch = scratch0();
1989 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1990 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1992 __ and_(scratch, scratch,
1993 Operand(kStringRepresentationMask | kStringEncodingMask));
1994 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1995 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1996 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1997 ? one_byte_seq_type : two_byte_seq_type));
1998 __ Check(eq, kUnexpectedStringType);
2001 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2002 if (encoding == String::ONE_BYTE_ENCODING) {
2003 __ ldrb(result, operand);
2005 __ ldrh(result, operand);
2010 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2011 String::Encoding encoding = instr->hydrogen()->encoding();
2012 Register string = ToRegister(instr->string());
2013 Register value = ToRegister(instr->value());
2015 if (FLAG_debug_code) {
2016 Register index = ToRegister(instr->index());
2017 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2018 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2020 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2021 ? one_byte_seq_type : two_byte_seq_type;
2022 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2025 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2026 if (encoding == String::ONE_BYTE_ENCODING) {
2027 __ strb(value, operand);
2029 __ strh(value, operand);
2034 void LCodeGen::DoAddI(LAddI* instr) {
2035 LOperand* left = instr->left();
2036 LOperand* right = instr->right();
2037 LOperand* result = instr->result();
2038 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2039 SBit set_cond = can_overflow ? SetCC : LeaveCC;
2041 if (right->IsStackSlot()) {
2042 Register right_reg = EmitLoadRegister(right, ip);
2043 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
2045 DCHECK(right->IsRegister() || right->IsConstantOperand());
2046 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
2050 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
2055 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2056 LOperand* left = instr->left();
2057 LOperand* right = instr->right();
2058 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2059 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2060 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
2061 Register left_reg = ToRegister(left);
2062 Operand right_op = (right->IsRegister() || right->IsConstantOperand())
2064 : Operand(EmitLoadRegister(right, ip));
2065 Register result_reg = ToRegister(instr->result());
2066 __ cmp(left_reg, right_op);
2067 __ Move(result_reg, left_reg, condition);
2068 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
2070 DCHECK(instr->hydrogen()->representation().IsDouble());
2071 DwVfpRegister left_reg = ToDoubleRegister(left);
2072 DwVfpRegister right_reg = ToDoubleRegister(right);
2073 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
2074 Label result_is_nan, return_left, return_right, check_zero, done;
2075 __ VFPCompareAndSetFlags(left_reg, right_reg);
2076 if (operation == HMathMinMax::kMathMin) {
2077 __ b(mi, &return_left);
2078 __ b(gt, &return_right);
2080 __ b(mi, &return_right);
2081 __ b(gt, &return_left);
2083 __ b(vs, &result_is_nan);
2084 // Left equals right => check for -0.
2085 __ VFPCompareAndSetFlags(left_reg, 0.0);
2086 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2087 __ b(ne, &done); // left == right != 0.
2089 __ b(ne, &return_left); // left == right != 0.
2091 // At this point, both left and right are either 0 or -0.
2092 if (operation == HMathMinMax::kMathMin) {
2093 // We could use a single 'vorr' instruction here if we had NEON support.
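// -((-left) - right) is -0 exactly when at least one operand is -0 and +0
// otherwise, which is the required result of min over signed zeros.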
2094 __ vneg(left_reg, left_reg);
2095 __ vsub(result_reg, left_reg, right_reg);
2096 __ vneg(result_reg, result_reg);
2098 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2099 // the decision for vadd is easy because vand is a NEON instruction.
2100 __ vadd(result_reg, left_reg, right_reg);
2104 __ bind(&result_is_nan);
2105 __ vadd(result_reg, left_reg, right_reg);
2108 __ bind(&return_right);
2109 __ Move(result_reg, right_reg);
2110 if (!left_reg.is(result_reg)) {
2114 __ bind(&return_left);
2115 __ Move(result_reg, left_reg);
2122 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2123 DwVfpRegister left = ToDoubleRegister(instr->left());
2124 DwVfpRegister right = ToDoubleRegister(instr->right());
2125 DwVfpRegister result = ToDoubleRegister(instr->result());
2126 switch (instr->op()) {
2128 __ vadd(result, left, right);
2131 __ vsub(result, left, right);
2134 __ vmul(result, left, right);
2137 __ vdiv(result, left, right);
2140 __ PrepareCallCFunction(0, 2, scratch0());
2141 __ MovToFloatParameters(left, right);
2143 ExternalReference::mod_two_doubles_operation(isolate()),
2145 // Move the result into the double result register.
2146 __ MovFromFloatResult(result);
2156 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2157 DCHECK(ToRegister(instr->context()).is(cp));
2158 DCHECK(ToRegister(instr->left()).is(r1));
2159 DCHECK(ToRegister(instr->right()).is(r0));
2160 DCHECK(ToRegister(instr->result()).is(r0));
2162 Handle<Code> code = CodeFactory::BinaryOpIC(
2163 isolate(), instr->op(), instr->language_mode()).code();
2164 // Block literal pool emission to ensure that the nop indicating no inlined
2165 // smi code is in the correct position.
2166 Assembler::BlockConstPoolScope block_const_pool(masm());
2167 CallCode(code, RelocInfo::CODE_TARGET, instr);
2171 template<class InstrType>
2172 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
2173 int left_block = instr->TrueDestination(chunk_);
2174 int right_block = instr->FalseDestination(chunk_);
2176 int next_block = GetNextEmittedBlock();
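// Emit the cheapest branch shape: fall through to whichever target is the
// next emitted block, and collapse to a single goto when both targets agree
// or the condition is unconditional (al).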
2178 if (right_block == left_block || condition == al) {
2179 EmitGoto(left_block);
2180 } else if (left_block == next_block) {
2181 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
2182 } else if (right_block == next_block) {
2183 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2185 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2186 __ b(chunk_->GetAssemblyLabel(right_block));
2191 template<class InstrType>
2192 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
2193 int false_block = instr->FalseDestination(chunk_);
2194 __ b(condition, chunk_->GetAssemblyLabel(false_block));
2198 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2203 void LCodeGen::DoBranch(LBranch* instr) {
2204 Representation r = instr->hydrogen()->value()->representation();
2205 if (r.IsInteger32() || r.IsSmi()) {
2206 DCHECK(!info()->IsStub());
2207 Register reg = ToRegister(instr->value());
2208 __ cmp(reg, Operand::Zero());
2209 EmitBranch(instr, ne);
2210 } else if (r.IsDouble()) {
2211 DCHECK(!info()->IsStub());
2212 DwVfpRegister reg = ToDoubleRegister(instr->value());
2213 // Test the double value. Zero and NaN are false.
2214 __ VFPCompareAndSetFlags(reg, 0.0);
2215 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2216 EmitBranch(instr, ne);
2218 DCHECK(r.IsTagged());
2219 Register reg = ToRegister(instr->value());
2220 HType type = instr->hydrogen()->value()->type();
2221 if (type.IsBoolean()) {
2222 DCHECK(!info()->IsStub());
2223 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2224 EmitBranch(instr, eq);
2225 } else if (type.IsSmi()) {
2226 DCHECK(!info()->IsStub());
2227 __ cmp(reg, Operand::Zero());
2228 EmitBranch(instr, ne);
2229 } else if (type.IsJSArray()) {
2230 DCHECK(!info()->IsStub());
2231 EmitBranch(instr, al);
2232 } else if (type.IsHeapNumber()) {
2233 DCHECK(!info()->IsStub());
2234 DwVfpRegister dbl_scratch = double_scratch0();
2235 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2236 // Test the double value. Zero and NaN are false.
2237 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2238 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2239 EmitBranch(instr, ne);
2240 } else if (type.IsString()) {
2241 DCHECK(!info()->IsStub());
2242 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2243 __ cmp(ip, Operand::Zero());
2244 EmitBranch(instr, ne);
2246 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2247 // Avoid deopts in the case where we've never executed this path before.
2248 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2250 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2251 // undefined -> false.
2252 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2253 __ b(eq, instr->FalseLabel(chunk_));
2255 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2256 // Boolean -> its value.
2257 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2258 __ b(eq, instr->TrueLabel(chunk_));
2259 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2260 __ b(eq, instr->FalseLabel(chunk_));
2262 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2264 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2265 __ b(eq, instr->FalseLabel(chunk_));
2268 if (expected.Contains(ToBooleanStub::SMI)) {
2269 // Smis: 0 -> false, all other -> true.
2270 __ cmp(reg, Operand::Zero());
2271 __ b(eq, instr->FalseLabel(chunk_));
2272 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2273 } else if (expected.NeedsMap()) {
2274 // If we need a map later and have a Smi -> deopt.
2276 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
2279 const Register map = scratch0();
2280 if (expected.NeedsMap()) {
2281 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2283 if (expected.CanBeUndetectable()) {
2284 // Undetectable -> false.
2285 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2286 __ tst(ip, Operand(1 << Map::kIsUndetectable));
2287 __ b(ne, instr->FalseLabel(chunk_));
2291 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2292 // spec object -> true.
2293 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2294 __ b(ge, instr->TrueLabel(chunk_));
2297 if (expected.Contains(ToBooleanStub::STRING)) {
2298 // String value -> false iff empty.
2300 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2301 __ b(ge, &not_string);
2302 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2303 __ cmp(ip, Operand::Zero());
2304 __ b(ne, instr->TrueLabel(chunk_));
2305 __ b(instr->FalseLabel(chunk_));
2306 __ bind(&not_string);
2309 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2310 // Symbol value -> true.
2311 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2312 __ b(eq, instr->TrueLabel(chunk_));
2315 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2316 // heap number -> false iff +0, -0, or NaN.
2317 DwVfpRegister dbl_scratch = double_scratch0();
2318 Label not_heap_number;
2319 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2320 __ b(ne, &not_heap_number);
2321 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2322 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2323 __ cmp(r0, r0, vs); // NaN -> false.
2324 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
2325 __ b(instr->TrueLabel(chunk_));
2326 __ bind(&not_heap_number);
2329 if (!expected.IsGeneric()) {
2330 // We've seen something for the first time -> deopt.
2331 // This can only happen if we are not generic already.
2332 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
2339 void LCodeGen::EmitGoto(int block) {
2340 if (!IsNextEmittedBlock(block)) {
2341 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2346 void LCodeGen::DoGoto(LGoto* instr) {
2347 EmitGoto(instr->block_id());
2351 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2352 Condition cond = kNoCondition;
2355 case Token::EQ_STRICT:
2359 case Token::NE_STRICT:
2363 cond = is_unsigned ? lo : lt;
2366 cond = is_unsigned ? hi : gt;
2369 cond = is_unsigned ? ls : le;
2372 cond = is_unsigned ? hs : ge;
2375 case Token::INSTANCEOF:
2383 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2384 LOperand* left = instr->left();
2385 LOperand* right = instr->right();
2387 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2388 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2389 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2391 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2392 // We can statically evaluate the comparison.
2393 double left_val = ToDouble(LConstantOperand::cast(left));
2394 double right_val = ToDouble(LConstantOperand::cast(right));
2395 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2396 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2397 EmitGoto(next_block);
2399 if (instr->is_double()) {
2400 // Compare left and right operands as doubles and load the
2401 // resulting flags into the normal status register.
2402 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2403 // If a NaN is involved, i.e. the result is unordered (V set),
2404 // jump to false block label.
2405 __ b(vs, instr->FalseLabel(chunk_));
2407 if (right->IsConstantOperand()) {
2408 int32_t value = ToInteger32(LConstantOperand::cast(right));
2409 if (instr->hydrogen_value()->representation().IsSmi()) {
2410 __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
2412 __ cmp(ToRegister(left), Operand(value));
2414 } else if (left->IsConstantOperand()) {
2415 int32_t value = ToInteger32(LConstantOperand::cast(left));
2416 if (instr->hydrogen_value()->representation().IsSmi()) {
2417 __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
2419 __ cmp(ToRegister(right), Operand(value));
2421 // We commuted the operands, so commute the condition.
2422 cond = CommuteCondition(cond);
2424 __ cmp(ToRegister(left), ToRegister(right));
2427 EmitBranch(instr, cond);
2432 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2433 Register left = ToRegister(instr->left());
2434 Register right = ToRegister(instr->right());
2436 __ cmp(left, Operand(right));
2437 EmitBranch(instr, eq);
2441 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2442 if (instr->hydrogen()->representation().IsTagged()) {
2443 Register input_reg = ToRegister(instr->object());
2444 __ mov(ip, Operand(factory()->the_hole_value()));
2445 __ cmp(input_reg, ip);
2446 EmitBranch(instr, eq);
2450 DwVfpRegister input_reg = ToDoubleRegister(instr->object());
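// Compare the value with itself: only a NaN is unordered (V set). The hole is
// stored as a NaN with a distinguished upper word, which is checked below.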
2451 __ VFPCompareAndSetFlags(input_reg, input_reg);
2452 EmitFalseBranch(instr, vc);
2454 Register scratch = scratch0();
2455 __ VmovHigh(scratch, input_reg);
2456 __ cmp(scratch, Operand(kHoleNanUpper32));
2457 EmitBranch(instr, eq);
2461 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2462 Representation rep = instr->hydrogen()->value()->representation();
2463 DCHECK(!rep.IsInteger32());
2464 Register scratch = ToRegister(instr->temp());
2466 if (rep.IsDouble()) {
2467 DwVfpRegister value = ToDoubleRegister(instr->value());
2468 __ VFPCompareAndSetFlags(value, 0.0);
2469 EmitFalseBranch(instr, ne);
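// The value is some kind of zero; -0 is identified by its sign bit, i.e. an
// upper word equal to 0x80000000.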
2470 __ VmovHigh(scratch, value);
2471 __ cmp(scratch, Operand(0x80000000));
2473 Register value = ToRegister(instr->value());
2476 Heap::kHeapNumberMapRootIndex,
2477 instr->FalseLabel(chunk()),
2479 __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2480 __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2481 __ cmp(scratch, Operand(0x80000000));
2482 __ cmp(ip, Operand(0x00000000), eq);
2484 EmitBranch(instr, eq);
2488 Condition LCodeGen::EmitIsObject(Register input,
2490 Label* is_not_object,
2492 Register temp2 = scratch0();
2493 __ JumpIfSmi(input, is_not_object);
2495 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2496 __ cmp(input, temp2);
2497 __ b(eq, is_object);
2500 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2501 // Undetectable objects behave like undefined.
2502 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2503 __ tst(temp2, Operand(1 << Map::kIsUndetectable));
2504 __ b(ne, is_not_object);
2506 // Load instance type and check that it is in object type range.
2507 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2508 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2509 __ b(lt, is_not_object);
2510 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2515 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2516 Register reg = ToRegister(instr->value());
2517 Register temp1 = ToRegister(instr->temp());
2519 Condition true_cond =
2520 EmitIsObject(reg, temp1,
2521 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2523 EmitBranch(instr, true_cond);
2527 Condition LCodeGen::EmitIsString(Register input,
2529 Label* is_not_string,
2530 SmiCheck check_needed = INLINE_SMI_CHECK) {
2531 if (check_needed == INLINE_SMI_CHECK) {
2532 __ JumpIfSmi(input, is_not_string);
2534 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2540 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2541 Register reg = ToRegister(instr->value());
2542 Register temp1 = ToRegister(instr->temp());
2544 SmiCheck check_needed =
2545 instr->hydrogen()->value()->type().IsHeapObject()
2546 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2547 Condition true_cond =
2548 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2550 EmitBranch(instr, true_cond);
2554 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2555 Register input_reg = EmitLoadRegister(instr->value(), ip);
2556 __ SmiTst(input_reg);
2557 EmitBranch(instr, eq);
2561 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2562 Register input = ToRegister(instr->value());
2563 Register temp = ToRegister(instr->temp());
2565 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2566 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2568 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2569 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2570 __ tst(temp, Operand(1 << Map::kIsUndetectable));
2571 EmitBranch(instr, ne);
2575 static Condition ComputeCompareCondition(Token::Value op) {
2577 case Token::EQ_STRICT:
2590 return kNoCondition;
2595 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2596 DCHECK(ToRegister(instr->context()).is(cp));
2597 Token::Value op = instr->op();
2599 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op, SLOPPY).code();
2600 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2601 // This instruction also signals no smi code inlined.
2602 __ cmp(r0, Operand::Zero());
2604 Condition condition = ComputeCompareCondition(op);
2606 EmitBranch(instr, condition);
2610 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2611 InstanceType from = instr->from();
2612 InstanceType to = instr->to();
2613 if (from == FIRST_TYPE) return to;
2614 DCHECK(from == to || to == LAST_TYPE);
2619 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2620 InstanceType from = instr->from();
2621 InstanceType to = instr->to();
2622 if (from == to) return eq;
2623 if (to == LAST_TYPE) return hs;
2624 if (from == FIRST_TYPE) return ls;
2630 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2631 Register scratch = scratch0();
2632 Register input = ToRegister(instr->value());
2634 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2635 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2638 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2639 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2643 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2644 Register input = ToRegister(instr->value());
2645 Register result = ToRegister(instr->result());
2647 __ AssertString(input);
2649 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2650 __ IndexFromHash(result, result);
2654 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2655 LHasCachedArrayIndexAndBranch* instr) {
2656 Register input = ToRegister(instr->value());
2657 Register scratch = scratch0();
2660 FieldMemOperand(input, String::kHashFieldOffset));
2661 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2662 EmitBranch(instr, eq);
2666 // Branches to a label or falls through with the answer in flags. Trashes
2667 // the temp registers, but not the input.
2668 void LCodeGen::EmitClassOfTest(Label* is_true,
2670 Handle<String> class_name,
2674 DCHECK(!input.is(temp));
2675 DCHECK(!input.is(temp2));
2676 DCHECK(!temp.is(temp2));
2678 __ JumpIfSmi(input, is_false);
2680 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2681 // Assuming the following assertions, we can use the same compares to test
2682 // for both being a function type and being in the object type range.
2683 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2684 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2685 FIRST_SPEC_OBJECT_TYPE + 1);
2686 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2687 LAST_SPEC_OBJECT_TYPE - 1);
2688 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2689 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2692 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2695 // Faster code path to avoid two compares: subtract lower bound from the
2696 // actual type and do a signed compare with the width of the type range.
2697 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2698 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2699 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2700 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2701 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2705 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2706 // Check if the constructor in the map is a function.
2707 Register instance_type = ip;
2708 __ GetMapConstructor(temp, temp, temp2, instance_type);
2710 // Objects with a non-function constructor have class 'Object'.
2711 __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
2712 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
2718 // temp now contains the constructor function. Grab the
2719 // instance class name from there.
2720 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2721 __ ldr(temp, FieldMemOperand(temp,
2722 SharedFunctionInfo::kInstanceClassNameOffset));
2723 // The class name we are testing against is internalized since it's a literal.
2724 // The name in the constructor is internalized because of the way the context
2725 // is booted. This routine isn't expected to work for random API-created
2726 // classes and it doesn't have to because you can't access it with natives
2727 // syntax. Since both sides are internalized it is sufficient to use an
2728 // identity comparison.
2729 __ cmp(temp, Operand(class_name));
2730 // End with the answer in flags.
2734 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2735 Register input = ToRegister(instr->value());
2736 Register temp = scratch0();
2737 Register temp2 = ToRegister(instr->temp());
2738 Handle<String> class_name = instr->hydrogen()->class_name();
2740 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2741 class_name, input, temp, temp2);
2743 EmitBranch(instr, eq);
2747 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2748 Register reg = ToRegister(instr->value());
2749 Register temp = ToRegister(instr->temp());
2751 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2752 __ cmp(temp, Operand(instr->map()));
2753 EmitBranch(instr, eq);
2757 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2758 DCHECK(ToRegister(instr->context()).is(cp));
2759 DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0.
2760 DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1.
2762 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2763 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2765 __ cmp(r0, Operand::Zero());
2766 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2767 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2771 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2772 class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
2774 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2775 LInstanceOfKnownGlobal* instr)
2776 : LDeferredCode(codegen), instr_(instr) { }
2777 void Generate() override {
2778 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
2781 LInstruction* instr() override { return instr_; }
2782 Label* map_check() { return &map_check_; }
2783 Label* load_bool() { return &load_bool_; }
2786 LInstanceOfKnownGlobal* instr_;
2791 DeferredInstanceOfKnownGlobal* deferred;
2792 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2794 Label done, false_result;
2795 Register object = ToRegister(instr->value());
2796 Register temp = ToRegister(instr->temp());
2797 Register result = ToRegister(instr->result());
2799 // A Smi is not an instance of anything.
2800 __ JumpIfSmi(object, &false_result);
2802 // This is the inlined call site instanceof cache. The two occurrences of the
2803 // hole value will be patched to the last map/result pair generated by the instanceof stub.
2806 Register map = temp;
2807 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2809 // Block constant pool emission to ensure the positions of instructions are
2810 // as expected by the patcher. See InstanceofStub::Generate().
2811 Assembler::BlockConstPoolScope block_const_pool(masm());
2812 __ bind(deferred->map_check()); // Label for calculating code patching.
2813 // We use Factory::the_hole_value() on purpose instead of loading from the
2814 // root array to force relocation to be able to later patch with the cached map.
2816 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2817 __ mov(ip, Operand(cell));
2818 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
2819 __ cmp(map, Operand(ip));
2820 __ b(ne, &cache_miss);
2821 __ bind(deferred->load_bool()); // Label for calculating code patching.
2822 // We use Factory::the_hole_value() on purpose instead of loading from the
2823 // root array to force relocation to be able to later patch
2824 // with true or false.
2825 __ mov(result, Operand(factory()->the_hole_value()));
2829 // The inlined call site cache did not match. Check null and string before
2830 // calling the deferred code.
2831 __ bind(&cache_miss);
2832 // Null is not an instance of anything.
2833 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2834 __ cmp(object, Operand(ip));
2835 __ b(eq, &false_result);
2837 // String values are not instances of anything.
2838 Condition is_string = masm_->IsObjectStringType(object, temp);
2839 __ b(is_string, &false_result);
2841 // Go to the deferred code.
2842 __ b(deferred->entry());
2844 __ bind(&false_result);
2845 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2847 // Here result has either true or false. Deferred code also produces the true or false object.
2849 __ bind(deferred->exit());
2854 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2857 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2858 flags = static_cast<InstanceofStub::Flags>(
2859 flags | InstanceofStub::kArgsInRegisters);
2860 flags = static_cast<InstanceofStub::Flags>(
2861 flags | InstanceofStub::kCallSiteInlineCheck);
2862 flags = static_cast<InstanceofStub::Flags>(
2863 flags | InstanceofStub::kReturnTrueFalseObject);
2864 InstanceofStub stub(isolate(), flags);
2866 PushSafepointRegistersScope scope(this);
2867 LoadContextFromDeferred(instr->context());
2869 __ Move(InstanceofStub::right(), instr->function());
2871 int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
2872 int additional_delta = (call_size / Assembler::kInstrSize) + 4;
2873 // Make sure that the code size is predictable, since we use specific constant
2874 // offsets in the code to find embedded values.
2875 PredictableCodeSizeScope predictable(
2876 masm_, (additional_delta + 1) * Assembler::kInstrSize);
2877 // Make sure we don't emit any additional entries in the constant pool before
2878 // the call to ensure that the CallCodeSize() calculated the correct number of
2879 // instructions for the constant pool load.
2881 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
2882 int map_check_delta =
2883 masm_->InstructionsGeneratedSince(map_check) + additional_delta;
2884 int bool_load_delta =
2885 masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
2886 Label before_push_delta;
2887 __ bind(&before_push_delta);
2888 __ BlockConstPoolFor(additional_delta);
2889 // r5 is used to communicate the offset to the location of the map check.
2890 __ mov(r5, Operand(map_check_delta * kPointerSize));
2891 // r6 is used to communicate the offset to the location of the bool load.
2892 __ mov(r6, Operand(bool_load_delta * kPointerSize));
2893 // The mov above can generate one or two instructions. The delta was
2894 // computed for two instructions, so we need to pad here in case only one is emitted.
2896 while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
2900 CallCodeGeneric(stub.GetCode(),
2901 RelocInfo::CODE_TARGET,
2903 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2904 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2905 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2906 // Put the result value (r0) into the result register slot and
2907 // restore all registers.
2908 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
2912 void LCodeGen::DoCmpT(LCmpT* instr) {
2913 DCHECK(ToRegister(instr->context()).is(cp));
2914 Token::Value op = instr->op();
2917 CodeFactory::CompareIC(isolate(), op, instr->language_mode()).code();
2918 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2919 // This instruction also signals no smi code inlined.
2920 __ cmp(r0, Operand::Zero());
2922 Condition condition = ComputeCompareCondition(op);
2923 __ LoadRoot(ToRegister(instr->result()),
2924 Heap::kTrueValueRootIndex,
2926 __ LoadRoot(ToRegister(instr->result()),
2927 Heap::kFalseValueRootIndex,
2928 NegateCondition(condition));
2932 void LCodeGen::DoReturn(LReturn* instr) {
2933 if (FLAG_trace && info()->IsOptimizing()) {
2934 // Push the return value on the stack as the parameter.
2935 // Runtime::TraceExit returns its parameter in r0. We're leaving the code
2936 // managed by the register allocator and tearing down the frame, so it's
2937 // safe to write to the context register.
2939 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2940 __ CallRuntime(Runtime::kTraceExit, 1);
2942 if (info()->saves_caller_doubles()) {
2943 RestoreCallerDoubles();
2945 int no_frame_start = -1;
2946 if (NeedsEagerFrame()) {
2947 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2949 { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
2950 if (instr->has_constant_parameter_count()) {
2951 int parameter_count = ToInteger32(instr->constant_parameter_count());
2952 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2953 if (sp_delta != 0) {
2954 __ add(sp, sp, Operand(sp_delta));
2957 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2958 Register reg = ToRegister(instr->parameter_count());
2959 // The argument count parameter is a smi, so untag it before scaling.
2960 __ SmiUntag(reg);
2961 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2966 if (no_frame_start != -1) {
2967 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2974 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2975 Register vector_register = ToRegister(instr->temp_vector());
2976 Register slot_register = LoadDescriptor::SlotRegister();
2977 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2978 DCHECK(slot_register.is(r0));
2980 AllowDeferredHandleDereference vector_structure_check;
2981 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2982 __ Move(vector_register, vector);
2983 // No need to allocate this register.
2984 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2985 int index = vector->GetIndex(slot);
2986 __ mov(slot_register, Operand(Smi::FromInt(index)));
2990 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2991 DCHECK(ToRegister(instr->context()).is(cp));
2992 DCHECK(ToRegister(instr->global_object())
2993 .is(LoadDescriptor::ReceiverRegister()));
2994 DCHECK(ToRegister(instr->result()).is(r0));
2996 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2997 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2998 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2999 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
3000 PREMONOMORPHIC).code();
3001 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3005 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3006 Register context = ToRegister(instr->context());
3007 Register result = ToRegister(instr->result());
3008 __ ldr(result, ContextOperand(context, instr->slot_index()));
3009 if (instr->hydrogen()->RequiresHoleCheck()) {
3010 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3012 if (instr->hydrogen()->DeoptimizesOnHole()) {
3013 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3015 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
3021 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3022 Register context = ToRegister(instr->context());
3023 Register value = ToRegister(instr->value());
3024 Register scratch = scratch0();
3025 MemOperand target = ContextOperand(context, instr->slot_index());
3027 Label skip_assignment;
3029 if (instr->hydrogen()->RequiresHoleCheck()) {
3030 __ ldr(scratch, target);
3031 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3032 __ cmp(scratch, ip);
3033 if (instr->hydrogen()->DeoptimizesOnHole()) {
3034 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3036 __ b(ne, &skip_assignment);
3040 __ str(value, target);
3041 if (instr->hydrogen()->NeedsWriteBarrier()) {
3042 SmiCheck check_needed =
3043 instr->hydrogen()->value()->type().IsHeapObject()
3044 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3045 __ RecordWriteContextSlot(context,
3049 GetLinkRegisterState(),
3051 EMIT_REMEMBERED_SET,
3055 __ bind(&skip_assignment);
3059 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3060 HObjectAccess access = instr->hydrogen()->access();
3061 int offset = access.offset();
3062 Register object = ToRegister(instr->object());
3064 if (access.IsExternalMemory()) {
3065 Register result = ToRegister(instr->result());
3066 MemOperand operand = MemOperand(object, offset);
3067 __ Load(result, operand, access.representation());
3071 if (instr->hydrogen()->representation().IsDouble()) {
3072 DwVfpRegister result = ToDoubleRegister(instr->result());
3073 __ vldr(result, FieldMemOperand(object, offset));
3077 Register result = ToRegister(instr->result());
3078 if (!access.IsInobject()) {
3079 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3082 MemOperand operand = FieldMemOperand(object, offset);
3083 __ Load(result, operand, access.representation());
3087 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3088 DCHECK(ToRegister(instr->context()).is(cp));
3089 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3090 DCHECK(ToRegister(instr->result()).is(r0));
3092 // Name is always in r2.
3093 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3094 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3095 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
3096 isolate(), NOT_CONTEXTUAL,
3097 instr->hydrogen()->initialization_state()).code();
3098 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3102 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3103 Register scratch = scratch0();
3104 Register function = ToRegister(instr->function());
3105 Register result = ToRegister(instr->result());
3107 // Get the prototype or initial map from the function.
3109 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3111 // Check that the function has a prototype or an initial map.
3112 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3114 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3116 // If the function does not have an initial map, we're done.
3118 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3121 // Get the prototype from the initial map.
3122 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3129 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3130 Register result = ToRegister(instr->result());
3131 __ LoadRoot(result, instr->index());
3135 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3136 Register arguments = ToRegister(instr->arguments());
3137 Register result = ToRegister(instr->result());
3138 // There are two words between the frame pointer and the last argument.
3139 // Subtracting the index from the length accounts for one of them; add one more for the other.
3140 if (instr->length()->IsConstantOperand()) {
3141 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3142 if (instr->index()->IsConstantOperand()) {
3143 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3144 int index = (const_length - const_index) + 1;
3145 __ ldr(result, MemOperand(arguments, index * kPointerSize));
3147 Register index = ToRegister(instr->index());
3148 __ rsb(result, index, Operand(const_length + 1));
3149 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3151 } else if (instr->index()->IsConstantOperand()) {
3152 Register length = ToRegister(instr->length());
3153 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3154 int loc = const_index - 1;
3156 __ sub(result, length, Operand(loc));
3157 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3159 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3162 Register length = ToRegister(instr->length());
3163 Register index = ToRegister(instr->index());
3164 __ sub(result, length, index);
3165 __ add(result, result, Operand(1));
3166 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3171 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3172 Register external_pointer = ToRegister(instr->elements());
3173 Register key = no_reg;
3174 ElementsKind elements_kind = instr->elements_kind();
3175 bool key_is_constant = instr->key()->IsConstantOperand();
3176 int constant_key = 0;
3177 if (key_is_constant) {
3178 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3179 if (constant_key & 0xF0000000) {
3180 Abort(kArrayIndexConstantValueTooBig);
3183 key = ToRegister(instr->key());
3185 int element_size_shift = ElementsKindToShiftSize(elements_kind);
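// A smi key is already shifted left by the tag size, so scale it by one bit
// less than the element size.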
3186 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3187 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3188 int base_offset = instr->base_offset();
3190 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3191 elements_kind == FLOAT32_ELEMENTS ||
3192 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3193 elements_kind == FLOAT64_ELEMENTS) {
3194 DwVfpRegister result = ToDoubleRegister(instr->result());
3195 Operand operand = key_is_constant
3196 ? Operand(constant_key << element_size_shift)
3197 : Operand(key, LSL, shift_size);
3198 __ add(scratch0(), external_pointer, operand);
3199 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3200 elements_kind == FLOAT32_ELEMENTS) {
3201 __ vldr(double_scratch0().low(), scratch0(), base_offset);
3202 __ vcvt_f64_f32(result, double_scratch0().low());
3203 } else { // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS
3204 __ vldr(result, scratch0(), base_offset);
3207 Register result = ToRegister(instr->result());
3208 MemOperand mem_operand = PrepareKeyedOperand(
3209 key, external_pointer, key_is_constant, constant_key,
3210 element_size_shift, shift_size, base_offset);
3211 switch (elements_kind) {
3212 case EXTERNAL_INT8_ELEMENTS:
3214 __ ldrsb(result, mem_operand);
3216 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3217 case EXTERNAL_UINT8_ELEMENTS:
3218 case UINT8_ELEMENTS:
3219 case UINT8_CLAMPED_ELEMENTS:
3220 __ ldrb(result, mem_operand);
3222 case EXTERNAL_INT16_ELEMENTS:
3223 case INT16_ELEMENTS:
3224 __ ldrsh(result, mem_operand);
3226 case EXTERNAL_UINT16_ELEMENTS:
3227 case UINT16_ELEMENTS:
3228 __ ldrh(result, mem_operand);
3230 case EXTERNAL_INT32_ELEMENTS:
3231 case INT32_ELEMENTS:
3232 __ ldr(result, mem_operand);
3234 case EXTERNAL_UINT32_ELEMENTS:
3235 case UINT32_ELEMENTS:
3236 __ ldr(result, mem_operand);
3237 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3238 __ cmp(result, Operand(0x80000000));
3239 DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
3242 case FLOAT32_ELEMENTS:
3243 case FLOAT64_ELEMENTS:
3244 case EXTERNAL_FLOAT32_ELEMENTS:
3245 case EXTERNAL_FLOAT64_ELEMENTS:
3246 case FAST_HOLEY_DOUBLE_ELEMENTS:
3247 case FAST_HOLEY_ELEMENTS:
3248 case FAST_HOLEY_SMI_ELEMENTS:
3249 case FAST_DOUBLE_ELEMENTS:
3251 case FAST_SMI_ELEMENTS:
3252 case DICTIONARY_ELEMENTS:
3253 case SLOPPY_ARGUMENTS_ELEMENTS:
3261 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3262 Register elements = ToRegister(instr->elements());
3263 bool key_is_constant = instr->key()->IsConstantOperand();
3264 Register key = no_reg;
3265 DwVfpRegister result = ToDoubleRegister(instr->result());
3266 Register scratch = scratch0();
3268 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3270 int base_offset = instr->base_offset();
3271 if (key_is_constant) {
3272 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3273 if (constant_key & 0xF0000000) {
3274 Abort(kArrayIndexConstantValueTooBig);
3276 base_offset += constant_key * kDoubleSize;
3278 __ add(scratch, elements, Operand(base_offset));
3280 if (!key_is_constant) {
3281 key = ToRegister(instr->key());
3282 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3283 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3284 __ add(scratch, scratch, Operand(key, LSL, shift_size));
3287 __ vldr(result, scratch, 0);
3289 if (instr->hydrogen()->RequiresHoleCheck()) {
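// The hole is a NaN with a fixed bit pattern, so checking the upper word
// (located sizeof(kHoleNanLower32) bytes above the value) is sufficient.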
3290 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3291 __ cmp(scratch, Operand(kHoleNanUpper32));
3292 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3297 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3298 Register elements = ToRegister(instr->elements());
3299 Register result = ToRegister(instr->result());
3300 Register scratch = scratch0();
3301 Register store_base = scratch;
3302 int offset = instr->base_offset();
3304 if (instr->key()->IsConstantOperand()) {
3305 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3306 offset += ToInteger32(const_operand) * kPointerSize;
3307 store_base = elements;
3309 Register key = ToRegister(instr->key());
3310 // Even though the HLoadKeyed instruction forces the input
3311 // representation for the key to be an integer, the input gets replaced
3312 // during bound check elimination with the index argument to the bounds
3313 // check, which can be tagged, so that case must be handled here, too.
3314 if (instr->hydrogen()->key()->representation().IsSmi()) {
3315 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
3317 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3320 __ ldr(result, MemOperand(store_base, offset));
3322 // Check for the hole value.
3323 if (instr->hydrogen()->RequiresHoleCheck()) {
3324 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3326 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
3328 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3329 __ cmp(result, scratch);
3330 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3332 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3333 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3335 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3336 __ cmp(result, scratch);
3338 if (info()->IsStub()) {
3339 // A stub can safely convert the hole to undefined only if the array
3340 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3341 // it needs to bail out.
3342 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3343 __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3344 __ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
3345 DeoptimizeIf(ne, instr, Deoptimizer::kHole);
3347 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3353 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3354 if (instr->is_typed_elements()) {
3355 DoLoadKeyedExternalArray(instr);
3356 } else if (instr->hydrogen()->representation().IsDouble()) {
3357 DoLoadKeyedFixedDoubleArray(instr);
3359 DoLoadKeyedFixedArray(instr);
3364 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3366 bool key_is_constant,
3371 if (key_is_constant) {
3372 return MemOperand(base, (constant_key << element_size) + base_offset);
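// A shift_size of -1 means a smi key indexing byte-sized elements: the smi
// tag is removed by shifting the key right by one instead of scaling it up.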
3375 if (base_offset == 0) {
3376 if (shift_size >= 0) {
3377 return MemOperand(base, key, LSL, shift_size);
3379 DCHECK_EQ(-1, shift_size);
3380 return MemOperand(base, key, LSR, 1);
3384 if (shift_size >= 0) {
3385 __ add(scratch0(), base, Operand(key, LSL, shift_size));
3386 return MemOperand(scratch0(), base_offset);
3388 DCHECK_EQ(-1, shift_size);
3389 __ add(scratch0(), base, Operand(key, ASR, 1));
3390 return MemOperand(scratch0(), base_offset);
3395 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3396 DCHECK(ToRegister(instr->context()).is(cp));
3397 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3398 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3400 if (instr->hydrogen()->HasVectorAndSlot()) {
3401 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3405 CodeFactory::KeyedLoadICInOptimizedCode(
3406 isolate(), instr->hydrogen()->initialization_state()).code();
3407 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3411 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3412 Register scratch = scratch0();
3413 Register result = ToRegister(instr->result());
3415 if (instr->hydrogen()->from_inlined()) {
3416 __ sub(result, sp, Operand(2 * kPointerSize));
3418 // Check if the calling frame is an arguments adaptor frame.
3419 Label done, adapted;
3420 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3421 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3422 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3424 // Result is the frame pointer for the frame if not adapted and for the real
3425 // frame below the adaptor frame if adapted.
3426 __ mov(result, fp, LeaveCC, ne);
3427 __ mov(result, scratch, LeaveCC, eq);
3432 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3433 Register elem = ToRegister(instr->elements());
3434 Register result = ToRegister(instr->result());
3438 // If there is no arguments adaptor frame, the number of arguments is fixed.
3440 __ mov(result, Operand(scope()->num_parameters()));
3443 // Arguments adaptor frame present. Get argument length from there.
3444 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3446 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3447 __ SmiUntag(result);
3449 // Argument length is in result register.
3454 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3455 Register receiver = ToRegister(instr->receiver());
3456 Register function = ToRegister(instr->function());
3457 Register result = ToRegister(instr->result());
3458 Register scratch = scratch0();
3460 // If the receiver is null or undefined, we have to pass the global
3461 // object as a receiver to normal functions. Values have to be
3462 // passed unchanged to builtins and strict-mode functions.
3463 Label global_object, result_in_receiver;
3465 if (!instr->hydrogen()->known_function()) {
3466 // Do not transform the receiver to object for strict mode functions.
3469 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3471 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
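// The compiler hints field is stored as a smi, so bit positions within it are
// offset by the smi tag size.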
3472 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3473 __ tst(scratch, Operand(mask));
3474 __ b(ne, &result_in_receiver);
3476 // Do not transform the receiver to object for builtins.
3477 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3478 __ b(ne, &result_in_receiver);
3481 // Normal function. Replace undefined or null with global receiver.
3482 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3483 __ cmp(receiver, scratch);
3484 __ b(eq, &global_object);
3485 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3486 __ cmp(receiver, scratch);
3487 __ b(eq, &global_object);
3489 // Deoptimize if the receiver is not a JS object.
3490 __ SmiTst(receiver);
3491 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
3492 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3493 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3495 __ b(&result_in_receiver);
3496 __ bind(&global_object);
3497 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
3499 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3500 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3502 if (result.is(receiver)) {
3503 __ bind(&result_in_receiver);
3507 __ bind(&result_in_receiver);
3508 __ mov(result, receiver);
3509 __ bind(&result_ok);
3514 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3515 Register receiver = ToRegister(instr->receiver());
3516 Register function = ToRegister(instr->function());
3517 Register length = ToRegister(instr->length());
3518 Register elements = ToRegister(instr->elements());
3519 Register scratch = scratch0();
3520 DCHECK(receiver.is(r0)); // Used for parameter count.
3521 DCHECK(function.is(r1)); // Required by InvokeFunction.
3522 DCHECK(ToRegister(instr->result()).is(r0));
3524 // Copy the arguments to this function possibly from the
3525 // adaptor frame below it.
3526 const uint32_t kArgumentsLimit = 1 * KB;
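// apply() with more than kArgumentsLimit arguments is not handled in
// optimized code; deoptimize and let the general case deal with it.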
3527 __ cmp(length, Operand(kArgumentsLimit));
3528 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
3530 // Push the receiver and use the register to keep the original
3531 // number of arguments.
3533 __ mov(receiver, length);
3534 // The arguments are at a one-pointer-size offset from elements.
3535 __ add(elements, elements, Operand(1 * kPointerSize));
3537 // Loop through the arguments pushing them onto the execution stack.
3540 // length is a small non-negative integer, due to the test above.
3541 __ cmp(length, Operand::Zero());
3544 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3546 __ sub(length, length, Operand(1), SetCC);
3550 DCHECK(instr->HasPointerMap());
3551 LPointerMap* pointers = instr->pointer_map();
3552 SafepointGenerator safepoint_generator(
3553 this, pointers, Safepoint::kLazyDeopt);
3554 // The number of arguments is stored in receiver which is r0, as expected
3555 // by InvokeFunction.
3556 ParameterCount actual(receiver);
3557 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3561 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3562 LOperand* argument = instr->value();
3563 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3564 Abort(kDoPushArgumentNotImplementedForDoubleType);
3566 Register argument_reg = EmitLoadRegister(argument, ip);
3567 __ push(argument_reg);
3572 void LCodeGen::DoDrop(LDrop* instr) {
3573 __ Drop(instr->count());
3577 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3578 Register result = ToRegister(instr->result());
3579 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3583 void LCodeGen::DoContext(LContext* instr) {
3584 // If there is a non-return use, the context must be moved to a register.
3585 Register result = ToRegister(instr->result());
3586 if (info()->IsOptimizing()) {
3587 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3589 // If there is no frame, the context must be in cp.
3590 DCHECK(result.is(cp));
3595 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3596 DCHECK(ToRegister(instr->context()).is(cp));
3597 __ push(cp); // The context is the first argument.
3598 __ Move(scratch0(), instr->hydrogen()->pairs());
3599 __ push(scratch0());
3600 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3601 __ push(scratch0());
3602 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3606 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3607 int formal_parameter_count, int arity,
3608 LInstruction* instr) {
3609 bool dont_adapt_arguments =
3610 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3611 bool can_invoke_directly =
3612 dont_adapt_arguments || formal_parameter_count == arity;
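// A direct call is only possible when no arguments adaptation is needed,
// i.e. the function opted out of adaptation or the actual arity matches the
// declared parameter count.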
3614 Register function_reg = r1;
3616 LPointerMap* pointers = instr->pointer_map();
3618 if (can_invoke_directly) {
3620 __ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3622 // Set r0 to arguments count if adaptation is not needed. Assumes that r0
3623 // is available to write to at this point.
3624 if (dont_adapt_arguments) {
3625 __ mov(r0, Operand(arity));
3629 __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3632 // Set up deoptimization.
3633 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3635 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3636 ParameterCount count(arity);
3637 ParameterCount expected(formal_parameter_count);
3638 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3643 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3644 DCHECK(instr->context() != NULL);
3645 DCHECK(ToRegister(instr->context()).is(cp));
3646 Register input = ToRegister(instr->value());
3647 Register result = ToRegister(instr->result());
3648 Register scratch = scratch0();
3650 // Deoptimize if not a heap number.
3651 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3652 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3653 __ cmp(scratch, Operand(ip));
3654 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
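// Fast path: Math.abs on a tagged heap number only needs the sign bit of the
// exponent word cleared. A positive input is returned unchanged; a negative
// one (e.g. -1.5) gets a freshly allocated heap number holding 1.5.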
3656 Label done;
3657 Register exponent = scratch0();
3658 scratch = no_reg;
3659 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3660 // Check the sign of the argument. If the argument is positive, just
3661 // return it.
3662 __ tst(exponent, Operand(HeapNumber::kSignMask));
3663 // Move the input to the result if necessary.
3664 __ Move(result, input);
3665 __ b(eq, &done);
3667 // Input is negative. Reverse its sign.
3668 // Preserve the value of all registers.
3669 {
3670 PushSafepointRegistersScope scope(this);
3672 // Registers were saved at the safepoint, so we can use
3673 // many scratch registers.
3674 Register tmp1 = input.is(r1) ? r0 : r1;
3675 Register tmp2 = input.is(r2) ? r0 : r2;
3676 Register tmp3 = input.is(r3) ? r0 : r3;
3677 Register tmp4 = input.is(r4) ? r0 : r4;
3679 // exponent: floating point exponent value.
3681 Label allocated, slow;
3682 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3683 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3684 __ b(&allocated);
3686 // Slow case: Call the runtime system to do the number allocation.
3687 __ bind(&slow);
3689 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3690 instr->context());
3691 // Set the pointer to the new heap number in tmp.
3692 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3693 // Restore input_reg after call to runtime.
3694 __ LoadFromSafepointRegisterSlot(input, input);
3695 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3697 __ bind(&allocated);
3698 // exponent: floating point exponent value.
3699 // tmp1: allocated heap number.
3700 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3701 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3702 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3703 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3705 __ StoreToSafepointRegisterSlot(tmp1, result);
3706 }
3708 __ bind(&done);
3712 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3713 Register input = ToRegister(instr->value());
3714 Register result = ToRegister(instr->result());
3715 __ cmp(input, Operand::Zero());
3716 __ Move(result, input, pl);
3717 // We can make rsb conditional because the previous cmp instruction
3718 // will clear the V (overflow) flag and rsb won't set this flag
3719 // if input is positive.
3720 __ rsb(result, input, Operand::Zero(), SetCC, mi);
3721 // Deoptimize on overflow.
3722 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
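// Example: kMinInt (0x80000000) is the only int32 whose absolute value is not
// representable; rsb overflows, sets the V flag, and the deopt above triggers.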
3726 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3727 // Class for deferred case.
3728 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3730 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3731 : LDeferredCode(codegen), instr_(instr) { }
3732 void Generate() override {
3733 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3735 LInstruction* instr() override { return instr_; }
3741 Representation r = instr->hydrogen()->value()->representation();
3742 if (r.IsDouble()) {
3743 DwVfpRegister input = ToDoubleRegister(instr->value());
3744 DwVfpRegister result = ToDoubleRegister(instr->result());
3745 __ vabs(result, input);
3746 } else if (r.IsSmiOrInteger32()) {
3747 EmitIntegerMathAbs(instr);
3748 } else {
3749 // Representation is tagged.
3750 DeferredMathAbsTaggedHeapNumber* deferred =
3751 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3752 Register input = ToRegister(instr->value());
3754 __ JumpIfNotSmi(input, deferred->entry());
3755 // If smi, handle it directly.
3756 EmitIntegerMathAbs(instr);
3757 __ bind(deferred->exit());
3762 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3763 DwVfpRegister input = ToDoubleRegister(instr->value());
3764 Register result = ToRegister(instr->result());
3765 Register input_high = scratch0();
3766 Label done, exact;
3768 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
3769 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3771 __ bind(&exact);
3772 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3773 // Test for -0.
3774 __ cmp(result, Operand::Zero());
3775 __ b(ne, &done);
3776 __ cmp(input_high, Operand::Zero());
3777 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
3778 }
3779 __ bind(&done);
3783 void LCodeGen::DoMathRound(LMathRound* instr) {
3784 DwVfpRegister input = ToDoubleRegister(instr->value());
3785 Register result = ToRegister(instr->result());
3786 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3787 DwVfpRegister input_plus_dot_five = double_scratch1;
3788 Register input_high = scratch0();
3789 DwVfpRegister dot_five = double_scratch0();
3790 Label convert, done;
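// Math.round(x) is computed as floor(x + 0.5) once |x| > 0.5; smaller
// magnitudes are handled inline, e.g. round(0.4) -> +0, round(0.5) -> 1,
// round(-0.4) -> -0 (the -0 case deoptimizes when kBailoutOnMinusZero is set).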
3792 __ Vmov(dot_five, 0.5, scratch0());
3793 __ vabs(double_scratch1, input);
3794 __ VFPCompareAndSetFlags(double_scratch1, dot_five);
3795 // If input is in [-0.5, -0], the result is -0.
3796 // If input is in [+0, +0.5[, the result is +0.
3797 // If the input is +0.5, the result is 1.
3798 __ b(hi, &convert); // Out of [-0.5, +0.5].
3799 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3800 __ VmovHigh(input_high, input);
3801 __ cmp(input_high, Operand::Zero());
3802 // [-0.5, -0].
3803 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
3804 }
3805 __ VFPCompareAndSetFlags(input, dot_five);
3806 __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
3807 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3808 // flag kBailoutOnMinusZero.
3809 __ mov(result, Operand::Zero(), LeaveCC, ne);
3810 __ b(&done);
3812 __ bind(&convert);
3813 __ vadd(input_plus_dot_five, input, dot_five);
3814 // Reuse dot_five (double_scratch0) as we no longer need this value.
3815 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
3816 &done, &done);
3817 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3818 __ bind(&done);
3822 void LCodeGen::DoMathFround(LMathFround* instr) {
3823 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
3824 DwVfpRegister output_reg = ToDoubleRegister(instr->result());
3825 LowDwVfpRegister scratch = double_scratch0();
3826 __ vcvt_f32_f64(scratch.low(), input_reg);
3827 __ vcvt_f64_f32(output_reg, scratch.low());
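// Math.fround is implemented as a double->float32->double round trip, e.g.
// Math.fround(1.1) yields 1.100000023841858 (the nearest float32 value).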
3831 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3832 DwVfpRegister input = ToDoubleRegister(instr->value());
3833 DwVfpRegister result = ToDoubleRegister(instr->result());
3834 __ vsqrt(result, input);
3838 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3839 DwVfpRegister input = ToDoubleRegister(instr->value());
3840 DwVfpRegister result = ToDoubleRegister(instr->result());
3841 DwVfpRegister temp = double_scratch0();
3843 // Note that according to ECMA-262 15.8.2.13:
3844 // Math.pow(-Infinity, 0.5) == Infinity
3845 // Math.sqrt(-Infinity) == NaN
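// This is why -Infinity is special-cased below: Math.pow(-Infinity, 0.5) must
// be +Infinity, whereas the sqrt-based fast path would produce NaN. Adding +0
// before the sqrt also turns -0 into +0, so Math.pow(-0, 0.5) is +0.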
3846 Label done;
3847 __ vmov(temp, -V8_INFINITY, scratch0());
3848 __ VFPCompareAndSetFlags(input, temp);
3849 __ vneg(result, temp, eq);
3850 __ b(&done, eq);
3852 // Add +0 to convert -0 to +0.
3853 __ vadd(result, input, kDoubleRegZero);
3854 __ vsqrt(result, result);
3855 __ bind(&done);
3859 void LCodeGen::DoPower(LPower* instr) {
3860 Representation exponent_type = instr->hydrogen()->right()->representation();
3861 // Having marked this as a call, we can use any registers.
3862 // Just make sure that the input/output registers are the expected ones.
3863 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3864 DCHECK(!instr->right()->IsDoubleRegister() ||
3865 ToDoubleRegister(instr->right()).is(d1));
3866 DCHECK(!instr->right()->IsRegister() ||
3867 ToRegister(instr->right()).is(tagged_exponent));
3868 DCHECK(ToDoubleRegister(instr->left()).is(d0));
3869 DCHECK(ToDoubleRegister(instr->result()).is(d2));
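// MathPowStub uses a fixed register contract, checked by the DCHECKs above:
// the base arrives in d0, the exponent in d1 (or in the tagged exponent
// register for the TAGGED variant), and the result comes back in d2.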
3871 if (exponent_type.IsSmi()) {
3872 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3873 __ CallStub(&stub);
3874 } else if (exponent_type.IsTagged()) {
3875 Label no_deopt;
3876 __ JumpIfSmi(tagged_exponent, &no_deopt);
3877 DCHECK(!r6.is(tagged_exponent));
3878 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3879 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3880 __ cmp(r6, Operand(ip));
3881 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
3882 __ bind(&no_deopt);
3883 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3884 __ CallStub(&stub);
3885 } else if (exponent_type.IsInteger32()) {
3886 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3887 __ CallStub(&stub);
3888 } else {
3889 DCHECK(exponent_type.IsDouble());
3890 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3891 __ CallStub(&stub);
3892 }
3896 void LCodeGen::DoMathExp(LMathExp* instr) {
3897 DwVfpRegister input = ToDoubleRegister(instr->value());
3898 DwVfpRegister result = ToDoubleRegister(instr->result());
3899 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3900 DwVfpRegister double_scratch2 = double_scratch0();
3901 Register temp1 = ToRegister(instr->temp1());
3902 Register temp2 = ToRegister(instr->temp2());
3904 MathExpGenerator::EmitMathExp(
3905 masm(), input, result, double_scratch1, double_scratch2,
3906 temp1, temp2, scratch0());
3910 void LCodeGen::DoMathLog(LMathLog* instr) {
3911 __ PrepareCallCFunction(0, 1, scratch0());
3912 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3913 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3914 0, 1);
3915 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3919 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3920 Register input = ToRegister(instr->value());
3921 Register result = ToRegister(instr->result());
3922 __ clz(result, input);
3926 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3927 DCHECK(ToRegister(instr->context()).is(cp));
3928 DCHECK(ToRegister(instr->function()).is(r1));
3929 DCHECK(instr->HasPointerMap());
3931 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3932 if (known_function.is_null()) {
3933 LPointerMap* pointers = instr->pointer_map();
3934 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3935 ParameterCount count(instr->arity());
3936 __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
3937 } else {
3938 CallKnownFunction(known_function,
3939 instr->hydrogen()->formal_parameter_count(),
3940 instr->arity(), instr);
3945 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3946 DCHECK(ToRegister(instr->result()).is(r0));
3948 if (instr->hydrogen()->IsTailCall()) {
3949 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
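// Tail calls tear down our own frame (if one was built) and jump straight to
// the target, so no safepoint or lazy-deopt record is needed for them.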
3951 if (instr->target()->IsConstantOperand()) {
3952 LConstantOperand* target = LConstantOperand::cast(instr->target());
3953 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3954 __ Jump(code, RelocInfo::CODE_TARGET);
3955 } else {
3956 DCHECK(instr->target()->IsRegister());
3957 Register target = ToRegister(instr->target());
3958 // Make sure we don't emit any additional entries in the constant pool
3959 // before the call to ensure that the CallCodeSize() calculated the correct
3961 // number of instructions for the constant pool load.
3962 {
3963 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
3964 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3965 __ Jump(target);
3966 }
3967 }
3968 } else {
3969 LPointerMap* pointers = instr->pointer_map();
3970 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3972 if (instr->target()->IsConstantOperand()) {
3973 LConstantOperand* target = LConstantOperand::cast(instr->target());
3974 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3975 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3976 PlatformInterfaceDescriptor* call_descriptor =
3977 instr->descriptor().platform_specific_descriptor();
3978 if (call_descriptor != NULL) {
3979 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
3980 call_descriptor->storage_mode());
3981 } else {
3982 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al);
3983 }
3984 } else {
3985 DCHECK(instr->target()->IsRegister());
3986 Register target = ToRegister(instr->target());
3987 generator.BeforeCall(__ CallSize(target));
3988 // Make sure we don't emit any additional entries in the constant pool
3989 // before the call to ensure that the CallCodeSize() calculated the correct
3991 // number of instructions for the constant pool load.
3992 {
3993 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
3994 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3995 __ Call(target);
3996 }
3997 }
3998 generator.AfterCall();
4003 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4004 DCHECK(ToRegister(instr->function()).is(r1));
4005 DCHECK(ToRegister(instr->result()).is(r0));
4007 if (instr->hydrogen()->pass_argument_count()) {
4008 __ mov(r0, Operand(instr->arity()));
4009 }
4012 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
4014 // Load the code entry address
4015 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
4016 __ Call(ip);
4018 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4022 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4023 DCHECK(ToRegister(instr->context()).is(cp));
4024 DCHECK(ToRegister(instr->function()).is(r1));
4025 DCHECK(ToRegister(instr->result()).is(r0));
4027 int arity = instr->arity();
4028 CallFunctionFlags flags = instr->hydrogen()->function_flags();
4029 if (instr->hydrogen()->HasVectorAndSlot()) {
4030 Register slot_register = ToRegister(instr->temp_slot());
4031 Register vector_register = ToRegister(instr->temp_vector());
4032 DCHECK(slot_register.is(r3));
4033 DCHECK(vector_register.is(r2));
4035 AllowDeferredHandleDereference vector_structure_check;
4036 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
4037 int index = vector->GetIndex(instr->hydrogen()->slot());
4039 __ Move(vector_register, vector);
4040 __ mov(slot_register, Operand(Smi::FromInt(index)));
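// The CallIC collects type feedback even from optimized code: it expects the
// feedback vector in the vector register (r2) and the slot index, as a smi,
// in the slot register (r3).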
4042 CallICState::CallType call_type =
4043 (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
4045 Handle<Code> ic =
4046 CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
4047 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4048 } else {
4049 CallFunctionStub stub(isolate(), arity, flags);
4050 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4055 void LCodeGen::DoCallNew(LCallNew* instr) {
4056 DCHECK(ToRegister(instr->context()).is(cp));
4057 DCHECK(ToRegister(instr->constructor()).is(r1));
4058 DCHECK(ToRegister(instr->result()).is(r0));
4060 __ mov(r0, Operand(instr->arity()));
4061 // No cell in r2 for construct type feedback in optimized code
4062 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4063 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4064 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4068 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4069 DCHECK(ToRegister(instr->context()).is(cp));
4070 DCHECK(ToRegister(instr->constructor()).is(r1));
4071 DCHECK(ToRegister(instr->result()).is(r0));
4073 __ mov(r0, Operand(instr->arity()));
4074 if (instr->arity() == 1) {
4075 // We only need the allocation site for the case we have a length argument.
4076 // The case may bail out to the runtime, which will determine the correct
4077 // elements kind with the site.
4078 __ Move(r2, instr->hydrogen()->site());
4080 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4082 ElementsKind kind = instr->hydrogen()->elements_kind();
4083 AllocationSiteOverrideMode override_mode =
4084 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4085 ? DISABLE_ALLOCATION_SITES
4086 : DONT_OVERRIDE;
4088 if (instr->arity() == 0) {
4089 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4090 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4091 } else if (instr->arity() == 1) {
4092 Label done;
4093 if (IsFastPackedElementsKind(kind)) {
4094 Label packed_case;
4095 // We might need a change here,
4096 // look at the first argument.
4097 __ ldr(r5, MemOperand(sp, 0));
4098 __ cmp(r5, Operand::Zero());
4099 __ b(eq, &packed_case);
4101 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4102 ArraySingleArgumentConstructorStub stub(isolate(),
4103 holey_kind,
4104 override_mode);
4105 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4106 __ jmp(&done);
4107 __ bind(&packed_case);
4108 }
4110 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4111 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4112 __ bind(&done);
4113 } else {
4114 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4115 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4120 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4121 CallRuntime(instr->function(), instr->arity(), instr);
4125 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4126 Register function = ToRegister(instr->function());
4127 Register code_object = ToRegister(instr->code_object());
4128 __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
4129 __ str(code_object,
4130 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4134 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4135 Register result = ToRegister(instr->result());
4136 Register base = ToRegister(instr->base_object());
4137 if (instr->offset()->IsConstantOperand()) {
4138 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4139 __ add(result, base, Operand(ToInteger32(offset)));
4140 } else {
4141 Register offset = ToRegister(instr->offset());
4142 __ add(result, base, offset);
4147 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4148 Representation representation = instr->representation();
4150 Register object = ToRegister(instr->object());
4151 Register scratch = scratch0();
4152 HObjectAccess access = instr->hydrogen()->access();
4153 int offset = access.offset();
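// A named store goes to one of three places: raw external memory, an
// in-object field, or the out-of-line properties array; heap-object values
// stored into the latter two may also need a write barrier.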
4155 if (access.IsExternalMemory()) {
4156 Register value = ToRegister(instr->value());
4157 MemOperand operand = MemOperand(object, offset);
4158 __ Store(value, operand, representation);
4159 return;
4160 }
4162 __ AssertNotSmi(object);
4164 DCHECK(!representation.IsSmi() ||
4165 !instr->value()->IsConstantOperand() ||
4166 IsSmi(LConstantOperand::cast(instr->value())));
4167 if (representation.IsDouble()) {
4168 DCHECK(access.IsInobject());
4169 DCHECK(!instr->hydrogen()->has_transition());
4170 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4171 DwVfpRegister value = ToDoubleRegister(instr->value());
4172 __ vstr(value, FieldMemOperand(object, offset));
4173 return;
4174 }
4176 if (instr->hydrogen()->has_transition()) {
4177 Handle<Map> transition = instr->hydrogen()->transition_map();
4178 AddDeprecationDependency(transition);
4179 __ mov(scratch, Operand(transition));
4180 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4181 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4182 Register temp = ToRegister(instr->temp());
4183 // Update the write barrier for the map field.
4184 __ RecordWriteForMap(object,
4185 scratch,
4186 temp,
4187 GetLinkRegisterState(),
4188 kSaveFPRegs);
4193 Register value = ToRegister(instr->value());
4194 if (access.IsInobject()) {
4195 MemOperand operand = FieldMemOperand(object, offset);
4196 __ Store(value, operand, representation);
4197 if (instr->hydrogen()->NeedsWriteBarrier()) {
4198 // Update the write barrier for the object for in-object properties.
4199 __ RecordWriteField(object,
4200 offset,
4201 value,
4202 scratch,
4203 GetLinkRegisterState(),
4204 kSaveFPRegs,
4205 EMIT_REMEMBERED_SET,
4206 instr->hydrogen()->SmiCheckForWriteBarrier(),
4207 instr->hydrogen()->PointersToHereCheckForValue());
4208 }
4209 } else {
4210 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4211 MemOperand operand = FieldMemOperand(scratch, offset);
4212 __ Store(value, operand, representation);
4213 if (instr->hydrogen()->NeedsWriteBarrier()) {
4214 // Update the write barrier for the properties array.
4215 // object is used as a scratch register.
4216 __ RecordWriteField(scratch,
4217 offset,
4218 value,
4219 object,
4220 GetLinkRegisterState(),
4221 kSaveFPRegs,
4222 EMIT_REMEMBERED_SET,
4223 instr->hydrogen()->SmiCheckForWriteBarrier(),
4224 instr->hydrogen()->PointersToHereCheckForValue());
4230 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4231 DCHECK(ToRegister(instr->context()).is(cp));
4232 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4233 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4235 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
4236 Handle<Code> ic =
4237 StoreIC::initialize_stub(isolate(), instr->language_mode(),
4238 instr->hydrogen()->initialization_state());
4239 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4243 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4244 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
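// cc is the condition under which the check fails: hs (index >= length)
// normally, hi (index > length) when index == length is allowed. With a
// constant index the operands are compared in reverse, so cc is commuted.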
4245 if (instr->index()->IsConstantOperand()) {
4246 Operand index = ToOperand(instr->index());
4247 Register length = ToRegister(instr->length());
4248 __ cmp(length, index);
4249 cc = CommuteCondition(cc);
4250 } else {
4251 Register index = ToRegister(instr->index());
4252 Operand length = ToOperand(instr->length());
4253 __ cmp(index, length);
4254 }
4255 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4256 Label done;
4257 __ b(NegateCondition(cc), &done);
4258 __ stop("eliminated bounds check failed");
4259 __ bind(&done);
4260 } else {
4261 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
4262 }
4266 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4267 Register external_pointer = ToRegister(instr->elements());
4268 Register key = no_reg;
4269 ElementsKind elements_kind = instr->elements_kind();
4270 bool key_is_constant = instr->key()->IsConstantOperand();
4271 int constant_key = 0;
4272 if (key_is_constant) {
4273 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4274 if (constant_key & 0xF0000000) {
4275 Abort(kArrayIndexConstantValueTooBig);
4276 }
4277 } else {
4278 key = ToRegister(instr->key());
4279 }
4280 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4281 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4282 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4283 int base_offset = instr->base_offset();
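// The element address is external_pointer + (key << shift_size) + base_offset.
// shift_size is one less than the element size shift when the key is still a
// smi, because a smi-tagged key already equals the index times two.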
4285 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4286 elements_kind == FLOAT32_ELEMENTS ||
4287 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4288 elements_kind == FLOAT64_ELEMENTS) {
4289 Register address = scratch0();
4290 DwVfpRegister value(ToDoubleRegister(instr->value()));
4291 if (key_is_constant) {
4292 if (constant_key != 0) {
4293 __ add(address, external_pointer,
4294 Operand(constant_key << element_size_shift));
4295 } else {
4296 address = external_pointer;
4297 }
4298 } else {
4299 __ add(address, external_pointer, Operand(key, LSL, shift_size));
4300 }
4301 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4302 elements_kind == FLOAT32_ELEMENTS) {
4303 __ vcvt_f32_f64(double_scratch0().low(), value);
4304 __ vstr(double_scratch0().low(), address, base_offset);
4305 } else { // Storing doubles, not floats.
4306 __ vstr(value, address, base_offset);
4307 }
4308 } else {
4309 Register value(ToRegister(instr->value()));
4310 MemOperand mem_operand = PrepareKeyedOperand(
4311 key, external_pointer, key_is_constant, constant_key,
4312 element_size_shift, shift_size,
4313 base_offset);
4314 switch (elements_kind) {
4315 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4316 case EXTERNAL_INT8_ELEMENTS:
4317 case EXTERNAL_UINT8_ELEMENTS:
4318 case UINT8_ELEMENTS:
4319 case UINT8_CLAMPED_ELEMENTS:
4321 __ strb(value, mem_operand);
4322 break;
4323 case EXTERNAL_INT16_ELEMENTS:
4324 case EXTERNAL_UINT16_ELEMENTS:
4325 case INT16_ELEMENTS:
4326 case UINT16_ELEMENTS:
4327 __ strh(value, mem_operand);
4328 break;
4329 case EXTERNAL_INT32_ELEMENTS:
4330 case EXTERNAL_UINT32_ELEMENTS:
4331 case INT32_ELEMENTS:
4332 case UINT32_ELEMENTS:
4333 __ str(value, mem_operand);
4334 break;
4335 case FLOAT32_ELEMENTS:
4336 case FLOAT64_ELEMENTS:
4337 case EXTERNAL_FLOAT32_ELEMENTS:
4338 case EXTERNAL_FLOAT64_ELEMENTS:
4339 case FAST_DOUBLE_ELEMENTS:
4340 case FAST_ELEMENTS:
4341 case FAST_SMI_ELEMENTS:
4342 case FAST_HOLEY_DOUBLE_ELEMENTS:
4343 case FAST_HOLEY_ELEMENTS:
4344 case FAST_HOLEY_SMI_ELEMENTS:
4345 case DICTIONARY_ELEMENTS:
4346 case SLOPPY_ARGUMENTS_ELEMENTS:
4347 UNREACHABLE();
4348 break;
4349 }
4354 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4355 DwVfpRegister value = ToDoubleRegister(instr->value());
4356 Register elements = ToRegister(instr->elements());
4357 Register scratch = scratch0();
4358 DwVfpRegister double_scratch = double_scratch0();
4359 bool key_is_constant = instr->key()->IsConstantOperand();
4360 int base_offset = instr->base_offset();
4362 // Calculate the effective address of the slot in the array to store the
4363 // double value.
4364 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4365 if (key_is_constant) {
4366 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4367 if (constant_key & 0xF0000000) {
4368 Abort(kArrayIndexConstantValueTooBig);
4369 }
4370 __ add(scratch, elements,
4371 Operand((constant_key << element_size_shift) + base_offset));
4372 } else {
4373 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4374 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4375 __ add(scratch, elements, Operand(base_offset));
4376 __ add(scratch, scratch,
4377 Operand(ToRegister(instr->key()), LSL, shift_size));
4378 }
4380 if (instr->NeedsCanonicalization()) {
4381 // Force a canonical NaN.
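// VFPCanonicalizeNaN rewrites any NaN to the canonical quiet NaN bit pattern
// before the store, so arbitrary NaN payloads never reach the backing store.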
4382 if (masm()->emit_debug_code()) {
4383 __ vmrs(ip);
4384 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
4385 __ Assert(ne, kDefaultNaNModeNotSet);
4386 }
4387 __ VFPCanonicalizeNaN(double_scratch, value);
4388 __ vstr(double_scratch, scratch, 0);
4389 } else {
4390 __ vstr(value, scratch, 0);
4391 }
4395 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4396 Register value = ToRegister(instr->value());
4397 Register elements = ToRegister(instr->elements());
4398 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4399 : no_reg;
4400 Register scratch = scratch0();
4401 Register store_base = scratch;
4402 int offset = instr->base_offset();
4405 if (instr->key()->IsConstantOperand()) {
4406 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4407 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4408 offset += ToInteger32(const_operand) * kPointerSize;
4409 store_base = elements;
4410 } else {
4411 // Even though the HLoadKeyed instruction forces the input
4412 // representation for the key to be an integer, the input gets replaced
4413 // during bound check elimination with the index argument to the bounds
4414 // check, which can be tagged, so that case must be handled here, too.
4415 if (instr->hydrogen()->key()->representation().IsSmi()) {
4416 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
4417 } else {
4418 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4419 }
4420 }
4421 __ str(value, MemOperand(store_base, offset));
4423 if (instr->hydrogen()->NeedsWriteBarrier()) {
4424 SmiCheck check_needed =
4425 instr->hydrogen()->value()->type().IsHeapObject()
4426 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4427 // Compute address of modified element and store it into key register.
4428 __ add(key, store_base, Operand(offset));
4429 __ RecordWrite(elements,
4430 key,
4431 value,
4432 GetLinkRegisterState(),
4433 kSaveFPRegs,
4434 EMIT_REMEMBERED_SET,
4435 check_needed,
4436 instr->hydrogen()->PointersToHereCheckForValue());
4441 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4442 // Dispatch by backing store: external/typed array, fast double array, or
4443 // fast (tagged) elements.
4443 if (instr->is_typed_elements()) {
4444 DoStoreKeyedExternalArray(instr);
4445 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4446 DoStoreKeyedFixedDoubleArray(instr);
4448 DoStoreKeyedFixedArray(instr);
4453 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4454 DCHECK(ToRegister(instr->context()).is(cp));
4455 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4456 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4457 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4459 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4460 isolate(), instr->language_mode(),
4461 instr->hydrogen()->initialization_state()).code();
4462 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4466 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4467 class DeferredMaybeGrowElements final : public LDeferredCode {
4469 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4470 : LDeferredCode(codegen), instr_(instr) {}
4471 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4472 LInstruction* instr() override { return instr_; }
4475 LMaybeGrowElements* instr_;
4478 Register result = r0;
4479 DeferredMaybeGrowElements* deferred =
4480 new (zone()) DeferredMaybeGrowElements(this, instr);
4481 LOperand* key = instr->key();
4482 LOperand* current_capacity = instr->current_capacity();
4484 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4485 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4486 DCHECK(key->IsConstantOperand() || key->IsRegister());
4487 DCHECK(current_capacity->IsConstantOperand() ||
4488 current_capacity->IsRegister());
4490 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4491 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4492 int32_t constant_capacity =
4493 ToInteger32(LConstantOperand::cast(current_capacity));
4494 if (constant_key >= constant_capacity) {
4496 __ jmp(deferred->entry());
4497 }
4498 } else if (key->IsConstantOperand()) {
4499 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4500 __ cmp(ToRegister(current_capacity), Operand(constant_key));
4501 __ b(le, deferred->entry());
4502 } else if (current_capacity->IsConstantOperand()) {
4503 int32_t constant_capacity =
4504 ToInteger32(LConstantOperand::cast(current_capacity));
4505 __ cmp(ToRegister(key), Operand(constant_capacity));
4506 __ b(ge, deferred->entry());
4508 __ cmp(ToRegister(key), ToRegister(current_capacity));
4509 __ b(ge, deferred->entry());
4512 if (instr->elements()->IsRegister()) {
4513 __ Move(result, ToRegister(instr->elements()));
4514 } else {
4515 __ ldr(result, ToMemOperand(instr->elements()));
4518 __ bind(deferred->exit());
4522 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4523 // TODO(3095996): Get rid of this. For now, we need to make the
4524 // result register contain a valid pointer because it is already
4525 // contained in the register pointer map.
4526 Register result = r0;
4527 __ mov(result, Operand::Zero());
4529 // We have to call a stub.
4531 PushSafepointRegistersScope scope(this);
4532 if (instr->object()->IsRegister()) {
4533 __ Move(result, ToRegister(instr->object()));
4535 __ ldr(result, ToMemOperand(instr->object()));
4538 LOperand* key = instr->key();
4539 if (key->IsConstantOperand()) {
4540 __ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
4541 } else {
4542 __ Move(r3, ToRegister(key));
4543 __ SmiTag(r3);
4544 }
4546 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4547 instr->hydrogen()->kind());
4548 __ CallStub(&stub);
4549 RecordSafepointWithLazyDeopt(
4550 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4551 __ StoreToSafepointRegisterSlot(result, result);
4554 // Deopt on smi, which means the elements array changed to dictionary mode.
4555 __ SmiTst(result);
4556 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
4560 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4561 Register object_reg = ToRegister(instr->object());
4562 Register scratch = scratch0();
4564 Handle<Map> from_map = instr->original_map();
4565 Handle<Map> to_map = instr->transitioned_map();
4566 ElementsKind from_kind = instr->from_kind();
4567 ElementsKind to_kind = instr->to_kind();
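// A simple map change (e.g. FAST_SMI_ELEMENTS -> FAST_ELEMENTS) just swaps the
// map pointer; transitions that must rewrite the backing store go through
// TransitionElementsKindStub with all registers saved at a safepoint.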
4569 Label not_applicable;
4570 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4571 __ cmp(scratch, Operand(from_map));
4572 __ b(ne, ¬_applicable);
4574 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4575 Register new_map_reg = ToRegister(instr->new_map_temp());
4576 __ mov(new_map_reg, Operand(to_map));
4577 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4579 __ RecordWriteForMap(object_reg,
4580 new_map_reg,
4581 scratch,
4582 GetLinkRegisterState(),
4583 kDontSaveFPRegs);
4584 } else {
4585 DCHECK(ToRegister(instr->context()).is(cp));
4586 DCHECK(object_reg.is(r0));
4587 PushSafepointRegistersScope scope(this);
4588 __ Move(r1, to_map);
4589 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4590 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4591 __ CallStub(&stub);
4592 RecordSafepointWithRegisters(
4593 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4595 __ bind(¬_applicable);
4599 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4600 Register object = ToRegister(instr->object());
4601 Register temp = ToRegister(instr->temp());
4602 Label no_memento_found;
4603 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4604 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
4605 __ bind(&no_memento_found);
4609 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4610 DCHECK(ToRegister(instr->context()).is(cp));
4611 DCHECK(ToRegister(instr->left()).is(r1));
4612 DCHECK(ToRegister(instr->right()).is(r0));
4613 StringAddStub stub(isolate(),
4614 instr->hydrogen()->flags(),
4615 instr->hydrogen()->pretenure_flag());
4616 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4620 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4621 class DeferredStringCharCodeAt final : public LDeferredCode {
4623 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4624 : LDeferredCode(codegen), instr_(instr) { }
4625 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4626 LInstruction* instr() override { return instr_; }
4629 LStringCharCodeAt* instr_;
4632 DeferredStringCharCodeAt* deferred =
4633 new(zone()) DeferredStringCharCodeAt(this, instr);
4635 StringCharLoadGenerator::Generate(masm(),
4636 ToRegister(instr->string()),
4637 ToRegister(instr->index()),
4638 ToRegister(instr->result()),
4639 deferred->entry());
4640 __ bind(deferred->exit());
4644 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4645 Register string = ToRegister(instr->string());
4646 Register result = ToRegister(instr->result());
4647 Register scratch = scratch0();
4649 // TODO(3095996): Get rid of this. For now, we need to make the
4650 // result register contain a valid pointer because it is already
4651 // contained in the register pointer map.
4652 __ mov(result, Operand::Zero());
4654 PushSafepointRegistersScope scope(this);
4655 __ push(string);
4656 // Push the index as a smi. This is safe because of the checks in
4657 // DoStringCharCodeAt above.
4658 if (instr->index()->IsConstantOperand()) {
4659 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4660 __ mov(scratch, Operand(Smi::FromInt(const_index)));
4661 __ push(scratch);
4662 } else {
4663 Register index = ToRegister(instr->index());
4664 __ SmiTag(index);
4665 __ push(index);
4666 }
4667 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4668 instr->context());
4669 __ AssertSmi(r0);
4670 __ SmiUntag(r0);
4671 __ StoreToSafepointRegisterSlot(r0, result);
4675 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4676 class DeferredStringCharFromCode final : public LDeferredCode {
4678 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4679 : LDeferredCode(codegen), instr_(instr) { }
4680 void Generate() override {
4681 codegen()->DoDeferredStringCharFromCode(instr_);
4683 LInstruction* instr() override { return instr_; }
4686 LStringCharFromCode* instr_;
4689 DeferredStringCharFromCode* deferred =
4690 new(zone()) DeferredStringCharFromCode(this, instr);
4692 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4693 Register char_code = ToRegister(instr->char_code());
4694 Register result = ToRegister(instr->result());
4695 DCHECK(!char_code.is(result));
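// Character codes up to String::kMaxOneByteCharCode are served from the
// single-character string cache; larger codes, or a cache miss (an undefined
// entry), fall back to the runtime via the deferred path.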
4697 __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
4698 __ b(hi, deferred->entry());
4699 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4700 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4701 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4702 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4703 __ cmp(result, ip);
4704 __ b(eq, deferred->entry());
4705 __ bind(deferred->exit());
4709 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4710 Register char_code = ToRegister(instr->char_code());
4711 Register result = ToRegister(instr->result());
4713 // TODO(3095996): Get rid of this. For now, we need to make the
4714 // result register contain a valid pointer because it is already
4715 // contained in the register pointer map.
4716 __ mov(result, Operand::Zero());
4718 PushSafepointRegistersScope scope(this);
4719 __ SmiTag(char_code);
4720 __ push(char_code);
4721 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4722 __ StoreToSafepointRegisterSlot(r0, result);
4726 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4727 LOperand* input = instr->value();
4728 DCHECK(input->IsRegister() || input->IsStackSlot());
4729 LOperand* output = instr->result();
4730 DCHECK(output->IsDoubleRegister());
4731 SwVfpRegister single_scratch = double_scratch0().low();
4732 if (input->IsStackSlot()) {
4733 Register scratch = scratch0();
4734 __ ldr(scratch, ToMemOperand(input));
4735 __ vmov(single_scratch, scratch);
4736 } else {
4737 __ vmov(single_scratch, ToRegister(input));
4738 }
4739 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4743 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4744 LOperand* input = instr->value();
4745 LOperand* output = instr->result();
4747 SwVfpRegister flt_scratch = double_scratch0().low();
4748 __ vmov(flt_scratch, ToRegister(input));
4749 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4753 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4754 class DeferredNumberTagI final : public LDeferredCode {
4756 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4757 : LDeferredCode(codegen), instr_(instr) { }
4758 void Generate() override {
4759 codegen()->DoDeferredNumberTagIU(instr_,
4760 instr_->value(),
4761 instr_->temp1(),
4762 instr_->temp2(),
4763 SIGNED_INT32);
4764 }
4765 LInstruction* instr() override { return instr_; }
4768 LNumberTagI* instr_;
4771 Register src = ToRegister(instr->value());
4772 Register dst = ToRegister(instr->result());
4774 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4775 __ SmiTag(dst, src, SetCC);
4776 __ b(vs, deferred->entry());
4777 __ bind(deferred->exit());
4781 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4782 class DeferredNumberTagU final : public LDeferredCode {
4784 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4785 : LDeferredCode(codegen), instr_(instr) { }
4786 void Generate() override {
4787 codegen()->DoDeferredNumberTagIU(instr_,
4788 instr_->value(),
4789 instr_->temp1(),
4790 instr_->temp2(),
4791 UNSIGNED_INT32);
4792 }
4793 LInstruction* instr() override { return instr_; }
4796 LNumberTagU* instr_;
4799 Register input = ToRegister(instr->value());
4800 Register result = ToRegister(instr->result());
4802 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4803 __ cmp(input, Operand(Smi::kMaxValue));
4804 __ b(hi, deferred->entry());
4805 __ SmiTag(result, input);
4806 __ bind(deferred->exit());
4810 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4811 LOperand* value,
4812 LOperand* temp1,
4813 LOperand* temp2,
4814 IntegerSignedness signedness) {
4815 Label done, slow;
4816 Register src = ToRegister(value);
4817 Register dst = ToRegister(instr->result());
4818 Register tmp1 = scratch0();
4819 Register tmp2 = ToRegister(temp1);
4820 Register tmp3 = ToRegister(temp2);
4821 LowDwVfpRegister dbl_scratch = double_scratch0();
4823 if (signedness == SIGNED_INT32) {
4824 // There was overflow, so bits 30 and 31 of the original integer
4825 // disagree. Try to allocate a heap number in new space and store
4826 // the value in there. If that fails, call the runtime system.
4828 __ SmiUntag(src, dst);
4829 __ eor(src, src, Operand(0x80000000));
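// Example: 0x40000000 tags to 0x80000000 with overflow; untagging yields
// 0xC0000000 and the eor with 0x80000000 restores the original 0x40000000.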
4831 __ vmov(dbl_scratch.low(), src);
4832 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4833 } else {
4834 __ vmov(dbl_scratch.low(), src);
4835 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4836 }
4838 if (FLAG_inline_new) {
4839 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4840 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4841 __ b(&done);
4842 }
4844 // Slow case: Call the runtime system to do the number allocation.
4845 __ bind(&slow);
4846 {
4847 // TODO(3095996): Put a valid pointer value in the stack slot where the
4848 // result register is stored, as this register is in the pointer map, but
4849 // contains an integer value.
4850 __ mov(dst, Operand::Zero());
4852 // Preserve the value of all registers.
4853 PushSafepointRegistersScope scope(this);
4855 // NumberTagI and NumberTagD use the context from the frame, rather than
4856 // the environment's HContext or HInlinedContext value.
4857 // They only call Runtime::kAllocateHeapNumber.
4858 // The corresponding HChange instructions are added in a phase that does
4859 // not have easy access to the local context.
4860 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4861 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4862 RecordSafepointWithRegisters(
4863 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4864 __ sub(r0, r0, Operand(kHeapObjectTag));
4865 __ StoreToSafepointRegisterSlot(r0, dst);
4866 }
4868 // Done. Put the value in dbl_scratch into the value of the allocated heap
4869 // number.
4870 __ bind(&done);
4871 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4872 __ add(dst, dst, Operand(kHeapObjectTag));
4876 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4877 class DeferredNumberTagD final : public LDeferredCode {
4879 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4880 : LDeferredCode(codegen), instr_(instr) { }
4881 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4882 LInstruction* instr() override { return instr_; }
4885 LNumberTagD* instr_;
4888 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4889 Register scratch = scratch0();
4890 Register reg = ToRegister(instr->result());
4891 Register temp1 = ToRegister(instr->temp());
4892 Register temp2 = ToRegister(instr->temp2());
4894 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4895 if (FLAG_inline_new) {
4896 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4897 // We want the untagged address first for performance
4898 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4899 DONT_TAG_RESULT);
4900 } else {
4901 __ jmp(deferred->entry());
4902 }
4903 __ bind(deferred->exit());
4904 __ vstr(input_reg, reg, HeapNumber::kValueOffset);
4905 // Now that we have finished with the object's real address tag it
4906 __ add(reg, reg, Operand(kHeapObjectTag));
4910 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4911 // TODO(3095996): Get rid of this. For now, we need to make the
4912 // result register contain a valid pointer because it is already
4913 // contained in the register pointer map.
4914 Register reg = ToRegister(instr->result());
4915 __ mov(reg, Operand::Zero());
4917 PushSafepointRegistersScope scope(this);
4918 // NumberTagI and NumberTagD use the context from the frame, rather than
4919 // the environment's HContext or HInlinedContext value.
4920 // They only call Runtime::kAllocateHeapNumber.
4921 // The corresponding HChange instructions are added in a phase that does
4922 // not have easy access to the local context.
4923 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4924 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4925 RecordSafepointWithRegisters(
4926 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4927 __ sub(r0, r0, Operand(kHeapObjectTag));
4928 __ StoreToSafepointRegisterSlot(r0, reg);
4932 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4933 HChange* hchange = instr->hydrogen();
4934 Register input = ToRegister(instr->value());
4935 Register output = ToRegister(instr->result());
4936 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4937 hchange->value()->CheckFlag(HValue::kUint32)) {
4938 __ tst(input, Operand(0xc0000000));
4939 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
4940 }
4941 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4942 !hchange->value()->CheckFlag(HValue::kUint32)) {
4943 __ SmiTag(output, input, SetCC);
4944 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4945 } else {
4946 __ SmiTag(output, input);
4951 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4952 Register input = ToRegister(instr->value());
4953 Register result = ToRegister(instr->result());
4954 if (instr->needs_check()) {
4955 STATIC_ASSERT(kHeapObjectTag == 1);
4956 // If the input is a HeapObject, SmiUntag will set the carry flag.
4957 __ SmiUntag(result, input, SetCC);
4958 DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
4959 } else {
4960 __ SmiUntag(result, input);
4965 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4966 DwVfpRegister result_reg,
4967 NumberUntagDMode mode) {
4968 bool can_convert_undefined_to_nan =
4969 instr->hydrogen()->can_convert_undefined_to_nan();
4970 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4972 Register scratch = scratch0();
4973 SwVfpRegister flt_scratch = double_scratch0().low();
4974 DCHECK(!result_reg.is(double_scratch0()));
4975 Label convert, load_smi, done;
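// NUMBER_CANDIDATE_IS_ANY_TAGGED handles smis and heap numbers (optionally
// converting undefined to NaN); NUMBER_CANDIDATE_IS_SMI means the value is
// already known to be a smi, so only the smi-to-double path is needed.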
4976 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4978 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4979 // Heap number map check.
4980 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4981 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4982 __ cmp(scratch, Operand(ip));
4983 if (can_convert_undefined_to_nan) {
4984 __ b(ne, &convert);
4985 } else {
4986 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4987 }
4988 // load heap number
4989 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
4990 if (deoptimize_on_minus_zero) {
4991 __ VmovLow(scratch, result_reg);
4992 __ cmp(scratch, Operand::Zero());
4993 __ b(ne, &done);
4994 __ VmovHigh(scratch, result_reg);
4995 __ cmp(scratch, Operand(HeapNumber::kSignMask));
4996 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
4997 }
4998 __ jmp(&done);
4999 if (can_convert_undefined_to_nan) {
5000 __ bind(&convert);
5001 // Convert undefined (and hole) to NaN.
5002 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5003 __ cmp(input_reg, Operand(ip));
5004 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
5005 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
5006 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
5007 __ jmp(&done);
5008 }
5009 } else {
5010 __ SmiUntag(scratch, input_reg);
5011 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
5012 }
5013 // Smi to double register conversion
5014 __ bind(&load_smi);
5015 // scratch: untagged value of input_reg
5016 __ vmov(flt_scratch, scratch);
5017 __ vcvt_f64_s32(result_reg, flt_scratch);
5018 __ bind(&done);
5022 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
5023 Register input_reg = ToRegister(instr->value());
5024 Register scratch1 = scratch0();
5025 Register scratch2 = ToRegister(instr->temp());
5026 LowDwVfpRegister double_scratch = double_scratch0();
5027 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
5029 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
5030 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
5032 Label done;
5034 // The input was optimistically untagged; revert it.
5035 // The carry flag is set when we reach this deferred code as we just executed
5036 // SmiUntag(heap_object, SetCC)
5037 STATIC_ASSERT(kHeapObjectTag == 1);
5038 __ adc(scratch2, input_reg, Operand(input_reg));
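// SmiUntag(SetCC) shifted the tag bit into the carry flag, so for a heap
// object (tag == 1) the carry is set here and adc reconstructs the original
// tagged pointer: scratch2 = 2 * input_reg + 1.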
5040 // Heap number map check.
5041 __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
5042 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5043 __ cmp(scratch1, Operand(ip));
5045 if (instr->truncating()) {
5046 // Performs a truncating conversion of a floating point number as used by
5047 // the JS bitwise operations.
5048 Label no_heap_number, check_bools, check_false;
5049 __ b(ne, &no_heap_number);
5050 __ TruncateHeapNumberToI(input_reg, scratch2);
5051 __ b(&done);
5053 // Check for Oddballs. Undefined/False is converted to zero and True to one
5054 // for truncating conversions.
5055 __ bind(&no_heap_number);
5056 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5057 __ cmp(scratch2, Operand(ip));
5058 __ b(ne, &check_bools);
5059 __ mov(input_reg, Operand::Zero());
5060 __ b(&done);
5062 __ bind(&check_bools);
5063 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
5064 __ cmp(scratch2, Operand(ip));
5065 __ b(ne, &check_false);
5066 __ mov(input_reg, Operand(1));
5067 __ b(&done);
5069 __ bind(&check_false);
5070 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
5071 __ cmp(scratch2, Operand(ip));
5072 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
5073 __ mov(input_reg, Operand::Zero());
5074 } else {
5075 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
5077 __ sub(ip, scratch2, Operand(kHeapObjectTag));
5078 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
5079 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
5080 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
5082 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5083 __ cmp(input_reg, Operand::Zero());
5084 __ b(ne, &done);
5085 __ VmovHigh(scratch1, double_scratch2);
5086 __ tst(scratch1, Operand(HeapNumber::kSignMask));
5087 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
5088 }
5089 }
5090 __ bind(&done);
5094 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5095 class DeferredTaggedToI final : public LDeferredCode {
5097 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5098 : LDeferredCode(codegen), instr_(instr) { }
5099 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
5100 LInstruction* instr() override { return instr_; }
5106 LOperand* input = instr->value();
5107 DCHECK(input->IsRegister());
5108 DCHECK(input->Equals(instr->result()));
5110 Register input_reg = ToRegister(input);
5112 if (instr->hydrogen()->value()->representation().IsSmi()) {
5113 __ SmiUntag(input_reg);
5114 } else {
5115 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5117 // Optimistically untag the input.
5118 // If the input is a HeapObject, SmiUntag will set the carry flag.
5119 __ SmiUntag(input_reg, SetCC);
5120 // Branch to deferred code if the input was tagged.
5121 // The deferred code will take care of restoring the tag.
5122 __ b(cs, deferred->entry());
5123 __ bind(deferred->exit());
5128 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5129 LOperand* input = instr->value();
5130 DCHECK(input->IsRegister());
5131 LOperand* result = instr->result();
5132 DCHECK(result->IsDoubleRegister());
5134 Register input_reg = ToRegister(input);
5135 DwVfpRegister result_reg = ToDoubleRegister(result);
5137 HValue* value = instr->hydrogen()->value();
5138 NumberUntagDMode mode = value->representation().IsSmi()
5139 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5141 EmitNumberUntagD(instr, input_reg, result_reg, mode);
5145 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5146 Register result_reg = ToRegister(instr->result());
5147 Register scratch1 = scratch0();
5148 DwVfpRegister double_input = ToDoubleRegister(instr->value());
5149 LowDwVfpRegister double_scratch = double_scratch0();
5151 if (instr->truncating()) {
5152 __ TruncateDoubleToI(result_reg, double_input);
5153 } else {
5154 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
5155 // Deoptimize if the input wasn't an int32 (inside a double).
5156 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
5157 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5158 Label done;
5159 __ cmp(result_reg, Operand::Zero());
5160 __ b(ne, &done);
5161 __ VmovHigh(scratch1, double_input);
5162 __ tst(scratch1, Operand(HeapNumber::kSignMask));
5163 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
5164 __ bind(&done);
5165 }
5166 }
5170 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5171 Register result_reg = ToRegister(instr->result());
5172 Register scratch1 = scratch0();
5173 DwVfpRegister double_input = ToDoubleRegister(instr->value());
5174 LowDwVfpRegister double_scratch = double_scratch0();
5176 if (instr->truncating()) {
5177 __ TruncateDoubleToI(result_reg, double_input);
5178 } else {
5179 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
5180 // Deoptimize if the input wasn't an int32 (inside a double).
5181 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
5182 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5183 Label done;
5184 __ cmp(result_reg, Operand::Zero());
5185 __ b(ne, &done);
5186 __ VmovHigh(scratch1, double_input);
5187 __ tst(scratch1, Operand(HeapNumber::kSignMask));
5188 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
5189 __ bind(&done);
5190 }
5191 }
5192 __ SmiTag(result_reg, SetCC);
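// Tagging can itself overflow: on 32-bit ARM a smi holds only 31 bits, so an
// int32 result outside the smi range (|value| >= 2^30) sets V and deopts below.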
5193 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
5197 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5198 LOperand* input = instr->value();
5199 __ SmiTst(ToRegister(input));
5200 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
5204 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5205 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5206 LOperand* input = instr->value();
5207 __ SmiTst(ToRegister(input));
5208 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
5213 void LCodeGen::DoCheckArrayBufferNotNeutered(
5214 LCheckArrayBufferNotNeutered* instr) {
5215 Register view = ToRegister(instr->view());
5216 Register scratch = scratch0();
5218 __ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
5219 __ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
5220 __ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
5221 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
5225 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5226 Register input = ToRegister(instr->value());
5227 Register scratch = scratch0();
5229 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5230 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5232 if (instr->hydrogen()->is_interval_check()) {
5233 InstanceType first;
5234 InstanceType last;
5235 instr->hydrogen()->GetCheckInterval(&first, &last);
5237 __ cmp(scratch, Operand(first));
5239 // If there is only one type in the interval check for equality.
5240 if (first == last) {
5241 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
5242 } else {
5243 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
5244 // Omit check for the last type.
5245 if (last != LAST_TYPE) {
5246 __ cmp(scratch, Operand(last));
5247 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
5248 }
5249 }
5250 } else {
5251 uint8_t mask;
5252 uint8_t tag;
5253 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
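// When the mask is a single bit and the tag is either zero or that same bit,
// a tst alone decides the check (deopt on ne for tag 0, on eq otherwise);
// otherwise the type byte is masked and compared against the tag.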
5255 if (base::bits::IsPowerOfTwo32(mask)) {
5256 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5257 __ tst(scratch, Operand(mask));
5258 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
5259 } else {
5260 __ and_(scratch, scratch, Operand(mask));
5261 __ cmp(scratch, Operand(tag));
5262 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
5268 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5269 Register reg = ToRegister(instr->value());
5270 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5271 AllowDeferredHandleDereference smi_check;
5272 if (isolate()->heap()->InNewSpace(*object)) {
5273 Register reg = ToRegister(instr->value());
5274 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5275 __ mov(ip, Operand(cell));
5276 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
5277 __ cmp(reg, ip);
5278 } else {
5279 __ cmp(reg, Operand(object));
5280 }
5281 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
5285 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5287 PushSafepointRegistersScope scope(this);
5288 __ push(object);
5289 __ mov(cp, Operand::Zero());
5290 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5291 RecordSafepointWithRegisters(
5292 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5293 __ StoreToSafepointRegisterSlot(r0, scratch0());
5295 __ tst(scratch0(), Operand(kSmiTagMask));
5296 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
5300 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5301 class DeferredCheckMaps final : public LDeferredCode {
5303 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5304 : LDeferredCode(codegen), instr_(instr), object_(object) {
5305 SetExit(check_maps());
5307 void Generate() override {
5308 codegen()->DoDeferredInstanceMigration(instr_, object_);
5310 Label* check_maps() { return &check_maps_; }
5311 LInstruction* instr() override { return instr_; }
5319 if (instr->hydrogen()->IsStabilityCheck()) {
5320 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5321 for (int i = 0; i < maps->size(); ++i) {
5322 AddStabilityDependency(maps->at(i).handle());
5323 }
5324 return;
5325 }
5327 Register map_reg = scratch0();
5329 LOperand* input = instr->value();
5330 DCHECK(input->IsRegister());
5331 Register reg = ToRegister(input);
5333 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5335 DeferredCheckMaps* deferred = NULL;
5336 if (instr->hydrogen()->HasMigrationTarget()) {
5337 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5338 __ bind(deferred->check_maps());
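// If any expected map has a migration target, a failing check first attempts
// DoDeferredInstanceMigration and then re-enters at check_maps(); without a
// migration target a mismatch deoptimizes immediately.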
5341 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5342 Label success;
5343 for (int i = 0; i < maps->size() - 1; i++) {
5344 Handle<Map> map = maps->at(i).handle();
5345 __ CompareMap(map_reg, map, &success);
5346 __ b(eq, &success);
5347 }
5349 Handle<Map> map = maps->at(maps->size() - 1).handle();
5350 __ CompareMap(map_reg, map, &success);
5351 if (instr->hydrogen()->HasMigrationTarget()) {
5352 __ b(ne, deferred->entry());
5353 } else {
5354 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5355 }
5357 __ bind(&success);
5361 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5362 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
5363 Register result_reg = ToRegister(instr->result());
5364 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5368 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5369 Register unclamped_reg = ToRegister(instr->unclamped());
5370 Register result_reg = ToRegister(instr->result());
5371 __ ClampUint8(result_reg, unclamped_reg);
5375 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5376 Register scratch = scratch0();
5377 Register input_reg = ToRegister(instr->unclamped());
5378 Register result_reg = ToRegister(instr->result());
5379 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
5380 Label is_smi, done, heap_number;
5382 // Both smi and heap number cases are handled.
5383 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5385 // Check for heap number
5386 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5387 __ cmp(scratch, Operand(factory()->heap_number_map()));
5388 __ b(eq, &heap_number);
5390 // Check for undefined. Undefined is converted to zero for clamping
5391 // conversions.
5392 __ cmp(input_reg, Operand(factory()->undefined_value()));
5393 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
5394 __ mov(result_reg, Operand::Zero());
5395 __ jmp(&done);
5397 // Heap number.
5398 __ bind(&heap_number);
5399 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5400 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5401 __ jmp(&done);
5403 // Smi case.
5404 __ bind(&is_smi);
5405 __ ClampUint8(result_reg, result_reg);
5407 __ bind(&done);
5408 }
5411 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5412 DwVfpRegister value_reg = ToDoubleRegister(instr->value());
5413 Register result_reg = ToRegister(instr->result());
5414 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5415 __ VmovHigh(result_reg, value_reg);
5416 } else {
5417 __ VmovLow(result_reg, value_reg);
5418 }
5419 }
5422 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5423 Register hi_reg = ToRegister(instr->hi());
5424 Register lo_reg = ToRegister(instr->lo());
5425 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
5426 __ VmovHigh(result_reg, hi_reg);
5427 __ VmovLow(result_reg, lo_reg);
5428 }
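// Inline allocation fast path. Falls back to the deferred runtime call when
// inline allocation fails (or a constant size exceeds the regular heap
// object limit), and optionally prefills the object with the one-pointer
// filler map.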
5431 void LCodeGen::DoAllocate(LAllocate* instr) {
5432 class DeferredAllocate final : public LDeferredCode {
5433 public:
5434 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5435 : LDeferredCode(codegen), instr_(instr) { }
5436 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5437 LInstruction* instr() override { return instr_; }
5438 private:
5439 LAllocate* instr_;
5440 };
5443 DeferredAllocate* deferred =
5444 new(zone()) DeferredAllocate(this, instr);
5446 Register result = ToRegister(instr->result());
5447 Register scratch = ToRegister(instr->temp1());
5448 Register scratch2 = ToRegister(instr->temp2());
5450 // Allocate memory for the object.
5451 AllocationFlags flags = TAG_OBJECT;
5452 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5453 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5454 }
5455 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5456 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5457 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5458 }
5460 if (instr->size()->IsConstantOperand()) {
5461 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5462 if (size <= Page::kMaxRegularHeapObjectSize) {
5463 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5464 } else {
5465 __ jmp(deferred->entry());
5466 }
5467 } else {
5468 Register size = ToRegister(instr->size());
5469 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5470 }
5472 __ bind(deferred->exit());
5474 if (instr->hydrogen()->MustPrefillWithFiller()) {
5475 STATIC_ASSERT(kHeapObjectTag == 1);
5476 if (instr->size()->IsConstantOperand()) {
5477 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5478 __ mov(scratch, Operand(size - kHeapObjectTag));
5479 } else {
5480 __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5481 }
5482 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5483 Label loop;
5484 __ bind(&loop);
5485 __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
5486 __ str(scratch2, MemOperand(result, scratch));
5487 __ b(ge, &loop);
5488 }
5489 }
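// Deferred allocation path: passes the smi-tagged size and the allocation
// flags to Runtime::kAllocateInTargetSpace and stores the result back into
// the result register's safepoint slot.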
5492 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5493 Register result = ToRegister(instr->result());
5495 // TODO(3095996): Get rid of this. For now, we need to make the
5496 // result register contain a valid pointer because it is already
5497 // contained in the register pointer map.
5498 __ mov(result, Operand(Smi::FromInt(0)));
5500 PushSafepointRegistersScope scope(this);
5501 if (instr->size()->IsRegister()) {
5502 Register size = ToRegister(instr->size());
5503 DCHECK(!size.is(result));
5504 __ SmiTag(size);
5505 __ push(size);
5506 } else {
5507 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5508 if (size >= 0 && size <= Smi::kMaxValue) {
5509 __ Push(Smi::FromInt(size));
5510 } else {
5511 // We should never get here at runtime => abort.
5512 __ stop("invalid allocation size");
5513 return;
5514 }
5515 }
5517 int flags = AllocateDoubleAlignFlag::encode(
5518 instr->hydrogen()->MustAllocateDoubleAligned());
5519 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5520 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5521 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5522 } else {
5523 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5524 }
5525 __ Push(Smi::FromInt(flags));
5527 CallRuntimeFromDeferred(
5528 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5529 __ StoreToSafepointRegisterSlot(r0, result);
5530 }
5533 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5534 DCHECK(ToRegister(instr->value()).is(r0));
5535 __ push(r0);
5536 CallRuntime(Runtime::kToFastProperties, 1, instr);
5537 }
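// Materialize a regexp literal: reuse the boilerplate from the literals
// array if it exists, otherwise create it via the runtime, then make a
// shallow copy of it in new space.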
5540 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5541 DCHECK(ToRegister(instr->context()).is(cp));
5542 Label materialized;
5543 // Registers will be used as follows:
5544 // r6 = literals array.
5545 // r1 = regexp literal.
5546 // r0 = regexp literal clone.
5547 // r2-5 are used as temporaries.
5548 int literal_offset =
5549 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5550 __ Move(r6, instr->hydrogen()->literals());
5551 __ ldr(r1, FieldMemOperand(r6, literal_offset));
5552 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5553 __ cmp(r1, ip);
5554 __ b(ne, &materialized);
5556 // Create regexp literal using runtime function
5557 // Result will be in r0.
5558 __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5559 __ mov(r4, Operand(instr->hydrogen()->pattern()));
5560 __ mov(r3, Operand(instr->hydrogen()->flags()));
5561 __ Push(r6, r5, r4, r3);
5562 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5563 __ mov(r1, r0);
5565 __ bind(&materialized);
5566 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5567 Label allocated, runtime_allocate;
5569 __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
5570 __ jmp(&allocated);
5572 __ bind(&runtime_allocate);
5573 __ mov(r0, Operand(Smi::FromInt(size)));
5574 __ Push(r1, r0);
5575 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5576 __ pop(r1);
5578 __ bind(&allocated);
5579 // Copy the content into the newly allocated memory.
5580 __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
5581 }
5584 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5585 DCHECK(ToRegister(instr->context()).is(cp));
5586 // Use the fast case closure allocation code that allocates in new
5587 // space for nested functions that don't need literals cloning.
5588 bool pretenure = instr->hydrogen()->pretenure();
5589 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5590 FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
5591 instr->hydrogen()->kind());
5592 __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5593 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5594 } else {
5595 __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5596 __ mov(r1, Operand(pretenure ? factory()->true_value()
5597 : factory()->false_value()));
5598 __ Push(cp, r2, r1);
5599 CallRuntime(Runtime::kNewClosure, 3, instr);
5600 }
5601 }
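// typeof: Smis are always "number"; everything else is handled by TypeofStub.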
5604 void LCodeGen::DoTypeof(LTypeof* instr) {
5605 DCHECK(ToRegister(instr->value()).is(r3));
5606 DCHECK(ToRegister(instr->result()).is(r0));
5607 Label end, do_call;
5608 Register value_register = ToRegister(instr->value());
5609 __ JumpIfNotSmi(value_register, &do_call);
5610 __ mov(r0, Operand(isolate()->factory()->number_string()));
5611 __ jmp(&end);
5612 __ bind(&do_call);
5613 TypeofStub stub(isolate());
5614 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5615 __ bind(&end);
5616 }
5619 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5620 Register input = ToRegister(instr->value());
5622 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5623 instr->FalseLabel(chunk_),
5624 input,
5625 instr->type_literal());
5626 if (final_branch_condition != kNoCondition) {
5627 EmitBranch(instr, final_branch_condition);
5628 }
5629 }
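// Emits the compare for a typeof comparison against a string literal and
// returns the condition under which it holds; kNoCondition means the literal
// matches no typeof result and control has already branched to false_label.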
5632 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5633 Label* false_label,
5634 Register input,
5635 Handle<String> type_name) {
5636 Condition final_branch_condition = kNoCondition;
5637 Register scratch = scratch0();
5638 Factory* factory = isolate()->factory();
5639 if (String::Equals(type_name, factory->number_string())) {
5640 __ JumpIfSmi(input, true_label);
5641 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5642 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5643 final_branch_condition = eq;
5645 } else if (String::Equals(type_name, factory->string_string())) {
5646 __ JumpIfSmi(input, false_label);
5647 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5648 __ b(ge, false_label);
5649 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5650 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5651 final_branch_condition = eq;
5653 } else if (String::Equals(type_name, factory->symbol_string())) {
5654 __ JumpIfSmi(input, false_label);
5655 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5656 final_branch_condition = eq;
5658 } else if (String::Equals(type_name, factory->boolean_string())) {
5659 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5660 __ b(eq, true_label);
5661 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5662 final_branch_condition = eq;
5664 } else if (String::Equals(type_name, factory->undefined_string())) {
5665 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5666 __ b(eq, true_label);
5667 __ JumpIfSmi(input, false_label);
5668 // Check for undetectable objects => true.
5669 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5670 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5671 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5672 final_branch_condition = ne;
5674 } else if (String::Equals(type_name, factory->function_string())) {
5675 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5676 Register type_reg = scratch;
5677 __ JumpIfSmi(input, false_label);
5678 __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
5679 __ b(eq, true_label);
5680 __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
5681 final_branch_condition = eq;
5683 } else if (String::Equals(type_name, factory->object_string())) {
5684 Register map = scratch;
5685 __ JumpIfSmi(input, false_label);
5686 __ CompareRoot(input, Heap::kNullValueRootIndex);
5687 __ b(eq, true_label);
5688 __ CheckObjectTypeRange(input,
5689 map,
5690 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5691 LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
5692 false_label);
5693 // Check for undetectable objects => false.
5694 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5695 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5696 final_branch_condition = eq;
5697 } else {
5698 __ b(false_label);
5699 }
5702 return final_branch_condition;
5703 }
5706 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5707 Register temp1 = ToRegister(instr->temp());
5709 EmitIsConstructCall(temp1, scratch0());
5710 EmitBranch(instr, eq);
5714 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5715 DCHECK(!temp1.is(temp2));
5716 // Get the frame pointer for the calling frame.
5717 __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5719 // Skip the arguments adaptor frame if it exists.
5720 __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5721 __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5722 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);
5724 // Check the marker in the calling frame.
5725 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5726 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5730 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5731 if (!info()->IsStub()) {
5732 // Ensure that we have enough space after the previous lazy-bailout
5733 // instruction for patching the code here.
5734 int current_pc = masm()->pc_offset();
5735 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5736 // Block literal pool emission for duration of padding.
5737 Assembler::BlockConstPoolScope block_const_pool(masm());
5738 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5739 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5740 while (padding_size > 0) {
5741 __ nop();
5742 padding_size -= Assembler::kInstrSize;
5743 }
5744 }
5745 }
5746 last_lazy_deopt_pc_ = masm()->pc_offset();
5747 }
5750 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5751 last_lazy_deopt_pc_ = masm()->pc_offset();
5752 DCHECK(instr->HasEnvironment());
5753 LEnvironment* env = instr->environment();
5754 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5755 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5759 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5760 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5761 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5762 // needed return address), even though the implementation of LAZY and EAGER is
5763 // now identical. When LAZY is eventually completely folded into EAGER, remove
5764 // the special case below.
5765 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5766 type = Deoptimizer::LAZY;
5767 }
5769 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5770 }
5773 void LCodeGen::DoDummy(LDummy* instr) {
5774 // Nothing to see here, move on!
5778 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5779 // Nothing to see here, move on!
5783 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5784 PushSafepointRegistersScope scope(this);
5785 LoadContextFromDeferred(instr->context());
5786 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5787 RecordSafepointWithLazyDeopt(
5788 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5789 DCHECK(instr->HasEnvironment());
5790 LEnvironment* env = instr->environment();
5791 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5795 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5796 class DeferredStackCheck final : public LDeferredCode {
5797 public:
5798 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5799 : LDeferredCode(codegen), instr_(instr) { }
5800 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5801 LInstruction* instr() override { return instr_; }
5803 private:
5804 LStackCheck* instr_;
5805 };
5807 DCHECK(instr->HasEnvironment());
5808 LEnvironment* env = instr->environment();
5809 // There is no LLazyBailout instruction for stack-checks. We have to
5810 // prepare for lazy deoptimization explicitly here.
5811 if (instr->hydrogen()->is_function_entry()) {
5812 // Perform stack overflow check.
5813 Label done;
5814 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5815 __ cmp(sp, Operand(ip));
5816 __ b(hs, &done);
5817 Handle<Code> stack_check = isolate()->builtins()->StackCheck();
5818 PredictableCodeSizeScope predictable(masm(),
5819 CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
5820 DCHECK(instr->context()->IsRegister());
5821 DCHECK(ToRegister(instr->context()).is(cp));
5822 CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
5823 __ bind(&done);
5824 } else {
5825 DCHECK(instr->hydrogen()->is_backwards_branch());
5826 // Perform stack overflow check if this goto needs it before jumping.
5827 DeferredStackCheck* deferred_stack_check =
5828 new(zone()) DeferredStackCheck(this, instr);
5829 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5830 __ cmp(sp, Operand(ip));
5831 __ b(lo, deferred_stack_check->entry());
5832 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5833 __ bind(instr->done_label());
5834 deferred_stack_check->SetExit(instr->done_label());
5835 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5836 // Don't record a deoptimization index for the safepoint here.
5837 // This will be done explicitly when emitting the call and the safepoint in
5838 // the deferred code.
5839 }
5840 }
5843 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5844 // This is a pseudo-instruction that ensures that the environment here is
5845 // properly registered for deoptimization and records the assembler's PC
5846 // offset.
5847 LEnvironment* environment = instr->environment();
5849 // If the environment were already registered, we would have no way of
5850 // backpatching it with the spill slot operands.
5851 DCHECK(!environment->HasBeenRegistered());
5852 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5854 GenerateOsrPrologue();
5855 }
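// Prepare the for-in enumeration: the receiver (in r0) must be a non-proxy
// spec object; use its map when the enum cache is valid, otherwise call the
// runtime to get the property names and verify it returned a meta map.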
5858 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5859 __ SmiTst(r0);
5860 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
5862 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5863 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
5864 DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
5866 Label use_cache, call_runtime;
5867 Register null_value = r5;
5868 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5869 __ CheckEnumCache(null_value, &call_runtime);
5871 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5872 __ b(&use_cache);
5874 // Get the set of properties to enumerate.
5875 __ bind(&call_runtime);
5876 __ push(r0);
5877 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5879 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
5880 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5881 __ cmp(r1, ip);
5882 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5883 __ bind(&use_cache);
5884 }
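// Load the enum cache for the map produced above, or the empty fixed array
// when the enum length is zero; deoptimize if there is no cache for the
// requested index.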
5887 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5888 Register map = ToRegister(instr->map());
5889 Register result = ToRegister(instr->result());
5890 Label load_cache, done;
5891 __ EnumLength(result, map);
5892 __ cmp(result, Operand(Smi::FromInt(0)));
5893 __ b(ne, &load_cache);
5894 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5895 __ jmp(&done);
5897 __ bind(&load_cache);
5898 __ LoadInstanceDescriptors(map, result);
5899 __ ldr(result,
5900 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5901 __ ldr(result,
5902 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5903 __ cmp(result, Operand::Zero());
5904 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
5906 __ bind(&done);
5907 }
5910 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5911 Register object = ToRegister(instr->value());
5912 Register map = ToRegister(instr->map());
5913 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5914 __ cmp(map, scratch0());
5915 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5916 }
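// Deferred path for DoLoadFieldByIndex below: mutable (boxed) double fields
// are loaded through Runtime::kLoadMutableDouble with the object and index
// as arguments.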
5919 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5920 Register result,
5921 Register object,
5922 Register index) {
5923 PushSafepointRegistersScope scope(this);
5924 __ Push(object);
5925 __ Push(index);
5926 __ mov(cp, Operand::Zero());
5927 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5928 RecordSafepointWithRegisters(
5929 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5930 __ StoreToSafepointRegisterSlot(r0, result);
5931 }
5934 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5935 class DeferredLoadMutableDouble final : public LDeferredCode {
5936 public:
5937 DeferredLoadMutableDouble(LCodeGen* codegen,
5938 LLoadFieldByIndex* instr,
5939 Register result,
5940 Register object,
5941 Register index)
5942 : LDeferredCode(codegen),
5943 instr_(instr),
5944 result_(result),
5945 object_(object),
5946 index_(index) {
5947 }
5948 void Generate() override {
5949 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5950 }
5951 LInstruction* instr() override { return instr_; }
5953 private:
5954 LLoadFieldByIndex* instr_;
5955 Register result_;
5956 Register object_;
5957 Register index_;
5958 };
5960 Register object = ToRegister(instr->object());
5961 Register index = ToRegister(instr->index());
5962 Register result = ToRegister(instr->result());
5963 Register scratch = scratch0();
5965 DeferredLoadMutableDouble* deferred;
5966 deferred = new(zone()) DeferredLoadMutableDouble(
5967 this, instr, result, object, index);
5969 Label out_of_object, done;
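// The low bit of the (already smi-tagged) index selects the deferred path,
// which is used for mutable heap number fields; the remaining bits, with the
// sign, encode an in-object (>= 0) or out-of-object (< 0) property index.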
5971 __ tst(index, Operand(Smi::FromInt(1)));
5972 __ b(ne, deferred->entry());
5973 __ mov(index, Operand(index, ASR, 1));
5975 __ cmp(index, Operand::Zero());
5976 __ b(lt, &out_of_object);
5978 __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
5979 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5980 __ b(&done);
5983 __ bind(&out_of_object);
5984 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5985 // The index is the negated out-of-object property index plus 1.
5986 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
5987 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
5988 __ ldr(result, FieldMemOperand(scratch,
5989 FixedArray::kHeaderSize - kPointerSize));
5990 __ bind(deferred->exit());
5991 __ bind(&done);
5992 }
5995 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5996 Register context = ToRegister(instr->context());
5997 __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
6001 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
6002 Handle<ScopeInfo> scope_info = instr->scope_info();
6003 __ Push(scope_info);
6004 __ push(ToRegister(instr->function()));
6005 CallRuntime(Runtime::kPushBlockContext, 2, instr);
6006 RecordSafepoint(Safepoint::kNoLazyDeopt);
6007 }
6012 } // namespace internal
6013 } // namespace v8