// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm64/frames-arm64.h"
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"


class SafepointGenerator final : public CallWrapper {
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)

  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);

  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;


// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags and they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
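// (Illustrative note: with cond == eq, Emit produces a single `b.eq label`,
// while EmitInverted produces `b.ne label`; the exact encoding is chosen by
// the macro assembler.)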
class BranchOnCondition : public BranchGenerator {
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen),

  virtual void Emit(Label* label) const {

  virtual void EmitInverted(Label* label) const {
    __ B(NegateCondition(cond_), label);


// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
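// (Illustrative note: CompareAndBranch(x0, Operand(0), eq, &l) can therefore
// be emitted as a single `cbz x0, l` instead of `cmp x0, #0` followed by
// `b.eq l`.)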
class CompareAndBranch : public BranchGenerator {
  CompareAndBranch(LCodeGen* codegen,
      : BranchGenerator(codegen),

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);


// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
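// (Illustrative note: testing a single-bit mask with 'ne' can be emitted as
// `tbnz value, #bit, label`, avoiding a separate `tst` plus conditional
// branch.)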
class TestAndBranch : public BranchGenerator {
  TestAndBranch(LCodeGen* codegen,
                const Register& value,
      : BranchGenerator(codegen),

  virtual void Emit(Label* label) const {
        __ TestAndBranchIfAllClear(value_, mask_, label);
        __ TestAndBranchIfAnySet(value_, mask_, label);
        __ Tst(value_, mask_);

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
        __ TestAndBranchIfAnySet(value_, mask_, label);
        __ TestAndBranchIfAllClear(value_, mask_, label);
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);

  const Register& value_;


// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
      : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
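    // (Explanatory note: of these, only the "greater" outcome (nzCv)
    // satisfies 'gt', i.e. Z clear and N == V, so a single conditional
    // branch separates non-zero numbers from both 0.0 and NaN.)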

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);

  const FPRegister& value_;
  const FPRegister& scratch_;


// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);

  const Register& value_;


// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);

  const Register& value_;
  const Heap::RootListIndex index_;


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);

    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
      translation->BeginCapturedObject(object_length);

    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);

  if (op->IsStackSlot()) {
    int index = op->index();
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
      translation->StoreInt32StackSlot(index);
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
      translation->StoreInt32Register(reg);
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      if (e->frame_type() == JS_FUNCTION) {

    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).Is(x1));
  DCHECK(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(x3));
    DCHECK(vector_register.is(x2));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Mov(vector_register, vector);
    __ Mov(slot_register, Operand(Smi::FromInt(index)));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());


void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());

  DCHECK(ToRegister(instr->result()).is(x0));


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case where we have a length
    // argument. That case may bail out to the runtime, which will determine
    // the correct elements kind with the site.
    __ Mov(x2, instr->hydrogen()->site());
    __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    if (IsFastPackedElementsKind(kind)) {
      // We might need to create a holey array; look at the first argument.
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
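      // (E.g. FAST_ELEMENTS is widened to FAST_HOLEY_ELEMENTS here; an
      // illustrative note.)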
      ArraySingleArgumentConstructorStub stub(isolate(),
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);

      __ Bind(&packed_case);

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());

  DCHECK(ToRegister(instr->result()).is(x0));


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t FLAG in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
        !info()->is_native() && info()->scope()->has_this_declaration()) {
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Poke(x10, receiver_offset);

  DCHECK(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ Prologue(info()->IsCodePreAgingActive());
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());

    // Reserve space for the stack slots needed by the code.
    int slots = GetStackSlotCount();
    __ Claim(slots, kPointerSize);

  if (info()->saves_caller_doubles()) {

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    DCHECK(!info()->scope()->is_script_scope());
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register scratch = x3;
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
                                    value, scratch, GetLinkRegisterState(),
        } else if (FLAG_debug_code) {
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);

    Comment(";;; End allocate local context");

  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);

  return !is_aborted();


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
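  // (Illustrative: if the optimized code needs 10 stack slots and the
  // unoptimized frame already provides 6, only the 4-slot difference remains
  // to be claimed at the OSR entry.)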


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];
      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame, call_deopt_entry;

  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ Bind(&table_entry->label);

      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
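      // (Illustrative: for entry i, entry_offset holds (address_i - base);
      // the shared tail below adds it back to the absolute base address.)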
      __ Mov(entry_offset, entry - base);

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        // Save lr before Bl, fp will be adjusted in the needs_frame code.
        // Reuse the existing needs_frame code.

        // There is nothing special to do, so just continue to the
        // second-level table.
        __ Bl(&call_deopt_entry);

      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);

      masm()->CheckConstPool(false, false);

    if (needs_frame.is_linked()) {
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());

      Comment(";;; needs_frame common code");
      UseScratchRegisterScope temps(masm());
      Register stub_marker = temps.AcquireX();
      __ Bind(&needs_frame);
      __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
      __ Push(cp, stub_marker);
      __ Add(fp, __ StackPointer(), 2 * kPointerSize);

    // Generate common code for calling the second-level deopt table.
    __ Bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();

    Register deopt_entry = temps.AcquireX();
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();


bool LCodeGen::GenerateSafepointTable() {
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();


void LCodeGen::FinishCode(Handle<Code> code) {
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));

  code->set_deoptimization_data(*data);


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK_EQ(0, deoptimization_literals_.length());
  for (auto function : chunk()->inlined_functions()) {
    DefineDeoptimizationLiteral(function);
  inlined_function_count_ = deoptimization_literals_.length();


void LCodeGen::DeoptimizeBranch(
    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;

  DCHECK(environment->HasBeenRegistered());
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
    Abort(kBailoutWasNotPrepared);

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Ldr(w1, MemOperand(x0));
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    DCHECK(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Str(w1, MemOperand(x0));

  if (info()->ShouldTrapOnDeopt()) {
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through the jump table if we need to build a frame, or restore caller
  // doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
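    // (Note: when tracing or profiling, a fresh entry is always added rather
    // than reused, presumably so each deopt site can be reported
    // individually.)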
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    __ B(&jump_table_.last()->label, branch_type, reg, bit);


void LCodeGen::Deoptimize(LInstruction* instr,
                          Deoptimizer::DeoptReason deopt_reason,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
                   override_bailout_type);


void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));


void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);


void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);


void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);


void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
                               Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);


void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                                LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, instr, deopt_reason);


void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                                   LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, instr, deopt_reason);


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                                     Deoptimizer::DeoptReason deopt_reason) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, instr, deopt_reason);


void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);


void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);


void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
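    // (Illustrative: if the last lazy-deopt point was 8 bytes back and
    // space_needed is 16, then 8 bytes of padding, i.e. two instructions
    // assuming kInstructionSize == 4 on ARM64, are emitted below.)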
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        padding_size -= kInstructionSize;

  last_lazy_deopt_pc_ = masm()->pc_offset();


Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  DCHECK((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());


Register LCodeGen::ToRegister32(LOperand* op) const {
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
    return ToRegister(op).W();


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
  // Stack slots not implemented, use ToMemOperand instead.


Operand LCodeGen::ToOperand32(LOperand* op) {
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      return Operand(constant->Integer32Value());
    // Other constants not implemented.
    Abort(kToOperand32UnsupportedImmediate);
  // Other cases are not implemented.


static int64_t ArgumentsOffsetWithoutFrame(int index) {
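  // (Illustrative: with kPointerSize == 8, index 0 maps to offset -8 and
  // index 1 to -16; without a frame, the arguments live below the stack
  // pointer.)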
  return -(index + 1) * kPointerSize;


MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    int fp_offset = StackSlotOffset(op->index());
    // Loads and stores have a bigger reach with positive offsets than with
    // negative ones. We try to access using jssp (positive offset) first,
    // then fall back to fp (negative offset) if that fails.
    //
    // We can reference a stack slot from jssp only if we know how much we've
    // put on the stack. We don't know this in the following cases:
    // - stack_mode != kCanUseStackPointer: this is the case when deferred
    //   code has saved the registers.
    // - saves_caller_doubles(): some double registers have been pushed, jssp
    //   references the end of the double registers and not the end of the
    //   stack slots.
    // In both of the cases above, we _could_ add the tracking information
    // required so that we can use jssp here, but in practice it isn't worth it.
    if ((stack_mode == kCanUseStackPointer) &&
        !info()->saves_caller_doubles()) {
      int jssp_offset_to_fp =
          StandardFrameConstants::kFixedFrameSizeFromFp +
          (pushed_arguments_ + GetStackSlotCount()) * kPointerSize;
      int jssp_offset = fp_offset + jssp_offset_to_fp;
      if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
        return MemOperand(masm()->StackPointer(), jssp_offset);
    return MemOperand(fp, fp_offset);
    // Retrieve the parameter without an eager stack frame, relative to the
    // stack pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());


Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
  if (shift_info->shift() == NO_SHIFT) {
    return ToOperand32(right);
  } else {
    return Operand(
        ToRegister32(right),
        shift_info->shift(),
        JSShiftAmountFromLConstant(shift_info->shift_amount()));


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
    case Token::EQ_STRICT:
    case Token::NE_STRICT:
      cond = is_unsigned ? lo : lt;
      cond = is_unsigned ? hi : gt;
      cond = is_unsigned ? ls : le;
      cond = is_unsigned ? hs : ge;
    case Token::INSTANCEOF:


template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
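  // (Explanatory note: when the true block is the next one to be emitted,
  // only an inverted branch to the false block is needed and the true case
  // falls through; when both destinations are the same block, EmitGoto emits
  // at most a single unconditional jump.)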

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    if (right_block != next_block) {
      __ B(chunk_->GetAssemblyLabel(right_block));


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  DCHECK((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  DCHECK((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
  DCHECK((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    resolver_.Resolve(move);


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.
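  // (Illustrative: with length 3 and index 0, the offset below is
  // (3 - 0 + 1) * kPointerSize, i.e. 32 bytes with 8-byte pointers.)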
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));


void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = Operand(x0);  // Dummy initialization.
  if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) {
    right = Operand(ToRegister(instr->right()));
  } else if (instr->right()->IsConstantOperand()) {
    right = ToInteger32(LConstantOperand::cast(instr->right()));
    right = Operand(ToRegister32(instr->right()), SXTW);

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);


void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);
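  // (Explanatory note: when overflow is possible, Adds sets the condition
  // flags so the 'vs' check below can deopt on signed overflow; otherwise a
  // plain, flag-free Add suffices.)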
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    __ Add(result, left, right);


void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    __ Add(result, left, right);


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate : public LDeferredCode {
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);

  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
      __ B(deferred->entry());
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register filler_count = temp1;
    Register filler = temp2;
    Register untagged_result = ToRegister(instr->temp3());

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Mov(filler_count, size / kPointerSize);
      __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);

    __ Sub(untagged_result, result, kHeapObjectTag);
    __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ FillFields(untagged_result, filler_count, filler);
    DCHECK(instr->temp3() == NULL);


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  // We're in a SafepointRegistersScope so we can use any scratch registers.
  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
    __ SmiTag(size, ToRegister32(instr->size()).X());
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  __ Mov(x10, Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister32(instr->length());

  Register elements = ToRegister(instr->elements());
  Register scratch = x5;
  DCHECK(receiver.Is(x0));  // Used for parameter count.
  DCHECK(function.Is(x1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
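  // (Illustrative: 1 * KB is 1024, so applying a function to more than 1024
  // arguments deoptimizes with kTooManyArguments.)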

  // Push the receiver and use the register to keep the original
  // number of arguments.
  Register argc = receiver;
  __ Sxtw(argc, length);
  // The arguments are at a one pointer size offset from elements.
  __ Add(elements, elements, 1 * kPointerSize);

  // Loop through the arguments pushing them onto the execution
  // stack.
  // length is a small non-negative integer, due to the test above.
  __ Cbz(length, &invoke);
  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
  __ Subs(length, length, 1);

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in argc (receiver) which is x0, as
  // expected by InvokeFunction.
  ParameterCount actual(argc);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // When we are inside an inlined function, the arguments are the last
    // things that have been pushed on the stack. Therefore the arguments
    // array can be accessed directly from jssp.
    // However in the normal case, it is accessed via fp but there are two
    // words on the stack between fp and the arguments (the saved lr and fp)
    // and the LAccessArgumentsAt implementation takes that into account.
    // In the inlined case we need to subtract the size of 2 words from jssp
    // to get a pointer which will work well with LAccessArgumentsAt.
    DCHECK(masm()->StackPointer().Is(jssp));
    __ Sub(result, jssp, 2 * kPointerSize);
    DCHECK(instr->temp() != NULL);
    Register previous_fp = ToRegister(instr->temp());

    __ Ldr(previous_fp,
        MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ Ldr(result,
        MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ Csel(result, fp, previous_fp, ne);


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister32(instr->result());

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());

  // Arguments adaptor frame present. Get the argument length from there.
  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
      UntagSmiMemOperand(result.X(),
                         ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in the result register.


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());

  switch (instr->op()) {
    case Token::ADD: __ Fadd(result, left, right); break;
    case Token::SUB: __ Fsub(result, left, right); break;
    case Token::MUL: __ Fmul(result, left, right); break;
    case Token::DIV: __ Fdiv(result, left, right); break;
      // The ECMA-262 remainder operator is the remainder from a truncating
      // (round-towards-zero) division. Note that this differs from IEEE-754.
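      // (E.g. 5 % 3 yields 2 under truncating division, whereas the IEEE-754
      // remainder, which rounds the quotient to nearest, yields -1.)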
      //
      // TODO(jbramley): See if it's possible to do this inline, rather than by
      // calling a helper function. With frintz (to produce the intermediate
      // quotient) and fmsub (to calculate the remainder without loss of
      // precision), it should be possible. However, we would need support for
      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
      // support that yet.
      DCHECK(left.Is(d0));
      DCHECK(right.Is(d1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
      DCHECK(result.Is(d0));


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(x1));
  DCHECK(ToRegister(instr->right()).is(x0));
  DCHECK(ToRegister(instr->result()).is(x0));

  Handle<Code> code =
      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);


void LCodeGen::DoBitI(LBitI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;


void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand32(instr->index());
    Register length = ToRegister32(instr->length());
    __ Cmp(length, index);
    cond = CommuteCondition(cond);
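    // (Illustrative: with the operands swapped, an out-of-bounds condition
    // such as 'index hs length' becomes 'length ls index'.)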
    Register index = ToRegister32(instr->index());
    Operand length = ToOperand32(instr->length());
    __ Cmp(index, length);
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
    DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);

  if (r.IsInteger32()) {
    DCHECK(!info()->IsStub());
    EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
  } else if (r.IsSmi()) {
    DCHECK(!info()->IsStub());
    STATIC_ASSERT(kSmiTag == 0);
    EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
  } else if (r.IsDouble()) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchIfNonZeroNumber(instr, value, double_scratch());
    DCHECK(r.IsTagged());
    Register value = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();

    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ CompareRoot(value, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitGoto(instr->TrueDestination(chunk()));
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      __ Ldr(double_scratch(), FieldMemOperand(value,
                                               HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      Register temp = ToRegister(instr->temp1());
      __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
      EmitCompareAndBranch(instr, ne, temp, 0);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ JumpIfRoot(
            value, Heap::kUndefinedValueRootIndex, false_label);

      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ JumpIfRoot(
            value, Heap::kTrueValueRootIndex, true_label);
        __ JumpIfRoot(
            value, Heap::kFalseValueRootIndex, false_label);

      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        __ JumpIfRoot(
            value, Heap::kNullValueRootIndex, false_label);

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        DCHECK(Smi::FromInt(0) == 0);
        __ Cbz(value, false_label);
        __ JumpIfSmi(value, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a smi, deopt.
        DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);

      Register map = NoReg;
      Register scratch = NoReg;

      if (expected.NeedsMap()) {
        DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
        map = ToRegister(instr->temp1());
        scratch = ToRegister(instr->temp2());

        __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));

      if (expected.CanBeUndetectable()) {
        // Undetectable -> false.
        __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
        __ TestAndBranchIfAnySet(
            scratch, 1 << Map::kIsUndetectable, false_label);
1918 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1919 // spec object -> true.
1920 __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1921 __ B(ge, true_label);
1924 if (expected.Contains(ToBooleanStub::STRING)) {
1925 // String value -> false iff empty.
1927 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1928 __ B(ge, ¬_string);
1929 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1930 __ Cbz(scratch, false_label);
1932 __ Bind(¬_string);
1935 if (expected.Contains(ToBooleanStub::SYMBOL)) {
1936 // Symbol value -> true.
1937 __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1938 __ B(eq, true_label);
1941 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
1942 // SIMD value -> true.
1943 __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
1944 __ B(eq, true_label);
1947 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1948 Label not_heap_number;
1949 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, ¬_heap_number);
1951 __ Ldr(double_scratch(),
1952 FieldMemOperand(value, HeapNumber::kValueOffset));
1953 __ Fcmp(double_scratch(), 0.0);
1954 // If we got a NaN (overflow bit is set), jump to the false branch.
1955 __ B(vs, false_label);
1956 __ B(eq, false_label);
1958 __ Bind(¬_heap_number);
1961 if (!expected.IsGeneric()) {
1962 // We've seen something for the first time -> deopt.
1963 // This can only happen if we are not generic already.
1964 Deoptimize(instr, Deoptimizer::kUnexpectedObject);
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  // The function interface relies on the following register assignments.
  Register function_reg = x1;
  Register arity_reg = x0;

  LPointerMap* pointers = instr->pointer_map();

  if (FLAG_debug_code) {
    Label is_not_smi;
    // Try to confirm that function_reg (x1) is a tagged pointer.
    __ JumpIfNotSmi(function_reg, &is_not_smi);
    __ Abort(kExpectedFunctionObject);
    __ Bind(&is_not_smi);
  }

  if (can_invoke_directly) {
    // Change context.
    __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Set the arguments count if adaptation is not needed. Assumes that x0 is
    // available to write to at this point.
    if (dont_adapt_arguments) {
      __ Mov(arity_reg, arity);
    }

    // Invoke function.
    __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
    __ Call(x10);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
  }
}

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->result()).Is(x0));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      // TODO(all): on ARM we use a call descriptor to specify a storage mode
      // but on ARM64 we only have one storage mode so it isn't necessary. Check
      // this understanding is correct.
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
      __ Br(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      // TODO(all): on ARM we use a call descriptor to specify a storage mode
      // but on ARM64 we only have one storage mode so it isn't necessary. Check
      // this understanding is correct.
      __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
      __ Call(target);
    }
    generator.AfterCall();
  }

  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}

void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->function()).is(x1));

  if (instr->hydrogen()->pass_argument_count()) {
    __ Mov(x0, Operand(instr->arity()));
  }

  // Change context.
  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));

  // Load the code entry address and call it.
  __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
  __ Call(x10);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}

void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(x0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Mov(cp, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, temp);
  }
  DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
}

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps: public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register object = ToRegister(instr->value());
  Register map_reg = ToRegister(instr->temp());

  __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, object);
    __ Bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map);
    __ B(eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map);

  // We didn't match a map.
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ B(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  }

  __ Bind(&success);
}

void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  Register value = ToRegister(instr->value());
  DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
  DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  UseScratchRegisterScope temps(masm());
  Register view = ToRegister(instr->view());
  Register scratch = temps.AcquireX();

  __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
}

void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first, last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ Cmp(scratch, first);
    if (first == last) {
      // If there is only one type in the interval, check for equality.
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else if (last == LAST_TYPE) {
      // We don't need to compare with the higher bound of the interval.
      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
    } else {
      // If we are below the lower bound, set the C flag and clear the Z flag
      // to force a deopt.
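      // Ccmp performs the second comparison only when the first one took the
      // 'hs' path (scratch >= first); otherwise it materializes CFlag (C set,
      // Z clear), which also satisfies 'hi' below and forces the deopt.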
      __ Ccmp(scratch, last, CFlag, hs);
      DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK((tag == 0) || (tag == mask));
      if (tag == 0) {
        DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
                           Deoptimizer::kWrongInstanceType);
      } else {
        DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
                             Deoptimizer::kWrongInstanceType);
      }
    } else {
      if (tag == 0) {
        __ Tst(scratch, mask);
      } else {
        __ And(scratch, scratch, mask);
        __ Cmp(scratch, tag);
      }
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}

void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister input = ToDoubleRegister(instr->unclamped());
  Register result = ToRegister32(instr->result());
  __ ClampDoubleToUint8(result, input, double_scratch());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register input = ToRegister32(instr->unclamped());
  Register result = ToRegister32(instr->result());
  __ ClampInt32ToUint8(result, input);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register input = ToRegister(instr->unclamped());
  Register result = ToRegister32(instr->result());
  Label done;

  // Both smi and heap number cases are handled.
  Label is_not_smi;
  __ JumpIfNotSmi(input, &is_not_smi);
  __ SmiUntag(result.X(), input);
  __ ClampInt32ToUint8(result);
  __ B(&done);

  __ Bind(&is_not_smi);

  // Check for heap number.
  Label is_heap_number;
  __ JumpIfHeapNumber(input, &is_heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                      Deoptimizer::kNotAHeapNumberUndefined);
  __ Mov(result, 0);
  __ B(&done);

  // Heap number case.
  __ Bind(&is_heap_number);
  DoubleRegister dbl_scratch = double_scratch();
  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
  __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);

  __ Bind(&done);
}

void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ Fmov(result_reg, value_reg);
    __ Lsr(result_reg, result_reg, 32);
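    // For example, the double 12.0 has the bit pattern 0x4028000000000000,
    // so the high word extracted here is 0x40280000.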
  } else {
    __ Fmov(result_reg.W(), value_reg.S());
  }
}

void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());

  // Insert the least significant 32 bits of hi_reg into the most significant
  // 32 bits of lo_reg, and move to a floating point register.
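  // For example, hi == 0x40280000 and lo == 0x00000000 combine to the bit
  // pattern 0x4028000000000000, which is the double 12.0.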
  __ Bfi(lo_reg, hi_reg, 32, 32);
  __ Fmov(result_reg, lo_reg);
}

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Handle<String> class_name = instr->hydrogen()->class_name();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register input = ToRegister(instr->value());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  __ JumpIfSmi(input, false_label);

  Register map = scratch2;
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    // We expect CompareObjectType to load the object instance type in scratch1.
    __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
    __ B(lt, false_label);
    __ B(eq, true_label);
    __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
    __ B(eq, true_label);
  } else {
    __ IsObjectJSObjectType(input, map, scratch1, false_label);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  {
    UseScratchRegisterScope temps(masm());
    Register instance_type = temps.AcquireX();
    __ GetMapConstructor(scratch1, map, scratch2, instance_type);
    __ Cmp(instance_type, JS_FUNCTION_TYPE);
  }

  // Objects with a non-function constructor have class 'Object'.
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ B(ne, true_label);
  } else {
    __ B(ne, false_label);
  }

  // The constructor function is in scratch1. Get its instance class name.
  __ Ldr(scratch1,
         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(scratch1,
         FieldMemOperand(scratch1,
                         SharedFunctionInfo::kInstanceClassNameOffset));

  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
}

void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
  DCHECK(instr->hydrogen()->representation().IsDouble());
  FPRegister object = ToDoubleRegister(instr->object());
  Register temp = ToRegister(instr->temp());

  // If we don't have a NaN, we don't have the hole, so branch now to avoid the
  // (relatively expensive) hole-NaN check.
  __ Fcmp(object, object);
  __ B(vc, instr->FalseLabel(chunk_));

  // We have a NaN, but is it the hole?
  __ Fmov(temp, object);
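  // The hole is a particular quiet NaN bit pattern (kHoleNanInt64), so a
  // bit-exact integer comparison distinguishes it from every other NaN.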
  EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
}

void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
  DCHECK(instr->hydrogen()->representation().IsTagged());
  Register object = ToRegister(instr->object());

  EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register value = ToRegister(instr->value());
  Register map = ToRegister(instr->temp());

  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
}

void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  DCHECK(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
                       instr->TrueLabel(chunk()));
  } else {
    Register value = ToRegister(instr->value());
    __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
    __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
    __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
  }
  EmitGoto(instr->FalseDestination(chunk()));
}

void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));

      // If a NaN is involved, i.e. the result is unordered (V set),
      // jump to false block label.
      __ B(vs, instr->FalseLabel(chunk_));
      EmitBranch(instr, cond);
    } else {
      if (instr->hydrogen_value()->representation().IsInteger32()) {
        if (right->IsConstantOperand()) {
          EmitCompareAndBranch(instr, cond, ToRegister32(left),
                               ToOperand32(right));
        } else {
          // Commute the operands and the condition.
          EmitCompareAndBranch(instr, CommuteCondition(cond),
                               ToRegister32(right), ToOperand32(left));
        }
      } else {
        DCHECK(instr->hydrogen_value()->representation().IsSmi());
        if (right->IsConstantOperand()) {
          int32_t value = ToInteger32(LConstantOperand::cast(right));
          EmitCompareAndBranch(instr,
                               cond,
                               ToRegister(left),
                               Operand(Smi::FromInt(value)));
        } else if (left->IsConstantOperand()) {
          // Commute the operands and the condition.
          int32_t value = ToInteger32(LConstantOperand::cast(left));
          EmitCompareAndBranch(instr,
                               CommuteCondition(cond),
                               ToRegister(right),
                               Operand(Smi::FromInt(value)));
        } else {
          EmitCompareAndBranch(instr,
                               cond,
                               ToRegister(left),
                               ToRegister(right));
        }
      }
    }
  }
}

void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());
  EmitCompareAndBranch(instr, eq, left, right);
}

void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();
  Condition cond = TokenToCondition(op, false);

  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  Handle<Code> ic =
      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // Signal that we don't inline smi code before this stub.
  InlineSmiCheckInfo::EmitNotInlined(masm());

  // Return true or false depending on CompareIC result.
  // This instruction is marked as call. We can clobber any register.
  DCHECK(instr->IsMarkedAsCall());
  __ LoadTrueFalseRoots(x1, x2);
  __ Cmp(x0, 0);
  __ Csel(ToRegister(instr->result()), x1, x2, cond);
}

void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  if (instr->value() == 0) {
    if (copysign(1.0, instr->value()) == 1.0) {
      __ Fmov(result, fp_zero);
    } else {
      __ Fneg(result, fp_zero);
    }
  } else {
    __ Fmov(result, instr->value());
  }
}

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  DCHECK(is_int32(instr->value()));
  // Cast the value here to ensure that the value isn't sign extended by the
  // implicit Operand constructor.
  __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(ToRegister(instr->result()), object);
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}

void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    UseScratchRegisterScope temps(masm());
    Register temp = temps.AcquireX();
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ Mov(temp, Operand(cell));
    __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
    __ Cmp(reg, temp);
  } else {
    __ Cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}

void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register temp1 = x10;
  Register temp2 = x11;
  Smi* index = instr->index();

  DCHECK(object.is(result) && object.Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  if (index->value() == 0) {
    __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    Label runtime, done;
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ Mov(temp1, Operand(stamp));
      __ Ldr(temp1, MemOperand(temp1));
      __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Cmp(temp1, temp2);
      __ B(ne, &runtime);
      __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                             kPointerSize * index->value()));
      __ B(&done);
    }

    __ Bind(&runtime);
    __ Mov(x1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ Bind(&done);
  }
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
    type = Deoptimizer::LAZY;
  }

  Deoptimize(instr, instr->hydrogen()->reason(), &type);
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
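    // (kMinInt is the only dividend for which the subtraction overflows, so
    // the V flag identifies it exactly.)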
    __ Cmp(dividend, 1);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ Tst(dividend, mask);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Neg(result, dividend);
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Mov(result, dividend);
  } else if (shift == 1) {
    __ Add(result, dividend, Operand(dividend, LSR, 31));
  } else {
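    // Add a rounding bias of (2^shift - 1) to negative dividends so that the
    // arithmetic shift below truncates towards zero. For example, with
    // divisor == 4 (shift == 2) and dividend == -7: the bias is 3,
    // -7 + 3 == -4, and -4 >> 2 == -1 == trunc(-7 / 4).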
    __ Mov(result, Operand(dividend, ASR, 31));
    __ Add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ Neg(result, result);
}

void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
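
  // Smsubl below recomputes dividend - result * divisor. A non-zero remainder
  // means the truncating division was inexact, which requires a deopt unless
  // all uses truncate to int32 anyway.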
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register temp = ToRegister32(instr->temp());
    DCHECK(!AreAliased(dividend, result, temp));
    __ Sxtw(dividend.X(), dividend);
    __ Mov(temp, divisor);
    __ Smsubl(temp.X(), result, temp, dividend.X());
    DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
  }
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register result = ToRegister32(instr->result());

  // Issue the division first, and then check for any deopt cases whilst the
  // result is computed.
  __ Sdiv(result, dividend, divisor);

  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DCHECK(!instr->temp());
    return;
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) as that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);

    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
    // zero, i.e. a zero dividend with a negative divisor deopts.
    // If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
    __ Ccmp(dividend, 0, NoFlag, mi);
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
    // -1. If overflow is clear, set the flags for condition ne, as the
    // dividend isn't kMinInt, and thus we shouldn't deopt.
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  // Compute remainder and deopt if it's not zero.
  Register remainder = ToRegister32(instr->temp());
  __ Msub(remainder, result, divisor, dividend);
  DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
}

void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister32(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
  }

  __ TryRepresentDoubleAsInt32(result, input, double_scratch());
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

  if (instr->tag_result()) {
    __ SmiTag(result.X());
  }
}

void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // FunctionLiteral instruction is marked as call, we can trash any register.
  DCHECK(instr->IsMarkedAsCall());

  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    __ Mov(x1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, x2, x1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}

void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;

  __ EnumLengthUntagged(result, map);
  __ Cbnz(result, &load_cache);

  __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ B(&done);

  __ Bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);

  __ Bind(&done);
}

void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register object = ToRegister(instr->object());
  Register null_value = x5;

  DCHECK(instr->IsMarkedAsCall());
  DCHECK(object.Is(x0));

  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);

  Label use_cache, call_runtime;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);

  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
  __ B(&use_cache);

  // Get the set of properties to enumerate.
  __ Bind(&call_runtime);
  __ Push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
                      Deoptimizer::kWrongMap);

  __ Bind(&use_cache);
}

void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  // Assert that we can use a W register load to get the hash.
  DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
  __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}

void LCodeGen::EmitGoto(int block) {
  // Do not emit jump if we are emitting a goto to the next block.
  if (!IsNextEmittedBlock(block)) {
    __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister32(instr->temp());

  // Assert that the cache status bits fit in a W register.
  DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
  __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
  __ Tst(temp, String::kContainsCachedArrayIndexMask);
  EmitBranch(instr, eq);
}

// HHasInstanceTypeAndBranch instruction is built with an interval of type
// to test but is only used in very restricted ways. The only possible kinds
// of intervals are:
//  - [ FIRST_TYPE, instr->to() ]
//  - [ instr->from(), LAST_TYPE ]
//  - instr->from() == instr->to()
//
// These kinds of intervals can be checked with only one compare instruction,
// provided the correct value and test condition are used.
//
// TestType() will return the value to use in the compare instruction and
// BranchCondition() will return the condition to use depending on the kind
// of interval actually specified in the instruction.
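//
// For example, the interval [ FIRST_TYPE, JS_OBJECT_TYPE ] is handled by a
// single Cmp against JS_OBJECT_TYPE followed by a 'ls' (unsigned <=) branch.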
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK((from == to) || (to == LAST_TYPE));
  return from;
}

// See comment above TestType function for what this function does.
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    __ Add(result, base, ToOperand32(instr->offset()));
  } else {
    __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
  }
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
  DCHECK(ToRegister(instr->result()).is(x0));
  InstanceOfStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = ToRegister(instr->scratch());
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object. It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ JumpIfSmi(object, instr->FalseLabel(chunk_));
  }

  // Loop through the {object}s prototype chain looking for the {prototype}.
  __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ Bind(&loop);
  __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
  __ Cmp(object_prototype, prototype);
  __ B(eq, instr->TrueLabel(chunk_));
  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
  __ B(eq, instr->FalseLabel(chunk_));
  __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
  __ B(&loop);
}

void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  Register value = ToRegister32(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Scvtf(result, value);
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // The function is required to be in x1.
  DCHECK(ToRegister(instr->function()).is(x1));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}

void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Get the frame pointer for the calling frame.
  __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ B(ne, &check_frame_marker);
  __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ Bind(&check_frame_marker);
  __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));

  EmitCompareAndBranch(
      instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}

Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}

void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register val = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register value = ToRegister(instr->value());
  STATIC_ASSERT(kSmiTag == 0);
  EmitTestAndBranch(instr, eq, value, kSmiTagMask);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));

  EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));

  // Inherit pushed_arguments_ from the predecessor's argument count.
  if (label->block()->HasPredecessor()) {
    pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count();
#ifdef DEBUG
    for (auto p : *label->block()->predecessors()) {
      DCHECK_EQ(p->argument_count(), pushed_arguments_);
    }
#endif
  }

  __ Bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    } else {
      Label not_the_hole;
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ Bind(&not_the_hole);
    }
  }
}

void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Get the prototype or initial map from the function.
  __ Ldr(result, FieldMemOperand(function,
                                 JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                   Deoptimizer::kHole);

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, temp, temp, MAP_TYPE);
  __ B(ne, &done);

  // Get the prototype from the initial map.
  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ Bind(&done);
}

template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
  DCHECK(slot_register.is(x0));

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ Mov(vector_register, vector);
  // No need to allocate this register.
  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ Mov(slot_register, Smi::FromInt(index));
}

template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = ToRegister(instr->temp_slot());

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ Mov(vector_register, vector);
  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ Mov(slot_register, Smi::FromInt(index));
}

void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->global_object())
             .is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).Is(x0));
  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
                                         SLOPPY, PREMONOMORPHIC).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(x0));

  int const slot = instr->slot_index();
  int const depth = instr->depth();
  if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
    __ Mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
    Handle<Code> stub =
        CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
    CallCode(stub, RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(Smi::FromInt(slot));
    __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
  }
}

MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
    Register key,
    Register base,
    Register scratch,
    bool key_is_smi,
    bool key_is_constant,
    int constant_key,
    ElementsKind elements_kind,
    int base_offset) {
  int element_size_shift = ElementsKindToShiftSize(elements_kind);

  if (key_is_constant) {
    int key_offset = constant_key << element_size_shift;
    return MemOperand(base, key_offset + base_offset);
  }

  if (key_is_smi) {
    __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
    return MemOperand(scratch, base_offset);
  }

  if (base_offset == 0) {
    return MemOperand(base, key, SXTW, element_size_shift);
  }

  DCHECK(!AreAliased(scratch, key));
  __ Add(scratch, base, base_offset);
  return MemOperand(scratch, key, SXTW, element_size_shift);
}

void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register scratch;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  int constant_key = 0;
  if (key_is_constant) {
    DCHECK(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    scratch = ToRegister(instr->temp());
    key = ToRegister(instr->key());
  }

  MemOperand mem_op =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->base_offset());

  if (elements_kind == FLOAT32_ELEMENTS) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result.S(), mem_op);
    __ Fcvt(result, result.S());
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result, mem_op);
  } else {
    Register result = ToRegister(instr->result());

    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ Ldrsb(result, mem_op);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ Ldrb(result, mem_op);
        break;
      case INT16_ELEMENTS:
        __ Ldrsh(result, mem_op);
        break;
      case UINT16_ELEMENTS:
        __ Ldrh(result, mem_op);
        break;
      case INT32_ELEMENTS:
        __ Ldrsw(result, mem_op);
        break;
      case UINT32_ELEMENTS:
        __ Ldr(result.W(), mem_op);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          // Deopt if value >= 0x80000000.
          __ Tst(result, 0xFFFFFFFF80000000);
          DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
    }
  }
}

MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
                                              Register elements,
                                              Register key,
                                              bool key_is_tagged,
                                              ElementsKind elements_kind,
                                              Representation representation,
                                              int base_offset) {
  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
  STATIC_ASSERT(kSmiTag == 0);
  int element_size_shift = ElementsKindToShiftSize(elements_kind);

  // Even though the HLoad/StoreKeyed instructions force the input
  // representation for the key to be an integer, the input gets replaced
  // during bounds check elimination with the index argument to the bounds
  // check, which can be tagged, so that case must be handled here, too.
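  //
  // On arm64 a smi keeps its 32-bit payload in the upper word (kSmiShift is
  // 32, as asserted above), so UntagSmiAndScale folds the untagging and the
  // element-size scaling into a single shift of the key.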
  if (key_is_tagged) {
    __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
    if (representation.IsInteger32()) {
      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
      // Read or write only the smi payload in the case of fast smi arrays.
      return UntagSmiMemOperand(base, base_offset);
    } else {
      return MemOperand(base, base_offset);
    }
  } else {
    // Sign extend key because it could be a 32-bit negative value or contain
    // garbage in the top 32-bits. The address computation happens in 64-bit.
    DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
    if (representation.IsInteger32()) {
      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
      // Read or write only the smi payload in the case of fast smi arrays.
      __ Add(base, elements, Operand(key, SXTW, element_size_shift));
      return UntagSmiMemOperand(base, base_offset);
    } else {
      __ Add(base, elements, base_offset);
      return MemOperand(base, key, SXTW, element_size_shift);
    }
  }
}

void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister result = ToDoubleRegister(instr->result());
  MemOperand mem_op;

  if (instr->key()->IsConstantOperand()) {
    DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
           (instr->temp() == NULL));

    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    int offset = instr->base_offset() + constant_key * kDoubleSize;
    mem_op = MemOperand(elements, offset);
  } else {
    Register load_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      instr->hydrogen()->representation(),
                                      instr->base_offset());
  }

  __ Ldr(result, mem_op);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register scratch = ToRegister(instr->temp());
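    // The hole is a particular quiet NaN bit pattern, so Eor with
    // kHoleNanInt64 leaves zero iff the loaded bits match the hole exactly.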
    __ Fmov(scratch, result);
    __ Eor(scratch, scratch, kHoleNanInt64);
    DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
  }
}

void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  MemOperand mem_op;

  Representation representation = instr->hydrogen()->representation();
  if (instr->key()->IsConstantOperand()) {
    DCHECK(instr->temp() == NULL);
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset = instr->base_offset() +
        ToInteger32(const_operand) * kPointerSize;
    if (representation.IsInteger32()) {
      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
      STATIC_ASSERT(kSmiTag == 0);
      mem_op = UntagSmiMemOperand(elements, offset);
    } else {
      mem_op = MemOperand(elements, offset);
    }
  } else {
    Register load_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      representation, instr->base_offset());
  }

  __ Load(result, mem_op, representation);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
    } else {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    __ B(ne, &done);
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
      // it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
      __ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
      DeoptimizeIf(ne, instr, Deoptimizer::kHole);
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ Bind(&done);
  }
}

void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
                        isolate(), instr->hydrogen()->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  DCHECK(ToRegister(instr->result()).Is(x0));
}

void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    __ Load(result, MemOperand(object, offset), access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DCHECK(access.IsInobject());
    FPRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  Register source;
  if (access.IsInobject()) {
    source = object;
  } else {
    // Load the properties array, using result as a scratch register.
    __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    source = result;
  }

  if (access.representation().IsSmi() &&
      instr->hydrogen()->representation().IsInteger32()) {
    // Read int value directly from upper half of the smi.
    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
    STATIC_ASSERT(kSmiTag == 0);
    __ Load(result, UntagSmiFieldMemOperand(source, offset),
            Representation::Integer32());
  } else {
    __ Load(result, FieldMemOperand(source, offset), access.representation());
  }
}

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // LoadIC expects name and receiver in registers.
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(
          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
          instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  DCHECK(ToRegister(instr->result()).is(x0));
}

void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLengthSmi(result, map);
}

void LCodeGen::DoMathAbs(LMathAbs* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Fabs(result, input);
  } else if (r.IsSmi() || r.IsInteger32()) {
    Register input = r.IsSmi() ? ToRegister(instr->value())
                               : ToRegister32(instr->value());
    Register result = r.IsSmi() ? ToRegister(instr->result())
                                : ToRegister32(instr->result());
    __ Abs(result, input);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
}



void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                                       Label* exit,
                                       Label* allocation_entry) {
  // Handle the tricky cases of MathAbsTagged:
  //  - HeapNumber inputs.
  //    - Negative inputs produce a positive result, so a new HeapNumber is
  //      allocated to hold it.
  //    - Positive inputs are returned as-is, since there is no need to
  //      allocate a new HeapNumber for the result.
  //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit
  //    a smi. In this case, the inline code sets the result and jumps directly
  //    to the allocation_entry label.
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());

  Label runtime_allocation;

  // Deoptimize if the input is not a HeapNumber.
  DeoptimizeIfNotHeapNumber(input, instr);

  // If the argument is positive, we can return it as-is, without any need to
  // allocate a new HeapNumber for the result. We have to do this in integer
  // registers (rather than with fabs) because we need to be able to
  // distinguish the two zeroes.
  __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
  __ Mov(result, input);
  __ Tbz(result_bits, kXSignBit, exit);

  // Calculate abs(input) by clearing the sign bit.
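  // Illustrative example: -1.5 has the IEEE-754 bit pattern
  // 0xbff8000000000000; clearing bit 63 (kXSignBit) yields
  // 0x3ff8000000000000, which is +1.5, so the absolute value of a double
  // needs only the single Bic below on its raw bits.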
  __ Bic(result_bits, result_bits, kXSignMask);

  // Allocate a new HeapNumber to hold the result.
  //  result_bits   The bit representation of the (double) result.
  __ Bind(allocation_entry);
  __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
  // The inline (non-deferred) code will store result_bits into result.
  __ B(exit);

  __ Bind(&runtime_allocation);
  if (FLAG_debug_code) {
    // Because result is in the pointer map, we need to make sure it has a
    // valid tagged value before we call the runtime. We speculatively set it
    // to the input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it
    // should already be valid.
    Label result_ok;
    Register input = ToRegister(instr->value());
    __ JumpIfSmi(result, &result_ok);
    __ Cmp(input, result);
    __ Assert(eq, kUnexpectedValue);
    __ Bind(&result_ok);
  }

  { PushSafepointRegistersScope scope(this);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    __ StoreToSafepointRegisterSlot(x0, result);
  }
  // The inline (non-deferred) code will store result_bits into result.
}


void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
  // Class for deferred case.
  class DeferredMathAbsTagged: public LDeferredCode {
   public:
    DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTagged(instr_, exit(),
                                         allocation_entry());
    }
    virtual LInstruction* instr() { return instr_; }
    Label* allocation_entry() { return &allocation; }
   private:
    LMathAbsTagged* instr_;
    Label allocation;
  };

  // TODO(jbramley): The early-exit mechanism would skip the new frame handling
  // in GenerateDeferredCode. Tidy this up.
  DCHECK(!NeedsDeferredFrame());

  DeferredMathAbsTagged* deferred =
      new(zone()) DeferredMathAbsTagged(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
         instr->hydrogen()->value()->representation().IsSmi());
  Register input = ToRegister(instr->value());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());
  Label done;

  // Handle smis inline.
  // We can treat smis as 64-bit integers, since the (low-order) tag bits will
  // never get set by the negation. This is therefore the same as the Integer32
  // case in DoMathAbs, except that it operates on 64-bit values.
  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
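  // Illustrative example: smi -1 is the word 0xffffffff00000000. Negating it
  // gives 0x0000000100000000, which is smi +1; the low-order tag bits remain
  // zero, so the result is already a valid smi.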

  __ JumpIfNotSmi(input, deferred->entry());

  __ Abs(result, input, NULL, &done);

  // The result is the magnitude (abs) of the smallest value a smi can
  // represent, encoded as a double.
  __ Mov(result_bits, double_to_rawbits(0x80000000));
  __ B(deferred->allocation_entry());

  __ Bind(deferred->exit());
  __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));

  __ Bind(&done);
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
  DoubleRegister double_temp2 = double_scratch();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register temp3 = ToRegister(instr->temp3());

  MathExpGenerator::EmitMathExp(masm(), input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
}


void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ Frintm(result, input);
}


void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
  }

  __ Fcvtms(result, input);

  // Check that the result fits into a 32-bit integer.
  //  - The result did not overflow.
  __ Cmp(result, Operand(result, SXTW));
  //  - The input was not NaN.
  __ Fccmp(input, input, NoFlag, eq);
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Mov(result, dividend, kDiscardForSameWReg);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
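  // Illustrative example: for divisor == -2 (shift == 1) and dividend == 5,
  // the Negs below produces -5 and the arithmetic shift gives -5 >> 1 == -3,
  // which is floor(5 / -2). Negs can only overflow for dividend == kMinInt;
  // that case is covered by the overflow deopt and the final Csel.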
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ Asr(result, result, shift);
    return;
  }

  __ Asr(result, result, shift);
  __ Csel(result, result, kMinInt / divisor, vc);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
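  // Illustrative example: for divisor == 3 and dividend == -7, the truncating
  // division alone would give -2, but floor(-7 / 3) is -3. Adding 1 before
  // dividing (-7 + 1 == -6, -6 / 3 == -2) and subtracting 1 afterwards
  // produces the correct -3.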
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(temp, dividend, result));
  Label needs_adjustment, done;
  __ Cmp(dividend, 0);
  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ B(&done);
  __ Bind(&needs_adjustment);
  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ Sub(result, result, Operand(1));
  __ Bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register remainder = ToRegister32(instr->temp());
  Register result = ToRegister32(instr->result());

  // This can't cause an exception on ARM, so we can execute it speculatively
  // already now.
  __ Sdiv(result, dividend, divisor);

  // Check for x / 0.
  DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // The V flag will be set iff dividend == kMinInt.
    __ Cmp(dividend, 1);
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);
    __ Ccmp(dividend, 0, NoFlag, mi);
    // "divisor" can't be zero because the code would have already been
    // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
    // In this case we need to deoptimize to produce a -0.
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  Label done;
  // If both operands have the same sign then we are done.
  __ Eor(remainder, dividend, divisor);
  __ Tbz(remainder, kWSignBit, &done);

  // Check if the result needs to be corrected.
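  // Illustrative example: dividend == -7, divisor == 2. Sdiv truncated toward
  // zero and produced -3; the Msub below forms the remainder -7 - (-3 * 2),
  // which is -1. Since it is non-zero and the operands' signs differ, the
  // result is decremented to -4, i.e. floor(-3.5).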
  __ Msub(remainder, result, divisor, dividend);
  __ Cbz(remainder, &done);
  __ Sub(result, result, 1);

  __ Bind(&done);
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToDoubleRegister(instr->value()).is(d0));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister32(instr->value());
  Register result = ToRegister32(instr->result());
  __ Clz(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  Label done;

  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
  //  Math.pow(-Infinity, 0.5) == +Infinity
  //  Math.pow(-0.0, 0.5) == +0.0

  // Catch -infinity inputs first.
  // TODO(jbramley): A constant infinity register would be helpful here.
  __ Fmov(double_scratch(), kFP64NegativeInfinity);
  __ Fcmp(double_scratch(), input);
  __ Fabs(result, input);
  __ B(&done, eq);

  // Add +0.0 to convert -0.0 to +0.0.
  __ Fadd(double_scratch(), input, fp_zero);
  __ Fsqrt(result, double_scratch());

  __ Bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  Register integer_exponent = MathPowIntegerDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(!exponent_type.IsInteger32() ||
         ToRegister(instr->right()).is(integer_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d0));
  DCHECK(ToDoubleRegister(instr->result()).is(d0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
    __ Bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
    // supports large integer exponents.
    __ Sxtw(integer_exponent, integer_exponent);
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister scratch_d = double_scratch();

  DCHECK(!AreAliased(input, result, scratch_d));

  Label done;

  __ Frinta(result, input);
  __ Fcmp(input, 0.0);
  __ Fccmp(result, input, ZFlag, lt);
  // The result is correct if the input was in [-0, +infinity], or was a
  // negative integral value.
  __ B(eq, &done);

  // Here the input is negative, non integral, with an exponent lower than 52.
  // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
  // case. So we can safely add 0.5.
  __ Fmov(scratch_d, 0.5);
  __ Fadd(result, input, scratch_d);
  __ Frintm(result, result);
  // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
  __ Fabs(result, result);
  __ Fneg(result, result);

  __ Bind(&done);
}


void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister temp = ToDoubleRegister(instr->temp1());
  DoubleRegister dot_five = double_scratch();
  Register result = ToRegister(instr->result());
  Label done;

  // Math.round() rounds to the nearest integer, with ties going towards
  // +infinity. This does not match any IEEE-754 rounding mode.
  //  - Infinities and NaNs are propagated unchanged, but cause deopts because
  //    they can't be represented as integers.
  //  - The sign of the result is the same as the sign of the input. This means
  //    that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
  //    result of -0.0.

  // Add 0.5 and round towards -infinity.
  __ Fmov(dot_five, 0.5);
  __ Fadd(temp, input, dot_five);
  __ Fcvtms(result, temp);

  // The result is correct if:
  //  result is not 0, as the input could be NaN or [-0.5, -0.0].
  //  result is not 1, as 0.499...94 will wrongly map to 1.
  //  result fits in 32 bits.
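  // Illustrative example of the second condition: for an input of
  // 0.49999999999999994 (the largest double below 0.5), input + 0.5 rounds to
  // exactly 1.0, so Fcvtms would produce 1 even though Math.round must return
  // 0. Such inputs fall through to the slow path below, where the final Fcmp
  // against 0.5 yields the correct 0.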
  __ Cmp(result, Operand(result.W(), SXTW));
  __ Ccmp(result, 1, ZFlag, eq);
  __ B(hi, &done);

  // At this point, we have to handle possible inputs of NaN or numbers in the
  // range [-0.5, 1.5[, or numbers larger than 32 bits.

  // Deoptimize if the result > 1, as it must be larger than 32 bits.
  __ Cmp(result, 1);
  DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);

  // Deoptimize for negative inputs, which at this point are only numbers in
  // the range [-0.5, -0.0].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Fmov(result, input);
    DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
  }

  // Deoptimize if the input was NaN.
  __ Fcmp(input, dot_five);
  DeoptimizeIf(vs, instr, Deoptimizer::kNaN);

  // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
  // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
  // else 0; we avoid dealing with 0.499...94 directly.
  __ Cset(result, ge);
  __ Bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fcvt(result.S(), input);
  __ Fcvt(result, result.S());
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fsqrt(result, input);
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  HMathMinMax::Operation op = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsInteger32()) {
    Register result = ToRegister32(instr->result());
    Register left = ToRegister32(instr->left());
    Operand right = ToOperand32(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else if (instr->hydrogen()->representation().IsSmi()) {
    Register result = ToRegister(instr->result());
    Register left = ToRegister(instr->left());
    Operand right = ToOperand(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    DoubleRegister result = ToDoubleRegister(instr->result());
    DoubleRegister left = ToDoubleRegister(instr->left());
    DoubleRegister right = ToDoubleRegister(instr->right());

    if (op == HMathMinMax::kMathMax) {
      __ Fmax(result, left, right);
    } else {
      DCHECK(op == HMathMinMax::kMathMin);
      __ Fmin(result, left, right);
    }
  }
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister32(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
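  // Illustrative example: for divisor == 8 or -8 the mask is 7. A negative
  // dividend such as -13 is negated (13), masked (13 & 7 == 5) and negated
  // again, giving -5, which matches JavaScript's -13 % 8. A result of -0
  // (e.g. -16 % 8) leaves Z set after Negs, triggering the minus-zero deopt
  // below when required.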
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ Neg(dividend, dividend);
    __ And(dividend, dividend, mask);
    __ Negs(dividend, dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ B(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, mask);
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(dividend, result, temp));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Sxtw(dividend.X(), dividend);
  __ Mov(temp, Abs(divisor));
  __ Smsubl(result.X(), result, temp, dividend.X());
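  // Illustrative example of the sequence above: TruncatingDiv computes
  // trunc(dividend / |divisor|), and Smsubl then forms
  // dividend - quotient * |divisor|. For dividend == -13 and |divisor| == 5
  // this is -13 - (-2 * 5) == -3, matching JavaScript's -13 % 5; the
  // remainder always takes the sign of the dividend.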

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Cbnz(result, &remainder_not_zero);
    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  Register dividend = ToRegister32(instr->left());
  Register divisor = ToRegister32(instr->right());
  Register result = ToRegister32(instr->result());
  Label done;

  // modulo = dividend - quotient * divisor
  __ Sdiv(result, dividend, divisor);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
  }
  __ Msub(result, result, divisor, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cbnz(result, &done);
    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
  }
  __ Bind(&done);
}


void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
  DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
  bool is_smi = instr->hydrogen()->representation().IsSmi();
  Register result =
      is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
  Register left =
      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
  int32_t right = ToInteger32(instr->right());
  DCHECK((right > -kMaxInt) && (right < kMaxInt));

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero) {
    if (right < 0) {
      // The result is -0 if right is negative and left is zero.
      DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
    } else if (right == 0) {
      // The result is -0 if the right is zero and the left is negative.
      DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
    }
  }

  switch (right) {
    // Cases which can detect overflow.
    case -1:
      if (can_overflow) {
        // Only 0x80000000 can overflow here.
        __ Negs(result, left);
        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
      } else {
        __ Neg(result, left);
      }
      break;
    case 0:
      // This case can never overflow.
      __ Mov(result, 0);
      break;
    case 1:
      // This case can never overflow.
      __ Mov(result, left, kDiscardForSameWReg);
      break;
    case 2:
      if (can_overflow) {
        __ Adds(result, left, left);
        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
      } else {
        __ Add(result, left, left);
      }
      break;

    default:
      // Multiplication by constant powers of two (and some related values)
      // can be done efficiently with shifted operands.
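      // Illustrative examples: x * 8 is x << 3, x * -8 is -(x << 3), x * 5 is
      // x + (x << 2), and x * 7 is computed as -(x - (x << 3)). The Cls-based
      // check below conservatively verifies that enough redundant sign bits
      // remain for the shift to be overflow-free.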
      int32_t right_abs = Abs(right);

      if (base::bits::IsPowerOfTwo32(right_abs)) {
        int right_log2 = WhichPowerOf2(right_abs);

        if (can_overflow) {
          Register scratch = result;
          DCHECK(!AreAliased(scratch, left));
          __ Cls(scratch, left);
          __ Cmp(scratch, right_log2);
          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
        }

        if (right >= 0) {
          // result = left << log2(right)
          __ Lsl(result, left, right_log2);
        } else {
          // result = -left << log2(-right)
          if (can_overflow) {
            __ Negs(result, Operand(left, LSL, right_log2));
            DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
          } else {
            __ Neg(result, Operand(left, LSL, right_log2));
          }
        }
      } else {
        // For the following cases, we could perform a conservative overflow
        // check with CLS as above. However the few cycles saved are likely not
        // worth the risk of deoptimizing more often than required.
        DCHECK(!can_overflow);

        if (right >= 0) {
          if (base::bits::IsPowerOfTwo32(right - 1)) {
            // result = left + left << log2(right - 1)
            __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
          } else if (base::bits::IsPowerOfTwo32(right + 1)) {
            // result = -left + left << log2(right + 1)
            __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
            __ Neg(result, result);
          } else {
            UNREACHABLE();
          }
        } else {
          if (base::bits::IsPowerOfTwo32(-right + 1)) {
            // result = left - left << log2(-right + 1)
            __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
          } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
            // result = -left - left << log2(-right - 1)
            __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
            __ Neg(result, result);
          } else {
            UNREACHABLE();
          }
        }
      }
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Register right = ToRegister32(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
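    // Illustrative example: left == 0, right == -5. The Cmp/Ccmp pair leaves
    // eq set (one operand is zero), so the Ccmn above computes 0 + -5 and
    // sets N; the deopt below then fires on mi, since 0 * -5 in JavaScript
    // is -0. For right == 7 the sum is positive, N stays clear and execution
    // continues.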
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }

  if (can_overflow) {
    __ Smull(result.X(), left, right);
    __ Cmp(result.X(), Operand(result, SXTW));
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
  } else {
    __ Mul(result, left, right);
  }
}


void LCodeGen::DoMulS(LMulS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }

  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
  if (can_overflow) {
    __ Smulh(result, left, right);
    __ Cmp(result, Operand(result.W(), SXTW));
    __ SmiTag(result);
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
  } else {
    if (AreAliased(result, left, right)) {
      // All three registers are the same: half untag the input and then
      // multiply, giving a tagged result.
      STATIC_ASSERT((kSmiShift % 2) == 0);
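      // Illustrative example: a smi with payload a is stored as a << 32, so
      // the Asr by 16 below yields a << 16, and the multiplication gives
      // (a << 16) * (a << 16) == (a * a) << 32, which is exactly the tagged
      // smi for a * a.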
      __ Asr(result, left, kSmiShift / 2);
      __ Mul(result, result, result);
    } else if (result.Is(left) && !left.Is(right)) {
      // Registers result and left alias, right is distinct: untag left into
      // result, and then multiply by right, giving a tagged result.
      __ SmiUntag(result, left);
      __ Mul(result, result, right);
    } else {
      DCHECK(!left.Is(result));
      // Registers result and right alias, left is distinct, or all registers
      // are distinct: untag right into result, and then multiply by left,
      // giving a tagged result.
      __ SmiUntag(result, right);
      __ Mul(result, left, result);
    }
  }
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = ToRegister(instr->result());
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  // NumberTagU and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
  } else {
    __ B(deferred->entry());
  }
  __ Bind(deferred->exit());
  __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
                                    LOperand* value,
                                    LOperand* temp1,
                                    LOperand* temp2) {
  Label slow, convert_and_store;
  Register src = ToRegister32(value);
  Register dst = ToRegister(instr->result());
  Register scratch1 = ToRegister(temp1);

  if (FLAG_inline_new) {
    Register scratch2 = ToRegister(temp2);
    __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
    __ B(&convert_and_store);
  }

  // Slow case: call the runtime system to do the number allocation.
  __ Bind(&slow);
  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ Mov(dst, 0);
  {
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagU and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, dst);
  }

  // Convert number to floating point and store in the newly allocated heap
  // number.
  __ Bind(&convert_and_store);
  DoubleRegister dbl_scratch = double_scratch();
  __ Ucvtf(dbl_scratch, src);
  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU: public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagU(instr_,
                                      instr_->value(),
                                      instr_->temp1(),
                                      instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register value = ToRegister32(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Cmp(value, Smi::kMaxValue);
  __ B(hi, deferred->entry());
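  // Illustrative example: the input is an untagged uint32, so a value such as
  // 0x80000000 (2147483648) exceeds Smi::kMaxValue (2147483647); the unsigned
  // 'hi' branch above sends it to the deferred path, which boxes it in a
  // HeapNumber instead.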
  __ SmiTag(result, value.X());
  __ Bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  DoubleRegister result = ToDoubleRegister(instr->result());
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();

  Label done, load_smi;

  // Work out what untag mode we're working with.
  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    __ JumpIfSmi(input, &load_smi);

    Label convert_undefined;

    // Heap number map check.
    if (can_convert_undefined_to_nan) {
      __ JumpIfNotHeapNumber(input, &convert_undefined);
    } else {
      DeoptimizeIfNotHeapNumber(input, instr);
    }

    // Load heap number.
    __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
    if (instr->hydrogen()->deoptimize_on_minus_zero()) {
      DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
    }
    __ B(&done);

    if (can_convert_undefined_to_nan) {
      __ Bind(&convert_undefined);
      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                          Deoptimizer::kNotAHeapNumberUndefined);

      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ B(&done);
    }
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
    // Fall through to load_smi.
  }

  // Smi to double register conversion.
  __ Bind(&load_smi);
  __ SmiUntagToDouble(result, input);

  __ Bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
  __ PushPreamble(instr->argc(), kPointerSize);
}


void LCodeGen::DoPushArguments(LPushArguments* instr) {
  MacroAssembler::PushPopQueue args(masm());

  for (int i = 0; i < instr->ArgumentCount(); ++i) {
    LOperand* arg = instr->argument(i);
    if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
      Abort(kDoPushArgumentNotImplementedForDoubleType);
      return;
    }
    args.Queue(ToRegister(arg));
  }

  // The preamble was done by LPreparePushArguments.
  args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);

  RecordPushedArgumentsDelta(instr->ArgumentCount());
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
    // code managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ Push(x0);
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }

  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }

  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    Register stack_pointer = masm()->StackPointer();
    __ Mov(stack_pointer, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(fp, lr);
  }

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Drop(parameter_count + 1);
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register parameter_count = ToRegister(instr->parameter_count());
    __ DropBySMI(parameter_count);
  }
  __ Ret();

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           Register temp,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }

  __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
  if (encoding == String::ONE_BYTE_ENCODING) {
    return MemOperand(temp, ToRegister32(index), SXTW);
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    return MemOperand(temp, ToRegister32(index), SXTW, 1);
  }
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    // Even though this lithium instruction comes with a temp register, we
    // can't use it here because we want to use "AtStart" constraints on the
    // inputs and the debug code here needs a scratch register.
    UseScratchRegisterScope temps(masm());
    Register dbg_temp = temps.AcquireX();

    __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
    __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));

    __ And(dbg_temp, dbg_temp,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
                                 ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Ldrb(result, operand);
  } else {
    __ Ldrh(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    DCHECK(ToRegister(instr->context()).is(cp));
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
                                 encoding_mask);
  }
  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Strb(value, operand);
  } else {
    __ Strh(value, operand);
  }
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
  }
  __ SmiTag(output, input);
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->needs_check()) {
    DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
  }

  __ SmiUntag(result, input);
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister32(instr->left());
  Register result = ToRegister32(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister32(instr->right());
    switch (instr->op()) {
      case Token::ROR: __ Ror(result, left, right); break;
      case Token::SAR: __ Asr(result, left, right); break;
      case Token::SHL: __ Lsl(result, left, right); break;
      case Token::SHR:
        __ Lsr(result, left, right);
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not representable
          // in a signed 32-bit smi.
          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
        }
        break;
      default: UNREACHABLE();
    }
  } else {
    DCHECK(right_op->IsConstantOperand());
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
      }
      __ Mov(result, left, kDiscardForSameWReg);
    } else {
      switch (instr->op()) {
        case Token::ROR: __ Ror(result, left, shift_count); break;
        case Token::SAR: __ Asr(result, left, shift_count); break;
        case Token::SHL: __ Lsl(result, left, shift_count); break;
        case Token::SHR: __ Lsr(result, left, shift_count); break;
        default: UNREACHABLE();
      }
    }
  }
}


void LCodeGen::DoShiftS(LShiftS* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister(instr->right());

    // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
    // Since we're handling smis in X registers, we have to extract these bits
    // explicitly.
    __ Ubfx(result, right, kSmiShift, 5);
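    // Illustrative example: if right is the smi for 33, its payload sits in
    // bits 63:32, and Ubfx extracts bits 32-36, i.e. 33 & 31 == 1, matching
    // JavaScript's shift-count semantics.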

    switch (instr->op()) {
      case Token::ROR: {
        // This is the only case that needs a scratch register. To keep things
        // simple for the other cases, borrow a MacroAssembler scratch register.
        UseScratchRegisterScope temps(masm());
        Register temp = temps.AcquireW();
        __ SmiUntag(temp, left);
        __ Ror(result.W(), temp.W(), result.W());
        __ SmiTag(result);
        break;
      }
      case Token::SAR:
        __ Asr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      case Token::SHL:
        __ Lsl(result, left, result);
        break;
      case Token::SHR:
        __ Lsr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not representable
          // in a signed 32-bit smi.
          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
        }
        break;
      default: UNREACHABLE();
    }
  } else {
    DCHECK(right_op->IsConstantOperand());
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
      }
      __ Mov(result, left);
    } else {
      switch (instr->op()) {
        case Token::ROR:
          __ SmiUntag(result, left);
          __ Ror(result.W(), result.W(), shift_count);
          __ SmiTag(result);
          break;
        case Token::SAR:
          __ Asr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        case Token::SHL:
          __ Lsl(result, left, shift_count);
          break;
        case Token::SHR:
          __ Lsr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        default: UNREACHABLE();
      }
    }
  }
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ Debug("LDebugBreak", 0, BREAK);
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Register scratch1 = x5;
  Register scratch2 = x6;
  DCHECK(instr->IsMarkedAsCall());

  // TODO(all): if Mov could handle object in new space then it could be used
  // here.
  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
  __ Push(scratch1, scratch2);
  CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(hs, &done);

    PredictableCodeSizeScope predictable(masm_,
                                         Assembler::kCallSizeWithRelocation);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ Bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(lo, deferred_stack_check->entry());

    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ Bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  Register temp = ToRegister(instr->temp());
  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ Ldr(scratch, target);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    } else {
      __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
    }
  }

  __ Str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
                              scratch, GetLinkRegisterState(), kSaveFPRegs,
                              EMIT_REMEMBERED_SET, check_needed);
  }
  __ Bind(&skip_assignment);
}


void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = no_reg;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    DCHECK(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
    scratch = ToRegister(instr->temp());
  }

  MemOperand dst =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->base_offset());

  if (elements_kind == FLOAT32_ELEMENTS) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    DoubleRegister dbl_scratch = double_scratch();
    __ Fcvt(dbl_scratch.S(), value);
    __ Str(dbl_scratch.S(), dst);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ Str(value, dst);
  } else {
    Register value = ToRegister(instr->value());

    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ Strb(value, dst);
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Strh(value, dst);
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Str(value.W(), dst);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister value = ToDoubleRegister(instr->value());
  MemOperand mem_op;

  if (instr->key()->IsConstantOperand()) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    int offset = instr->base_offset() + constant_key * kDoubleSize;
    mem_op = MemOperand(elements, offset);
  } else {
    Register store_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      instr->hydrogen()->representation(),
                                      instr->base_offset());
  }

  if (instr->NeedsCanonicalization()) {
    __ CanonicalizeNaN(double_scratch(), value);
    __ Str(double_scratch(), mem_op);
  } else {
    __ Str(value, mem_op);
  }
}


void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = no_reg;
  Register store_base = no_reg;
  Register key = no_reg;
  MemOperand mem_op;

  if (!instr->key()->IsConstantOperand() ||
      instr->hydrogen()->NeedsWriteBarrier()) {
    scratch = ToRegister(instr->temp());
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset = instr->base_offset() +
        ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
    if (representation.IsInteger32()) {
      DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
      STATIC_ASSERT(kSmiTag == 0);
      mem_op = UntagSmiMemOperand(store_base, offset);
    } else {
      mem_op = MemOperand(store_base, offset);
    }
  } else {
    store_base = scratch;
    key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      representation, instr->base_offset());
  }

  __ Store(value, mem_op, representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    DCHECK(representation.IsTagged());
    // This assignment may cause element_addr to alias store_base.
    Register element_addr = scratch;
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
    __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
                   kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = x0;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ B(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Cmp(ToRegister(current_capacity), Operand(constant_key));
    __ B(le, deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Cmp(ToRegister(key), Operand(constant_capacity));
    __ B(ge, deferred->entry());
  } else {
    __ Cmp(ToRegister(key), ToRegister(current_capacity));
    __ B(ge, deferred->entry());
  }

  __ Mov(result, ToRegister(instr->elements()));

  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = x0;
  __ Mov(result, 0);

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    __ Move(result, ToRegister(instr->object()));

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
    } else {
      __ Mov(x3, ToRegister(key));
      __ SmiTag(x3);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    __ Store(value, MemOperand(object, offset), representation);
    return;
  }

  __ AssertNotSmi(object);

  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
    return;
  }

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsInteger32Constant(LConstantOperand::cast(instr->value())));

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    // Store the new map value.
    Register new_map_value = ToRegister(instr->temp0());
    __ Mov(new_map_value, Operand(transition));
    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           new_map_value,
                           ToRegister(instr->temp1()),
                           GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination;
  if (access.IsInobject()) {
    destination = object;
  } else {
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
    destination = temp0;
  }

  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
  } else if (representation.IsSmi() &&
             instr->hydrogen()->value()->representation().IsInteger32()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(destination, offset));
    __ AssertSmi(temp0);
    // If destination aliased temp0, restore it to the address calculated
    // earlier.
    if (destination.Is(temp0)) {
      DCHECK(!access.IsInobject());
      __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
    }
#endif
    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
    STATIC_ASSERT(kSmiTag == 0);
    Register value = ToRegister(instr->value());
    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
             Representation::Integer32());
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, FieldMemOperand(destination, offset), representation);
  }
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    __ RecordWriteField(destination,
                        offset,
                        value,                       // Clobbered.
                        ToRegister(instr->temp1()),  // Clobbered.
                        GetLinkRegisterState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }

  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->value())
             .is(StoreGlobalViaContextDescriptor::ValueRegister()));

  int const slot = instr->slot_index();
  int const depth = instr->depth();
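  // For context chains no deeper than the stub's maximum, a specialized stub
  // walks the chain and performs the store directly; deeper chains fall back
  // to the runtime, which expects the slot index and the value on the stack.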
  if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
    __ Mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
    Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
                            isolate(), depth, instr->language_mode())
                            .code();
    CallCode(stub, RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(Smi::FromInt(slot));
    __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
    __ CallRuntime(is_strict(instr->language_mode())
                       ? Runtime::kStoreGlobalViaContext_Strict
                       : Runtime::kStoreGlobalViaContext_Sloppy,
                   2);
  }
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


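// The LDeferredCode pattern used below splits an instruction into a fast
// inline path and an out-of-line slow path: the inline code jumps to
// deferred->entry() for the cases it cannot handle, the deferred Generate()
// calls into the runtime, and control rejoins at deferred->exit().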
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTagAndPush(index);

  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());

  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
  // Look the character up in the single-character string cache; a miss
  // (an undefined entry) goes to the deferred runtime call.
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ SmiTagAndPush(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic =
      CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  InlineSmiCheckInfo::EmitNotInlined(masm());

  Condition condition = TokenToCondition(op, false);
  // The compare IC leaves its result in x0; testing it against zero with the
  // token's condition yields the branch direction.
  EmitCompareAndBranch(instr, condition, x0, 0);
}


void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


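// DoSubS is the tagged (smi) twin of DoSubI: identical control flow, but on
// the full 64-bit registers, since arm64 smis carry their payload in the
// upper 32 bits and overflow must therefore be detected at 64 bits.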
void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to undefined check.
    __ JumpIfNotHeapNumber(input, &check_bools);

    // A heap number: load value and convert to int32 using truncating
    // function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);

    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
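    // The Cmp/Cset/Ccmp sequence converts a boolean without branching:
    // output is 1 if the input was the true root, 0 otherwise; the
    // conditional compare then checks against the false root only when the
    // input was not true (otherwise it forces the Z flag via ZFlag), so "eq"
    // below means "input was either boolean" and output already holds the
    // right value.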
    __ B(eq, &done);

    // Output contains zero, undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                        Deoptimizer::kNotAHeapNumberUndefinedBoolean);
  } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    DeoptimizeIfNotHeapNumber(input, instr);

    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      // The integer result is zero; deopt if the double was in fact -0.0.
      // Fmov copies the raw bits, so a set sign bit makes scratch1 negative.
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
    }
  }
  __ Bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).Is(x0));
  DCHECK(ToRegister(instr->result()).Is(x0));
  __ Push(x0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create regexp literal using runtime function
  // Result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

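  // Both paths (the cached boilerplate and the freshly materialized one)
  // leave the boilerplate JSRegExp in x1; the code below makes a fresh
  // shallow copy of it for this literal site.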
  __ Bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);

  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

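  // A simple map change transition keeps the backing store layout the same
  // (e.g. FAST_SMI_ELEMENTS -> FAST_ELEMENTS), so only the map pointer needs
  // rewriting; anything that must also rewrite the elements array (e.g. smi
  // -> double elements) goes through TransitionElementsKindStub.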
  Label not_applicable;

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen checks
      // that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
    DCHECK(object.is(x0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(x3));
  DCHECK(ToRegister(instr->result()).is(x0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  // A smi is always a number, so answer "number" without calling the stub.
  __ JumpIfNotSmi(value_register, &do_call);
  __ Mov(x0, Immediate(isolate()->factory()->number_string()));
  __ B(&end);
  __ Bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ Bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(value, true_label);

    int true_block = instr->TrueDestination(chunk_);
    int false_block = instr->FalseDestination(chunk_);
    int next_block = GetNextEmittedBlock();

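    // Pick the cheapest branch shape: a successor that is the next emitted
    // block can be reached by fall-through, so only the other edge needs an
    // explicit branch.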
    if (true_block == false_block) {
      EmitGoto(true_block);
    } else if (true_block == next_block) {
      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
    } else {
      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
      if (false_block != next_block) {
        __ B(chunk_->GetAssemblyLabel(false_block));
      }
    }

  } else if (String::Equals(type_name, factory->string_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE);
    EmitBranch(instr, lt);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    DCHECK(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // HeapObject's type has been loaded into type register by JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (String::Equals(type_name, factory->object_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

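  // SIMD128_TYPES is an x-macro: the definition below expands to one
  // "} else if" arm per SIMD type, each comparing the value's map against
  // that type's canonical map in the root list.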
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
  } else if (String::Equals(type_name, factory->type##_string())) { \
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); \
    Register map = ToRegister(instr->temp1()); \
                                               \
    __ JumpIfSmi(value, false_label); \
    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); \
    __ CompareRoot(map, Heap::k##Type##MapRootIndex); \
    EmitBranch(instr, eq);
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

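  // The index is a smi carrying a flag in the low bit of its payload: if the
  // bit corresponding to Smi::FromInt(1) is set in the raw value, the field
  // holds a heap-allocated mutable double and is loaded via the deferred
  // path. On the inline path that bit is clear, so the arithmetic shift
  // right by one below yields Smi::FromInt(property_index); a negative index
  // selects the out-of-object properties array.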
  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}

}  // namespace internal
}  // namespace v8