// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

class SafepointGenerator FINAL : public CallWrapper {
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)

  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);

  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags and they must have
// been set by some prior instructions.
// The EmitInverted function simply inverts the condition.
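// For example (illustrative sketch, not part of this file): if cond_ is 'eq',
// Emit(label) produces `b.eq label` while EmitInverted(label) produces
// `b.ne label`, both consuming flags set by an earlier Cmp or Tst.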
class BranchOnCondition : public BranchGenerator {
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen),

  virtual void Emit(Label* label) const {

  virtual void EmitInverted(Label* label) const {
    __ B(NegateCondition(cond_), label);

// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
// EmitInverted still compares the two operands but inverts the condition.
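// Illustrative sketch (not from this file): for a comparison against zero,
//   __ CompareAndBranch(x0, 0, eq, label);
// can be emitted as the single instruction `cbz x0, label` instead of
// `cmp x0, #0; b.eq label`, which also leaves the flags untouched.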
class CompareAndBranch : public BranchGenerator {
  CompareAndBranch(LCodeGen* codegen,
      : BranchGenerator(codegen),

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);

// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
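// Illustrative sketch (not from this file): when the mask is a single bit,
//   __ TestAndBranchIfAnySet(value, 1 << 3, label);
// can become `tbnz value, #3, label`, whereas a general mask falls back to
// `tst value, #mask; b.ne label`.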
class TestAndBranch : public BranchGenerator {
  TestAndBranch(LCodeGen* codegen,
                const Register& value,
      : BranchGenerator(codegen),

  virtual void Emit(Label* label) const {
        __ TestAndBranchIfAllClear(value_, mask_, label);
        __ TestAndBranchIfAnySet(value_, mask_, label);
        __ Tst(value_, mask_);

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
        __ TestAndBranchIfAnySet(value_, mask_, label);
        __ TestAndBranchIfAllClear(value_, mask_, label);
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);

  const Register& value_;

// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
      : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);

  const FPRegister& value_;
  const FPRegister& scratch_;

// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);

  const Register& value_;

// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);

  const Register& value_;
  const Heap::RootListIndex index_;

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();
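  // Worked example (sketch): an environment with translation_size == 5 and
  // parameter_count == 2 describes an output frame of height 3; the two
  // parameters live in the caller's frame and are not part of the height.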
  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      translation->BeginConstructStubFrame(closure_id, translation_size);
      DCHECK(translation_size == 1);
      translation->BeginGetterStubFrame(closure_id);
      DCHECK(translation_size == 2);
      translation->BeginSetterStubFrame(closure_id);
      translation->BeginCompiledStubFrame();
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &dematerialized_index);

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
      translation->BeginCapturedObject(object_length);
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);

  if (op->IsStackSlot()) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
      translation->StoreInt32StackSlot(op->index());
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
      translation->StoreInt32Register(reg);
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  deoptimization_literals_.Add(literal, zone());
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      if (e->frame_type() == JS_FUNCTION) {
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());

void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);

void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());

void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).Is(x1));
  DCHECK(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(x3));
    DCHECK(vector_register.is(x2));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Mov(vector_register, vector);
    __ Mov(slot_register, Operand(Smi::FromInt(index)));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  after_push_argument_ = false;
void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  after_push_argument_ = false;

  DCHECK(ToRegister(instr->result()).is(x0));

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    if (IsFastPackedElementsKind(kind)) {
      // We might need to create a holey array; look at the first argument.
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);

      __ Bind(&packed_case);
    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  after_push_argument_ = false;

  DCHECK(ToRegister(instr->result()).is(x0));

void LCodeGen::CallRuntime(const Runtime::Function* function,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();

void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();

void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);

  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);

void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);

  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);
bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t FLAG in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
        !info_->is_native()) {
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Poke(x10, receiver_offset);
  DCHECK(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ Prologue(info()->IsCodePreAgingActive());
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
    __ Claim(slots, kPointerSize);

  if (info()->saves_caller_doubles()) {

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    RecordSafepoint(Safepoint::kNoLazyDeopt);

    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register scratch = x3;
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);
    Comment(";;; End allocate local context");
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);

  return !is_aborted();
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
bool LCodeGen::GenerateJumpTable() {
  Label needs_frame, restore_caller_doubles, call_deopt_entry;

  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ Bind(&table_entry->label);

      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
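      // Sketch of the effect (hypothetical addresses): with base == 0x1000 and
      // this entry at 0x1040, only the small immediate 0x40 is materialized
      // here; the common tail below loads the base once into deopt_entry and
      // adds the offset to it.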
      __ Mov(entry_offset, entry - base);

      // The last entry can fall through into `call_deopt_entry`, avoiding a
      // branch.
      bool last_entry = (i + 1) == length;
      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        if (!needs_frame.is_bound()) {
          // This variant of deopt can only be used with stubs. Since we don't
          // have a function pointer to install in the stack frame that we're
          // building, install a special marker there instead.
          DCHECK(info()->IsStub());

          UseScratchRegisterScope temps(masm());
          Register stub_marker = temps.AcquireX();
          __ Bind(&needs_frame);
          __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
          __ Push(lr, fp, cp, stub_marker);
          __ Add(fp, __ StackPointer(), 2 * kPointerSize);
          if (!last_entry) __ B(&call_deopt_entry);
          // Reuse the existing needs_frame code.
      } else if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        if (!restore_caller_doubles.is_bound()) {
          __ Bind(&restore_caller_doubles);
          RestoreCallerDoubles();
          if (!last_entry) __ B(&call_deopt_entry);
          // Reuse the existing restore_caller_doubles code.
          __ B(&restore_caller_doubles);
        // There is nothing special to do, so just continue to the second-level
        // table.
        if (!last_entry) __ B(&call_deopt_entry);
      masm()->CheckConstPool(false, last_entry);

    // Generate common code for calling the second-level deopt table.
    Register deopt_entry = temps.AcquireX();
    __ Bind(&call_deopt_entry);
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Call(deopt_entry);

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();

bool LCodeGen::GenerateSafepointTable() {
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
void LCodeGen::FinishCode(Handle<Code> code) {
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));

  code->set_deoptimization_data(*data);

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));

  inlined_function_count_ = deoptimization_literals_.length();
void LCodeGen::DeoptimizeBranch(
    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;

  DCHECK(environment->HasBeenRegistered());
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Ldr(w1, MemOperand(x0));
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    DCHECK(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Str(w1, MemOperand(x0));
  if (info()->ShouldTrapOnDeopt()) {
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);

  Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
                                    instr->Mnemonic(), deopt_reason);
  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to build frame, or restore caller doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    __ B(&jump_table_.last()->label, branch_type, reg, bit);
void LCodeGen::Deoptimize(LInstruction* instr,
                          Deoptimizer::DeoptReason deopt_reason,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
                   override_bailout_type);

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));

void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);

void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);

void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);

void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
                               Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);

void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);

void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                                LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, instr, deopt_reason);

void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                                   LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, instr, deopt_reason);

void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                                     Deoptimizer::DeoptReason deopt_reason) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, instr, deopt_reason);

void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber);

void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);

void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        padding_size -= kInstructionSize;

  last_lazy_deopt_pc_ = masm()->pc_offset();
Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  DCHECK((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());

Register LCodeGen::ToRegister32(LOperand* op) const {
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
    return ToRegister(op).W();

Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());

DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());

Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
  // Stack slots not implemented, use ToMemOperand instead.
Operand LCodeGen::ToOperand32(LOperand* op) {
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      return Operand(constant->Integer32Value());
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
  // Other cases are not implemented.

static int64_t ArgumentsOffsetWithoutFrame(int index) {
  return -(index + 1) * kPointerSize;
MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    int fp_offset = StackSlotOffset(op->index());
    if (op->index() >= 0) {
      // Loads and stores have a bigger reach with positive offsets than with
      // negative ones. When the load or the store can't be done in one
      // instruction via fp (too big a negative offset), we try to access via
      // jssp (positive offset). We can reference a stack slot from jssp only
      // if jssp references the end of the stack slots. That is not the case
      // when:
      //  - stack_mode != kCanUseStackPointer: this is the case when deferred
      //    code has saved the registers.
      //  - after_push_argument_: arguments have been pushed for a call.
      //  - inlined_arguments_: inlined arguments have been pushed once. All
      //    the remainder of the function cannot trust jssp any longer.
      //  - saves_caller_doubles: some double registers have been pushed; jssp
      //    references the end of the double registers and not the end of the
      //    stack slots.
      // Also, if the offset from fp is small enough to make a load/store in
      // one instruction, we use an fp access.
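      // Worked example (sketch, hypothetical values): with 40 stack slots and
      // op->index() == 2, jssp_offset below would be (40 - 2 - 1) *
      // kPointerSize, a small positive offset from jssp that is usable even
      // when fp_offset does not fit in the signed 9-bit unscaled immediate
      // checked by is_int9().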
      if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
          !inlined_arguments_ && !is_int9(fp_offset) &&
          !info()->saves_caller_doubles()) {
            (GetStackSlotCount() - op->index() - 1) * kPointerSize;
        return MemOperand(masm()->StackPointer(), jssp_offset);
    return MemOperand(fp, fp_offset);
    // Retrieve the parameter without an eager stack frame, relative to the
    // stack pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());

Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
  if (shift_info->shift() == NO_SHIFT) {
    return ToOperand32(right);
    ToRegister32(right),
    shift_info->shift(),
    JSShiftAmountFromLConstant(shift_info->shift_amount()));

bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();

bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();

int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();

double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();

Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
    case Token::EQ_STRICT:
    case Token::NE_STRICT:
      cond = is_unsigned ? lo : lt;
      cond = is_unsigned ? hi : gt;
      cond = is_unsigned ? ls : le;
      cond = is_unsigned ? hs : ge;
    case Token::INSTANCEOF:
template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    if (right_block != next_block) {
      __ B(chunk_->GetAssemblyLabel(right_block));

template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  DCHECK((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);

template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  DCHECK((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);

template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
  DCHECK((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);

template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);

template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);

template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    resolver_.Resolve(move);
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.
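  // Worked example (sketch): with length == 3 and index == 0, the offset below
  // is (3 - 0 + 1) * kPointerSize, so the load reads four words above the
  // 'arguments' pointer.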
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = (instr->right()->IsConstantOperand())
      ? ToInteger32(LConstantOperand::cast(instr->right()))
      : Operand(ToRegister32(instr->right()), SXTW);

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);

void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    __ Add(result, left, right);

void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    __ Add(result, left, right);

void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
      __ B(deferred->entry());
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register filler_count = temp1;
    Register filler = temp2;
    Register untagged_result = ToRegister(instr->temp3());

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Mov(filler_count, size / kPointerSize);
      __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);

    __ Sub(untagged_result, result, kHeapObjectTag);
    __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ FillFields(untagged_result, filler_count, filler);
    DCHECK(instr->temp3() == NULL);
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  // We're in a SafepointRegistersScope so we can use any scratch registers.
  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
    __ SmiTag(size, ToRegister32(instr->size()).X());
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  __ Mov(x10, Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister32(instr->length());

  Register elements = ToRegister(instr->elements());
  Register scratch = x5;
  DCHECK(receiver.Is(x0));  // Used for parameter count.
  DCHECK(function.Is(x1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);

  // Push the receiver and use the register to keep the original
  // number of arguments.
  Register argc = receiver;
  __ Sxtw(argc, length);
  // The arguments are at a one pointer size offset from elements.
  __ Add(elements, elements, 1 * kPointerSize);
  // Loop through the arguments pushing them onto the execution
  // stack.
  // length is a small non-negative integer, due to the test above.
  __ Cbz(length, &invoke);
  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
  __ Subs(length, length, 1);

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in argc (receiver) which is x0, as
  // expected by InvokeFunction.
  ParameterCount actual(argc);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  // We push some arguments and they will be popped in another block. We can't
  // trust that jssp references the end of the stack slots until the end of
  // the function.
  inlined_arguments_ = true;
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // When we are inside an inlined function, the arguments are the last
    // things that have been pushed on the stack. Therefore the arguments
    // array can be accessed directly from jssp.
    // However in the normal case, it is accessed via fp but there are two
    // words on the stack between fp and the arguments (the saved lr and fp)
    // and the LAccessArgumentsAt implementation takes that into account.
    // In the inlined case we need to subtract the size of 2 words from jssp
    // to get a pointer which will work well with LAccessArgumentsAt.
    DCHECK(masm()->StackPointer().Is(jssp));
    __ Sub(result, jssp, 2 * kPointerSize);
    DCHECK(instr->temp() != NULL);
    Register previous_fp = ToRegister(instr->temp());
           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ Csel(result, fp, previous_fp, ne);
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister32(instr->result());

  // If no arguments adaptor frame the number of arguments is fixed.
  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());

  // Arguments adaptor frame present. Get argument length from there.
  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
         UntagSmiMemOperand(result.X(),
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());

  switch (instr->op()) {
    case Token::ADD: __ Fadd(result, left, right); break;
    case Token::SUB: __ Fsub(result, left, right); break;
    case Token::MUL: __ Fmul(result, left, right); break;
    case Token::DIV: __ Fdiv(result, left, right); break;
      // The ECMA-262 remainder operator is the remainder from a truncating
      // (round-towards-zero) division. Note that this differs from IEEE-754.
      // TODO(jbramley): See if it's possible to do this inline, rather than by
      // calling a helper function. With frintz (to produce the intermediate
      // quotient) and fmsub (to calculate the remainder without loss of
      // precision), it should be possible. However, we would need support for
      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
      // support that yet.
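      // As a sketch, the identity the TODO above relies on is:
      //   remainder = lhs - frintz(lhs / rhs) * rhs
      // where fmsub would compute the multiply-subtract in a single rounding
      // step, avoiding the loss of precision of a separate fmul and fsub.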
      DCHECK(left.Is(d0));
      DCHECK(right.Is(d1));
          ExternalReference::mod_two_doubles_operation(isolate()),
      DCHECK(result.Is(d0));

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(x1));
  DCHECK(ToRegister(instr->right()).is(x0));
  DCHECK(ToRegister(instr->result()).is(x0));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
void LCodeGen::DoBitI(LBitI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR: __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;

void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR: __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;

void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand32(instr->index());
    Register length = ToRegister32(instr->length());
    __ Cmp(length, index);
    cond = CommuteCondition(cond);
    Register index = ToRegister32(instr->index());
    Operand length = ToOperand32(instr->length());
    __ Cmp(index, length);
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
    DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
1866 void LCodeGen::DoBranch(LBranch* instr) {
1867 Representation r = instr->hydrogen()->value()->representation();
1868 Label* true_label = instr->TrueLabel(chunk_);
1869 Label* false_label = instr->FalseLabel(chunk_);
1871 if (r.IsInteger32()) {
1872 DCHECK(!info()->IsStub());
1873 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1874 } else if (r.IsSmi()) {
1875 DCHECK(!info()->IsStub());
1876 STATIC_ASSERT(kSmiTag == 0);
1877 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1878 } else if (r.IsDouble()) {
1879 DoubleRegister value = ToDoubleRegister(instr->value());
1880 // Test the double value. Zero and NaN are false.
1881 EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1883 DCHECK(r.IsTagged());
1884 Register value = ToRegister(instr->value());
1885 HType type = instr->hydrogen()->value()->type();
1887 if (type.IsBoolean()) {
1888 DCHECK(!info()->IsStub());
1889 __ CompareRoot(value, Heap::kTrueValueRootIndex);
1890 EmitBranch(instr, eq);
1891 } else if (type.IsSmi()) {
1892 DCHECK(!info()->IsStub());
1893 EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
1894 } else if (type.IsJSArray()) {
1895 DCHECK(!info()->IsStub());
1896 EmitGoto(instr->TrueDestination(chunk()));
1897 } else if (type.IsHeapNumber()) {
1898 DCHECK(!info()->IsStub());
1899 __ Ldr(double_scratch(), FieldMemOperand(value,
1900 HeapNumber::kValueOffset));
1901 // Test the double value. Zero and NaN are false.
1902 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1903 } else if (type.IsString()) {
1904 DCHECK(!info()->IsStub());
1905 Register temp = ToRegister(instr->temp1());
1906 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1907 EmitCompareAndBranch(instr, ne, temp, 0);
1909 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1910 // Avoid deopts in the case where we've never executed this path before.
1911 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
1913 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1914 // undefined -> false.
1915 __ JumpIfRoot(
1916 value, Heap::kUndefinedValueRootIndex, false_label);
1919 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1920 // Boolean -> its value.
1921 __ JumpIfRoot(
1922 value, Heap::kTrueValueRootIndex, true_label);
1923 __ JumpIfRoot(
1924 value, Heap::kFalseValueRootIndex, false_label);
1927 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1928 // null -> false.
1929 __ JumpIfRoot(
1930 value, Heap::kNullValueRootIndex, false_label);
1933 if (expected.Contains(ToBooleanStub::SMI)) {
1934 // Smis: 0 -> false, all other -> true.
1935 DCHECK(Smi::FromInt(0) == 0);
1936 __ Cbz(value, false_label);
1937 __ JumpIfSmi(value, true_label);
1938 } else if (expected.NeedsMap()) {
1939 // If we need a map later and have a smi, deopt.
1940 DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
1943 Register map = NoReg;
1944 Register scratch = NoReg;
1946 if (expected.NeedsMap()) {
1947 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
1948 map = ToRegister(instr->temp1());
1949 scratch = ToRegister(instr->temp2());
1951 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1953 if (expected.CanBeUndetectable()) {
1954 // Undetectable -> false.
1955 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1956 __ TestAndBranchIfAnySet(
1957 scratch, 1 << Map::kIsUndetectable, false_label);
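// Undetectable objects (for example document.all) masquerade as undefined,
// so they are treated as falsy here regardless of their actual type.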
1961 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1962 // spec object -> true.
1963 __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1964 __ B(ge, true_label);
1967 if (expected.Contains(ToBooleanStub::STRING)) {
1968 // String value -> false iff empty.
1969 Label not_string;
1970 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1971 __ B(ge, &not_string);
1972 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1973 __ Cbz(scratch, false_label);
1974 __ B(true_label);
1975 __ Bind(&not_string);
1978 if (expected.Contains(ToBooleanStub::SYMBOL)) {
1979 // Symbol value -> true.
1980 __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1981 __ B(eq, true_label);
1984 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1985 Label not_heap_number;
1986 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
1988 __ Ldr(double_scratch(),
1989 FieldMemOperand(value, HeapNumber::kValueOffset));
1990 __ Fcmp(double_scratch(), 0.0);
1991 // If we got a NaN (overflow bit is set), jump to the false branch.
1992 __ B(vs, false_label);
1993 __ B(eq, false_label);
1994 __ B(true_label);
1995 __ Bind(&not_heap_number);
1998 if (!expected.IsGeneric()) {
1999 // We've seen something for the first time -> deopt.
2000 // This can only happen if we are not generic already.
2001 Deoptimize(instr, Deoptimizer::kUnexpectedObject);
2008 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2009 int formal_parameter_count, int arity,
2010 LInstruction* instr) {
2011 bool dont_adapt_arguments =
2012 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2013 bool can_invoke_directly =
2014 dont_adapt_arguments || formal_parameter_count == arity;
2016 // The function interface relies on the following register assignments.
2017 Register function_reg = x1;
2018 Register arity_reg = x0;
2020 LPointerMap* pointers = instr->pointer_map();
2022 if (FLAG_debug_code) {
2023 Label is_not_smi;
2024 // Try to confirm that function_reg (x1) is a tagged pointer.
2025 __ JumpIfNotSmi(function_reg, &is_not_smi);
2026 __ Abort(kExpectedFunctionObject);
2027 __ Bind(&is_not_smi);
2030 if (can_invoke_directly) {
2032 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
2034 // Set the arguments count if adaptation is not needed. Assumes that x0 is
2035 // available to write to at this point.
2036 if (dont_adapt_arguments) {
2037 __ Mov(arity_reg, arity);
2041 __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
2042 __ Call(x10);
2044 // Set up deoptimization.
2045 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2047 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2048 ParameterCount count(arity);
2049 ParameterCount expected(formal_parameter_count);
2050 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
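// When the formal parameter count matches the call arity (or adaptation is
// explicitly skipped), the code entry is called directly above. Otherwise
// InvokeFunction compares expected and actual argument counts and may route
// the call through the arguments adaptor trampoline.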
2055 void LCodeGen::DoTailCallThroughMegamorphicCache(
2056 LTailCallThroughMegamorphicCache* instr) {
2057 Register receiver = ToRegister(instr->receiver());
2058 Register name = ToRegister(instr->name());
2059 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
2060 DCHECK(name.is(LoadDescriptor::NameRegister()));
2061 DCHECK(receiver.is(x1));
2062 DCHECK(name.is(x2));
2063 Register scratch = x4;
2064 Register extra = x5;
2065 Register extra2 = x6;
2066 Register extra3 = x7;
2067 DCHECK(!FLAG_vector_ics ||
2068 !AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
2069 scratch, extra, extra2, extra3));
2071 // Important for the tail-call.
2072 bool must_teardown_frame = NeedsEagerFrame();
2074 if (!instr->hydrogen()->is_just_miss()) {
2075 DCHECK(!instr->hydrogen()->is_keyed_load());
2077 // The probe will tail call to a handler if found.
2078 isolate()->stub_cache()->GenerateProbe(
2079 masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
2080 receiver, name, scratch, extra, extra2, extra3);
2083 // Tail call to miss if we ended up here.
2084 if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
2085 if (instr->hydrogen()->is_keyed_load()) {
2086 KeyedLoadIC::GenerateMiss(masm());
2088 LoadIC::GenerateMiss(masm());
2093 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
2094 DCHECK(instr->IsMarkedAsCall());
2095 DCHECK(ToRegister(instr->result()).Is(x0));
2097 if (instr->hydrogen()->IsTailCall()) {
2098 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
2100 if (instr->target()->IsConstantOperand()) {
2101 LConstantOperand* target = LConstantOperand::cast(instr->target());
2102 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2103 // TODO(all): on ARM we use a call descriptor to specify a storage mode
2104 // but on ARM64 we only have one storage mode so it isn't necessary. Check
2105 // this understanding is correct.
2106 __ Jump(code, RelocInfo::CODE_TARGET);
2108 DCHECK(instr->target()->IsRegister());
2109 Register target = ToRegister(instr->target());
2110 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2111 __ Br(target);
2114 LPointerMap* pointers = instr->pointer_map();
2115 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2117 if (instr->target()->IsConstantOperand()) {
2118 LConstantOperand* target = LConstantOperand::cast(instr->target());
2119 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2120 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
2121 // TODO(all): on ARM we use a call descriptor to specify a storage mode
2122 // but on ARM64 we only have one storage mode so it isn't necessary. Check
2123 // this understanding is correct.
2124 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
2126 DCHECK(instr->target()->IsRegister());
2127 Register target = ToRegister(instr->target());
2128 generator.BeforeCall(__ CallSize(target));
2129 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2130 __ Call(target);
2132 generator.AfterCall();
2135 after_push_argument_ = false;
2139 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
2140 DCHECK(instr->IsMarkedAsCall());
2141 DCHECK(ToRegister(instr->function()).is(x1));
2143 if (instr->hydrogen()->pass_argument_count()) {
2144 __ Mov(x0, Operand(instr->arity()));
2148 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2150 // Load the code entry address and call it.
2151 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
2152 __ Call(x10);
2154 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2155 after_push_argument_ = false;
2159 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2160 CallRuntime(instr->function(), instr->arity(), instr);
2161 after_push_argument_ = false;
2165 void LCodeGen::DoCallStub(LCallStub* instr) {
2166 DCHECK(ToRegister(instr->context()).is(cp));
2167 DCHECK(ToRegister(instr->result()).is(x0));
2168 switch (instr->hydrogen()->major_key()) {
2169 case CodeStub::RegExpExec: {
2170 RegExpExecStub stub(isolate());
2171 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2174 case CodeStub::SubString: {
2175 SubStringStub stub(isolate());
2176 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2179 case CodeStub::StringCompare: {
2180 StringCompareStub stub(isolate());
2181 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2187 after_push_argument_ = false;
2191 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
2192 GenerateOsrPrologue();
2196 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
2197 Register temp = ToRegister(instr->temp());
2199 PushSafepointRegistersScope scope(this);
2200 __ Push(object);
2201 __ Mov(cp, 0);
2202 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2203 RecordSafepointWithRegisters(
2204 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2205 __ StoreToSafepointRegisterSlot(x0, temp);
2207 DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
2211 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2212 class DeferredCheckMaps: public LDeferredCode {
2214 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2215 : LDeferredCode(codegen), instr_(instr), object_(object) {
2216 SetExit(check_maps());
2218 virtual void Generate() {
2219 codegen()->DoDeferredInstanceMigration(instr_, object_);
2221 Label* check_maps() { return &check_maps_; }
2222 virtual LInstruction* instr() { return instr_; }
2229 if (instr->hydrogen()->IsStabilityCheck()) {
2230 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2231 for (int i = 0; i < maps->size(); ++i) {
2232 AddStabilityDependency(maps->at(i).handle());
2234 return;
2237 Register object = ToRegister(instr->value());
2238 Register map_reg = ToRegister(instr->temp());
2240 __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
2242 DeferredCheckMaps* deferred = NULL;
2243 if (instr->hydrogen()->HasMigrationTarget()) {
2244 deferred = new(zone()) DeferredCheckMaps(this, instr, object);
2245 __ Bind(deferred->check_maps());
2248 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2249 Label success;
2250 for (int i = 0; i < maps->size() - 1; i++) {
2251 Handle<Map> map = maps->at(i).handle();
2252 __ CompareMap(map_reg, map);
2253 __ B(eq, &success);
2255 Handle<Map> map = maps->at(maps->size() - 1).handle();
2256 __ CompareMap(map_reg, map);
2258 // We didn't match a map.
2259 if (instr->hydrogen()->HasMigrationTarget()) {
2260 __ B(ne, deferred->entry());
2262 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
2265 __ Bind(&success);
2269 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2270 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2271 DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
2276 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2277 Register value = ToRegister(instr->value());
2278 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
2279 DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
2283 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2284 Register input = ToRegister(instr->value());
2285 Register scratch = ToRegister(instr->temp());
2287 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2288 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2290 if (instr->hydrogen()->is_interval_check()) {
2291 InstanceType first, last;
2292 instr->hydrogen()->GetCheckInterval(&first, &last);
2294 __ Cmp(scratch, first);
2295 if (first == last) {
2296 // If there is only one type in the interval check for equality.
2297 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
2298 } else if (last == LAST_TYPE) {
2299 // We don't need to compare with the higher bound of the interval.
2300 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
2302 // If we are below the lower bound, set the C flag and clear the Z flag
2303 // to force a deopt.
2304 __ Ccmp(scratch, last, CFlag, hs);
2305 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
2308 uint8_t mask;
2309 uint8_t tag;
2310 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2312 if (base::bits::IsPowerOfTwo32(mask)) {
2313 DCHECK((tag == 0) || (tag == mask));
2314 if (tag == 0) {
2315 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
2316 Deoptimizer::kWrongInstanceType);
2317 } else {
2318 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
2319 Deoptimizer::kWrongInstanceType);
2322 if (tag == 0) {
2323 __ Tst(scratch, mask);
2324 } else {
2325 __ And(scratch, scratch, mask);
2326 __ Cmp(scratch, tag);
2328 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
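// When the mask has a single bit set, the check above reduces to a single
// Tbz/Tbnz on that bit (via MaskToBit): tag == 0 means the bit must be
// clear, tag == mask means it must be set. The general case falls back to
// Tst (tag == 0) or And followed by Cmp.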
2334 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2335 DoubleRegister input = ToDoubleRegister(instr->unclamped());
2336 Register result = ToRegister32(instr->result());
2337 __ ClampDoubleToUint8(result, input, double_scratch());
2341 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
2342 Register input = ToRegister32(instr->unclamped());
2343 Register result = ToRegister32(instr->result());
2344 __ ClampInt32ToUint8(result, input);
2348 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
2349 Register input = ToRegister(instr->unclamped());
2350 Register result = ToRegister32(instr->result());
2352 Label done;
2353 // Both smi and heap number cases are handled.
2354 Label is_not_smi;
2355 __ JumpIfNotSmi(input, &is_not_smi);
2356 __ SmiUntag(result.X(), input);
2357 __ ClampInt32ToUint8(result);
2358 __ B(&done);
2360 __ Bind(&is_not_smi);
2362 // Check for heap number.
2363 Label is_heap_number;
2364 __ JumpIfHeapNumber(input, &is_heap_number);
2366 // Check for undefined. Undefined is converted to zero for clamping conversions.
2367 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
2368 Deoptimizer::kNotAHeapNumberUndefined);
2369 __ Mov(result, 0);
2370 __ B(&done);
2372 // Heap number case.
2373 __ Bind(&is_heap_number);
2374 DoubleRegister dbl_scratch = double_scratch();
2375 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
2376 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2377 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2379 __ Bind(&done);
2383 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
2384 DoubleRegister value_reg = ToDoubleRegister(instr->value());
2385 Register result_reg = ToRegister(instr->result());
2386 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
2387 __ Fmov(result_reg, value_reg);
2388 __ Lsr(result_reg, result_reg, 32);
2390 __ Fmov(result_reg.W(), value_reg.S());
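// HIGH moves the whole double into a core register and shifts the upper
// word (sign, exponent and top mantissa bits) down; the LOW case reads the
// S view of the FP register, which aliases the low 32 bits of the double.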
2395 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
2396 Register hi_reg = ToRegister(instr->hi());
2397 Register lo_reg = ToRegister(instr->lo());
2398 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2400 // Insert the least significant 32 bits of hi_reg into the most significant
2401 // 32 bits of lo_reg, and move to a floating point register.
2402 __ Bfi(lo_reg, hi_reg, 32, 32);
2403 __ Fmov(result_reg, lo_reg);
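// Bfi inserts hi_reg[31:0] into bits [63:32] of lo_reg, so the core register
// ends up holding the raw IEEE-754 bit pattern. For example, hi = 0x3FF00000
// and lo = 0x00000000 produce 0x3FF0000000000000, which Fmov reinterprets as
// the double 1.0.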
2407 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2408 Handle<String> class_name = instr->hydrogen()->class_name();
2409 Label* true_label = instr->TrueLabel(chunk_);
2410 Label* false_label = instr->FalseLabel(chunk_);
2411 Register input = ToRegister(instr->value());
2412 Register scratch1 = ToRegister(instr->temp1());
2413 Register scratch2 = ToRegister(instr->temp2());
2415 __ JumpIfSmi(input, false_label);
2417 Register map = scratch2;
2418 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2419 // Assuming the following assertions, we can use the same compares to test
2420 // for both being a function type and being in the object type range.
2421 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2422 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2423 FIRST_SPEC_OBJECT_TYPE + 1);
2424 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2425 LAST_SPEC_OBJECT_TYPE - 1);
2426 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2428 // We expect CompareObjectType to load the object instance type in scratch1.
2429 __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
2430 __ B(lt, false_label);
2431 __ B(eq, true_label);
2432 __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
2433 __ B(eq, true_label);
2435 __ IsObjectJSObjectType(input, map, scratch1, false_label);
2438 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2439 // Check if the constructor in the map is a function.
2440 __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
2442 // Objects with a non-function constructor have class 'Object'.
2443 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2444 __ JumpIfNotObjectType(
2445 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
2447 __ JumpIfNotObjectType(
2448 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
2451 // The constructor function is in scratch1. Get its instance class name.
2452 __ Ldr(scratch1,
2453 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
2454 __ Ldr(scratch1,
2455 FieldMemOperand(scratch1,
2456 SharedFunctionInfo::kInstanceClassNameOffset));
2458 // The class name we are testing against is internalized since it's a literal.
2459 // The name in the constructor is internalized because of the way the context
2460 // is booted. This routine isn't expected to work for random API-created
2461 // classes and it doesn't have to because you can't access it with natives
2462 // syntax. Since both sides are internalized it is sufficient to use an
2463 // identity comparison.
2464 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2468 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
2469 DCHECK(instr->hydrogen()->representation().IsDouble());
2470 FPRegister object = ToDoubleRegister(instr->object());
2471 Register temp = ToRegister(instr->temp());
2473 // If we don't have a NaN, we don't have the hole, so branch now to avoid the
2474 // (relatively expensive) hole-NaN check.
2475 __ Fcmp(object, object);
2476 __ B(vc, instr->FalseLabel(chunk_));
2478 // We have a NaN, but is it the hole?
2479 __ Fmov(temp, object);
2480 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
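// Fcmp of a register with itself is unordered (V set) only for NaN, so vc
// filters out all non-NaN doubles cheaply. Only then is the 64-bit payload
// compared against kHoleNanInt64, the specific NaN bit pattern reserved for
// the hole.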
2484 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
2485 DCHECK(instr->hydrogen()->representation().IsTagged());
2486 Register object = ToRegister(instr->object());
2488 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
2492 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2493 Register value = ToRegister(instr->value());
2494 Register map = ToRegister(instr->temp());
2496 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2497 EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2501 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2502 Representation rep = instr->hydrogen()->value()->representation();
2503 DCHECK(!rep.IsInteger32());
2504 Register scratch = ToRegister(instr->temp());
2506 if (rep.IsDouble()) {
2507 __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
2508 instr->TrueLabel(chunk()));
2510 Register value = ToRegister(instr->value());
2511 __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2512 __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2513 __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
2515 EmitGoto(instr->FalseDestination(chunk()));
2519 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2520 LOperand* left = instr->left();
2521 LOperand* right = instr->right();
2522 bool is_unsigned =
2523 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2524 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2525 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2527 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2528 // We can statically evaluate the comparison.
2529 double left_val = ToDouble(LConstantOperand::cast(left));
2530 double right_val = ToDouble(LConstantOperand::cast(right));
2531 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2532 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2533 EmitGoto(next_block);
2535 if (instr->is_double()) {
2536 __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
2538 // If a NaN is involved, i.e. the result is unordered (V set),
2539 // jump to false block label.
2540 __ B(vs, instr->FalseLabel(chunk_));
2541 EmitBranch(instr, cond);
2543 if (instr->hydrogen_value()->representation().IsInteger32()) {
2544 if (right->IsConstantOperand()) {
2545 EmitCompareAndBranch(instr, cond, ToRegister32(left),
2546 ToOperand32(right));
2548 // Commute the operands and the condition.
2549 EmitCompareAndBranch(instr, CommuteCondition(cond),
2550 ToRegister32(right), ToOperand32(left));
2553 DCHECK(instr->hydrogen_value()->representation().IsSmi());
2554 if (right->IsConstantOperand()) {
2555 int32_t value = ToInteger32(LConstantOperand::cast(right));
2556 EmitCompareAndBranch(instr,
2557 cond,
2558 ToRegister(left),
2559 Operand(Smi::FromInt(value)));
2560 } else if (left->IsConstantOperand()) {
2561 // Commute the operands and the condition.
2562 int32_t value = ToInteger32(LConstantOperand::cast(left));
2563 EmitCompareAndBranch(instr,
2564 CommuteCondition(cond),
2565 ToRegister(right),
2566 Operand(Smi::FromInt(value)));
2568 EmitCompareAndBranch(instr,
2569 cond,
2570 ToRegister(left),
2571 ToRegister(right));
2579 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2580 Register left = ToRegister(instr->left());
2581 Register right = ToRegister(instr->right());
2582 EmitCompareAndBranch(instr, eq, left, right);
2586 void LCodeGen::DoCmpT(LCmpT* instr) {
2587 DCHECK(ToRegister(instr->context()).is(cp));
2588 Token::Value op = instr->op();
2589 Condition cond = TokenToCondition(op, false);
2591 DCHECK(ToRegister(instr->left()).Is(x1));
2592 DCHECK(ToRegister(instr->right()).Is(x0));
2593 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2594 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2595 // Signal that we don't inline smi code before this stub.
2596 InlineSmiCheckInfo::EmitNotInlined(masm());
2598 // Return true or false depending on CompareIC result.
2599 // This instruction is marked as call. We can clobber any register.
2600 DCHECK(instr->IsMarkedAsCall());
2601 __ LoadTrueFalseRoots(x1, x2);
2602 __ Cmp(x0, 0);
2603 __ Csel(ToRegister(instr->result()), x1, x2, cond);
2607 void LCodeGen::DoConstantD(LConstantD* instr) {
2608 DCHECK(instr->result()->IsDoubleRegister());
2609 DoubleRegister result = ToDoubleRegister(instr->result());
2610 if (instr->value() == 0) {
2611 if (copysign(1.0, instr->value()) == 1.0) {
2612 __ Fmov(result, fp_zero);
2614 __ Fneg(result, fp_zero);
2617 __ Fmov(result, instr->value());
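// instr->value() == 0 is true for both +0.0 and -0.0, which compare equal,
// so copysign is used to inspect the sign bit and pick fp_zero or its
// negation. All other constants are materialized directly.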
2622 void LCodeGen::DoConstantE(LConstantE* instr) {
2623 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2627 void LCodeGen::DoConstantI(LConstantI* instr) {
2628 DCHECK(is_int32(instr->value()));
2629 // Cast the value here to ensure that the value isn't sign extended by the
2630 // implicit Operand constructor.
2631 __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
2635 void LCodeGen::DoConstantS(LConstantS* instr) {
2636 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2640 void LCodeGen::DoConstantT(LConstantT* instr) {
2641 Handle<Object> object = instr->value(isolate());
2642 AllowDeferredHandleDereference smi_check;
2643 __ LoadObject(ToRegister(instr->result()), object);
2647 void LCodeGen::DoContext(LContext* instr) {
2648 // If there is a non-return use, the context must be moved to a register.
2649 Register result = ToRegister(instr->result());
2650 if (info()->IsOptimizing()) {
2651 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
2653 // If there is no frame, the context must be in cp.
2654 DCHECK(result.is(cp));
2659 void LCodeGen::DoCheckValue(LCheckValue* instr) {
2660 Register reg = ToRegister(instr->value());
2661 Handle<HeapObject> object = instr->hydrogen()->object().handle();
2662 AllowDeferredHandleDereference smi_check;
2663 if (isolate()->heap()->InNewSpace(*object)) {
2664 UseScratchRegisterScope temps(masm());
2665 Register temp = temps.AcquireX();
2666 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2667 __ Mov(temp, Operand(Handle<Object>(cell)));
2668 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2669 __ Cmp(reg, temp);
2671 __ Cmp(reg, Operand(object));
2673 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
2677 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2678 last_lazy_deopt_pc_ = masm()->pc_offset();
2679 DCHECK(instr->HasEnvironment());
2680 LEnvironment* env = instr->environment();
2681 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2682 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2686 void LCodeGen::DoDateField(LDateField* instr) {
2687 Register object = ToRegister(instr->date());
2688 Register result = ToRegister(instr->result());
2689 Register temp1 = x10;
2690 Register temp2 = x11;
2691 Smi* index = instr->index();
2692 Label runtime, done;
2694 DCHECK(object.is(result) && object.Is(x0));
2695 DCHECK(instr->IsMarkedAsCall());
2697 DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
2698 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2699 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
2701 if (index->value() == 0) {
2702 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2704 if (index->value() < JSDate::kFirstUncachedField) {
2705 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2706 __ Mov(temp1, Operand(stamp));
2707 __ Ldr(temp1, MemOperand(temp1));
2708 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2709 __ Cmp(temp1, temp2);
2710 __ B(ne, &runtime);
2711 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
2712 kPointerSize * index->value()));
2713 __ B(&done);
2716 __ Bind(&runtime);
2717 __ Mov(x1, Operand(index));
2718 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2720 __ Bind(&done);
2725 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
2726 Deoptimizer::BailoutType type = instr->hydrogen()->type();
2727 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
2728 // needed return address), even though the implementation of LAZY and EAGER is
2729 // now identical. When LAZY is eventually completely folded into EAGER, remove
2730 // the special case below.
2731 if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
2732 type = Deoptimizer::LAZY;
2735 Deoptimize(instr, instr->hydrogen()->reason(), &type);
2739 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2740 Register dividend = ToRegister32(instr->dividend());
2741 int32_t divisor = instr->divisor();
2742 Register result = ToRegister32(instr->result());
2743 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
2744 DCHECK(!result.is(dividend));
2746 // Check for (0 / -x) that will produce negative zero.
2747 HDiv* hdiv = instr->hydrogen();
2748 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2749 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
2751 // Check for (kMinInt / -1).
2752 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2753 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2754 // overflow.
2755 __ Cmp(dividend, 1);
2756 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
2758 // Deoptimize if remainder will not be 0.
2759 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2760 divisor != 1 && divisor != -1) {
2761 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2762 __ Tst(dividend, mask);
2763 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
2766 if (divisor == -1) { // Nice shortcut, not needed for correctness.
2767 __ Neg(result, dividend);
2770 int32_t shift = WhichPowerOf2Abs(divisor);
2771 if (shift == 0) {
2772 __ Mov(result, dividend);
2773 } else if (shift == 1) {
2774 __ Add(result, dividend, Operand(dividend, LSR, 31));
2776 __ Mov(result, Operand(dividend, ASR, 31));
2777 __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2779 if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2780 if (divisor < 0) __ Neg(result, result);
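// This is the usual round-towards-zero correction for arithmetic shifts:
// for negative dividends, |divisor| - 1 is added before shifting. For
// example, dividend = -7 with shift = 2 (|divisor| = 4): the sign mask is
// 0xffffffff, LSR by 30 gives 3, -7 + 3 = -4, and -4 >> 2 = -1, matching
// the truncating division -7 / 4 = -1.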
2784 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
2785 Register dividend = ToRegister32(instr->dividend());
2786 int32_t divisor = instr->divisor();
2787 Register result = ToRegister32(instr->result());
2788 DCHECK(!AreAliased(dividend, result));
2790 if (divisor == 0) {
2791 Deoptimize(instr, Deoptimizer::kDivisionByZero);
2792 return;
2795 // Check for (0 / -x) that will produce negative zero.
2796 HDiv* hdiv = instr->hydrogen();
2797 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2798 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
2801 __ TruncatingDiv(result, dividend, Abs(divisor));
2802 if (divisor < 0) __ Neg(result, result);
2804 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2805 Register temp = ToRegister32(instr->temp());
2806 DCHECK(!AreAliased(dividend, result, temp));
2807 __ Sxtw(dividend.X(), dividend);
2808 __ Mov(temp, divisor);
2809 __ Smsubl(temp.X(), result, temp, dividend.X());
2810 DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
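// TruncatingDiv divides by multiplying with a precomputed fixed-point
// reciprocal of |divisor|. When not all uses truncate, the remainder is
// recomputed with Smsubl (temp = dividend - result * divisor) and a
// non-zero remainder deoptimizes with kLostPrecision.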
2815 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
2816 void LCodeGen::DoDivI(LDivI* instr) {
2817 HBinaryOperation* hdiv = instr->hydrogen();
2818 Register dividend = ToRegister32(instr->dividend());
2819 Register divisor = ToRegister32(instr->divisor());
2820 Register result = ToRegister32(instr->result());
2822 // Issue the division first, and then check for any deopt cases whilst the
2823 // result is computed.
2824 __ Sdiv(result, dividend, divisor);
2826 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
2827 DCHECK(!instr->temp());
2828 return;
2832 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
2833 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
2836 // Check for (0 / -x) as that will produce negative zero.
2837 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
2838 __ Cmp(divisor, 0);
2840 // If the divisor < 0 (mi), compare the dividend, and deopt if it is
2841 // zero, ie. zero dividend with negative divisor deopts.
2842 // If the divisor >= 0 (pl, the opposite of mi) set the flags to
2843 // condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
2844 __ Ccmp(dividend, 0, NoFlag, mi);
2845 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
2848 // Check for (kMinInt / -1).
2849 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
2850 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2851 // overflow.
2852 __ Cmp(dividend, 1);
2853 // If overflow is set, ie. dividend = kMinInt, compare the divisor with
2854 // -1. If overflow is clear, set the flags for condition ne, as the
2855 // dividend isn't -1, and thus we shouldn't deopt.
2856 __ Ccmp(divisor, -1, NoFlag, vs);
2857 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
2860 // Compute remainder and deopt if it's not zero.
2861 Register remainder = ToRegister32(instr->temp());
2862 __ Msub(remainder, result, divisor, dividend);
2863 DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
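// Msub computes remainder = dividend - result * divisor in one instruction;
// a non-zero remainder means the division was inexact, which is only legal
// when all uses truncate, so otherwise it deoptimizes with kLostPrecision.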
2867 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
2868 DoubleRegister input = ToDoubleRegister(instr->value());
2869 Register result = ToRegister32(instr->result());
2871 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2872 DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
2875 __ TryRepresentDoubleAsInt32(result, input, double_scratch());
2876 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
2878 if (instr->tag_result()) {
2879 __ SmiTag(result.X());
2884 void LCodeGen::DoDrop(LDrop* instr) {
2885 __ Drop(instr->count());
2889 void LCodeGen::DoDummy(LDummy* instr) {
2890 // Nothing to see here, move on!
2894 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2895 // Nothing to see here, move on!
2899 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2900 DCHECK(ToRegister(instr->context()).is(cp));
2901 // FunctionLiteral instruction is marked as call, we can trash any register.
2902 DCHECK(instr->IsMarkedAsCall());
2904 // Use the fast case closure allocation code that allocates in new
2905 // space for nested functions that don't need literals cloning.
2906 bool pretenure = instr->hydrogen()->pretenure();
2907 if (!pretenure && instr->hydrogen()->has_no_literals()) {
2908 FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
2909 instr->hydrogen()->kind());
2910 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2911 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2913 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2914 __ Mov(x1, Operand(pretenure ? factory()->true_value()
2915 : factory()->false_value()));
2916 __ Push(cp, x2, x1);
2917 CallRuntime(Runtime::kNewClosure, 3, instr);
2922 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
2923 Register map = ToRegister(instr->map());
2924 Register result = ToRegister(instr->result());
2925 Label load_cache, done;
2927 __ EnumLengthUntagged(result, map);
2928 __ Cbnz(result, &load_cache);
2930 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2931 __ B(&done);
2933 __ Bind(&load_cache);
2934 __ LoadInstanceDescriptors(map, result);
2935 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2936 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2937 DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
2939 __ Bind(&done);
2943 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2944 Register object = ToRegister(instr->object());
2945 Register null_value = x5;
2947 DCHECK(instr->IsMarkedAsCall());
2948 DCHECK(object.Is(x0));
2950 DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr,
2951 Deoptimizer::kUndefined);
2953 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2954 __ Cmp(object, null_value);
2955 DeoptimizeIf(eq, instr, Deoptimizer::kNull);
2957 DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
2959 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
2960 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
2961 DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
2963 Label use_cache, call_runtime;
2964 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
2966 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2967 __ B(&use_cache);
2969 // Get the set of properties to enumerate.
2970 __ Bind(&call_runtime);
2971 __ Push(object);
2972 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
2974 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
2975 DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
2976 Deoptimizer::kWrongMap);
2978 __ Bind(&use_cache);
2982 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2983 Register input = ToRegister(instr->value());
2984 Register result = ToRegister(instr->result());
2986 __ AssertString(input);
2988 // Assert that we can use a W register load to get the hash.
2989 DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
2990 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
2991 __ IndexFromHash(result, result);
2995 void LCodeGen::EmitGoto(int block) {
2996 // Do not emit jump if we are emitting a goto to the next block.
2997 if (!IsNextEmittedBlock(block)) {
2998 __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
3003 void LCodeGen::DoGoto(LGoto* instr) {
3004 EmitGoto(instr->block_id());
3008 void LCodeGen::DoHasCachedArrayIndexAndBranch(
3009 LHasCachedArrayIndexAndBranch* instr) {
3010 Register input = ToRegister(instr->value());
3011 Register temp = ToRegister32(instr->temp());
3013 // Assert that the cache status bits fit in a W register.
3014 DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
3015 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
3016 __ Tst(temp, String::kContainsCachedArrayIndexMask);
3017 EmitBranch(instr, eq);
3021 // HHasInstanceTypeAndBranch instruction is built with an interval of type
3022 // to test but is only used in very restricted ways. The only possible kinds
3023 // of intervals are:
3024 // - [ FIRST_TYPE, instr->to() ]
3025 // - [ instr->from(), LAST_TYPE ]
3026 // - instr->from() == instr->to()
3028 // These kinds of intervals can be checked with only one compare instruction
3029 // provided the correct value and test condition are used.
3031 // TestType() will return the value to use in the compare instruction and
3032 // BranchCondition() will return the condition to use depending on the kind
3033 // of interval actually specified in the instruction.
3034 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
3035 InstanceType from = instr->from();
3036 InstanceType to = instr->to();
3037 if (from == FIRST_TYPE) return to;
3038 DCHECK((from == to) || (to == LAST_TYPE));
3039 return from;
3043 // See comment above TestType function for what this function does.
3044 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
3045 InstanceType from = instr->from();
3046 InstanceType to = instr->to();
3047 if (from == to) return eq;
3048 if (to == LAST_TYPE) return hs;
3049 if (from == FIRST_TYPE) return ls;
3051 UNREACHABLE();
3052 return eq;
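// For example, the interval [FIRST_TYPE, to] is tested with Cmp(type, to)
// and a branch on ls (unsigned <=), while [from, LAST_TYPE] uses
// Cmp(type, from) and hs (unsigned >=); a single-type interval uses eq.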
3055 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
3056 Register input = ToRegister(instr->value());
3057 Register scratch = ToRegister(instr->temp());
3059 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
3060 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3062 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
3063 EmitBranch(instr, BranchCondition(instr->hydrogen()));
3067 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3068 Register result = ToRegister(instr->result());
3069 Register base = ToRegister(instr->base_object());
3070 if (instr->offset()->IsConstantOperand()) {
3071 __ Add(result, base, ToOperand32(instr->offset()));
3073 __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
3078 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
3079 DCHECK(ToRegister(instr->context()).is(cp));
3080 // Assert that the arguments are in the registers expected by InstanceofStub.
3081 DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
3082 DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));
3084 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
3085 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3087 // InstanceofStub returns a result in x0:
3088 // 0 => not an instance
3089 // smi 1 => instance.
3090 __ Cmp(x0, 0);
3091 __ LoadTrueFalseRoots(x0, x1);
3092 __ Csel(x0, x0, x1, eq);
3096 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3097 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
3099 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
3100 LInstanceOfKnownGlobal* instr)
3101 : LDeferredCode(codegen), instr_(instr) { }
3102 virtual void Generate() {
3103 codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
3105 virtual LInstruction* instr() { return instr_; }
3107 LInstanceOfKnownGlobal* instr_;
3110 DeferredInstanceOfKnownGlobal* deferred =
3111 new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
3113 Label map_check, return_false, cache_miss, done;
3114 Register object = ToRegister(instr->value());
3115 Register result = ToRegister(instr->result());
3116 // x4 is expected in the associated deferred code and stub.
3117 Register map_check_site = x4;
3118 Register map = x5;
3120 // This instruction is marked as call. We can clobber any register.
3121 DCHECK(instr->IsMarkedAsCall());
3123 // We must take into account that object is in x11.
3124 DCHECK(object.Is(x11));
3125 Register scratch = x10;
3127 // A Smi is not instance of anything.
3128 __ JumpIfSmi(object, &return_false);
3130 // This is the inlined call site instanceof cache. The two occurrences of the
3131 // hole value will be patched to the last map/result pair generated by the
3132 // instanceof stub.
3133 __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3135 // Below we use Factory::the_hole_value() on purpose instead of loading from
3136 // the root array to force relocation and later be able to patch with a
3137 // new map.
3138 InstructionAccurateScope scope(masm(), 5);
3139 __ bind(&map_check);
3140 // Will be patched with the cached map.
3141 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
3142 __ ldr(scratch, Immediate(Handle<Object>(cell)));
3143 __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
3144 __ cmp(map, scratch);
3145 __ b(&cache_miss, ne);
3146 // The address of this instruction is computed relative to the map check
3147 // above, so check the size of the code generated.
3148 DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
3149 // Will be patched with the cached result.
3150 __ ldr(result, Immediate(factory()->the_hole_value()));
3154 // The inlined call site cache did not match.
3155 // Check null and string before calling the deferred code.
3156 __ Bind(&cache_miss);
3157 // Compute the address of the map check. It must not be clobbered until the
3158 // InstanceOfStub has used it.
3159 __ Adr(map_check_site, &map_check);
3160 // Null is not instance of anything.
3161 __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
3163 // String values are not instances of anything.
3164 // Return false if the object is a string. Otherwise, jump to the deferred
3166 // Note that we can't jump directly to deferred code from
3167 // IsObjectJSStringType, because it uses tbz for the jump and the deferred
3168 // code can be out of range.
3169 __ IsObjectJSStringType(object, scratch, NULL, &return_false);
3170 __ B(deferred->entry());
3172 __ Bind(&return_false);
3173 __ LoadRoot(result, Heap::kFalseValueRootIndex);
3175 // Here result is either true or false.
3176 __ Bind(deferred->exit());
3177 __ Bind(&done);
3181 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3182 Register result = ToRegister(instr->result());
3183 DCHECK(result.Is(x0)); // InstanceofStub returns its result in x0.
3184 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3185 flags = static_cast<InstanceofStub::Flags>(
3186 flags | InstanceofStub::kArgsInRegisters);
3187 flags = static_cast<InstanceofStub::Flags>(
3188 flags | InstanceofStub::kReturnTrueFalseObject);
3189 flags = static_cast<InstanceofStub::Flags>(
3190 flags | InstanceofStub::kCallSiteInlineCheck);
3192 PushSafepointRegistersScope scope(this);
3193 LoadContextFromDeferred(instr->context());
3195 // Prepare InstanceofStub arguments.
3196 DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
3197 __ LoadObject(InstanceofStub::right(), instr->function());
3199 InstanceofStub stub(isolate(), flags);
3200 CallCodeGeneric(stub.GetCode(),
3201 RelocInfo::CODE_TARGET,
3203 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3204 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3205 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3207 // Put the result value into the result register slot.
3208 __ StoreToSafepointRegisterSlot(result, result);
3212 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3213 DoGap(instr);
3217 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3218 Register value = ToRegister32(instr->value());
3219 DoubleRegister result = ToDoubleRegister(instr->result());
3220 __ Scvtf(result, value);
3224 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3225 DCHECK(ToRegister(instr->context()).is(cp));
3226 // The function is required to be in x1.
3227 DCHECK(ToRegister(instr->function()).is(x1));
3228 DCHECK(instr->HasPointerMap());
3230 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3231 if (known_function.is_null()) {
3232 LPointerMap* pointers = instr->pointer_map();
3233 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3234 ParameterCount count(instr->arity());
3235 __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3237 CallKnownFunction(known_function,
3238 instr->hydrogen()->formal_parameter_count(),
3239 instr->arity(), instr);
3241 after_push_argument_ = false;
3245 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
3246 Register temp1 = ToRegister(instr->temp1());
3247 Register temp2 = ToRegister(instr->temp2());
3249 // Get the frame pointer for the calling frame.
3250 __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3252 // Skip the arguments adaptor frame if it exists.
3253 Label check_frame_marker;
3254 __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
3255 __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3256 __ B(ne, &check_frame_marker);
3257 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
3259 // Check the marker in the calling frame.
3260 __ Bind(&check_frame_marker);
3261 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
3263 EmitCompareAndBranch(
3264 instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
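// The caller's frame pointer is loaded first; if the immediate caller is an
// arguments adaptor frame (recognized by the sentinel in its context slot),
// it is skipped so that the frame marker of the real caller is tested
// against the CONSTRUCT marker.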
3268 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
3269 Label* is_object = instr->TrueLabel(chunk_);
3270 Label* is_not_object = instr->FalseLabel(chunk_);
3271 Register value = ToRegister(instr->value());
3272 Register map = ToRegister(instr->temp1());
3273 Register scratch = ToRegister(instr->temp2());
3275 __ JumpIfSmi(value, is_not_object);
3276 __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
3278 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3280 // Check for undetectable objects.
3281 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
3282 __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
3284 // Check that instance type is in object type range.
3285 __ IsInstanceJSObjectType(map, scratch, NULL);
3286 // Flags have been updated by IsInstanceJSObjectType. We can now test the
3287 // flags for "le" condition to check if the object's type is a valid
3288 // JS object type.
3289 EmitBranch(instr, le);
3293 Condition LCodeGen::EmitIsString(Register input,
3294 Register temp1,
3295 Label* is_not_string,
3296 SmiCheck check_needed = INLINE_SMI_CHECK) {
3297 if (check_needed == INLINE_SMI_CHECK) {
3298 __ JumpIfSmi(input, is_not_string);
3300 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
3302 return lt;
3306 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
3307 Register val = ToRegister(instr->value());
3308 Register scratch = ToRegister(instr->temp());
3310 SmiCheck check_needed =
3311 instr->hydrogen()->value()->type().IsHeapObject()
3312 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3313 Condition true_cond =
3314 EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
3316 EmitBranch(instr, true_cond);
3320 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
3321 Register value = ToRegister(instr->value());
3322 STATIC_ASSERT(kSmiTag == 0);
3323 EmitTestAndBranch(instr, eq, value, kSmiTagMask);
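// Smis are tagged with a zero low bit (kSmiTag == 0, kSmiTagMask == 1), so
// this test reduces to a single Tbz on bit 0 via EmitTestAndBranch.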
3327 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
3328 Register input = ToRegister(instr->value());
3329 Register temp = ToRegister(instr->temp());
3331 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
3332 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3334 __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
3335 __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3337 EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
3341 static const char* LabelType(LLabel* label) {
3342 if (label->is_loop_header()) return " (loop header)";
3343 if (label->is_osr_entry()) return " (OSR entry)";
3344 return "";
3348 void LCodeGen::DoLabel(LLabel* label) {
3349 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
3350 current_instruction_,
3351 label->hydrogen_value()->id(),
3352 label->block_id(),
3353 LabelType(label));
3355 __ Bind(label->label());
3356 current_block_ = label->block_id();
3361 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3362 Register context = ToRegister(instr->context());
3363 Register result = ToRegister(instr->result());
3364 __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
3365 if (instr->hydrogen()->RequiresHoleCheck()) {
3366 if (instr->hydrogen()->DeoptimizesOnHole()) {
3367 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
3368 Deoptimizer::kHole);
3370 Label not_the_hole;
3371 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
3372 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3373 __ Bind(&not_the_hole);
3379 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3380 Register function = ToRegister(instr->function());
3381 Register result = ToRegister(instr->result());
3382 Register temp = ToRegister(instr->temp());
3384 // Get the prototype or initial map from the function.
3385 __ Ldr(result, FieldMemOperand(function,
3386 JSFunction::kPrototypeOrInitialMapOffset));
3388 // Check that the function has a prototype or an initial map.
3389 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
3390 Deoptimizer::kHole);
3392 // If the function does not have an initial map, we're done.
3393 Label done;
3394 __ CompareObjectType(result, temp, temp, MAP_TYPE);
3395 __ B(ne, &done);
3397 // Get the prototype from the initial map.
3398 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3400 // All done.
3401 __ Bind(&done);
3405 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3406 Register result = ToRegister(instr->result());
3407 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
3408 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3409 if (instr->hydrogen()->RequiresHoleCheck()) {
3410 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
3411 Deoptimizer::kHole);
3416 template <class T>
3417 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
3418 DCHECK(FLAG_vector_ics);
3419 Register vector_register = ToRegister(instr->temp_vector());
3420 Register slot_register = VectorLoadICDescriptor::SlotRegister();
3421 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
3422 DCHECK(slot_register.is(x0));
3424 AllowDeferredHandleDereference vector_structure_check;
3425 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
3426 __ Mov(vector_register, vector);
3427 // No need to allocate this register.
3428 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
3429 int index = vector->GetIndex(slot);
3430 __ Mov(slot_register, Smi::FromInt(index));
3434 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3435 DCHECK(ToRegister(instr->context()).is(cp));
3436 DCHECK(ToRegister(instr->global_object())
3437 .is(LoadDescriptor::ReceiverRegister()));
3438 DCHECK(ToRegister(instr->result()).Is(x0));
3439 __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3440 if (FLAG_vector_ics) {
3441 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3443 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3444 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
3445 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3449 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
3450 Register key,
3451 Register base,
3452 Register scratch,
3453 bool key_is_smi,
3454 bool key_is_constant,
3455 int constant_key,
3456 ElementsKind elements_kind,
3457 int base_offset) {
3458 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3460 if (key_is_constant) {
3461 int key_offset = constant_key << element_size_shift;
3462 return MemOperand(base, key_offset + base_offset);
3466 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3467 return MemOperand(scratch, base_offset);
3470 if (base_offset == 0) {
3471 return MemOperand(base, key, SXTW, element_size_shift);
3474 DCHECK(!AreAliased(scratch, key));
3475 __ Add(scratch, base, base_offset);
3476 return MemOperand(scratch, key, SXTW, element_size_shift);
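// Three addressing shapes are produced here: a plain immediate offset when
// the key is a compile-time constant, a scratch-based offset when a smi key
// must be untagged and scaled in one step, and a register offset with SXTW
// extension (plus shift) for untagged int32 keys.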
3480 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3481 Register ext_ptr = ToRegister(instr->elements());
3482 Register scratch;
3483 ElementsKind elements_kind = instr->elements_kind();
3485 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3486 bool key_is_constant = instr->key()->IsConstantOperand();
3487 Register key = no_reg;
3488 int constant_key = 0;
3489 if (key_is_constant) {
3490 DCHECK(instr->temp() == NULL);
3491 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3492 if (constant_key & 0xf0000000) {
3493 Abort(kArrayIndexConstantValueTooBig);
3496 scratch = ToRegister(instr->temp());
3497 key = ToRegister(instr->key());
3500 MemOperand mem_op =
3501 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3502 key_is_constant, constant_key,
3503 elements_kind,
3504 instr->base_offset());
3506 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
3507 (elements_kind == FLOAT32_ELEMENTS)) {
3508 DoubleRegister result = ToDoubleRegister(instr->result());
3509 __ Ldr(result.S(), mem_op);
3510 __ Fcvt(result, result.S());
3511 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
3512 (elements_kind == FLOAT64_ELEMENTS)) {
3513 DoubleRegister result = ToDoubleRegister(instr->result());
3514 __ Ldr(result, mem_op);
3516 Register result = ToRegister(instr->result());
3518 switch (elements_kind) {
3519 case EXTERNAL_INT8_ELEMENTS:
3521 __ Ldrsb(result, mem_op);
3523 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3524 case EXTERNAL_UINT8_ELEMENTS:
3525 case UINT8_ELEMENTS:
3526 case UINT8_CLAMPED_ELEMENTS:
3527 __ Ldrb(result, mem_op);
3529 case EXTERNAL_INT16_ELEMENTS:
3530 case INT16_ELEMENTS:
3531 __ Ldrsh(result, mem_op);
3533 case EXTERNAL_UINT16_ELEMENTS:
3534 case UINT16_ELEMENTS:
3535 __ Ldrh(result, mem_op);
3537 case EXTERNAL_INT32_ELEMENTS:
3538 case INT32_ELEMENTS:
3539 __ Ldrsw(result, mem_op);
3541 case EXTERNAL_UINT32_ELEMENTS:
3542 case UINT32_ELEMENTS:
3543 __ Ldr(result.W(), mem_op);
3544 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3545 // Deopt if value >= 0x80000000, i.e. it does not fit in an int32.
3546 __ Tst(result, 0xFFFFFFFF80000000);
3547 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
3550 case FLOAT32_ELEMENTS:
3551 case FLOAT64_ELEMENTS:
3552 case EXTERNAL_FLOAT32_ELEMENTS:
3553 case EXTERNAL_FLOAT64_ELEMENTS:
3554 case FAST_HOLEY_DOUBLE_ELEMENTS:
3555 case FAST_HOLEY_ELEMENTS:
3556 case FAST_HOLEY_SMI_ELEMENTS:
3557 case FAST_DOUBLE_ELEMENTS:
3559 case FAST_SMI_ELEMENTS:
3560 case DICTIONARY_ELEMENTS:
3561 case SLOPPY_ARGUMENTS_ELEMENTS:
3569 MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
3570 Register elements,
3571 Register key,
3572 bool key_is_tagged,
3573 ElementsKind elements_kind,
3574 Representation representation,
3575 int base_offset) {
3576 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3577 STATIC_ASSERT(kSmiTag == 0);
3578 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3580 // Even though the HLoad/StoreKeyed instructions force the input
3581 // representation for the key to be an integer, the input gets replaced during
3582 // bounds check elimination with the index argument to the bounds check, which
3583 // can be tagged, so that case must be handled here, too.
3584 if (key_is_tagged) {
3585 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3586 if (representation.IsInteger32()) {
3587 DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3588 // Read or write only the smi payload in the case of fast smi arrays.
3589 return UntagSmiMemOperand(base, base_offset);
3591 return MemOperand(base, base_offset);
3594 // Sign extend key because it could be a 32-bit negative value or contain
3595 // garbage in the top 32-bits. The address computation happens in 64-bit.
3596 DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
3597 if (representation.IsInteger32()) {
3598 DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3599 // Read or write only the smi payload in the case of fast smi arrays.
3600 __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3601 return UntagSmiMemOperand(base, base_offset);
3603 __ Add(base, elements, base_offset);
3604 return MemOperand(base, key, SXTW, element_size_shift);
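// On arm64, a smi keeps its 32-bit payload in the upper word of the 64-bit
// tagged value, so UntagSmiMemOperand can address just that word and read
// the payload as a plain int32 without an explicit untag instruction.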
3610 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3611 Register elements = ToRegister(instr->elements());
3612 DoubleRegister result = ToDoubleRegister(instr->result());
3614 MemOperand mem_op;
3615 if (instr->key()->IsConstantOperand()) {
3616 DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
3617 (instr->temp() == NULL));
3619 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3620 if (constant_key & 0xf0000000) {
3621 Abort(kArrayIndexConstantValueTooBig);
3623 int offset = instr->base_offset() + constant_key * kDoubleSize;
3624 mem_op = MemOperand(elements, offset);
3626 Register load_base = ToRegister(instr->temp());
3627 Register key = ToRegister(instr->key());
3628 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3629 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3630 instr->hydrogen()->elements_kind(),
3631 instr->hydrogen()->representation(),
3632 instr->base_offset());
3635 __ Ldr(result, mem_op);
3637 if (instr->hydrogen()->RequiresHoleCheck()) {
3638 Register scratch = ToRegister(instr->temp());
3639 __ Fmov(scratch, result);
3640 __ Eor(scratch, scratch, kHoleNanInt64);
3641 DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
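// The hole in a fixed double array is stored as the kHoleNanInt64 bit
// pattern, so Eor against that constant yields exactly zero for the hole
// and non-zero for every other double, including ordinary NaNs, which are
// canonicalized to a different bit pattern.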
void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  MemOperand mem_op;

  Representation representation = instr->hydrogen()->representation();
  if (instr->key()->IsConstantOperand()) {
    DCHECK(instr->temp() == NULL);
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset = instr->base_offset() +
        ToInteger32(const_operand) * kPointerSize;
    if (representation.IsInteger32()) {
      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
      STATIC_ASSERT(kSmiTag == 0);
      mem_op = UntagSmiMemOperand(elements, offset);
    } else {
      mem_op = MemOperand(elements, offset);
    }
  } else {
    Register load_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      representation, instr->base_offset());
  }

  __ Load(result, mem_op, representation);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
    } else {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    }
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  DCHECK(ToRegister(instr->result()).Is(x0));
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    __ Load(result, MemOperand(object, offset), access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DCHECK(access.IsInobject());
    FPRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  Register source;
  if (access.IsInobject()) {
    source = object;
  } else {
    // Load the properties array, using result as a scratch register.
    __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    source = result;
  }

  if (access.representation().IsSmi() &&
      instr->hydrogen()->representation().IsInteger32()) {
    // Read the int value directly from the upper half of the smi.
    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
    STATIC_ASSERT(kSmiTag == 0);
    __ Load(result, UntagSmiFieldMemOperand(source, offset),
            Representation::Integer32());
  } else {
    __ Load(result, FieldMemOperand(source, offset), access.representation());
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // LoadIC expects name and receiver in registers.
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  }

  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  DCHECK(ToRegister(instr->result()).is(x0));
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLengthSmi(result, map);
}


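// Editor's note: for Smi/Integer32 inputs, Abs sets the overflow (V) flag
// when the input is the most negative representable value, since e.g.
// abs(kMinInt) == 0x80000000 does not fit in a signed 32-bit result; that
// case deoptimizes below.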
void LCodeGen::DoMathAbs(LMathAbs* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Fabs(result, input);
  } else if (r.IsSmi() || r.IsInteger32()) {
    Register input = r.IsSmi() ? ToRegister(instr->value())
                               : ToRegister32(instr->value());
    Register result = r.IsSmi() ? ToRegister(instr->result())
                                : ToRegister32(instr->result());
    __ Abs(result, input);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
}


void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                                       Label* exit,
                                       Label* allocation_entry) {
  // Handle the tricky cases of MathAbsTagged:
  //  - HeapNumber inputs.
  //    - Negative inputs produce a positive result, so a new HeapNumber is
  //      allocated to hold it.
  //    - Positive inputs are returned as-is, since there is no need to
  //      allocate a new HeapNumber for the result.
  //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit
  //    a smi. In this case, the inline code sets the result and jumps directly
  //    to the allocation_entry label.
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());

  Label runtime_allocation;

  // Deoptimize if the input is not a HeapNumber.
  DeoptimizeIfNotHeapNumber(input, instr);

  // If the argument is positive, we can return it as-is, without any need to
  // allocate a new HeapNumber for the result. We have to do this in integer
  // registers (rather than with fabs) because we need to be able to
  // distinguish the two zeroes.
  __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
  __ Mov(result, input);
  __ Tbz(result_bits, kXSignBit, exit);

  // Calculate abs(input) by clearing the sign bit.
  __ Bic(result_bits, result_bits, kXSignMask);

  // Allocate a new HeapNumber to hold the result.
  //  result_bits   The bit representation of the (double) result.
  __ Bind(allocation_entry);
  __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
  // The inline (non-deferred) code will store result_bits into result.
  __ B(exit);

  __ Bind(&runtime_allocation);
  if (FLAG_debug_code) {
    // Because result is in the pointer map, we need to make sure it has a
    // valid tagged value before we call the runtime. We speculatively set it
    // to the input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it
    // should already be valid.
    Label result_ok;
    Register input = ToRegister(instr->value());
    __ JumpIfSmi(result, &result_ok);
    __ Cmp(input, result);
    __ Assert(eq, kUnexpectedValue);
    __ Bind(&result_ok);
  }

  { PushSafepointRegistersScope scope(this);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    __ StoreToSafepointRegisterSlot(x0, result);
  }
  // The inline (non-deferred) code will store result_bits into result.
}


void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
  // Class for deferred case.
  class DeferredMathAbsTagged: public LDeferredCode {
   public:
    DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTagged(instr_, exit(),
                                         allocation_entry());
    }
    virtual LInstruction* instr() { return instr_; }
    Label* allocation_entry() { return &allocation_; }
   private:
    LMathAbsTagged* instr_;
    Label allocation_;
  };

  // TODO(jbramley): The early-exit mechanism would skip the new frame handling
  // in GenerateDeferredCode. Tidy this up.
  DCHECK(!NeedsDeferredFrame());

  DeferredMathAbsTagged* deferred =
      new(zone()) DeferredMathAbsTagged(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
         instr->hydrogen()->value()->representation().IsSmi());
  Register input = ToRegister(instr->value());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());
  Label done;

  // Handle smis inline.
  // We can treat smis as 64-bit integers, since the (low-order) tag bits will
  // never get set by the negation. This is therefore the same as the Integer32
  // case in DoMathAbs, except that it operates on 64-bit values.
  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));

  __ JumpIfNotSmi(input, deferred->entry());

  __ Abs(result, input, NULL, &done);

  // The result is the magnitude (abs) of the smallest value a smi can
  // represent, encoded as a double.
  __ Mov(result_bits, double_to_rawbits(0x80000000));
  __ B(deferred->allocation_entry());

  __ Bind(deferred->exit());
  __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));

  __ Bind(&done);
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
  DoubleRegister double_temp2 = double_scratch();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register temp3 = ToRegister(instr->temp3());

  MathExpGenerator::EmitMathExp(masm(), input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
}


void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ Frintm(result, input);
}


void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
  }

  __ Fcvtms(result, input);

  // Check that the result fits into a 32-bit integer.
  //  - The result did not overflow.
  __ Cmp(result, Operand(result, SXTW));
  //  - The input was not NaN.
  __ Fccmp(input, input, NoFlag, eq);
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
}


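// Editor's note: flooring division rounds towards -infinity, while Sdiv
// truncates towards zero; the two differ only for negative dividends with a
// non-zero remainder. For example, floor(-3 / 2) == -2, which is what an
// arithmetic shift right by one produces, whereas truncation would give -1.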
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Mov(result, dividend, kDiscardForSameWReg);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting the negated dividend
  // is OK: floor(n / -2^k) == floor(-n / 2^k).
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ Asr(result, result, shift);
    return;
  }

  __ Asr(result, result, shift);
  __ Csel(result, result, kMinInt / divisor, vc);
}


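// Editor's note: when the dividend and divisor signs differ and the division
// is inexact, flooring division is obtained from truncating division by
// nudging the dividend one step towards the divisor's sign and subtracting
// one afterwards, e.g. floor(-7 / 2) == trunc((-7 + 1) / 2) - 1 == -3 - 1
// == -4.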
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(temp, dividend, result));
  Label needs_adjustment, done;
  __ Cmp(dividend, 0);
  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ B(&done);
  __ Bind(&needs_adjustment);
  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ Sub(result, result, Operand(1));
  __ Bind(&done);
}


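// Editor's note: Sdiv on arm64 truncates towards zero and never traps;
// division by zero yields zero and kMinInt / -1 wraps back to kMinInt. That
// is why both conditions are checked explicitly after the speculative
// division below.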
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register remainder = ToRegister32(instr->temp());
  Register result = ToRegister32(instr->result());

  // This can't cause an exception on ARM, so we can speculatively
  // execute it already now.
  __ Sdiv(result, dividend, divisor);

  // Check for x / 0.
  DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // The V flag will be set iff dividend == kMinInt.
    __ Cmp(dividend, 1);
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);
    __ Ccmp(dividend, 0, ZFlag, mi);
    // "divisor" can't be zero because the code would have already been
    // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
    // In this case we need to deoptimize to produce a -0.
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  Label done;
  // If both operands have the same sign then we are done.
  __ Eor(remainder, dividend, divisor);
  __ Tbz(remainder, kWSignBit, &done);

  // Check if the result needs to be corrected.
  __ Msub(remainder, result, divisor, dividend);
  __ Cbz(remainder, &done);
  __ Sub(result, result, 1);

  __ Bind(&done);
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToDoubleRegister(instr->value()).is(d0));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister32(instr->value());
  Register result = ToRegister32(instr->result());
  __ Clz(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  Label done;

  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
  //  Math.pow(-Infinity, 0.5) == +Infinity
  //  Math.pow(-0.0, 0.5) == +0.0

  // Catch -infinity inputs first.
  // TODO(jbramley): A constant infinity register would be helpful here.
  __ Fmov(double_scratch(), kFP64NegativeInfinity);
  __ Fcmp(double_scratch(), input);
  __ Fabs(result, input);
  __ B(eq, &done);

  // Add +0.0 to convert -0.0 to +0.0.
  __ Fadd(double_scratch(), input, fp_zero);
  __ Fsqrt(result, double_scratch());

  __ Bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  Register integer_exponent = MathPowIntegerDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(!exponent_type.IsInteger32() ||
         ToRegister(instr->right()).is(integer_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d0));
  DCHECK(ToDoubleRegister(instr->result()).is(d0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
    __ Bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
    // supports large integer exponents.
    __ Sxtw(integer_exponent, integer_exponent);
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister scratch_d = double_scratch();

  DCHECK(!AreAliased(input, result, scratch_d));

  Label done;

  __ Frinta(result, input);
  __ Fcmp(input, 0.0);
  __ Fccmp(result, input, ZFlag, lt);
  // The result is correct if the input was in [-0, +infinity], or was a
  // negative integral value.
  __ B(eq, &done);

  // Here the input is negative, non integral, with an exponent lower than 52.
  // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
  // case. So we can safely add 0.5.
  __ Fmov(scratch_d, 0.5);
  __ Fadd(result, input, scratch_d);
  __ Frintm(result, result);
  // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
  __ Fabs(result, result);
  __ Fneg(result, result);

  __ Bind(&done);
}


void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister temp = ToDoubleRegister(instr->temp1());
  DoubleRegister dot_five = double_scratch();
  Register result = ToRegister(instr->result());
  Label done;

  // Math.round() rounds to the nearest integer, with ties going towards
  // +infinity. This does not match any IEEE-754 rounding mode.
  //  - Infinities and NaNs are propagated unchanged, but cause deopts because
  //    they can't be represented as integers.
  //  - The sign of the result is the same as the sign of the input. This means
  //    that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
  //    result of -0.0.

  // Add 0.5 and round towards -infinity.
  __ Fmov(dot_five, 0.5);
  __ Fadd(temp, input, dot_five);
  __ Fcvtms(result, temp);

  // The result is correct if:
  //  result is not 0, as the input could be NaN or [-0.5, -0.0].
  //  result is not 1, as 0.499...94 will wrongly map to 1.
  //  result fits in 32 bits.
  __ Cmp(result, Operand(result.W(), SXTW));
  __ Ccmp(result, 1, ZFlag, eq);
  __ B(hi, &done);

  // At this point, we have to handle possible inputs of NaN or numbers in the
  // range [-0.5, 1.5[, or numbers larger than 32 bits.

  // Deoptimize if the result > 1, as it must be larger than 32 bits.
  __ Cmp(result, 1);
  DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);

  // Deoptimize for negative inputs, which at this point are only numbers in
  // the range [-0.5, -0.0].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Fmov(result, input);
    DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
  }

  // Deoptimize if the input was NaN.
  __ Fcmp(input, dot_five);
  DeoptimizeIf(vs, instr, Deoptimizer::kNaN);

  // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
  // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
  // else 0; we avoid dealing with 0.499...94 directly.
  __ Cset(result, ge);
  __ Bind(&done);
}


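// Editor's note: Math.fround(x) narrows the input to single precision and
// widens it back; both conversions round correctly, so the result is the
// nearest float32 value re-expressed as a double.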
void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fcvt(result.S(), input);
  __ Fcvt(result, result.S());
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fsqrt(result, input);
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  HMathMinMax::Operation op = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsInteger32()) {
    Register result = ToRegister32(instr->result());
    Register left = ToRegister32(instr->left());
    Operand right = ToOperand32(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else if (instr->hydrogen()->representation().IsSmi()) {
    Register result = ToRegister(instr->result());
    Register left = ToRegister(instr->left());
    Operand right = ToOperand(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    DoubleRegister result = ToDoubleRegister(instr->result());
    DoubleRegister left = ToDoubleRegister(instr->left());
    DoubleRegister right = ToDoubleRegister(instr->right());

    if (op == HMathMinMax::kMathMax) {
      __ Fmax(result, left, right);
    } else {
      DCHECK(op == HMathMinMax::kMathMin);
      __ Fmin(result, left, right);
    }
  }
}


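// Editor's note: JavaScript's % operator takes the sign of the dividend.
// For a power-of-2 divisor the magnitude of the remainder is a bit mask,
// e.g. -5 % 4: negate to 5, mask with 3 to get 1, negate back to -1.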
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister32(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ Neg(dividend, dividend);
    __ And(dividend, dividend, mask);
    __ Negs(dividend, dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ B(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, mask);
  __ bind(&done);
}


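// Editor's note: TruncatingDiv computes trunc(dividend / |divisor|) with a
// magic-number multiply-high sequence; Smsubl then reconstructs the
// remainder in one instruction, remainder = dividend - quotient * |divisor|,
// which carries the sign of the dividend as JavaScript's % requires.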
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(dividend, result, temp));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Sxtw(dividend.X(), dividend);
  __ Mov(temp, Abs(divisor));
  __ Smsubl(result.X(), result, temp, dividend.X());

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Cbnz(result, &remainder_not_zero);
    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  Register dividend = ToRegister32(instr->left());
  Register divisor = ToRegister32(instr->right());
  Register result = ToRegister32(instr->result());

  Label done;
  // modulo = dividend - quotient * divisor
  __ Sdiv(result, dividend, divisor);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
  }
  __ Msub(result, result, divisor, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cbnz(result, &done);
    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
  }
  __ Bind(&done);
}


void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
  DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
  bool is_smi = instr->hydrogen()->representation().IsSmi();
  Register result =
      is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
  Register left =
      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
  int32_t right = ToInteger32(instr->right());
  DCHECK((right > -kMaxInt) && (right < kMaxInt));

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero) {
    if (right < 0) {
      // The result is -0 if right is negative and left is zero.
      DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
    } else if (right == 0) {
      // The result is -0 if the right is zero and the left is negative.
      DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
    }
  }

  switch (right) {
    // Cases which can detect overflow.
    case -1:
      if (can_overflow) {
        // Only 0x80000000 can overflow here.
        __ Negs(result, left);
        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
      } else {
        __ Neg(result, left);
      }
      break;
    case 0:
      // This case can never overflow.
      __ Mov(result, 0);
      break;
    case 1:
      // This case can never overflow.
      __ Mov(result, left, kDiscardForSameWReg);
      break;
    case 2:
      if (can_overflow) {
        __ Adds(result, left, left);
        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
      } else {
        __ Add(result, left, left);
      }
      break;

    default:
      // Multiplication by constant powers of two (and some related values)
      // can be done efficiently with shifted operands.
      int32_t right_abs = Abs(right);

      if (base::bits::IsPowerOfTwo32(right_abs)) {
        int right_log2 = WhichPowerOf2(right_abs);

        if (can_overflow) {
          Register scratch = result;
          DCHECK(!AreAliased(scratch, left));
          __ Cls(scratch, left);
          __ Cmp(scratch, right_log2);
          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
        }

        if (right >= 0) {
          // result = left << log2(right)
          __ Lsl(result, left, right_log2);
        } else {
          // result = -left << log2(-right)
          if (can_overflow) {
            __ Negs(result, Operand(left, LSL, right_log2));
            DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
          } else {
            __ Neg(result, Operand(left, LSL, right_log2));
          }
        }
      } else {
        // For the following cases, we could perform a conservative overflow
        // check with CLS as above. However the few cycles saved are likely
        // not worth the risk of deoptimizing more often than required.
        DCHECK(!can_overflow);

        if (right >= 0) {
          if (base::bits::IsPowerOfTwo32(right - 1)) {
            // result = left + left << log2(right - 1)
            __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
          } else if (base::bits::IsPowerOfTwo32(right + 1)) {
            // result = -left + left << log2(right + 1)
            __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
            __ Neg(result, result);
          } else {
            UNREACHABLE();
          }
        } else {
          if (base::bits::IsPowerOfTwo32(-right + 1)) {
            // result = left - left << log2(-right + 1)
            __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
          } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
            // result = -left - left << log2(-right - 1)
            __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
            __ Neg(result, result);
          } else {
            UNREACHABLE();
          }
        }
      }
  }
}


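// Editor's note: Smull produces the full 64-bit product of two W registers;
// comparing it against its own low 32 bits sign-extended (SXTW) detects any
// product that does not fit in 32 bits, e.g. 0x10000 * 0x10000 == 2^32,
// whose low word is 0 and mismatches the full product, triggering the deopt.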
void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Register right = ToRegister32(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }

  if (can_overflow) {
    __ Smull(result.X(), left, right);
    __ Cmp(result.X(), Operand(result, SXTW));
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
  } else {
    __ Mul(result, left, right);
  }
}


void LCodeGen::DoMulS(LMulS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }

  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
  if (can_overflow) {
    __ Smulh(result, left, right);
    __ Cmp(result, Operand(result.W(), SXTW));
    __ SmiTag(result);
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
  } else {
    if (AreAliased(result, left, right)) {
      // All three registers are the same: half untag the input and then
      // multiply, giving a tagged result.
      STATIC_ASSERT((kSmiShift % 2) == 0);
      __ Asr(result, left, kSmiShift / 2);
      __ Mul(result, result, result);
    } else if (result.Is(left) && !left.Is(right)) {
      // Registers result and left alias, right is distinct: untag left into
      // result, and then multiply by right, giving a tagged result.
      __ SmiUntag(result, left);
      __ Mul(result, result, right);
    } else {
      DCHECK(!left.Is(result));
      // Registers result and right alias, left is distinct, or all registers
      // are distinct: untag right into result, and then multiply by left,
      // giving a tagged result.
      __ SmiUntag(result, right);
      __ Mul(result, left, result);
    }
  }
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = ToRegister(instr->result());
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  // NumberTagU and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
  } else {
    __ B(deferred->entry());
  }
  __ Bind(deferred->exit());
  __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
                                    LOperand* value,
                                    LOperand* temp1,
                                    LOperand* temp2) {
  Label slow, convert_and_store;
  Register src = ToRegister32(value);
  Register dst = ToRegister(instr->result());
  Register scratch1 = ToRegister(temp1);

  if (FLAG_inline_new) {
    Register scratch2 = ToRegister(temp2);
    __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
    __ B(&convert_and_store);
  }

  // Slow case: call the runtime system to do the number allocation.
  __ Bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ Mov(dst, 0);

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagU and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, dst);
  }

  // Convert number to floating point and store in the newly allocated heap
  // number.
  __ Bind(&convert_and_store);
  DoubleRegister dbl_scratch = double_scratch();
  __ Ucvtf(dbl_scratch, src);
  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


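// Editor's note: an untagged uint32 fits in a smi only if it is at most
// Smi::kMaxValue (2^31 - 1). DoNumberTagU below tags small values inline and
// falls back to the deferred allocation above for anything larger.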
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU: public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagU(instr_,
                                      instr_->value(),
                                      instr_->temp1(),
                                      instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register value = ToRegister32(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Cmp(value, Smi::kMaxValue);
  __ B(hi, deferred->entry());
  __ SmiTag(result, value.X());
  __ Bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  DoubleRegister result = ToDoubleRegister(instr->result());
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();

  Label done, load_smi;

  // Work out what untag mode we're working with.
  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    __ JumpIfSmi(input, &load_smi);

    Label convert_undefined;

    // Heap number map check.
    if (can_convert_undefined_to_nan) {
      __ JumpIfNotHeapNumber(input, &convert_undefined);
    } else {
      DeoptimizeIfNotHeapNumber(input, instr);
    }

    // Load heap number.
    __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
    if (instr->hydrogen()->deoptimize_on_minus_zero()) {
      DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
    }
    __ B(&done);

    if (can_convert_undefined_to_nan) {
      __ Bind(&convert_undefined);
      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                          Deoptimizer::kNotAHeapNumberUndefined);

      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ B(&done);
    }
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
    // Fall through to load_smi.
  }

  // Smi to double register conversion.
  __ Bind(&load_smi);
  __ SmiUntagToDouble(result, input);

  __ Bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
  __ PushPreamble(instr->argc(), kPointerSize);
}


void LCodeGen::DoPushArguments(LPushArguments* instr) {
  MacroAssembler::PushPopQueue args(masm());

  for (int i = 0; i < instr->ArgumentCount(); ++i) {
    LOperand* arg = instr->argument(i);
    if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
      Abort(kDoPushArgumentNotImplementedForDoubleType);
      return;
    }
    args.Queue(ToRegister(arg));
  }

  // The preamble was done by LPreparePushArguments.
  args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);

  after_push_argument_ = true;
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
    // code managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ Push(x0);
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }

  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }

  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    Register stack_pointer = masm()->StackPointer();
    __ Mov(stack_pointer, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(fp, lr);
  }

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Drop(parameter_count + 1);
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register parameter_count = ToRegister(instr->parameter_count());
    __ DropBySMI(parameter_count);
  }
  __ Ret();

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


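// Editor's note: sequential string payloads start at SeqString::kHeaderSize.
// One-byte strings store one character per byte; two-byte strings store two
// bytes per character (kUC16Size == 2), so a two-byte register index is
// scaled by shifting the operand left by one below.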
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           Register temp,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }

  __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
  if (encoding == String::ONE_BYTE_ENCODING) {
    return MemOperand(temp, ToRegister32(index), SXTW);
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    return MemOperand(temp, ToRegister32(index), SXTW, 1);
  }
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    // Even though this lithium instruction comes with a temp register, we
    // can't use it here because we want to use "AtStart" constraints on the
    // inputs and the debug code here needs a scratch register.
    UseScratchRegisterScope temps(masm());
    Register dbg_temp = temps.AcquireX();

    __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
    __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));

    __ And(dbg_temp, dbg_temp,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
                             ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Ldrb(result, operand);
  } else {
    __ Ldrh(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    DCHECK(ToRegister(instr->context()).is(cp));
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
                                 encoding_mask);
  }
  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Strb(value, operand);
  } else {
    __ Strh(value, operand);
  }
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
  }
  __ SmiTag(output, input);
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->needs_check()) {
    DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
  }

  __ SmiUntag(result, input);
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister32(instr->left());
  Register result = ToRegister32(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister32(instr->right());
    switch (instr->op()) {
      case Token::ROR: __ Ror(result, left, right); break;
      case Token::SAR: __ Asr(result, left, right); break;
      case Token::SHL: __ Lsl(result, left, right); break;
      case Token::SHR:
        __ Lsr(result, left, right);
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not representable
          // in a signed 32-bit smi.
          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
        }
        break;
      default: UNREACHABLE();
    }
  } else {
    DCHECK(right_op->IsConstantOperand());
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
      }
      __ Mov(result, left, kDiscardForSameWReg);
    } else {
      switch (instr->op()) {
        case Token::ROR: __ Ror(result, left, shift_count); break;
        case Token::SAR: __ Asr(result, left, shift_count); break;
        case Token::SHL: __ Lsl(result, left, shift_count); break;
        case Token::SHR: __ Lsr(result, left, shift_count); break;
        default: UNREACHABLE();
      }
    }
  }
}


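// Editor's note: in the smi variant below, the 32-bit payload lives in the
// upper half of the X register (kSmiShift == 32), so shifts operate on the
// full 64-bit value and any bits shifted into the low (tag) half are cleared
// again with Bic and kSmiShiftMask to keep the result a valid smi.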
void LCodeGen::DoShiftS(LShiftS* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister(instr->right());

    // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
    // Since we're handling smis in X registers, we have to extract these bits
    // explicitly.
    __ Ubfx(result, right, kSmiShift, 5);

    switch (instr->op()) {
      case Token::ROR: {
        // This is the only case that needs a scratch register. To keep things
        // simple for the other cases, borrow a MacroAssembler scratch register.
        UseScratchRegisterScope temps(masm());
        Register temp = temps.AcquireW();
        __ SmiUntag(temp, left);
        __ Ror(result.W(), temp.W(), result.W());
        __ SmiTag(result);
        break;
      }
      case Token::SAR:
        __ Asr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      case Token::SHL:
        __ Lsl(result, left, result);
        break;
      case Token::SHR:
        __ Lsr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not representable
          // in a signed 32-bit smi.
          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
        }
        break;
      default: UNREACHABLE();
    }
  } else {
    DCHECK(right_op->IsConstantOperand());
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
      }
      __ Mov(result, left);
    } else {
      switch (instr->op()) {
        case Token::ROR:
          __ SmiUntag(result, left);
          __ Ror(result.W(), result.W(), shift_count);
          __ SmiTag(result);
          break;
        case Token::SAR:
          __ Asr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        case Token::SHL:
          __ Lsl(result, left, shift_count);
          break;
        case Token::SHR:
          __ Lsr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        default: UNREACHABLE();
      }
    }
  }
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ Debug("LDebugBreak", 0, BREAK);
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Register scratch1 = x5;
  Register scratch2 = x6;
  DCHECK(instr->IsMarkedAsCall());

  // TODO(all): if Mov could handle objects in new space then it could be used
  // here.
  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
  __ Push(cp, scratch1, scratch2);  // The context is the first argument.
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(hs, &done);

    PredictableCodeSizeScope predictable(masm_,
                                         Assembler::kCallSizeWithRelocation);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ Bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(lo, deferred_stack_check->entry());

    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ Bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  Register temp = ToRegister(instr->temp());
  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ Ldr(scratch, target);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    } else {
      __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
    }
  }

  __ Str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetLinkRegisterState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }
  __ Bind(&skip_assignment);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = ToRegister(instr->temp1());

  // Load the cell.
  __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register payload = ToRegister(instr->temp2());
    __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
    DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr,
                     Deoptimizer::kHole);
  }

  // Store the value.
  __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = no_reg;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    DCHECK(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
    scratch = ToRegister(instr->temp());
  }

  MemOperand dst =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->base_offset());

  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
      (elements_kind == FLOAT32_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    DoubleRegister dbl_scratch = double_scratch();
    __ Fcvt(dbl_scratch.S(), value);
    __ Str(dbl_scratch.S(), dst);
  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
             (elements_kind == FLOAT64_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ Str(value, dst);
  } else {
    Register value = ToRegister(instr->value());

    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ Strb(value, dst);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Strh(value, dst);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Str(value.W(), dst);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
    }
  }
}


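// Editor's note: stored doubles may need canonicalization below because the
// hole in a FAST_HOLEY_DOUBLE_ELEMENTS backing store is encoded as one
// specific NaN bit pattern (kHoleNanInt64); canonicalizing every stored NaN
// ensures an ordinary computed NaN can never be mistaken for the hole.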
void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister value = ToDoubleRegister(instr->value());
  MemOperand mem_op;

  if (instr->key()->IsConstantOperand()) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    int offset = instr->base_offset() + constant_key * kDoubleSize;
    mem_op = MemOperand(elements, offset);
  } else {
    Register store_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      instr->hydrogen()->representation(),
                                      instr->base_offset());
  }

  if (instr->NeedsCanonicalization()) {
    __ CanonicalizeNaN(double_scratch(), value);
    __ Str(double_scratch(), mem_op);
  } else {
    __ Str(value, mem_op);
  }
}


void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = no_reg;
  Register store_base = no_reg;
  Register key = no_reg;
  MemOperand mem_op;

  if (!instr->key()->IsConstantOperand() ||
      instr->hydrogen()->NeedsWriteBarrier()) {
    scratch = ToRegister(instr->temp());
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset = instr->base_offset() +
        ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
    if (representation.IsInteger32()) {
      DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
      STATIC_ASSERT(kSmiTag == 0);
      mem_op = UntagSmiMemOperand(store_base, offset);
    } else {
      mem_op = MemOperand(store_base, offset);
    }
  } else {
    store_base = scratch;
    key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      representation, instr->base_offset());
  }

  __ Store(value, mem_op, representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    DCHECK(representation.IsTagged());
    // This assignment may cause element_addr to alias store_base.
    Register element_addr = scratch;
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the modified element and store it into the
    // key register.
    __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
    __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
                   kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  Handle<Code> ic =
      CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    __ Store(value, MemOperand(object, offset), representation);
    return;
  }

  __ AssertNotSmi(object);

  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
    return;
  }

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsInteger32Constant(LConstantOperand::cast(instr->value())));

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    // Store the new map value.
    Register new_map_value = ToRegister(instr->temp0());
    __ Mov(new_map_value, Operand(transition));
    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           new_map_value,
                           ToRegister(instr->temp1()),
                           GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination;
  if (access.IsInobject()) {
    destination = object;
  } else {
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
    destination = temp0;
  }

  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
  } else if (representation.IsSmi() &&
             instr->hydrogen()->value()->representation().IsInteger32()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(destination, offset));
    __ AssertSmi(temp0);
    // If destination aliased temp0, restore it to the address calculated
    // earlier.
    if (destination.Is(temp0)) {
      DCHECK(!access.IsInobject());
      __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
    }
#endif
    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
    STATIC_ASSERT(kSmiTag == 0);
    Register value = ToRegister(instr->value());
    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
             Representation::Integer32());
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, FieldMemOperand(destination, offset), representation);
  }
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    __ RecordWriteField(destination,
                        offset,
                        value,                        // Clobbered.
                        ToRegister(instr->temp1()),   // Clobbered.
                        GetLinkRegisterState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


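// The RecordWriteField call above implements the generational write barrier:
// when a pointer to a new-space object is written into an old-space object,
// the store location must be added to the remembered set so the next
// scavenge can find it. The smi check is skipped (OMIT_SMI_CHECK) when type
// analysis has already proved the value is a heap object, since stores of
// smis never need a barrier.

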
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTagAndPush(index);

  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());

  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}


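// The fast path above is a table lookup in the single-character string
// cache. The address computation is, in effect:
//
//   result = cache + FixedArray::kHeaderSize - kHeapObjectTag
//                  + char_code * kPointerSize;
//
// i.e. element char_code of the cache FixedArray. A cache miss (undefined)
// or a char code above String::kMaxOneByteCharCode falls through to the
// deferred runtime call.

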
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ SmiTagAndPush(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  InlineSmiCheckInfo::EmitNotInlined(masm());

  Condition condition = TokenToCondition(op, false);

  EmitCompareAndBranch(instr, condition, x0, 0);
}


void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


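// Both subtraction paths above rely on the same flag-setting idiom: Subs
// sets the overflow flag (V) exactly when the signed result is out of
// range, e.g. kMinInt - 1 in the 32-bit case, so DeoptimizeIf(vs, ...)
// bails out to unoptimized code instead of silently wrapping around. When
// overflow has been ruled out statically, the cheaper non-flag-setting Sub
// is emitted instead.

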
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to undefined check.
    __ JumpIfNotHeapNumber(input, &check_bools);

    // A heap number: load value and convert to int32 using truncating
    // function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);

    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output contains zero, undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                        Deoptimizer::kNotAHeapNumberUndefinedBoolean);
  } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    DeoptimizeIfNotHeapNumber(input, instr);

    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
    }
  }
  __ Bind(&done);
}


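// Minus-zero detection above works on raw bits: once the non-truncating
// conversion has produced integer 0, the only remaining ambiguity is -0.0
// versus +0.0, and Fmov copies the double's bit pattern
// (0x8000000000000000 for -0.0) into a core register where the sign bit can
// be tested directly by DeoptimizeIfNegative.

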
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


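// The inline fast path assumes the common case of a smi input: with the
// arm64 smi encoding (payload in the upper 32 bits), SmiUntag is a single
// arithmetic shift right by 32. Only non-smi inputs take the deferred path,
// where heap numbers, undefined and booleans are handled as above.

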
void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).Is(x0));
  DCHECK(ToRegister(instr->result()).Is(x0));
  __ Push(x0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create regexp literal using runtime function
  // Result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

  __ Bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);

  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen checks
      // that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
    DCHECK(object.is(x0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


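// A "simple" transition (for example FAST_SMI_ELEMENTS -> FAST_ELEMENTS)
// leaves the backing store bit-compatible, so only the map pointer needs to
// be rewritten, plus a map write barrier. Anything else, such as smi to
// double elements, must re-box every element, which is delegated to
// TransitionElementsKindStub under a safepoint.

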
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


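// TruncateDoubleToI implements the JavaScript ToInt32 truncation: the value
// is chopped toward zero and reduced modulo 2^32 into the signed 32-bit
// range, so e.g. 4294967298.7 becomes 2, and NaN or infinity become 0. If
// the instruction produces a smi, the result is then re-tagged above.

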
void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ Push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(value, true_label);

    int true_block = instr->TrueDestination(chunk_);
    int false_block = instr->FalseDestination(chunk_);
    int next_block = GetNextEmittedBlock();

    if (true_block == false_block) {
      EmitGoto(true_block);
    } else if (true_block == next_block) {
      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
    } else {
      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
      if (false_block != next_block) {
        __ B(chunk_->GetAssemblyLabel(false_block));
      }
    }

  } else if (String::Equals(type_name, factory->string_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(
        value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    DCHECK(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // HeapObject's type has been loaded into type register by JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (String::Equals(type_name, factory->object_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else {
    __ B(false_label);
  }
}


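// Note the role of Map::kIsUndetectable throughout: undetectable objects
// (the classic example is document.all) must report typeof "undefined", so
// the "undefined" case branches to true when the bit is set, while the
// "string" and "object" cases test the same bit to exclude such objects.

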
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


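// In sloppy-mode calls, null or undefined receivers are replaced by the
// global proxy, which is fetched through the callee's own context so that
// cross-context calls see the right global object. Other primitive
// receivers deoptimize here, leaving the full ToObject wrapping to the
// unoptimized code, and strict-mode or native functions receive the value
// unchanged.

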
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}


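// The index smi carries a two-part encoding, which the code above unpacks:
// the lowest payload bit (tested via the mask Smi::FromInt(1)) flags a
// mutable heap-number field, handled in the deferred path, and the
// remaining bits, recovered by the arithmetic shift, are the field index.
// A non-negative index names an in-object field at JSObject::kHeaderSize +
// index * kPointerSize; a negative one selects the out-of-object properties
// array, offset by one slot because, per the comment above, the encoding
// stores -(property index + 1).

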
void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


} }  // namespace v8::internal