// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) { }

  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags and they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen),
        cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ B(NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
};

// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
      : BranchGenerator(codegen),
        cond_(cond),
        lhs_(lhs),
        rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};

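// For illustration (not part of the original file): with a zero right-hand
// side, MacroAssembler::CompareAndBranch can collapse the compare and the
// branch into a single instruction, e.g.
//   __ CompareAndBranch(x0, 0, eq, &target);  // -> Cbz x0, &target
//   __ CompareAndBranch(x0, 0, ne, &target);  // -> Cbnz x0, &target
// Any other right-hand side falls back to a Cmp followed by B(cond).
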
// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
      : BranchGenerator(codegen),
        cond_(cond),
        value_(value),
        mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq: __ TestAndBranchIfAllClear(value_, mask_, label); break;
      case ne: __ TestAndBranchIfAnySet(value_, mask_, label); break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq: __ TestAndBranchIfAnySet(value_, mask_, label); break;
      case ne: __ TestAndBranchIfAllClear(value_, mask_, label); break;
      default:
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};

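// For illustration (not part of the original file): when the mask has a
// single bit set and the condition is eq or ne, the helpers named above can
// emit one test-and-branch instruction, e.g.
//   __ TestAndBranchIfAllClear(x0, 1 << 3, &t);  // -> Tbz x0, #3, &t
//   __ TestAndBranchIfAnySet(x0, 1 << 3, &t);    // -> Tbnz x0, #3, &t
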
// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
      : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};

// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};

// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();
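  // Worked example (illustrative, not from the original source): a frame
  // with translation_size == 5 values, of which parameter_count == 2 are
  // parameters, records height == 3, the number of non-parameter slots.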

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame(translation_size);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
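    // Illustrative note (not from the original source): field values of
    // dematerialized objects are appended after the regular environment
    // values, so with translation_size == 4 and dematerialized_index == 2
    // this object's fields start at environment->values()->at(6).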
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

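// Illustrative note (not from the original source): for a function with one
// inlined JS call, the environment chain above has two frames, so
// frame_count == 2, while jsframe_count counts only the JS_FUNCTION frames
// among them.
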
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}

void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).Is(x1));
  DCHECK(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(x3));
    DCHECK(vector_register.is(x2));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Mov(vector_register, vector);
    __ Mov(slot_register, Operand(Smi::FromInt(index)));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}

void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);

  DCHECK(ToRegister(instr->result()).is(x0));
}

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case where we have a length
    // argument. The case may bail out to the runtime, which will determine
    // the correct elements kind with the site.
    __ Mov(x2, instr->hydrogen()->site());
  } else {
    __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
  }

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    {
      ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    }
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }

  DCHECK(ToRegister(instr->result()).is(x0));
}

void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}

void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}

void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles?
    // It looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t flag in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions (without an explicit
    // receiver object).
    if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
        !info()->is_native() && info()->scope()->has_this_declaration()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Poke(x10, receiver_offset);

      __ Bind(&ok);
    }
  }

  DCHECK(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ Claim(slots, kPointerSize);
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    DCHECK(!info()->scope()->is_script_scope());
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
                                    value, scratch, GetLinkRegisterState(),
                                    kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ Bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }

  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Claim(slots);
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp, cp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}

bool LCodeGen::GenerateJumpTable() {
  Label needs_frame, call_deopt_entry;

  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ Bind(&table_entry->label);

      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      __ Mov(entry_offset, entry - base);
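      // Illustrative (not from the original source): if the second-level
      // table starts at address B and this entry sits at B + 0x30, only the
      // immediate 0x30 is materialized here; the common code bound at
      // call_deopt_entry below adds it back onto the base address.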

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        // Save lr before Bl, fp will be adjusted in the needs_frame code.
        __ Push(lr, fp);
        // Reuse the existing needs_frame code.
        __ Bl(&needs_frame);
      } else {
        // There is nothing special to do, so just continue to the
        // second-level deopt table.
        __ Bl(&call_deopt_entry);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);

      masm()->CheckConstPool(false, false);
    }

    if (needs_frame.is_linked()) {
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());

      Comment(";;; needs_frame common code");
      UseScratchRegisterScope temps(masm());
      Register stub_marker = temps.AcquireX();
      __ Bind(&needs_frame);
      __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
      __ Push(cp, stub_marker);
      __ Add(fp, __ StackPointer(), 2 * kPointerSize);
    }

    // Generate common code for calling the second-level deopt table.
    __ Bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    Register deopt_entry = temps.AcquireX();
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Br(deopt_entry);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK_EQ(0, deoptimization_literals_.length());
  for (auto function : chunk()->inlined_functions()) {
    DefineDeoptimizationLiteral(function);
  }
  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::DeoptimizeBranch(
    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  DCHECK(environment->HasBeenRegistered());
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(w1, w1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    DCHECK(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through the jump table if we need to build a frame, or restore caller
  // doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ B(&jump_table_.last()->label, branch_type, reg, bit);
  }
}

void LCodeGen::Deoptimize(LInstruction* instr,
                          Deoptimizer::DeoptReason deopt_reason,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
                   override_bailout_type);
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
                               Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                                LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                                   LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                                     Deoptimizer::DeoptReason deopt_reason) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
}


void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}


void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}

void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

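// Illustrative note (not from the original source): if space_needed is four
// instructions (16 bytes) and only two instructions were emitted since the
// last lazy-bailout point, the loop above pads with two nops so that the
// call patched in at lazy-deopt time cannot overwrite unrelated code.
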
Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  DCHECK((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());
}

Operand LCodeGen::ToOperand(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }

  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}

Operand LCodeGen::ToOperand32(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      return Operand(constant->Integer32Value());
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}

static int64_t ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}

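// Illustrative (not from the original source): index -1 maps to offset 0,
// i.e. the slot at the stack pointer itself; index -2 maps to kPointerSize,
// and so on up the stack.
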
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(op != NULL);
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}

template <class LI>
Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
  if (shift_info->shift() == NO_SHIFT) {
    return ToOperand32(right);
  } else {
    return Operand(
        ToRegister32(right),
        shift_info->shift(),
        JSShiftAmountFromLConstant(shift_info->shift_amount()));
  }
}

bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}

Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT: cond = eq; break;
    case Token::NE:
    case Token::NE_STRICT: cond = ne; break;
    case Token::LT: cond = is_unsigned ? lo : lt; break;
    case Token::GT: cond = is_unsigned ? hi : gt; break;
    case Token::LTE: cond = is_unsigned ? ls : le; break;
    case Token::GTE: cond = is_unsigned ? hs : ge; break;
    case Token::IN:
    case Token::INSTANCEOF:
    default: UNREACHABLE();
  }
  return cond;
}

template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    if (right_block != next_block) {
      __ B(chunk_->GetAssemblyLabel(right_block));
    }
  }
}

template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  DCHECK((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  DCHECK((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  DCHECK((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}

void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
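    // Worked example (illustrative, not from the original source): with
    // length == 3 and index == 1 this loads from offset
    // (3 - 1 + 1) * kPointerSize; the subtraction covers one of the two
    // extra words described above and the "+ 1" covers the other.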
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    if (loc != 0) {
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
    } else {
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
  }
}

void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = (instr->right()->IsConstantOperand())
      ? ToInteger32(LConstantOperand::cast(instr->right()))
      : Operand(ToRegister32(instr->right()), SXTW);

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);
}

void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Add(result, left, right);
  }
}

void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }

  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
    } else {
      __ B(deferred->entry());
    }
  } else {
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
  }

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register filler_count = temp1;
    Register filler = temp2;
    Register untagged_result = ToRegister(instr->temp3());

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Mov(filler_count, size / kPointerSize);
    } else {
      __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
    }

    __ Sub(untagged_result, result, kHeapObjectTag);
    __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ FillFields(untagged_result, filler_count, filler);
  } else {
    DCHECK(instr->temp3() == NULL);
  }
}

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  // We're in a SafepointRegistersScope so we can use any scratch registers.
  Register size = x0;
  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
  } else {
    __ SmiTag(size, ToRegister32(instr->size()).X());
  }
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Mov(x10, Smi::FromInt(flags));
  __ Push(size, x10);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
}

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister32(instr->length());

  Register elements = ToRegister(instr->elements());
  Register scratch = x5;
  DCHECK(receiver.Is(x0));  // Used for parameter count.
  DCHECK(function.Is(x1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ Push(receiver);
  Register argc = receiver;
  receiver = NoReg;
  __ Sxtw(argc, length);
  // The arguments are at a one pointer size offset from elements.
  __ Add(elements, elements, 1 * kPointerSize);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Cbz(length, &invoke);
  __ Bind(&loop);
  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
  __ Push(scratch);
  __ Subs(length, length, 1);
  __ B(ne, &loop);

  __ Bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in argc (receiver) which is x0, as
  // expected by InvokeFunction.
  ParameterCount actual(argc);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}

void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // When we are inside an inlined function, the arguments are the last
    // things that have been pushed on the stack. Therefore the arguments
    // array can be accessed directly from jssp.
    // However in the normal case, it is accessed via fp but there are two
    // words on the stack between fp and the arguments (the saved lr and fp)
    // and the LAccessArgumentsAt implementation takes that into account.
    // In the inlined case we need to subtract the size of 2 words from jssp
    // to get a pointer which will work well with LAccessArgumentsAt.
    DCHECK(masm()->StackPointer().Is(jssp));
    __ Sub(result, jssp, 2 * kPointerSize);
  } else {
    DCHECK(instr->temp() != NULL);
    Register previous_fp = ToRegister(instr->temp());

    __ Ldr(previous_fp,
           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ Ldr(result,
           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ Csel(result, fp, previous_fp, ne);
  }
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister32(instr->result());
  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());
  __ B(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ Ldr(result,
         UntagSmiMemOperand(result.X(),
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ Bind(&done);
}

void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());

  switch (instr->op()) {
    case Token::ADD: __ Fadd(result, left, right); break;
    case Token::SUB: __ Fsub(result, left, right); break;
    case Token::MUL: __ Fmul(result, left, right); break;
    case Token::DIV: __ Fdiv(result, left, right); break;
    case Token::MOD: {
      // The ECMA-262 remainder operator is the remainder from a truncating
      // (round-towards-zero) division. Note that this differs from IEEE-754.
      //
      // TODO(jbramley): See if it's possible to do this inline, rather than by
      // calling a helper function. With frintz (to produce the intermediate
      // quotient) and fmsub (to calculate the remainder without loss of
      // precision), it should be possible. However, we would need support for
      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
      // support that yet.
      DCHECK(left.Is(d0));
      DCHECK(right.Is(d1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      DCHECK(result.Is(d0));
      break;
    }
    default: UNREACHABLE(); break;
  }
}

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(x1));
  DCHECK(ToRegister(instr->right()).is(x0));
  DCHECK(ToRegister(instr->result()).is(x0));

  Handle<Code> code = CodeFactory::BinaryOpIC(
      isolate(), instr->op(), instr->language_mode()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoBitI(LBitI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand32(instr->index());
    Register length = ToRegister32(instr->length());
    __ Cmp(length, index);
    cond = CommuteCondition(cond);
  } else {
    Register index = ToRegister32(instr->index());
    Operand length = ToOperand32(instr->length());
    __ Cmp(index, length);
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
  } else {
    DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
  }
}

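// Illustrative note (not from the original source): when the constant index
// ends up on the left of the Cmp above, CommuteCondition turns hi into lo
// and hs into ls, so the deoptimization still triggers exactly for
// out-of-bounds indices.
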
1832 void LCodeGen::DoBranch(LBranch* instr) {
1833 Representation r = instr->hydrogen()->value()->representation();
1834 Label* true_label = instr->TrueLabel(chunk_);
1835 Label* false_label = instr->FalseLabel(chunk_);
1837 if (r.IsInteger32()) {
1838 DCHECK(!info()->IsStub());
1839 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1840 } else if (r.IsSmi()) {
1841 DCHECK(!info()->IsStub());
1842 STATIC_ASSERT(kSmiTag == 0);
1843 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1844 } else if (r.IsDouble()) {
1845 DoubleRegister value = ToDoubleRegister(instr->value());
1846 // Test the double value. Zero and NaN are false.
1847 EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1849 DCHECK(r.IsTagged());
1850 Register value = ToRegister(instr->value());
1851 HType type = instr->hydrogen()->value()->type();
1853 if (type.IsBoolean()) {
1854 DCHECK(!info()->IsStub());
1855 __ CompareRoot(value, Heap::kTrueValueRootIndex);
1856 EmitBranch(instr, eq);
1857 } else if (type.IsSmi()) {
1858 DCHECK(!info()->IsStub());
1859 EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
1860 } else if (type.IsJSArray()) {
1861 DCHECK(!info()->IsStub());
1862 EmitGoto(instr->TrueDestination(chunk()));
1863 } else if (type.IsHeapNumber()) {
1864 DCHECK(!info()->IsStub());
1865 __ Ldr(double_scratch(), FieldMemOperand(value,
1866 HeapNumber::kValueOffset));
1867 // Test the double value. Zero and NaN are false.
1868 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1869 } else if (type.IsString()) {
1870 DCHECK(!info()->IsStub());
1871 Register temp = ToRegister(instr->temp1());
1872 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1873 EmitCompareAndBranch(instr, ne, temp, 0);
1875 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1876 // Avoid deopts in the case where we've never executed this path before.
1877 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
1879 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1880 // undefined -> false.
1882 value, Heap::kUndefinedValueRootIndex, false_label);
1885 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1886 // Boolean -> its value.
1888 value, Heap::kTrueValueRootIndex, true_label);
1890 value, Heap::kFalseValueRootIndex, false_label);
1893 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1896 value, Heap::kNullValueRootIndex, false_label);
1899 if (expected.Contains(ToBooleanStub::SMI)) {
1900 // Smis: 0 -> false, all other -> true.
1901 DCHECK(Smi::FromInt(0) == 0);
1902 __ Cbz(value, false_label);
1903 __ JumpIfSmi(value, true_label);
1904 } else if (expected.NeedsMap()) {
1905 // If we need a map later and have a smi, deopt.
1906 DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
1909 Register map = NoReg;
1910 Register scratch = NoReg;
1912 if (expected.NeedsMap()) {
1913 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
1914 map = ToRegister(instr->temp1());
1915 scratch = ToRegister(instr->temp2());
1917 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1919 if (expected.CanBeUndetectable()) {
1920 // Undetectable -> false.
1921 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1922 __ TestAndBranchIfAnySet(
1923 scratch, 1 << Map::kIsUndetectable, false_label);
1927 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1928 // spec object -> true.
1929 __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1930 __ B(ge, true_label);
1933 if (expected.Contains(ToBooleanStub::STRING)) {
1934 // String value -> false iff empty.
1935 Label not_string;
1936 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1937 __ B(ge, &not_string);
1938 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1939 __ Cbz(scratch, false_label);
1940 __ B(true_label);
1941 __ Bind(&not_string);
1942 }
1944 if (expected.Contains(ToBooleanStub::SYMBOL)) {
1945 // Symbol value -> true.
1946 __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1947 __ B(eq, true_label);
1948 }
1950 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1951 Label not_heap_number;
1952 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
1954 __ Ldr(double_scratch(),
1955 FieldMemOperand(value, HeapNumber::kValueOffset));
1956 __ Fcmp(double_scratch(), 0.0);
1957 // If we got a NaN (overflow bit is set), jump to the false branch.
1958 __ B(vs, false_label);
1959 __ B(eq, false_label);
1960 __ B(true_label);
1961 __ Bind(&not_heap_number);
1962 }
1964 if (!expected.IsGeneric()) {
1965 // We've seen something for the first time -> deopt.
1966 // This can only happen if we are not generic already.
1967 Deoptimize(instr, Deoptimizer::kUnexpectedObject);
1968 }
1969 }
1970 }
1971 }
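// Illustrative note (editor's sketch, not part of the original source): the
// dispatch above mirrors JavaScript ToBoolean. Concrete cases the generated
// code decides:
//   undefined, null   -> false
//   +0, -0, NaN       -> false; any other number -> true
//   ""                -> false; any non-empty string -> true
//   objects           -> true, unless their map is marked undetectable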
1974 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
1975 int formal_parameter_count, int arity,
1976 LInstruction* instr) {
1977 bool dont_adapt_arguments =
1978 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1979 bool can_invoke_directly =
1980 dont_adapt_arguments || formal_parameter_count == arity;
1982 // The function interface relies on the following register assignments.
1983 Register function_reg = x1;
1984 Register arity_reg = x0;
1986 LPointerMap* pointers = instr->pointer_map();
1988 if (FLAG_debug_code) {
1989 Label is_not_smi;
1990 // Try to confirm that function_reg (x1) is a tagged pointer.
1991 __ JumpIfNotSmi(function_reg, &is_not_smi);
1992 __ Abort(kExpectedFunctionObject);
1993 __ Bind(&is_not_smi);
1994 }
1996 if (can_invoke_directly) {
1998 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
2000 // Set the arguments count if adaptation is not needed. Assumes that x0 is
2001 // available to write to at this point.
2002 if (dont_adapt_arguments) {
2003 __ Mov(arity_reg, arity);
2004 }
2006 // Invoke function.
2007 __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
2008 __ Call(x10);
2010 // Set up deoptimization.
2011 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2012 } else {
2013 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2014 ParameterCount count(arity);
2015 ParameterCount expected(formal_parameter_count);
2016 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
2017 }
2018 }
2021 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
2022 DCHECK(instr->IsMarkedAsCall());
2023 DCHECK(ToRegister(instr->result()).Is(x0));
2025 if (instr->hydrogen()->IsTailCall()) {
2026 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
2028 if (instr->target()->IsConstantOperand()) {
2029 LConstantOperand* target = LConstantOperand::cast(instr->target());
2030 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2031 // TODO(all): on ARM we use a call descriptor to specify a storage mode
2032 // but on ARM64 we only have one storage mode so it isn't necessary. Check
2033 // this understanding is correct.
2034 __ Jump(code, RelocInfo::CODE_TARGET);
2035 } else {
2036 DCHECK(instr->target()->IsRegister());
2037 Register target = ToRegister(instr->target());
2038 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2039 __ Br(target);
2040 }
2041 } else {
2042 LPointerMap* pointers = instr->pointer_map();
2043 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2045 if (instr->target()->IsConstantOperand()) {
2046 LConstantOperand* target = LConstantOperand::cast(instr->target());
2047 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2048 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
2049 // TODO(all): on ARM we use a call descriptor to specify a storage mode
2050 // but on ARM64 we only have one storage mode so it isn't necessary. Check
2051 // this understanding is correct.
2052 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
2053 } else {
2054 DCHECK(instr->target()->IsRegister());
2055 Register target = ToRegister(instr->target());
2056 generator.BeforeCall(__ CallSize(target));
2057 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2058 __ Call(target);
2059 }
2060 generator.AfterCall();
2065 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
2066 DCHECK(instr->IsMarkedAsCall());
2067 DCHECK(ToRegister(instr->function()).is(x1));
2069 if (instr->hydrogen()->pass_argument_count()) {
2070 __ Mov(x0, Operand(instr->arity()));
2071 }
2073 // Change context.
2074 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2076 // Load the code entry address
2077 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
2078 __ Call(x10);
2080 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2084 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2085 CallRuntime(instr->function(), instr->arity(), instr);
2089 void LCodeGen::DoCallStub(LCallStub* instr) {
2090 DCHECK(ToRegister(instr->context()).is(cp));
2091 DCHECK(ToRegister(instr->result()).is(x0));
2092 switch (instr->hydrogen()->major_key()) {
2093 case CodeStub::RegExpExec: {
2094 RegExpExecStub stub(isolate());
2095 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2096 break;
2097 }
2098 case CodeStub::SubString: {
2099 SubStringStub stub(isolate());
2100 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2101 break;
2102 }
2103 case CodeStub::StringCompare: {
2104 StringCompareStub stub(isolate());
2105 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2106 break;
2107 }
2108 default:
2109 UNREACHABLE();
2110 }
2111 }
2114 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
2115 GenerateOsrPrologue();
2119 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
2120 Register temp = ToRegister(instr->temp());
2121 {
2122 PushSafepointRegistersScope scope(this);
2123 __ Push(object);
2124 __ Mov(cp, 0);
2125 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2126 RecordSafepointWithRegisters(
2127 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2128 __ StoreToSafepointRegisterSlot(x0, temp);
2129 }
2130 DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
2134 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2135 class DeferredCheckMaps: public LDeferredCode {
2136 public:
2137 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2138 : LDeferredCode(codegen), instr_(instr), object_(object) {
2139 SetExit(check_maps());
2141 virtual void Generate() {
2142 codegen()->DoDeferredInstanceMigration(instr_, object_);
2144 Label* check_maps() { return &check_maps_; }
2145 virtual LInstruction* instr() { return instr_; }
2146 private:
2147 LCheckMaps* instr_;
2148 Label check_maps_;
2149 Register object_;
2150 };
2152 if (instr->hydrogen()->IsStabilityCheck()) {
2153 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2154 for (int i = 0; i < maps->size(); ++i) {
2155 AddStabilityDependency(maps->at(i).handle());
2156 }
2157 return;
2158 }
2160 Register object = ToRegister(instr->value());
2161 Register map_reg = ToRegister(instr->temp());
2163 __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
2165 DeferredCheckMaps* deferred = NULL;
2166 if (instr->hydrogen()->HasMigrationTarget()) {
2167 deferred = new(zone()) DeferredCheckMaps(this, instr, object);
2168 __ Bind(deferred->check_maps());
2171 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2172 Label success;
2173 for (int i = 0; i < maps->size() - 1; i++) {
2174 Handle<Map> map = maps->at(i).handle();
2175 __ CompareMap(map_reg, map);
2176 __ B(eq, &success);
2177 }
2178 Handle<Map> map = maps->at(maps->size() - 1).handle();
2179 __ CompareMap(map_reg, map);
2181 // We didn't match a map.
2182 if (instr->hydrogen()->HasMigrationTarget()) {
2183 __ B(ne, deferred->entry());
2185 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
2186 }
2188 __ Bind(&success);
2189 }
2192 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2193 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2194 DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
2199 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2200 Register value = ToRegister(instr->value());
2201 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
2202 DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
2206 void LCodeGen::DoCheckArrayBufferNotNeutered(
2207 LCheckArrayBufferNotNeutered* instr) {
2208 UseScratchRegisterScope temps(masm());
2209 Register view = ToRegister(instr->view());
2210 Register scratch = temps.AcquireX();
2212 __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
2213 __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
2214 __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
2215 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
2219 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2220 Register input = ToRegister(instr->value());
2221 Register scratch = ToRegister(instr->temp());
2223 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2224 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2226 if (instr->hydrogen()->is_interval_check()) {
2227 InstanceType first, last;
2228 instr->hydrogen()->GetCheckInterval(&first, &last);
2230 __ Cmp(scratch, first);
2231 if (first == last) {
2232 // If there is only one type in the interval check for equality.
2233 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
2234 } else if (last == LAST_TYPE) {
2235 // We don't need to compare with the higher bound of the interval.
2236 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
2237 } else {
2238 // If we are below the lower bound, set the C flag and clear the Z flag
2239 // to force a deopt.
2240 __ Ccmp(scratch, last, CFlag, hs);
2241 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
2242 }
2243 } else {
2244 uint8_t mask;
2245 uint8_t tag;
2246 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2248 if (base::bits::IsPowerOfTwo32(mask)) {
2249 DCHECK((tag == 0) || (tag == mask));
2250 if (tag == 0) {
2251 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
2252 Deoptimizer::kWrongInstanceType);
2253 } else {
2254 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
2255 Deoptimizer::kWrongInstanceType);
2256 }
2257 } else {
2258 if (tag == 0) {
2259 __ Tst(scratch, mask);
2260 } else {
2261 __ And(scratch, scratch, mask);
2262 __ Cmp(scratch, tag);
2263 }
2264 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
2265 }
2266 }
2267 }
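// Illustrative walk-through of the Ccmp interval check above (editor's
// sketch, assuming an interval [first=10, last=20]): for scratch=5,
// Cmp(scratch, 10) leaves "hs" false, so Ccmp forces the flags to CFlag
// (C set, Z clear), which reads as "hi" and deopts. For scratch=15, "hs"
// holds, Ccmp really compares 15 with 20, "hi" is false, and the check
// passes; scratch=25 compares higher than 20 and deopts.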
2270 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2271 DoubleRegister input = ToDoubleRegister(instr->unclamped());
2272 Register result = ToRegister32(instr->result());
2273 __ ClampDoubleToUint8(result, input, double_scratch());
2274 }
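// Illustrative values for the clamp family here (editor's note): the result
// saturates to [0, 255] with round-to-nearest (ties to even), so
// -5.0 -> 0, 300.0 -> 255, 42.4 -> 42, and NaN -> 0, matching
// Uint8ClampedArray semantics.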
2277 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
2278 Register input = ToRegister32(instr->unclamped());
2279 Register result = ToRegister32(instr->result());
2280 __ ClampInt32ToUint8(result, input);
2284 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
2285 Register input = ToRegister(instr->unclamped());
2286 Register result = ToRegister32(instr->result());
2287 Label done;
2289 // Both smi and heap number cases are handled.
2290 Label is_not_smi;
2291 __ JumpIfNotSmi(input, &is_not_smi);
2292 __ SmiUntag(result.X(), input);
2293 __ ClampInt32ToUint8(result);
2294 __ B(&done);
2296 __ Bind(&is_not_smi);
2298 // Check for heap number.
2299 Label is_heap_number;
2300 __ JumpIfHeapNumber(input, &is_heap_number);
2302 // Check for undefined. Undefined is converted to zero for clamping conversions.
2303 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
2304 Deoptimizer::kNotAHeapNumberUndefined);
2305 __ Mov(result, 0);
2306 __ B(&done);
2308 // Heap number case.
2309 __ Bind(&is_heap_number);
2310 DoubleRegister dbl_scratch = double_scratch();
2311 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
2312 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2313 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2315 __ Bind(&done);
2316 }
2319 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
2320 DoubleRegister value_reg = ToDoubleRegister(instr->value());
2321 Register result_reg = ToRegister(instr->result());
2322 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
2323 __ Fmov(result_reg, value_reg);
2324 __ Lsr(result_reg, result_reg, 32);
2325 } else {
2326 __ Fmov(result_reg.W(), value_reg.S());
2327 }
2328 }
2331 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
2332 Register hi_reg = ToRegister(instr->hi());
2333 Register lo_reg = ToRegister(instr->lo());
2334 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2336 // Insert the least significant 32 bits of hi_reg into the most significant
2337 // 32 bits of lo_reg, and move to a floating point register.
2338 __ Bfi(lo_reg, hi_reg, 32, 32);
2339 __ Fmov(result_reg, lo_reg);
2340 }
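// Worked example for the Bfi/Fmov pair above (editor's sketch): with
// hi_reg = 0x40090000 and lo_reg = 0, Bfi(lo_reg, hi_reg, 32, 32) yields
// 0x4009000000000000, which Fmov reinterprets as the double 3.125. A minimal
// C++ equivalent of the bit manipulation (illustrative, with hypothetical
// variables hi and lo):
//   uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
//   double d;
//   memcpy(&d, &bits, sizeof d);  // d == 3.125 for the values above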
2343 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2344 Handle<String> class_name = instr->hydrogen()->class_name();
2345 Label* true_label = instr->TrueLabel(chunk_);
2346 Label* false_label = instr->FalseLabel(chunk_);
2347 Register input = ToRegister(instr->value());
2348 Register scratch1 = ToRegister(instr->temp1());
2349 Register scratch2 = ToRegister(instr->temp2());
2351 __ JumpIfSmi(input, false_label);
2353 Register map = scratch2;
2354 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2355 // Assuming the following assertions, we can use the same compares to test
2356 // for both being a function type and being in the object type range.
2357 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2358 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2359 FIRST_SPEC_OBJECT_TYPE + 1);
2360 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2361 LAST_SPEC_OBJECT_TYPE - 1);
2362 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2364 // We expect CompareObjectType to load the object instance type in scratch1.
2365 __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
2366 __ B(lt, false_label);
2367 __ B(eq, true_label);
2368 __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
2369 __ B(eq, true_label);
2370 } else {
2371 __ IsObjectJSObjectType(input, map, scratch1, false_label);
2372 }
2374 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2375 // Check if the constructor in the map is a function.
2376 {
2377 UseScratchRegisterScope temps(masm());
2378 Register instance_type = temps.AcquireX();
2379 __ GetMapConstructor(scratch1, map, scratch2, instance_type);
2380 __ Cmp(instance_type, JS_FUNCTION_TYPE);
2381 }
2382 // Objects with a non-function constructor have class 'Object'.
2383 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2384 __ B(ne, true_label);
2385 } else {
2386 __ B(ne, false_label);
2387 }
2389 // The constructor function is in scratch1. Get its instance class name.
2390 __ Ldr(scratch1,
2391 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
2392 __ Ldr(scratch1,
2393 FieldMemOperand(scratch1,
2394 SharedFunctionInfo::kInstanceClassNameOffset));
2396 // The class name we are testing against is internalized since it's a literal.
2397 // The name in the constructor is internalized because of the way the context
2398 // is booted. This routine isn't expected to work for random API-created
2399 // classes and it doesn't have to because you can't access it with natives
2400 // syntax. Since both sides are internalized it is sufficient to use an
2401 // identity comparison.
2402 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2406 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
2407 DCHECK(instr->hydrogen()->representation().IsDouble());
2408 FPRegister object = ToDoubleRegister(instr->object());
2409 Register temp = ToRegister(instr->temp());
2411 // If we don't have a NaN, we don't have the hole, so branch now to avoid the
2412 // (relatively expensive) hole-NaN check.
2413 __ Fcmp(object, object);
2414 __ B(vc, instr->FalseLabel(chunk_));
2416 // We have a NaN, but is it the hole?
2417 __ Fmov(temp, object);
2418 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
2419 }
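// Editor's note (illustrative): Fcmp(object, object) is the usual NaN probe.
// NaN is the only value that compares unordered with itself, so "vc"
// (V clear, i.e. ordered) branches every non-NaN straight to the false label
// and only NaNs reach the kHoleNanInt64 comparison.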
2422 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
2423 DCHECK(instr->hydrogen()->representation().IsTagged());
2424 Register object = ToRegister(instr->object());
2426 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
2430 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2431 Register value = ToRegister(instr->value());
2432 Register map = ToRegister(instr->temp());
2434 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2435 EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2439 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2440 Representation rep = instr->hydrogen()->value()->representation();
2441 DCHECK(!rep.IsInteger32());
2442 Register scratch = ToRegister(instr->temp());
2444 if (rep.IsDouble()) {
2445 __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
2446 instr->TrueLabel(chunk()));
2447 } else {
2448 Register value = ToRegister(instr->value());
2449 __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2450 __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2451 __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
2452 }
2453 EmitGoto(instr->FalseDestination(chunk()));
2454 }
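// Editor's note (illustrative): -0.0 is the only double whose bit pattern is
// 0x8000000000000000; 0.0 == -0.0 compares true, so JumpIfMinusZero has to
// inspect the raw bits (loaded into 'scratch' above for the heap-number
// path) rather than rely on a floating-point compare.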
2457 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2458 LOperand* left = instr->left();
2459 LOperand* right = instr->right();
2460 bool is_unsigned =
2461 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2462 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2463 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2465 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2466 // We can statically evaluate the comparison.
2467 double left_val = ToDouble(LConstantOperand::cast(left));
2468 double right_val = ToDouble(LConstantOperand::cast(right));
2469 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2470 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2471 EmitGoto(next_block);
2472 } else {
2473 if (instr->is_double()) {
2474 __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
2476 // If a NaN is involved, i.e. the result is unordered (V set),
2477 // jump to false block label.
2478 __ B(vs, instr->FalseLabel(chunk_));
2479 EmitBranch(instr, cond);
2480 } else {
2481 if (instr->hydrogen_value()->representation().IsInteger32()) {
2482 if (right->IsConstantOperand()) {
2483 EmitCompareAndBranch(instr, cond, ToRegister32(left),
2484 ToOperand32(right));
2485 } else {
2486 // Commute the operands and the condition.
2487 EmitCompareAndBranch(instr, CommuteCondition(cond),
2488 ToRegister32(right), ToOperand32(left));
2489 }
2490 } else {
2491 DCHECK(instr->hydrogen_value()->representation().IsSmi());
2492 if (right->IsConstantOperand()) {
2493 int32_t value = ToInteger32(LConstantOperand::cast(right));
2494 EmitCompareAndBranch(instr,
2495 cond,
2496 ToRegister(left),
2497 Operand(Smi::FromInt(value)));
2498 } else if (left->IsConstantOperand()) {
2499 // Commute the operands and the condition.
2500 int32_t value = ToInteger32(LConstantOperand::cast(left));
2501 EmitCompareAndBranch(instr,
2502 CommuteCondition(cond),
2503 ToRegister(right),
2504 Operand(Smi::FromInt(value)));
2505 } else {
2506 EmitCompareAndBranch(instr,
2507 cond,
2508 ToRegister(left),
2509 ToRegister(right));
2510 }
2511 }
2512 }
2513 }
2517 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2518 Register left = ToRegister(instr->left());
2519 Register right = ToRegister(instr->right());
2520 EmitCompareAndBranch(instr, eq, left, right);
2524 void LCodeGen::DoCmpT(LCmpT* instr) {
2525 DCHECK(ToRegister(instr->context()).is(cp));
2526 Token::Value op = instr->op();
2527 Condition cond = TokenToCondition(op, false);
2529 DCHECK(ToRegister(instr->left()).Is(x1));
2530 DCHECK(ToRegister(instr->right()).Is(x0));
2531 Handle<Code> ic =
2532 CodeFactory::CompareIC(isolate(), op, instr->language_mode()).code();
2533 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2534 // Signal that we don't inline smi code before this stub.
2535 InlineSmiCheckInfo::EmitNotInlined(masm());
2537 // Return true or false depending on CompareIC result.
2538 // This instruction is marked as call. We can clobber any register.
2539 DCHECK(instr->IsMarkedAsCall());
2540 __ LoadTrueFalseRoots(x1, x2);
2541 __ Cmp(x0, 0);
2542 __ Csel(ToRegister(instr->result()), x1, x2, cond);
2546 void LCodeGen::DoConstantD(LConstantD* instr) {
2547 DCHECK(instr->result()->IsDoubleRegister());
2548 DoubleRegister result = ToDoubleRegister(instr->result());
2549 if (instr->value() == 0) {
2550 if (copysign(1.0, instr->value()) == 1.0) {
2551 __ Fmov(result, fp_zero);
2552 } else {
2553 __ Fneg(result, fp_zero);
2554 }
2555 } else {
2556 __ Fmov(result, instr->value());
2557 }
2558 }
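// Editor's note (illustrative): the copysign test above is how +0.0 and -0.0
// are told apart, since 0.0 == -0.0 in a floating-point compare while
// copysign(1.0, -0.0) returns -1.0. A negative zero must be materialized
// with Fneg so its sign bit survives.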
2561 void LCodeGen::DoConstantE(LConstantE* instr) {
2562 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2566 void LCodeGen::DoConstantI(LConstantI* instr) {
2567 DCHECK(is_int32(instr->value()));
2568 // Cast the value here to ensure that the value isn't sign extended by the
2569 // implicit Operand constructor.
2570 __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
2574 void LCodeGen::DoConstantS(LConstantS* instr) {
2575 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2579 void LCodeGen::DoConstantT(LConstantT* instr) {
2580 Handle<Object> object = instr->value(isolate());
2581 AllowDeferredHandleDereference smi_check;
2582 __ LoadObject(ToRegister(instr->result()), object);
2586 void LCodeGen::DoContext(LContext* instr) {
2587 // If there is a non-return use, the context must be moved to a register.
2588 Register result = ToRegister(instr->result());
2589 if (info()->IsOptimizing()) {
2590 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
2591 } else {
2592 // If there is no frame, the context must be in cp.
2593 DCHECK(result.is(cp));
2598 void LCodeGen::DoCheckValue(LCheckValue* instr) {
2599 Register reg = ToRegister(instr->value());
2600 Handle<HeapObject> object = instr->hydrogen()->object().handle();
2601 AllowDeferredHandleDereference smi_check;
2602 if (isolate()->heap()->InNewSpace(*object)) {
2603 UseScratchRegisterScope temps(masm());
2604 Register temp = temps.AcquireX();
2605 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2606 __ Mov(temp, Operand(cell));
2607 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2608 __ Cmp(reg, temp);
2609 } else {
2610 __ Cmp(reg, Operand(object));
2611 }
2612 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
2616 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2617 last_lazy_deopt_pc_ = masm()->pc_offset();
2618 DCHECK(instr->HasEnvironment());
2619 LEnvironment* env = instr->environment();
2620 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2621 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2625 void LCodeGen::DoDateField(LDateField* instr) {
2626 Register object = ToRegister(instr->date());
2627 Register result = ToRegister(instr->result());
2628 Register temp1 = x10;
2629 Register temp2 = x11;
2630 Smi* index = instr->index();
2632 DCHECK(object.is(result) && object.Is(x0));
2633 DCHECK(instr->IsMarkedAsCall());
2635 if (index->value() == 0) {
2636 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2637 } else {
2638 Label runtime, done;
2639 if (index->value() < JSDate::kFirstUncachedField) {
2640 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2641 __ Mov(temp1, Operand(stamp));
2642 __ Ldr(temp1, MemOperand(temp1));
2643 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2644 __ Cmp(temp1, temp2);
2645 __ B(ne, &runtime);
2646 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
2647 kPointerSize * index->value()));
2648 __ B(&done);
2649 }
2651 __ Bind(&runtime);
2652 __ Mov(x1, Operand(index));
2653 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2654 __ Bind(&done);
2655 }
2656 }
2659 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
2660 Deoptimizer::BailoutType type = instr->hydrogen()->type();
2661 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
2662 // needed return address), even though the implementation of LAZY and EAGER is
2663 // now identical. When LAZY is eventually completely folded into EAGER, remove
2664 // the special case below.
2665 if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
2666 type = Deoptimizer::LAZY;
2667 }
2669 Deoptimize(instr, instr->hydrogen()->reason(), &type);
2673 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2674 Register dividend = ToRegister32(instr->dividend());
2675 int32_t divisor = instr->divisor();
2676 Register result = ToRegister32(instr->result());
2677 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
2678 DCHECK(!result.is(dividend));
2680 // Check for (0 / -x) that will produce negative zero.
2681 HDiv* hdiv = instr->hydrogen();
2682 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2683 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
2684 }
2685 // Check for (kMinInt / -1).
2686 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2687 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2688 // overflow.
2689 __ Cmp(dividend, 1);
2690 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
2691 }
2692 // Deoptimize if remainder will not be 0.
2693 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2694 divisor != 1 && divisor != -1) {
2695 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2696 __ Tst(dividend, mask);
2697 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
2698 }
2700 if (divisor == -1) { // Nice shortcut, not needed for correctness.
2701 __ Neg(result, dividend);
2702 return;
2703 }
2704 int32_t shift = WhichPowerOf2Abs(divisor);
2705 if (shift == 0) {
2706 __ Mov(result, dividend);
2707 } else if (shift == 1) {
2708 __ Add(result, dividend, Operand(dividend, LSR, 31));
2709 } else {
2710 __ Mov(result, Operand(dividend, ASR, 31));
2711 __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2712 }
2713 if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2714 if (divisor < 0) __ Neg(result, result);
2715 }
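// Worked example for the bias sequence above (editor's sketch): for
// dividend = -7 and divisor = 4 (shift = 2), a bare ASR would give
// -7 >> 2 = -2, rounding toward -infinity. "dividend ASR 31" is an all-ones
// mask for negative inputs, LSR by (32 - shift) turns it into the bias 3,
// and (-7 + 3) >> 2 = -1, matching C-style truncating division. A minimal
// C++ sketch of the same computation (assumes 1 <= shift <= 31 and
// arithmetic shift of negative ints):
//   int32_t DivByPowerOf2(int32_t dividend, int shift) {
//     uint32_t sign_mask = static_cast<uint32_t>(dividend >> 31);
//     int32_t bias = static_cast<int32_t>(sign_mask >> (32 - shift));
//     return (dividend + bias) >> shift;
//   }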
2718 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
2719 Register dividend = ToRegister32(instr->dividend());
2720 int32_t divisor = instr->divisor();
2721 Register result = ToRegister32(instr->result());
2722 DCHECK(!AreAliased(dividend, result));
2724 if (divisor == 0) {
2725 Deoptimize(instr, Deoptimizer::kDivisionByZero);
2726 return;
2727 }
2729 // Check for (0 / -x) that will produce negative zero.
2730 HDiv* hdiv = instr->hydrogen();
2731 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2732 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
2733 }
2735 __ TruncatingDiv(result, dividend, Abs(divisor));
2736 if (divisor < 0) __ Neg(result, result);
2738 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2739 Register temp = ToRegister32(instr->temp());
2740 DCHECK(!AreAliased(dividend, result, temp));
2741 __ Sxtw(dividend.X(), dividend);
2742 __ Mov(temp, divisor);
2743 __ Smsubl(temp.X(), result, temp, dividend.X());
2744 DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
2745 }
2746 }
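// Editor's note (illustrative): the Sxtw/Smsubl pair above recomputes the
// remainder as temp = dividend - result * divisor in 64 bits. For example,
// dividend = 7, divisor = 3 gives result = 2 and temp = 7 - 2 * 3 = 1; the
// non-zero remainder means the constant division lost precision, so any
// non-truncating use deopts.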
2749 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
2750 void LCodeGen::DoDivI(LDivI* instr) {
2751 HBinaryOperation* hdiv = instr->hydrogen();
2752 Register dividend = ToRegister32(instr->dividend());
2753 Register divisor = ToRegister32(instr->divisor());
2754 Register result = ToRegister32(instr->result());
2756 // Issue the division first, and then check for any deopt cases whilst the
2757 // result is computed.
2758 __ Sdiv(result, dividend, divisor);
2760 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
2761 DCHECK(!instr->temp());
2762 return;
2763 }
2765 // Check for x / 0.
2766 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
2767 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
2768 }
2770 // Check for (0 / -x) as that will produce negative zero.
2771 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
2772 __ Cmp(divisor, 0);
2774 // If the divisor < 0 (mi), compare the dividend, and deopt if it is
2775 // zero, i.e. a zero dividend with a negative divisor deopts.
2776 // If the divisor >= 0 (pl, the opposite of mi), set the flags to
2777 // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
2778 __ Ccmp(dividend, 0, NoFlag, mi);
2779 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
2780 }
2782 // Check for (kMinInt / -1).
2783 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
2784 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2785 // overflow.
2786 __ Cmp(dividend, 1);
2787 // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
2788 // -1. If overflow is clear, set the flags for condition ne, as the
2789 // dividend isn't kMinInt, and thus we shouldn't deopt.
2790 __ Ccmp(divisor, -1, NoFlag, vs);
2791 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
2792 }
2794 // Compute remainder and deopt if it's not zero.
2795 Register remainder = ToRegister32(instr->temp());
2796 __ Msub(remainder, result, divisor, dividend);
2797 DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
2798 }
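// Editor's note on the kMinInt test above (illustrative): Cmp(dividend, 1)
// computes dividend - 1, and the only 32-bit input for which that
// subtraction overflows (V set) is kMinInt = -2147483648, so the "vs"
// condition isolates exactly the kMinInt / -1 case that Sdiv cannot
// represent.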
2801 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
2802 DoubleRegister input = ToDoubleRegister(instr->value());
2803 Register result = ToRegister32(instr->result());
2805 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2806 DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
2807 }
2809 __ TryRepresentDoubleAsInt32(result, input, double_scratch());
2810 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
2812 if (instr->tag_result()) {
2813 __ SmiTag(result.X());
2818 void LCodeGen::DoDrop(LDrop* instr) {
2819 __ Drop(instr->count());
2823 void LCodeGen::DoDummy(LDummy* instr) {
2824 // Nothing to see here, move on!
2828 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2829 // Nothing to see here, move on!
2833 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2834 DCHECK(ToRegister(instr->context()).is(cp));
2835 // The FunctionLiteral instruction is marked as call, so we can trash any register.
2836 DCHECK(instr->IsMarkedAsCall());
2838 // Use the fast case closure allocation code that allocates in new
2839 // space for nested functions that don't need literals cloning.
2840 bool pretenure = instr->hydrogen()->pretenure();
2841 if (!pretenure && instr->hydrogen()->has_no_literals()) {
2842 FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
2843 instr->hydrogen()->kind());
2844 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2845 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2846 } else {
2847 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2848 __ Mov(x1, Operand(pretenure ? factory()->true_value()
2849 : factory()->false_value()));
2850 __ Push(cp, x2, x1);
2851 CallRuntime(Runtime::kNewClosure, 3, instr);
2856 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
2857 Register map = ToRegister(instr->map());
2858 Register result = ToRegister(instr->result());
2859 Label load_cache, done;
2861 __ EnumLengthUntagged(result, map);
2862 __ Cbnz(result, &load_cache);
2864 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2865 __ B(&done);
2867 __ Bind(&load_cache);
2868 __ LoadInstanceDescriptors(map, result);
2869 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2870 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2871 DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
2873 __ Bind(&done);
2874 }
2877 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2878 Register object = ToRegister(instr->object());
2879 Register null_value = x5;
2881 DCHECK(instr->IsMarkedAsCall());
2882 DCHECK(object.Is(x0));
2884 DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
2886 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
2887 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
2888 DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
2890 Label use_cache, call_runtime;
2891 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2892 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
2894 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2895 __ B(&use_cache);
2897 // Get the set of properties to enumerate.
2898 __ Bind(&call_runtime);
2899 __ Push(object);
2900 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
2902 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
2903 DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
2904 Deoptimizer::kWrongMap);
2906 __ Bind(&use_cache);
2910 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2911 Register input = ToRegister(instr->value());
2912 Register result = ToRegister(instr->result());
2914 __ AssertString(input);
2916 // Assert that we can use a W register load to get the hash.
2917 DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
2918 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
2919 __ IndexFromHash(result, result);
2923 void LCodeGen::EmitGoto(int block) {
2924 // Do not emit jump if we are emitting a goto to the next block.
2925 if (!IsNextEmittedBlock(block)) {
2926 __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
2931 void LCodeGen::DoGoto(LGoto* instr) {
2932 EmitGoto(instr->block_id());
2936 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2937 LHasCachedArrayIndexAndBranch* instr) {
2938 Register input = ToRegister(instr->value());
2939 Register temp = ToRegister32(instr->temp());
2941 // Assert that the cache status bits fit in a W register.
2942 DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
2943 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
2944 __ Tst(temp, String::kContainsCachedArrayIndexMask);
2945 EmitBranch(instr, eq);
2949 // The HHasInstanceTypeAndBranch instruction is built with an interval of types
2950 // to test but is only used in very restricted ways. The only possible kinds
2951 // of intervals are:
2952 // - [ FIRST_TYPE, instr->to() ]
2953 // - [ instr->from(), LAST_TYPE ]
2954 // - instr->from() == instr->to()
2956 // These kinds of intervals can be checked with only one compare instruction,
2957 // provided the correct value and test condition are used.
2959 // TestType() will return the value to use in the compare instruction and
2960 // BranchCondition() will return the condition to use depending on the kind
2961 // of interval actually specified in the instruction.
2962 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2963 InstanceType from = instr->from();
2964 InstanceType to = instr->to();
2965 if (from == FIRST_TYPE) return to;
2966 DCHECK((from == to) || (to == LAST_TYPE));
2967 return from;
2968 }
2971 // See comment above TestType function for what this function does.
2972 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2973 InstanceType from = instr->from();
2974 InstanceType to = instr->to();
2975 if (from == to) return eq;
2976 if (to == LAST_TYPE) return hs;
2977 if (from == FIRST_TYPE) return ls;
2978 UNREACHABLE();
2979 return eq;
2980 }
2983 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2984 Register input = ToRegister(instr->value());
2985 Register scratch = ToRegister(instr->temp());
2987 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2988 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2989 }
2990 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2991 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2995 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
2996 Register result = ToRegister(instr->result());
2997 Register base = ToRegister(instr->base_object());
2998 if (instr->offset()->IsConstantOperand()) {
2999 __ Add(result, base, ToOperand32(instr->offset()));
3000 } else {
3001 __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
3006 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
3007 DCHECK(ToRegister(instr->context()).is(cp));
3008 // Assert that the arguments are in the registers expected by InstanceofStub.
3009 DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
3010 DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));
3012 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
3013 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3015 // InstanceofStub returns a result in x0:
3016 // 0 => not an instance
3017 // smi 1 => instance.
3018 __ Cmp(x0, 0);
3019 __ LoadTrueFalseRoots(x0, x1);
3020 __ Csel(x0, x0, x1, eq);
3024 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3025 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
3027 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
3028 LInstanceOfKnownGlobal* instr)
3029 : LDeferredCode(codegen), instr_(instr) { }
3030 virtual void Generate() {
3031 codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
3033 virtual LInstruction* instr() { return instr_; }
3034 private:
3035 LInstanceOfKnownGlobal* instr_;
3036 };
3038 DeferredInstanceOfKnownGlobal* deferred =
3039 new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
3041 Label map_check, return_false, cache_miss, done;
3042 Register object = ToRegister(instr->value());
3043 Register result = ToRegister(instr->result());
3044 // x4 is expected in the associated deferred code and stub.
3045 Register map_check_site = x4;
3046 Register map = x5;
3048 // This instruction is marked as call. We can clobber any register.
3049 DCHECK(instr->IsMarkedAsCall());
3051 // We must take into account that object is in x11.
3052 DCHECK(object.Is(x11));
3053 Register scratch = x10;
3055 // A Smi is not instance of anything.
3056 __ JumpIfSmi(object, &return_false);
3058 // This is the inlined call site instanceof cache. The two occurrences of the
3059 // hole value will be patched to the last map/result pair generated by the
3060 // stub.
3061 __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3062 {
3063 // Below we use Factory::the_hole_value() on purpose instead of loading from
3064 // the root array to force relocation and later be able to patch with a
3065 // new value.
3066 InstructionAccurateScope scope(masm(), 5);
3067 __ bind(&map_check);
3068 // Will be patched with the cached map.
3069 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
3070 __ ldr(scratch, Immediate(cell));
3071 __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
3072 __ cmp(map, scratch);
3073 __ b(&cache_miss, ne);
3074 // The address of this instruction is computed relative to the map check
3075 // above, so check the size of the code generated.
3076 DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
3077 // Will be patched with the cached result.
3078 __ ldr(result, Immediate(factory()->the_hole_value()));
3079 }
3080 __ B(&done);
3082 // The inlined call site cache did not match.
3083 // Check null and string before calling the deferred code.
3084 __ Bind(&cache_miss);
3085 // Compute the address of the map check. It must not be clobbered until the
3086 // InstanceOfStub has used it.
3087 __ Adr(map_check_site, &map_check);
3088 // Null is not instance of anything.
3089 __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
3091 // String values are not instances of anything.
3092 // Return false if the object is a string. Otherwise, jump to the deferred
3093 // code.
3094 // Note that we can't jump directly to deferred code from
3095 // IsObjectJSStringType, because it uses tbz for the jump and the deferred
3096 // code can be out of range.
3097 __ IsObjectJSStringType(object, scratch, NULL, &return_false);
3098 __ B(deferred->entry());
3100 __ Bind(&return_false);
3101 __ LoadRoot(result, Heap::kFalseValueRootIndex);
3103 // Here result is either true or false.
3104 __ Bind(deferred->exit());
3105 __ Bind(&done);
3106 }
3109 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3110 Register result = ToRegister(instr->result());
3111 DCHECK(result.Is(x0)); // InstanceofStub returns its result in x0.
3112 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3113 flags = static_cast<InstanceofStub::Flags>(
3114 flags | InstanceofStub::kArgsInRegisters);
3115 flags = static_cast<InstanceofStub::Flags>(
3116 flags | InstanceofStub::kReturnTrueFalseObject);
3117 flags = static_cast<InstanceofStub::Flags>(
3118 flags | InstanceofStub::kCallSiteInlineCheck);
3120 PushSafepointRegistersScope scope(this);
3121 LoadContextFromDeferred(instr->context());
3123 // Prepare InstanceofStub arguments.
3124 DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
3125 __ LoadObject(InstanceofStub::right(), instr->function());
3127 InstanceofStub stub(isolate(), flags);
3128 CallCodeGeneric(stub.GetCode(),
3129 RelocInfo::CODE_TARGET,
3130 instr,
3131 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3132 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3133 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3135 // Put the result value into the result register slot.
3136 __ StoreToSafepointRegisterSlot(result, result);
3140 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3141 DoGap(instr);
3142 }
3145 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3146 Register value = ToRegister32(instr->value());
3147 DoubleRegister result = ToDoubleRegister(instr->result());
3148 __ Scvtf(result, value);
3152 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3153 DCHECK(ToRegister(instr->context()).is(cp));
3154 // The function is required to be in x1.
3155 DCHECK(ToRegister(instr->function()).is(x1));
3156 DCHECK(instr->HasPointerMap());
3158 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3159 if (known_function.is_null()) {
3160 LPointerMap* pointers = instr->pointer_map();
3161 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3162 ParameterCount count(instr->arity());
3163 __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3164 } else {
3165 CallKnownFunction(known_function,
3166 instr->hydrogen()->formal_parameter_count(),
3167 instr->arity(), instr);
3172 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
3173 Register temp1 = ToRegister(instr->temp1());
3174 Register temp2 = ToRegister(instr->temp2());
3176 // Get the frame pointer for the calling frame.
3177 __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3179 // Skip the arguments adaptor frame if it exists.
3180 Label check_frame_marker;
3181 __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
3182 __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3183 __ B(ne, &check_frame_marker);
3184 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
3186 // Check the marker in the calling frame.
3187 __ Bind(&check_frame_marker);
3188 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
3190 EmitCompareAndBranch(
3191 instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3195 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
3196 Label* is_object = instr->TrueLabel(chunk_);
3197 Label* is_not_object = instr->FalseLabel(chunk_);
3198 Register value = ToRegister(instr->value());
3199 Register map = ToRegister(instr->temp1());
3200 Register scratch = ToRegister(instr->temp2());
3202 __ JumpIfSmi(value, is_not_object);
3203 __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
3205 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3207 // Check for undetectable objects.
3208 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
3209 __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
3211 // Check that instance type is in object type range.
3212 __ IsInstanceJSObjectType(map, scratch, NULL);
3213 // Flags have been updated by IsInstanceJSObjectType. We can now test the
3214 // flags for "le" condition to check if the object's type is a valid
3215 // JS object type.
3216 EmitBranch(instr, le);
3220 Condition LCodeGen::EmitIsString(Register input,
3221 Register temp1,
3222 Label* is_not_string,
3223 SmiCheck check_needed = INLINE_SMI_CHECK) {
3224 if (check_needed == INLINE_SMI_CHECK) {
3225 __ JumpIfSmi(input, is_not_string);
3226 }
3227 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
3229 return lt;
3230 }
3233 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
3234 Register val = ToRegister(instr->value());
3235 Register scratch = ToRegister(instr->temp());
3237 SmiCheck check_needed =
3238 instr->hydrogen()->value()->type().IsHeapObject()
3239 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3240 Condition true_cond =
3241 EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
3243 EmitBranch(instr, true_cond);
3247 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
3248 Register value = ToRegister(instr->value());
3249 STATIC_ASSERT(kSmiTag == 0);
3250 EmitTestAndBranch(instr, eq, value, kSmiTagMask);
3254 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
3255 Register input = ToRegister(instr->value());
3256 Register temp = ToRegister(instr->temp());
3258 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
3259 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3260 }
3261 __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
3262 __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3264 EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
3268 static const char* LabelType(LLabel* label) {
3269 if (label->is_loop_header()) return " (loop header)";
3270 if (label->is_osr_entry()) return " (OSR entry)";
3271 return "";
3272 }
3275 void LCodeGen::DoLabel(LLabel* label) {
3276 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
3277 current_instruction_,
3278 label->hydrogen_value()->id(),
3279 label->block_id(),
3280 LabelType(label));
3282 __ Bind(label->label());
3283 current_block_ = label->block_id();
3284 DoGap(label);
3285 }
3288 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3289 Register context = ToRegister(instr->context());
3290 Register result = ToRegister(instr->result());
3291 __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
3292 if (instr->hydrogen()->RequiresHoleCheck()) {
3293 if (instr->hydrogen()->DeoptimizesOnHole()) {
3294 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
3295 Deoptimizer::kHole);
3296 } else {
3297 Label not_the_hole;
3298 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
3299 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3300 __ Bind(&not_the_hole);
3301 }
3302 }
3303 }
3306 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3307 Register function = ToRegister(instr->function());
3308 Register result = ToRegister(instr->result());
3309 Register temp = ToRegister(instr->temp());
3311 // Get the prototype or initial map from the function.
3312 __ Ldr(result, FieldMemOperand(function,
3313 JSFunction::kPrototypeOrInitialMapOffset));
3315 // Check that the function has a prototype or an initial map.
3316 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
3317 Deoptimizer::kHole);
3319 // If the function does not have an initial map, we're done.
3320 Label done;
3321 __ CompareObjectType(result, temp, temp, MAP_TYPE);
3322 __ B(ne, &done);
3324 // Get the prototype from the initial map.
3325 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3327 // All done.
3328 __ Bind(&done);
3329 }
3332 template <class T>
3333 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
3334 Register vector_register = ToRegister(instr->temp_vector());
3335 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
3336 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
3337 DCHECK(slot_register.is(x0));
3339 AllowDeferredHandleDereference vector_structure_check;
3340 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
3341 __ Mov(vector_register, vector);
3342 // No need to allocate this register.
3343 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
3344 int index = vector->GetIndex(slot);
3345 __ Mov(slot_register, Smi::FromInt(index));
3349 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3350 DCHECK(ToRegister(instr->context()).is(cp));
3351 DCHECK(ToRegister(instr->global_object())
3352 .is(LoadDescriptor::ReceiverRegister()));
3353 DCHECK(ToRegister(instr->result()).Is(x0));
3354 __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3355 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3356 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3357 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
3358 PREMONOMORPHIC).code();
3359 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3363 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
3364 Register key,
3365 Register base,
3366 Register scratch,
3367 bool key_is_smi,
3368 bool key_is_constant,
3369 int constant_key,
3370 ElementsKind elements_kind,
3371 int base_offset) {
3372 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3374 if (key_is_constant) {
3375 int key_offset = constant_key << element_size_shift;
3376 return MemOperand(base, key_offset + base_offset);
3377 }
3379 if (key_is_smi) {
3380 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3381 return MemOperand(scratch, base_offset);
3382 }
3384 if (base_offset == 0) {
3385 return MemOperand(base, key, SXTW, element_size_shift);
3386 }
3388 DCHECK(!AreAliased(scratch, key));
3389 __ Add(scratch, base, base_offset);
3390 return MemOperand(scratch, key, SXTW, element_size_shift);
3391 }
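// Editor's note (illustrative addressing examples for the cases above): with
// element_size_shift = 3 (64-bit elements), a constant key of 2 and a
// base_offset of 16 fold into MemOperand(base, 2 * 8 + 16); the final
// register case instead produces base + (sign-extended key << 3) via the
// MemOperand(scratch, key, SXTW, 3) form.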
3394 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3395 Register ext_ptr = ToRegister(instr->elements());
3396 Register scratch;
3397 ElementsKind elements_kind = instr->elements_kind();
3399 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3400 bool key_is_constant = instr->key()->IsConstantOperand();
3401 Register key = no_reg;
3402 int constant_key = 0;
3403 if (key_is_constant) {
3404 DCHECK(instr->temp() == NULL);
3405 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3406 if (constant_key & 0xf0000000) {
3407 Abort(kArrayIndexConstantValueTooBig);
3408 }
3409 } else {
3410 scratch = ToRegister(instr->temp());
3411 key = ToRegister(instr->key());
3412 }
3414 MemOperand mem_op =
3415 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3416 key_is_constant, constant_key,
3417 elements_kind,
3418 instr->base_offset());
3420 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
3421 (elements_kind == FLOAT32_ELEMENTS)) {
3422 DoubleRegister result = ToDoubleRegister(instr->result());
3423 __ Ldr(result.S(), mem_op);
3424 __ Fcvt(result, result.S());
3425 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
3426 (elements_kind == FLOAT64_ELEMENTS)) {
3427 DoubleRegister result = ToDoubleRegister(instr->result());
3428 __ Ldr(result, mem_op);
3429 } else {
3430 Register result = ToRegister(instr->result());
3432 switch (elements_kind) {
3433 case EXTERNAL_INT8_ELEMENTS:
3434 case INT8_ELEMENTS:
3435 __ Ldrsb(result, mem_op);
3436 break;
3437 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3438 case EXTERNAL_UINT8_ELEMENTS:
3439 case UINT8_ELEMENTS:
3440 case UINT8_CLAMPED_ELEMENTS:
3441 __ Ldrb(result, mem_op);
3442 break;
3443 case EXTERNAL_INT16_ELEMENTS:
3444 case INT16_ELEMENTS:
3445 __ Ldrsh(result, mem_op);
3446 break;
3447 case EXTERNAL_UINT16_ELEMENTS:
3448 case UINT16_ELEMENTS:
3449 __ Ldrh(result, mem_op);
3450 break;
3451 case EXTERNAL_INT32_ELEMENTS:
3452 case INT32_ELEMENTS:
3453 __ Ldrsw(result, mem_op);
3454 break;
3455 case EXTERNAL_UINT32_ELEMENTS:
3456 case UINT32_ELEMENTS:
3457 __ Ldr(result.W(), mem_op);
3458 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3459 // Deopt if value > 0x80000000.
3460 __ Tst(result, 0xFFFFFFFF80000000);
3461 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
3462 }
3463 break;
3464 case FLOAT32_ELEMENTS:
3465 case FLOAT64_ELEMENTS:
3466 case EXTERNAL_FLOAT32_ELEMENTS:
3467 case EXTERNAL_FLOAT64_ELEMENTS:
3468 case FAST_HOLEY_DOUBLE_ELEMENTS:
3469 case FAST_HOLEY_ELEMENTS:
3470 case FAST_HOLEY_SMI_ELEMENTS:
3471 case FAST_DOUBLE_ELEMENTS:
3472 case FAST_ELEMENTS:
3473 case FAST_SMI_ELEMENTS:
3474 case DICTIONARY_ELEMENTS:
3475 case SLOPPY_ARGUMENTS_ELEMENTS:
3476 UNREACHABLE();
3477 }
3478 }
3479 }
3483 MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
3484 Register elements,
3485 Register key,
3486 bool key_is_tagged,
3487 ElementsKind elements_kind,
3488 Representation representation,
3489 int base_offset) {
3490 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3491 STATIC_ASSERT(kSmiTag == 0);
3492 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3494 // Even though the HLoad/StoreKeyed instructions force the input
3495 // representation for the key to be an integer, the input gets replaced during
3496 // bounds check elimination with the index argument to the bounds check, which
3497 // can be tagged, so that case must be handled here, too.
3498 if (key_is_tagged) {
3499 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3500 if (representation.IsInteger32()) {
3501 DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3502 // Read or write only the smi payload in the case of fast smi arrays.
3503 return UntagSmiMemOperand(base, base_offset);
3504 } else {
3505 return MemOperand(base, base_offset);
3506 }
3507 } else {
3508 // Sign extend key because it could be a 32-bit negative value or contain
3509 // garbage in the top 32-bits. The address computation happens in 64-bit.
3510 DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
3511 if (representation.IsInteger32()) {
3512 DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3513 // Read or write only the smi payload in the case of fast smi arrays.
3514 __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3515 return UntagSmiMemOperand(base, base_offset);
3516 } else {
3517 __ Add(base, elements, base_offset);
3518 return MemOperand(base, key, SXTW, element_size_shift);
3519 }
3520 }
3521 }
3524 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3525 Register elements = ToRegister(instr->elements());
3526 DoubleRegister result = ToDoubleRegister(instr->result());
3527 MemOperand mem_op;
3529 if (instr->key()->IsConstantOperand()) {
3530 DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
3531 (instr->temp() == NULL));
3533 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3534 if (constant_key & 0xf0000000) {
3535 Abort(kArrayIndexConstantValueTooBig);
3536 }
3537 int offset = instr->base_offset() + constant_key * kDoubleSize;
3538 mem_op = MemOperand(elements, offset);
3539 } else {
3540 Register load_base = ToRegister(instr->temp());
3541 Register key = ToRegister(instr->key());
3542 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3543 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3544 instr->hydrogen()->elements_kind(),
3545 instr->hydrogen()->representation(),
3546 instr->base_offset());
3547 }
3549 __ Ldr(result, mem_op);
3551 if (instr->hydrogen()->RequiresHoleCheck()) {
3552 Register scratch = ToRegister(instr->temp());
3553 __ Fmov(scratch, result);
3554 __ Eor(scratch, scratch, kHoleNanInt64);
3555 DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
3556 }
3557 }
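// Editor's note (illustrative): the hole is one specific quiet-NaN bit
// pattern, kHoleNanInt64. Fmov moves the raw double bits to a core register
// and Eor produces zero only for that exact pattern; NaNs produced by
// ordinary arithmetic have different payload bits and pass the check without
// deopting.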
3560 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3561 Register elements = ToRegister(instr->elements());
3562 Register result = ToRegister(instr->result());
3563 MemOperand mem_op;
3565 Representation representation = instr->hydrogen()->representation();
3566 if (instr->key()->IsConstantOperand()) {
3567 DCHECK(instr->temp() == NULL);
3568 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3569 int offset = instr->base_offset() +
3570 ToInteger32(const_operand) * kPointerSize;
3571 if (representation.IsInteger32()) {
3572 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
3573 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3574 STATIC_ASSERT(kSmiTag == 0);
3575 mem_op = UntagSmiMemOperand(elements, offset);
3576 } else {
3577 mem_op = MemOperand(elements, offset);
3578 }
3579 } else {
3580 Register load_base = ToRegister(instr->temp());
3581 Register key = ToRegister(instr->key());
3582 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3584 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3585 instr->hydrogen()->elements_kind(),
3586 representation, instr->base_offset());
3587 }
3589 __ Load(result, mem_op, representation);
3591 if (instr->hydrogen()->RequiresHoleCheck()) {
3592 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3593 DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
3594 } else {
3595 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
3596 Deoptimizer::kHole);
3597 }
3598 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3599 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3600 Label done;
3601 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3602 __ B(ne, &done);
3603 if (info()->IsStub()) {
3604 // A stub can safely convert the hole to undefined only if the array
3605 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3606 // it needs to bail out.
3607 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3608 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3609 __ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
3610 DeoptimizeIf(ne, instr, Deoptimizer::kHole);
3611 }
3612 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3613 __ Bind(&done);
3614 }
3615 }
3618 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3619 DCHECK(ToRegister(instr->context()).is(cp));
3620 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3621 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3623 if (instr->hydrogen()->HasVectorAndSlot()) {
3624 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3625 }
3627 Handle<Code> ic =
3628 CodeFactory::KeyedLoadICInOptimizedCode(
3629 isolate(), instr->hydrogen()->initialization_state()).code();
3630 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3632 DCHECK(ToRegister(instr->result()).Is(x0));
3636 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3637 HObjectAccess access = instr->hydrogen()->access();
3638 int offset = access.offset();
3639 Register object = ToRegister(instr->object());
3641 if (access.IsExternalMemory()) {
3642 Register result = ToRegister(instr->result());
3643 __ Load(result, MemOperand(object, offset), access.representation());
3644 return;
3645 }
3647 if (instr->hydrogen()->representation().IsDouble()) {
3648 DCHECK(access.IsInobject());
3649 FPRegister result = ToDoubleRegister(instr->result());
3650 __ Ldr(result, FieldMemOperand(object, offset));
3651 return;
3652 }
3654 Register result = ToRegister(instr->result());
3655 Register source;
3656 if (access.IsInobject()) {
3657 source = object;
3658 } else {
3659 // Load the properties array, using result as a scratch register.
3660 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3661 source = result;
3662 }
3664 if (access.representation().IsSmi() &&
3665 instr->hydrogen()->representation().IsInteger32()) {
3666 // Read int value directly from upper half of the smi.
3667 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3668 STATIC_ASSERT(kSmiTag == 0);
3669 __ Load(result, UntagSmiFieldMemOperand(source, offset),
3670 Representation::Integer32());
3672 __ Load(result, FieldMemOperand(source, offset), access.representation());

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // LoadIC expects name and receiver in registers.
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);

  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
      isolate(), NOT_CONTEXTUAL,
      instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  DCHECK(ToRegister(instr->result()).is(x0));
}

void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLengthSmi(result, map);
}

void LCodeGen::DoMathAbs(LMathAbs* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Fabs(result, input);
  } else if (r.IsSmi() || r.IsInteger32()) {
    Register input = r.IsSmi() ? ToRegister(instr->value())
                               : ToRegister32(instr->value());
    Register result = r.IsSmi() ? ToRegister(instr->result())
                                : ToRegister32(instr->result());
    __ Abs(result, input);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
}

void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                                       Label* exit,
                                       Label* allocation_entry) {
  // Handle the tricky cases of MathAbsTagged:
  //  - HeapNumber inputs.
  //    - Negative inputs produce a positive result, so a new HeapNumber is
  //      allocated to hold it.
  //    - Positive inputs are returned as-is, since there is no need to
  //      allocate a new HeapNumber for the result.
  //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit
  //    a smi. In this case, the inline code sets the result and jumps directly
  //    to the allocation_entry label.
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());

  Label runtime_allocation;

  // Deoptimize if the input is not a HeapNumber.
  DeoptimizeIfNotHeapNumber(input, instr);

  // If the argument is positive, we can return it as-is, without any need to
  // allocate a new HeapNumber for the result. We have to do this in integer
  // registers (rather than with fabs) because we need to be able to
  // distinguish the two zeroes.
  __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
  __ Mov(result, input);
  __ Tbz(result_bits, kXSignBit, exit);

  // Calculate abs(input) by clearing the sign bit.
  __ Bic(result_bits, result_bits, kXSignMask);

  // Allocate a new HeapNumber to hold the result.
  //  result_bits   The bit representation of the (double) result.
  __ Bind(allocation_entry);
  __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
  // The inline (non-deferred) code will store result_bits into result.
  __ B(exit);

  __ Bind(&runtime_allocation);
  if (FLAG_debug_code) {
    // Because result is in the pointer map, we need to make sure it has a
    // valid tagged value before we call the runtime. We speculatively set it
    // to the input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it
    // should already be valid.
    Label result_ok;
    Register input = ToRegister(instr->value());
    __ JumpIfSmi(result, &result_ok);
    __ Cmp(input, result);
    __ Assert(eq, kUnexpectedValue);
    __ Bind(&result_ok);
  }

  { PushSafepointRegistersScope scope(this);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    __ StoreToSafepointRegisterSlot(x0, result);
  }
  // The inline (non-deferred) code will store result_bits into result.
}

void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
  // Class for deferred case.
  class DeferredMathAbsTagged: public LDeferredCode {
   public:
    DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTagged(instr_, exit(),
                                         allocation_entry());
    }
    virtual LInstruction* instr() { return instr_; }
    Label* allocation_entry() { return &allocation; }
   private:
    LMathAbsTagged* instr_;
    Label allocation;
  };

  // TODO(jbramley): The early-exit mechanism would skip the new frame handling
  // in GenerateDeferredCode. Tidy this up.
  DCHECK(!NeedsDeferredFrame());

  DeferredMathAbsTagged* deferred =
      new(zone()) DeferredMathAbsTagged(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
         instr->hydrogen()->value()->representation().IsSmi());
  Register input = ToRegister(instr->value());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());
  Label done;

  // Handle smis inline.
  // We can treat smis as 64-bit integers, since the (low-order) tag bits will
  // never get set by the negation. This is therefore the same as the Integer32
  // case in DoMathAbs, except that it operates on 64-bit values.
  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));

  __ JumpIfNotSmi(input, deferred->entry());

  __ Abs(result, input, NULL, &done);

  // The result is the magnitude (abs) of the smallest value a smi can
  // represent, encoded as a double.
  __ Mov(result_bits, double_to_rawbits(0x80000000));
  __ B(deferred->allocation_entry());

  __ Bind(deferred->exit());
  __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));

  __ Bind(&done);
}
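
// Illustrative example for the smi path above (not part of the emitted code,
// assuming MacroAssembler::Abs branches to its fourth argument when the
// result is representable): the only smi whose absolute value is not a smi is
// -0x80000000. `__ Abs(result, input, NULL, &done)` branches to `done` for
// every other input; for -0x80000000 it falls through, and the inline code
// materializes the raw bits of the double +2147483648.0
// (double_to_rawbits(0x80000000)) so the deferred path can box it in a fresh
// HeapNumber.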

void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
  DoubleRegister double_temp2 = double_scratch();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register temp3 = ToRegister(instr->temp3());

  MathExpGenerator::EmitMathExp(masm(), input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
}


void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ Frintm(result, input);
}

void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
  }

  __ Fcvtms(result, input);

  // Check that the result fits into a 32-bit integer.
  //  - The result did not overflow.
  __ Cmp(result, Operand(result, SXTW));
  //  - The input was not NaN.
  __ Fccmp(input, input, NoFlag, eq);
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
}
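
// Illustrative example (not part of the emitted code): Fcvtms writes a
// 64-bit result, so `Cmp(result, Operand(result, SXTW))` checks that
// sign-extending the low 32 bits reproduces the full value. For an input of
// 2^31 the conversion yields 0x0000000080000000; sign-extending its low word
// gives 0xffffffff80000000, the compare sets ne, and the code deopts with
// kLostPrecisionOrNaN.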

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Mov(result, dividend, kDiscardForSameWReg);
    return;
  }

  // If the divisor is positive, things are easy: there can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  __ Asr(result, result, shift);
  __ Csel(result, result, kMinInt / divisor, vc);
}
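
// Illustrative example (not part of the emitted code), for divisor == -2:
//   dividend == 7: Negs gives -7, and Asr by 1 gives -4 == floor(7 / -2).
//   dividend == kMinInt: Negs overflows and sets V, so the Csel above picks
//   the precomputed constant kMinInt / -2 instead of the shifted result.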

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: we need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(temp, dividend, result));
  Label needs_adjustment, done;
  __ Cmp(dividend, 0);
  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ B(&done);
  __ Bind(&needs_adjustment);
  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ Sub(result, result, Operand(1));
  __ Bind(&done);
}
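
// Illustrative example (not part of the emitted code), for divisor == 2 and
// dividend == -7: truncating division alone would give -3, but the floor is
// -4. The adjustment path computes temp = -7 + 1 = -6, truncates -6 / 2 to
// -3, then subtracts 1 to produce -4. In general, trunc((a +/- 1) / b) - 1
// equals floor(a / b) whenever a and b have opposite signs.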

// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register remainder = ToRegister32(instr->temp());
  Register result = ToRegister32(instr->result());

  // This can't cause an exception on ARM, so we can speculatively execute it
  // now.
  __ Sdiv(result, dividend, divisor);

  // Check for x / 0.
  DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // The V flag will be set iff dividend == kMinInt.
    __ Cmp(dividend, 1);
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);
    __ Ccmp(dividend, 0, ZFlag, mi);
    // "divisor" can't be zero here because the code would have already been
    // deoptimized. The Z flag is set only if (divisor < 0) and
    // (dividend == 0). In this case we need to deoptimize to produce a -0.
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  Label done;
  // If both operands have the same sign then we are done.
  __ Eor(remainder, dividend, divisor);
  __ Tbz(remainder, kWSignBit, &done);

  // Check if the result needs to be corrected.
  __ Msub(remainder, result, divisor, dividend);
  __ Cbz(remainder, &done);
  __ Sub(result, result, 1);

  __ Bind(&done);
}
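
// Illustrative example (not part of the emitted code): for dividend == -7
// and divisor == 2, Sdiv truncates to -3. The signs differ (the Eor/Tbz test
// falls through) and Msub computes the remainder -7 - (-3 * 2) == -1, which
// is non-zero, so the quotient is corrected to -4 == floor(-7 / 2).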

void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToDoubleRegister(instr->value()).is(d0));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister32(instr->value());
  Register result = ToRegister32(instr->result());
  __ Clz(result, input);
}

void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  Label done;

  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
  //  Math.pow(-Infinity, 0.5) == +Infinity
  //  Math.pow(-0.0, 0.5) == +0.0

  // Catch -infinity inputs first.
  // TODO(jbramley): A constant infinity register would be helpful here.
  __ Fmov(double_scratch(), kFP64NegativeInfinity);
  __ Fcmp(double_scratch(), input);
  __ Fabs(result, input);
  __ B(&done, eq);

  // Add +0.0 to convert -0.0 to +0.0.
  __ Fadd(double_scratch(), input, fp_zero);
  __ Fsqrt(result, double_scratch());

  __ Bind(&done);
}
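
// Illustrative IEEE-754 corner cases motivating the code above (not emitted):
//   fsqrt(-0.0)      == -0.0, but Math.pow(-0.0, 0.5)      == +0.0
//   fsqrt(-Infinity) == NaN,  but Math.pow(-Infinity, 0.5) == +Infinity
// Adding +0.0 first turns -0.0 into +0.0, and the explicit -infinity check
// returns Fabs(-Infinity) == +Infinity before the square root is taken.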

void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  Register integer_exponent = MathPowIntegerDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(!exponent_type.IsInteger32() ||
         ToRegister(instr->right()).is(integer_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d0));
  DCHECK(ToDoubleRegister(instr->result()).is(d0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
    __ Bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    // Ensure the integer exponent has no garbage in its top 32 bits, as
    // MathPowStub supports large integer exponents.
    __ Sxtw(integer_exponent, integer_exponent);
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister scratch_d = double_scratch();

  DCHECK(!AreAliased(input, result, scratch_d));

  Label done;

  __ Frinta(result, input);
  __ Fcmp(input, 0.0);
  __ Fccmp(result, input, ZFlag, lt);
  // The result is correct if the input was in [-0, +infinity], or was a
  // negative integral value.
  __ B(eq, &done);

  // Here the input is negative, non-integral, with an exponent lower than 52.
  // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
  // case, so we can safely add 0.5.
  __ Fmov(scratch_d, 0.5);
  __ Fadd(result, input, scratch_d);
  __ Frintm(result, result);
  // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
  __ Fabs(result, result);
  __ Fneg(result, result);

  __ Bind(&done);
}
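
// Illustrative example (not part of the emitted code): Frinta rounds to
// nearest with ties away from zero, so Frinta(-2.5) == -3.0, while
// Math.round(-2.5) must be -2.0. The Fcmp/Fccmp pair detects exactly this
// case (negative, non-integral input), and the fallback computes
// Frintm(-2.5 + 0.5) == -2.0 instead.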

void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister temp = ToDoubleRegister(instr->temp1());
  DoubleRegister dot_five = double_scratch();
  Register result = ToRegister(instr->result());
  Label done;

  // Math.round() rounds to the nearest integer, with ties going towards
  // +infinity. This does not match any IEEE-754 rounding mode.
  //  - Infinities and NaNs are propagated unchanged, but cause deopts because
  //    they can't be represented as integers.
  //  - The sign of the result is the same as the sign of the input. This means
  //    that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
  //    result of -0.0.

  // Add 0.5 and round towards -infinity.
  __ Fmov(dot_five, 0.5);
  __ Fadd(temp, input, dot_five);
  __ Fcvtms(result, temp);

  // The result is correct if:
  //  result is not 0, as the input could be NaN or [-0.5, -0.0].
  //  result is not 1, as 0.499...94 will wrongly map to 1.
  //  result fits in 32 bits.
  __ Cmp(result, Operand(result.W(), SXTW));
  __ Ccmp(result, 1, ZFlag, eq);
  __ B(hi, &done);

  // At this point, we have to handle possible inputs of NaN or numbers in the
  // range [-0.5, 1.5[, or numbers larger than 32 bits.

  // Deoptimize if the result > 1, as it must be larger than 32 bits.
  __ Cmp(result, 1);
  DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);

  // Deoptimize for negative inputs, which at this point are only numbers in
  // the range [-0.5, -0.0].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Fmov(result, input);
    DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
  }

  // Deoptimize if the input was NaN.
  __ Fcmp(input, dot_five);
  DeoptimizeIf(vs, instr, Deoptimizer::kNaN);

  // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
  // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
  // else 0; we avoid dealing with 0.499...94 directly.
  __ Cset(result, ge);
  __ Bind(&done);
}
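
// Illustrative example (not part of the emitted code): for the input
// 0.49999999999999994 (the largest double below 0.5), adding 0.5 rounds to
// exactly 1.0 in double arithmetic, so Fcvtms alone would wrongly produce 1.
// That is why `result == 1` is rejected above and re-handled by the final
// Fcmp/Cset sequence, which compares the original input against 0.5 and
// correctly yields 0.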

void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fcvt(result.S(), input);
  __ Fcvt(result, result.S());
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fsqrt(result, input);
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  HMathMinMax::Operation op = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsInteger32()) {
    Register result = ToRegister32(instr->result());
    Register left = ToRegister32(instr->left());
    Operand right = ToOperand32(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else if (instr->hydrogen()->representation().IsSmi()) {
    Register result = ToRegister(instr->result());
    Register left = ToRegister(instr->left());
    Operand right = ToOperand(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    DoubleRegister result = ToDoubleRegister(instr->result());
    DoubleRegister left = ToDoubleRegister(instr->left());
    DoubleRegister right = ToDoubleRegister(instr->right());

    if (op == HMathMinMax::kMathMax) {
      __ Fmax(result, left, right);
    } else {
      DCHECK(op == HMathMinMax::kMathMin);
      __ Fmin(result, left, right);
    }
  }
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister32(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ Neg(dividend, dividend);
    __ And(dividend, dividend, mask);
    __ Negs(dividend, dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ B(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, mask);
  __ bind(&done);
}
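
// Illustrative example (not part of the emitted code), for divisor == 4
// (mask == 3): JavaScript requires -7 % 4 == -3, i.e. the result takes the
// sign of the dividend. The negative path computes Neg(-7) == 7,
// And(7, 3) == 3, Negs(3) == -3. A zero result on this path means the
// dividend was a negative multiple of the divisor, so the true result is -0,
// hence the minus-zero deopt.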

void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  Register temp = ToRegister32(instr->temp());
  DCHECK(!AreAliased(dividend, result, temp));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Sxtw(dividend.X(), dividend);
  __ Mov(temp, Abs(divisor));
  __ Smsubl(result.X(), result, temp, dividend.X());

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Cbnz(result, &remainder_not_zero);
    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}
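
// Illustrative identity behind the code above (not part of the emitted
// code): after TruncatingDiv computes quotient == trunc(dividend /
// |divisor|), Smsubl evaluates result == dividend - quotient * |divisor|,
// which is the remainder. E.g. dividend == -7, divisor == 3: quotient == -2,
// result == -7 - (-2 * 3) == -1, matching JavaScript's -7 % 3 == -1.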

void LCodeGen::DoModI(LModI* instr) {
  Register dividend = ToRegister32(instr->left());
  Register divisor = ToRegister32(instr->right());
  Register result = ToRegister32(instr->result());
  Label done;

  // modulo = dividend - quotient * divisor
  __ Sdiv(result, dividend, divisor);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
  }
  __ Msub(result, result, divisor, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cbnz(result, &done);
    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
  }
  __ Bind(&done);
}

void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
  DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
  bool is_smi = instr->hydrogen()->representation().IsSmi();
  Register result =
      is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
  Register left =
      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
  int32_t right = ToInteger32(instr->right());
  DCHECK((right > -kMaxInt) && (right < kMaxInt));

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero) {
    if (right < 0) {
      // The result is -0 if right is negative and left is zero.
      DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
    } else if (right == 0) {
      // The result is -0 if right is zero and left is negative.
      DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
    }
  }

  switch (right) {
    // Cases which can detect overflow.
    case -1:
      if (can_overflow) {
        // Only 0x80000000 can overflow here.
        __ Negs(result, left);
        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
      } else {
        __ Neg(result, left);
      }
      break;
    case 0:
      // This case can never overflow.
      __ Mov(result, 0);
      break;
    case 1:
      // This case can never overflow.
      __ Mov(result, left, kDiscardForSameWReg);
      break;
    case 2:
      if (can_overflow) {
        __ Adds(result, left, left);
        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
      } else {
        __ Add(result, left, left);
      }
      break;

    default: {
      // Multiplication by constant powers of two (and some related values)
      // can be done efficiently with shifted operands.
      int32_t right_abs = Abs(right);

      if (base::bits::IsPowerOfTwo32(right_abs)) {
        int right_log2 = WhichPowerOf2(right_abs);

        if (can_overflow) {
          Register scratch = result;
          DCHECK(!AreAliased(scratch, left));
          __ Cls(scratch, left);
          __ Cmp(scratch, right_log2);
          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
        }

        if (right >= 0) {
          // result = left << log2(right)
          __ Lsl(result, left, right_log2);
        } else {
          // result = -left << log2(-right)
          if (can_overflow) {
            __ Negs(result, Operand(left, LSL, right_log2));
            DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
          } else {
            __ Neg(result, Operand(left, LSL, right_log2));
          }
        }
      } else {
        // For the following cases, we could perform a conservative overflow
        // check with CLS as above. However the few cycles saved are likely
        // not worth the risk of deoptimizing more often than required.
        DCHECK(!can_overflow);

        if (right >= 0) {
          if (base::bits::IsPowerOfTwo32(right - 1)) {
            // result = left + left << log2(right - 1)
            __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
          } else if (base::bits::IsPowerOfTwo32(right + 1)) {
            // result = -left + left << log2(right + 1)
            __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
            __ Neg(result, result);
          } else {
            UNREACHABLE();
          }
        } else {
          if (base::bits::IsPowerOfTwo32(-right + 1)) {
            // result = left - left << log2(-right + 1)
            __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
          } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
            // result = -left - left << log2(-right - 1)
            __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
            __ Neg(result, result);
          } else {
            UNREACHABLE();
          }
        }
      }
      break;
    }
  }
}
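
// Illustrative examples of the shift decompositions above (not emitted):
//   right == 4  (power of two):       result = left << 2
//   right == 5  (right - 1 is 4):     result = left + (left << 2)
//   right == 7  (right + 1 is 8):     result = -(left - (left << 3))
//   right == -3 (-right + 1 is 4):    result = left - (left << 2) == left * -3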

void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Register right = ToRegister32(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }

  if (can_overflow) {
    __ Smull(result.X(), left, right);
    __ Cmp(result.X(), Operand(result, SXTW));
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
  } else {
    __ Mul(result, left, right);
  }
}

void LCodeGen::DoMulS(LMulS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }

  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
  if (can_overflow) {
    __ Smulh(result, left, right);
    __ Cmp(result, Operand(result.W(), SXTW));
    __ SmiTag(result);
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
  } else {
    if (AreAliased(result, left, right)) {
      // All three registers are the same: half untag the input and then
      // multiply, giving a tagged result.
      STATIC_ASSERT((kSmiShift % 2) == 0);
      __ Asr(result, left, kSmiShift / 2);
      __ Mul(result, result, result);
    } else if (result.Is(left) && !left.Is(right)) {
      // Registers result and left alias, right is distinct: untag left into
      // result, and then multiply by right, giving a tagged result.
      __ SmiUntag(result, left);
      __ Mul(result, result, right);
    } else {
      DCHECK(!left.Is(result));
      // Registers result and right alias, left is distinct, or all registers
      // are distinct: untag right into result, and then multiply by left,
      // giving a tagged result.
      __ SmiUntag(result, right);
      __ Mul(result, left, result);
    }
  }
}
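
// Illustrative example for the overflow path above (not emitted): smis keep
// their 32-bit value in the top half of an X register, so left == a << 32
// and right == b << 32. The full 128-bit product is (a * b) << 64, and Smulh
// returns its high 64 bits, i.e. exactly a * b untagged. The Cmp against the
// sign-extended W view checks that a * b fits in 32 bits, and SmiTag shifts
// it back up to re-tag the result.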

void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = ToRegister(instr->result());
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  // NumberTagU and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}

void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
  } else {
    __ B(deferred->entry());
  }
  __ Bind(deferred->exit());
  __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
}

void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
                                    LOperand* value,
                                    LOperand* temp1,
                                    LOperand* temp2) {
  Label slow, convert_and_store;
  Register src = ToRegister32(value);
  Register dst = ToRegister(instr->result());
  Register scratch1 = ToRegister(temp1);

  if (FLAG_inline_new) {
    Register scratch2 = ToRegister(temp2);
    __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
    __ B(&convert_and_store);
  }

  // Slow case: call the runtime system to do the number allocation.
  __ Bind(&slow);
  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ Mov(dst, 0);
  {
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagU and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, dst);
  }

  // Convert the number to floating point and store it in the newly allocated
  // heap number.
  __ Bind(&convert_and_store);
  DoubleRegister dbl_scratch = double_scratch();
  __ Ucvtf(dbl_scratch, src);
  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}

void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU: public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagU(instr_,
                                      instr_->value(),
                                      instr_->temp1(),
                                      instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register value = ToRegister32(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Cmp(value, Smi::kMaxValue);
  __ B(hi, deferred->entry());
  __ SmiTag(result, value.X());
  __ Bind(deferred->exit());
}

void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  DoubleRegister result = ToDoubleRegister(instr->result());
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();

  Label done, load_smi;

  // Work out what untag mode we're working with.
  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    __ JumpIfSmi(input, &load_smi);

    Label convert_undefined;

    // Heap number map check.
    if (can_convert_undefined_to_nan) {
      __ JumpIfNotHeapNumber(input, &convert_undefined);
    } else {
      DeoptimizeIfNotHeapNumber(input, instr);
    }

    // Load the heap number.
    __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
    if (instr->hydrogen()->deoptimize_on_minus_zero()) {
      DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
    }
    __ B(&done);

    if (can_convert_undefined_to_nan) {
      __ Bind(&convert_undefined);
      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                          Deoptimizer::kNotAHeapNumberUndefined);

      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ B(&done);
    }
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
    // Fall through to load_smi.
  }

  // Smi to double register conversion.
  __ Bind(&load_smi);
  __ SmiUntagToDouble(result, input);

  __ Bind(&done);
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
  __ PushPreamble(instr->argc(), kPointerSize);
}

void LCodeGen::DoPushArguments(LPushArguments* instr) {
  MacroAssembler::PushPopQueue args(masm());

  for (int i = 0; i < instr->ArgumentCount(); ++i) {
    LOperand* arg = instr->argument(i);
    if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
      Abort(kDoPushArgumentNotImplementedForDoubleType);
      return;
    }
    args.Queue(ToRegister(arg));
  }

  // The preamble was done by LPreparePushArguments.
  args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
}

void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
    // code managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ Push(x0);
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }

  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }

  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    Register stack_pointer = masm()->StackPointer();
    __ Mov(stack_pointer, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(fp, lr);
  }

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Drop(parameter_count + 1);
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register parameter_count = ToRegister(instr->parameter_count());
    __ DropBySMI(parameter_count);
  }
  __ Ret();

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}

MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           Register temp,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }

  __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
  if (encoding == String::ONE_BYTE_ENCODING) {
    return MemOperand(temp, ToRegister32(index), SXTW);
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    return MemOperand(temp, ToRegister32(index), SXTW, 1);
  }
}
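
// Illustrative example (not part of the emitted code): for a two-byte string
// and constant index 3, the operand addresses SeqString::kHeaderSize + 6
// from the string pointer (kUC16Size == 2). For a register index, the
// operand scales the index with a shift of 1 (SXTW, 1), i.e. index * 2,
// after `temp` has been set to the untagged address of the first character.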

void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    // Even though this lithium instruction comes with a temp register, we
    // can't use it here because we want to use "AtStart" constraints on the
    // inputs and the debug code here needs a scratch register.
    UseScratchRegisterScope temps(masm());
    Register dbg_temp = temps.AcquireX();

    __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
    __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));

    __ And(dbg_temp, dbg_temp,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
                                 ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Ldrb(result, operand);
  } else {
    __ Ldrh(result, operand);
  }
}

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    DCHECK(ToRegister(instr->context()).is(cp));
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
            ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
                                 encoding_mask);
  }
  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Strb(value, operand);
  } else {
    __ Strh(value, operand);
  }
}

void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
  }
  __ SmiTag(output, input);
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->needs_check()) {
    DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
  }
  __ SmiUntag(result, input);
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister32(instr->left());
  Register result = ToRegister32(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister32(instr->right());
    switch (instr->op()) {
      case Token::ROR: __ Ror(result, left, right); break;
      case Token::SAR: __ Asr(result, left, right); break;
      case Token::SHL: __ Lsl(result, left, right); break;
      case Token::SHR:
        __ Lsr(result, left, right);
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not representable
          // in a signed 32-bit smi.
          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
        }
        break;
      default: UNREACHABLE();
    }
  } else {
    DCHECK(right_op->IsConstantOperand());
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
      }
      __ Mov(result, left, kDiscardForSameWReg);
    } else {
      switch (instr->op()) {
        case Token::ROR: __ Ror(result, left, shift_count); break;
        case Token::SAR: __ Asr(result, left, shift_count); break;
        case Token::SHL: __ Lsl(result, left, shift_count); break;
        case Token::SHR: __ Lsr(result, left, shift_count); break;
        default: UNREACHABLE();
      }
    }
  }
}

void LCodeGen::DoShiftS(LShiftS* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister(instr->right());

    // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
    // Since we're handling smis in X registers, we have to extract these bits
    // explicitly.
    __ Ubfx(result, right, kSmiShift, 5);

    switch (instr->op()) {
      case Token::ROR: {
        // This is the only case that needs a scratch register. To keep things
        // simple for the other cases, borrow a MacroAssembler scratch
        // register.
        UseScratchRegisterScope temps(masm());
        Register temp = temps.AcquireW();
        __ SmiUntag(temp, left);
        __ Ror(result.W(), temp.W(), result.W());
        __ SmiTag(result);
        break;
      }
      case Token::SAR:
        __ Asr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      case Token::SHL:
        __ Lsl(result, left, result);
        break;
      case Token::SHR:
        __ Lsr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not representable
          // in a signed 32-bit smi.
          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
        }
        break;
      default: UNREACHABLE();
    }
  } else {
    DCHECK(right_op->IsConstantOperand());
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
      }
      __ Mov(result, left);
    } else {
      switch (instr->op()) {
        case Token::ROR:
          __ SmiUntag(result, left);
          __ Ror(result.W(), result.W(), shift_count);
          __ SmiTag(result);
          break;
        case Token::SAR:
          __ Asr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        case Token::SHL:
          __ Lsl(result, left, shift_count);
          break;
        case Token::SHR:
          __ Lsr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        default: UNREACHABLE();
      }
    }
  }
}
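
// Illustrative note on the register case above (not emitted): with
// kSmiShift == 32, Ubfx(result, right, kSmiShift, 5) extracts bits [36:32]
// of the tagged right operand, which are the low 5 bits of its untagged
// value. This implements JavaScript's `count & 31` masking of shift amounts
// without fully untagging the operand first.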

void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ Debug("LDebugBreak", 0, BREAK);
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Register scratch1 = x5;
  Register scratch2 = x6;
  DCHECK(instr->IsMarkedAsCall());

  // TODO(all): if Mov could handle objects in new space then it could be used
  // here.
  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
  __ Push(cp, scratch1, scratch2);  // The context is the first argument.
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}

void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(hs, &done);

    PredictableCodeSizeScope predictable(masm_,
                                         Assembler::kCallSizeWithRelocation);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ Bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(lo, deferred_stack_check->entry());

    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ Bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}

void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  Register temp = ToRegister(instr->temp());
  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}

void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ Ldr(scratch, target);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    } else {
      __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex,
                       &skip_assignment);
    }
  }

  __ Str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context, static_cast<int>(target.offset()),
                              value, scratch, GetLinkRegisterState(),
                              kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
  }
  __ Bind(&skip_assignment);
}

void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = no_reg;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    DCHECK(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
    scratch = ToRegister(instr->temp());
  }

  MemOperand dst =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->base_offset());

  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
      (elements_kind == FLOAT32_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    DoubleRegister dbl_scratch = double_scratch();
    __ Fcvt(dbl_scratch.S(), value);
    __ Str(dbl_scratch.S(), dst);
  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
             (elements_kind == FLOAT64_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ Str(value, dst);
  } else {
    Register value = ToRegister(instr->value());

    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ Strb(value, dst);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Strh(value, dst);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Str(value.W(), dst);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
    }
  }
}

void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister value = ToDoubleRegister(instr->value());
  MemOperand mem_op;

  if (instr->key()->IsConstantOperand()) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    int offset = instr->base_offset() + constant_key * kDoubleSize;
    mem_op = MemOperand(elements, offset);
  } else {
    Register store_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      instr->hydrogen()->representation(),
                                      instr->base_offset());
  }

  if (instr->NeedsCanonicalization()) {
    __ CanonicalizeNaN(double_scratch(), value);
    __ Str(double_scratch(), mem_op);
  } else {
    __ Str(value, mem_op);
  }
}

void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = no_reg;
  Register store_base = no_reg;
  Register key = no_reg;
  MemOperand mem_op;

  if (!instr->key()->IsConstantOperand() ||
      instr->hydrogen()->NeedsWriteBarrier()) {
    scratch = ToRegister(instr->temp());
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset = instr->base_offset() +
        ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
    if (representation.IsInteger32()) {
      DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
      STATIC_ASSERT(kSmiTag == 0);
      mem_op = UntagSmiMemOperand(store_base, offset);
    } else {
      mem_op = MemOperand(store_base, offset);
    }
  } else {
    store_base = scratch;
    key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      representation, instr->base_offset());
  }

  __ Store(value, mem_op, representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    DCHECK(representation.IsTagged());
    // This assignment may cause element_addr to alias store_base.
    Register element_addr = scratch;
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the modified element and store it into the
    // key register.
    __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
    __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
                   kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}

void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
      isolate(), instr->language_mode(),
      instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = x0;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ B(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Cmp(ToRegister(current_capacity), Operand(constant_key));
    __ B(le, deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Cmp(ToRegister(key), Operand(constant_capacity));
    __ B(ge, deferred->entry());
  } else {
    __ Cmp(ToRegister(key), ToRegister(current_capacity));
    __ B(ge, deferred->entry());
  }

  __ Mov(result, ToRegister(instr->elements()));

  __ Bind(deferred->exit());
}

void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = x0;
  __ Mov(result, 0);

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    __ Move(result, ToRegister(instr->object()));

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
    } else {
      __ Mov(x3, ToRegister(key));
      __ SmiTag(x3);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    __ Store(value, MemOperand(object, offset), representation);
    return;
  }

  __ AssertNotSmi(object);

  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
    return;
  }

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsInteger32Constant(LConstantOperand::cast(instr->value())));

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    // Store the new map value.
    Register new_map_value = ToRegister(instr->temp0());
    __ Mov(new_map_value, Operand(transition));
    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           new_map_value,
                           ToRegister(instr->temp1()),
                           GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination;
  if (access.IsInobject()) {
    destination = object;
  } else {
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
    destination = temp0;
  }

  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
  } else if (representation.IsSmi() &&
             instr->hydrogen()->value()->representation().IsInteger32()) {
    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(destination, offset));
    __ AssertSmi(temp0);
    // If destination aliased temp0, restore it to the address calculated
    // earlier.
    if (destination.Is(temp0)) {
      DCHECK(!access.IsInobject());
      __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
    }
#endif
    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
    STATIC_ASSERT(kSmiTag == 0);
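    // On arm64 a smi keeps its 32-bit payload in the upper word of the
    // field, so an int32 value can be stored directly into the payload half;
    // UntagSmiFieldMemOperand adjusts the offset to address it.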
    Register value = ToRegister(instr->value());
    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
             Representation::Integer32());
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, FieldMemOperand(destination, offset), representation);
  }
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    __ RecordWriteField(destination,
                        offset,
                        value,                       // Clobbered.
                        ToRegister(instr->temp1()),  // Clobbered.
                        GetLinkRegisterState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic =
      StoreIC::initialize_stub(isolate(), instr->language_mode(),
                               instr->hydrogen()->initialization_state());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTagAndPush(index);

  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());

  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
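  // Fast path: index into the single-character string cache. A miss leaves
  // the undefined value in the cache slot, in which case the string is
  // created in the deferred code.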
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ SmiTagAndPush(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op, SLOPPY).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  InlineSmiCheckInfo::EmitNotInlined(masm());

  Condition condition = TokenToCondition(op, false);

  EmitCompareAndBranch(instr, condition, x0, 0);
}


void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);
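
  // When overflow is possible, the flag-setting Subs feeds the deopt check
  // on the overflow (vs) condition; otherwise a plain Sub leaves the flags
  // untouched.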
  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to undefined check.
    __ JumpIfNotHeapNumber(input, &check_bools);

    // A heap number: load value and convert to int32 using truncating function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);
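
    // Convert booleans to 0 or 1 without leaving the deferred code: Cset
    // makes the output 1 when the input equals the true root. The Ccmp only
    // compares against the false root when the input was not true (otherwise
    // it forces the Z flag), so "eq" below means the input was one of the two
    // booleans and the output already holds the correct value.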
    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output contains zero, undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                        Deoptimizer::kNotAHeapNumberUndefinedBoolean);
  } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    DeoptimizeIfNotHeapNumber(input, instr);

    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
    }
  }
  __ Bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
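
  // Smi inputs untag directly. Other tagged values take the deferred path,
  // which handles heap numbers, booleans and undefined.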
  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).Is(x0));
  DCHECK(ToRegister(instr->result()).Is(x0));
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create regexp literal using runtime function
  // Result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

  __ Bind(&materialized);
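  // Every evaluation of the literal must produce a fresh JSRegExp object, so
  // the materialized boilerplate in x1 is shallow-copied into newly allocated
  // space below.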
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);

  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
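
  // A simple map-change transition only needs to install the new map (plus
  // write barrier); anything else must also rewrite the elements backing
  // store, which is delegated to TransitionElementsKindStub.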
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen checks
      // that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
    DCHECK(object.is(x0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(x3));
  DCHECK(ToRegister(instr->result()).is(x0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
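  // typeof of a smi is always "number", so smis are handled inline and only
  // heap objects fall through to the TypeofStub call.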
  __ JumpIfNotSmi(value_register, &do_call);
  __ Mov(x0, Immediate(isolate()->factory()->number_string()));
  __ B(&end);
  __ Bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ Bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(value, true_label);

    int true_block = instr->TrueDestination(chunk_);
    int false_block = instr->FalseDestination(chunk_);
    int next_block = GetNextEmittedBlock();
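
    // Choose the branch shape from the layout of the successor blocks so the
    // common case falls through and at most one explicit branch is emitted.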
    if (true_block == false_block) {
      EmitGoto(true_block);
    } else if (true_block == next_block) {
      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
    } else {
      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
      if (false_block != next_block) {
        __ B(chunk_->GetAssemblyLabel(false_block));
      }
    }

  } else if (String::Equals(type_name, factory->string_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(
        value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    DCHECK(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // HeapObject's type has been loaded into type register by JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (String::Equals(type_name, factory->object_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;
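
  // The index is a smi whose low payload bit flags a mutable double field:
  // when set, the field holds a boxed HeapNumber that must be loaded through
  // the deferred runtime path. The arithmetic shift strips the flag bit and
  // leaves the real field index, still smi-tagged.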
  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


}  // namespace internal
}  // namespace v8