// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "arm64/lithium-codegen-arm64.h"
#include "arm64/lithium-gap-resolver-arm64.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {

class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags and they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
    : BranchGenerator(codegen),
      cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(InvertCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};

// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
    : BranchGenerator(codegen),
      cond_(cond),
      lhs_(lhs),
      rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};

// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
    : BranchGenerator(codegen),
      cond_(cond),
      value_(value),
      mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(InvertCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};

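// Note on TestAndBranch above: the flags would come from Tst(value, mask),
// so 'eq' means (value & mask) == 0, i.e. all mask bits clear, which is why
// 'eq' maps to TestAndBranchIfAllClear and 'ne' to TestAndBranchIfAnySet.
// With a single-bit mask these fold to one Tbz/Tbnz instruction instead of
// a Tst/B pair.
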
// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
    : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};

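// Note on BranchIfNonZeroNumber above: after Fabs, a zero input compares
// equal (nZCv) and a NaN compares unordered (nzCV), so the 'gt' branch in
// Emit is taken only for non-zero, non-NaN values. Its inverse 'le' is
// taken for both the zero and the unordered case, which is exactly the
// complementary set.
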
// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};

// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);

    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}

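// The stream built by WriteTranslation above is a prefix walk of the
// environment chain: each environment contributes one Begin*Frame command
// (outermost frame first, because of the recursion on outer()) followed by
// one Store* command per value, emitted by AddToTranslation below.
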
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

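// In RegisterEnvironmentForDeoptimization above, frame_count counts every
// environment in the chain (including adaptor and stub frames) while
// jsframe_count only counts JS_FUNCTION frames; the deoptimizer needs both
// to size and index the output frames it rebuilds.
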
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}

void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).Is(x1));
  ASSERT(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);

  ASSERT(ToRegister(instr->result()).is(x0));
}

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }

  ASSERT(ToRegister(instr->result()).is(x0));
}

void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}

void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }

  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp, zone());
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateDeoptJumpTable() &&
         GenerateSafepointTable();
}

void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t FLAG in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
      __ Poke(x10, receiver_offset);

      __ Bind(&ok);
    }
  }

  ASSERT(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ Claim(slots, kPointerSize);
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in x1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved on the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
                                  GetLinkRegisterState(), kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }

  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ Claim(slots);
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp, cp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}

bool LCodeGen::GenerateDeoptJumpTable() {
  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ Bind(&deopt_jump_table_[i]->label);
    Address entry = deopt_jump_table_[i]->address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (deopt_jump_table_[i]->needs_frame) {
      ASSERT(!info()->saves_caller_doubles());

      UseScratchRegisterScope temps(masm());
      Register stub_deopt_entry = temps.AcquireX();
      Register stub_marker = temps.AcquireX();

      __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ B(&needs_frame);
      } else {
        __ Bind(&needs_frame);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
        __ Push(lr, fp, cp, stub_marker);
        __ Add(fp, __ StackPointer(), 2 * kPointerSize);
        __ Call(stub_deopt_entry);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    }
    masm()->CheckConstPool(false, false);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}

void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::DeoptimizeBranch(
    LEnvironment* environment,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  ASSERT(environment->HasBeenRegistered());
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(w1, w1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    ASSERT(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to build frame, or restore caller doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last()->address != entry) ||
        (deopt_jump_table_.last()->bailout_type != bailout_type) ||
        (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry* table_entry =
          new(zone()) Deoptimizer::JumpTableEntry(entry,
                                                  bailout_type,
                                                  !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ B(&deopt_jump_table_.last()->label,
         branch_type, reg, bit);
  }
}

void LCodeGen::Deoptimize(LEnvironment* environment,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
}


void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
  DeoptimizeBranch(environment, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, environment);
}


void LCodeGen::DeoptimizeIfSmi(Register rt,
                               LEnvironment* environment) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
}


void LCodeGen::DeoptimizeIfRoot(Register rt,
                                Heap::RootListIndex index,
                                LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, environment);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt,
                                   Heap::RootListIndex index,
                                   LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, environment);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
                                     LEnvironment* environment) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, environment);
}


void LCodeGen::DeoptimizeIfBitSet(Register rt,
                                  int bit,
                                  LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_set, rt, bit);
}


void LCodeGen::DeoptimizeIfBitClear(Register rt,
                                    int bit,
                                    LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
}

void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      ASSERT((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

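// Example: if space_needed covers four instructions and only two have been
// emitted since the last lazy-deopt site, this pads with two nops, so the
// call sequence patched in at this site cannot overlap the previously
// patched one.
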
Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  ASSERT((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  ASSERT(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}

Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());
}

Operand LCodeGen::ToOperand(LOperand* op) {
  ASSERT(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}

Operand LCodeGen::ToOperand32I(LOperand* op) {
  return ToOperand32(op, SIGNED_INT32);
}


Operand LCodeGen::ToOperand32U(LOperand* op) {
  return ToOperand32(op, UNSIGNED_INT32);
}


Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
  ASSERT(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(signedness == SIGNED_INT32
                     ? constant->Integer32Value()
                     : static_cast<uint32_t>(constant->Integer32Value()));
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}

static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize;
}

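// Worked example: argument slots use negative indices, so index -1 maps to
// offset 0 from the stack pointer, index -2 to kPointerSize, and so on.
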
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(op != NULL);
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}

Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}

template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    __ B(chunk_->GetAssemblyLabel(right_block));
  }
}

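// The case analysis above minimises branches on the fall-through path: when
// both destinations coincide we emit a single jump (or nothing, if it is
// the next block), and when either destination is the next block in
// emission order we branch only on the other one and simply fall through.
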
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  ASSERT((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  ASSERT((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  ASSERT((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}

void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    if (loc != 0) {
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
    } else {
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32I(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
  }
}

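// Worked example for the constant-constant case above: with length == 2 and
// index == 0, the argument is loaded from ((2 - 0) + 1) * kPointerSize == 24
// bytes past the arguments pointer, which skips the two extra words (saved
// lr and fp) described in the comment at the top of this function.
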
void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = (instr->right()->IsConstantOperand())
      ? ToInteger32(LConstantOperand::cast(instr->right()))
      : Operand(ToRegister32(instr->right()), SXTW);

  ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);
}

void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToOperand32I(instr->right());
  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Add(result, left, right);
  }
}

void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
    } else {
      __ B(deferred->entry());
    }
  } else {
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
  }

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register filler_count = temp1;
    Register filler = temp2;
    Register untagged_result = ToRegister(instr->temp3());

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Mov(filler_count, size / kPointerSize);
    } else {
      __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
    }

    __ Sub(untagged_result, result, kHeapObjectTag);
    __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ FillFields(untagged_result, filler_count, filler);
  } else {
    ASSERT(instr->temp3() == NULL);
  }
}

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // We're in a SafepointRegistersScope so we can use any scratch registers.
  Register size = x0;
  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
  } else {
    __ SmiTag(size, ToRegister32(instr->size()).X());
  }
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Mov(x10, Smi::FromInt(flags));
  __ Push(size, x10);

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
}

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister32(instr->length());

  Register elements = ToRegister(instr->elements());
  Register scratch = x5;
  ASSERT(receiver.Is(x0));  // Used for parameter count.
  ASSERT(function.Is(x1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).Is(x0));
  ASSERT(instr->IsMarkedAsCall());

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ Push(receiver);
  Register argc = receiver;
  __ Sxtw(argc, length);
  // The arguments are at a one pointer size offset from elements.
  __ Add(elements, elements, 1 * kPointerSize);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Cbz(length, &invoke);
  __ Bind(&loop);
  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
  __ Push(scratch);
  __ Subs(length, length, 1);
  __ B(ne, &loop);

  __ Bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in argc (receiver) which is x0, as
  // expected by InvokeFunction.
  ParameterCount actual(argc);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}

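// Note on the copy loop above: the arguments pointer carries the two-word
// bias described in DoAccessArgumentsAt, so the slot at the highest counter
// value holds the first argument. Arguments are therefore pushed
// first-to-last, leaving the last argument closest to the stack pointer.
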
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // When we are inside an inlined function, the arguments are the last
    // things that have been pushed on the stack. Therefore the arguments
    // array can be accessed directly from jssp.
    // However in the normal case, it is accessed via fp but there are two
    // words on the stack between fp and the arguments (the saved lr and fp)
    // and the LAccessArgumentsAt implementation takes that into account.
    // In the inlined case we need to subtract the size of 2 words from jssp
    // to get a pointer which will work well with LAccessArgumentsAt.
    ASSERT(masm()->StackPointer().Is(jssp));
    __ Sub(result, jssp, 2 * kPointerSize);
  } else {
    ASSERT(instr->temp() != NULL);
    Register previous_fp = ToRegister(instr->temp());

    __ Ldr(previous_fp,
           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ Ldr(result,
           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ Csel(result, fp, previous_fp, ne);
  }
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister32(instr->result());
  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());
  __ B(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ Ldr(result,
         UntagSmiMemOperand(result.X(),
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ Bind(&done);
}

void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());

  switch (instr->op()) {
    case Token::ADD: __ Fadd(result, left, right); break;
    case Token::SUB: __ Fsub(result, left, right); break;
    case Token::MUL: __ Fmul(result, left, right); break;
    case Token::DIV: __ Fdiv(result, left, right); break;
    case Token::MOD: {
      // The ECMA-262 remainder operator is the remainder from a truncating
      // (round-towards-zero) division. Note that this differs from IEEE-754.
      //
      // TODO(jbramley): See if it's possible to do this inline, rather than by
      // calling a helper function. With frintz (to produce the intermediate
      // quotient) and fmsub (to calculate the remainder without loss of
      // precision), it should be possible. However, we would need support for
      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
      // support that yet.
      ASSERT(left.Is(d0));
      ASSERT(right.Is(d1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      ASSERT(result.Is(d0));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(x1));
  ASSERT(ToRegister(instr->right()).is(x0));
  ASSERT(ToRegister(instr->result()).is(x0));

  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoBitI(LBitI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToOperand32U(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed);
  } else {
    DeoptimizeIf(cc, check->environment());
  }
}

void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
  if (instr->hydrogen()->skip_check()) return;

  ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
  Register length = ToRegister32(instr->length());

  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));

    if (instr->hydrogen()->length()->representation().IsSmi()) {
      __ Cmp(length, Smi::FromInt(constant_index));
    } else {
      __ Cmp(length, constant_index);
    }
  } else {
    ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
    __ Cmp(length, ToRegister32(instr->index()));
  }
  Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
  ApplyCheckIf(condition, instr);
}

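// The comparison above is Cmp(length, index), so 'lo' deoptimizes only when
// length < index (unsigned) while 'ls' also rejects the boundary case
// length == index; allow_equality therefore picks 'lo'.
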
1810 void LCodeGen::DoBranch(LBranch* instr) {
1811 Representation r = instr->hydrogen()->value()->representation();
1812 Label* true_label = instr->TrueLabel(chunk_);
1813 Label* false_label = instr->FalseLabel(chunk_);
1815 if (r.IsInteger32()) {
1816 ASSERT(!info()->IsStub());
1817 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1818 } else if (r.IsSmi()) {
1819 ASSERT(!info()->IsStub());
1820 STATIC_ASSERT(kSmiTag == 0);
1821 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1822 } else if (r.IsDouble()) {
1823 DoubleRegister value = ToDoubleRegister(instr->value());
1824 // Test the double value. Zero and NaN are false.
1825 EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1827 ASSERT(r.IsTagged());
1828 Register value = ToRegister(instr->value());
1829 HType type = instr->hydrogen()->value()->type();
1831 if (type.IsBoolean()) {
1832 ASSERT(!info()->IsStub());
1833 __ CompareRoot(value, Heap::kTrueValueRootIndex);
1834 EmitBranch(instr, eq);
1835 } else if (type.IsSmi()) {
1836 ASSERT(!info()->IsStub());
1837 EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
1838 } else if (type.IsJSArray()) {
1839 ASSERT(!info()->IsStub());
1840 EmitGoto(instr->TrueDestination(chunk()));
1841 } else if (type.IsHeapNumber()) {
1842 ASSERT(!info()->IsStub());
1843 __ Ldr(double_scratch(), FieldMemOperand(value,
1844 HeapNumber::kValueOffset));
1845 // Test the double value. Zero and NaN are false.
1846 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1847 } else if (type.IsString()) {
1848 ASSERT(!info()->IsStub());
1849 Register temp = ToRegister(instr->temp1());
1850 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1851 EmitCompareAndBranch(instr, ne, temp, 0);
1853 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1854 // Avoid deopts in the case where we've never executed this path before.
1855 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
1857 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1858 // undefined -> false.
1860 value, Heap::kUndefinedValueRootIndex, false_label);
1863 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1864 // Boolean -> its value.
1866 value, Heap::kTrueValueRootIndex, true_label);
1868 value, Heap::kFalseValueRootIndex, false_label);
1871 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1874 value, Heap::kNullValueRootIndex, false_label);
1877 if (expected.Contains(ToBooleanStub::SMI)) {
1878 // Smis: 0 -> false, all other -> true.
1879 ASSERT(Smi::FromInt(0) == 0);
1880 __ Cbz(value, false_label);
1881 __ JumpIfSmi(value, true_label);
1882 } else if (expected.NeedsMap()) {
1883 // If we need a map later and have a smi, deopt.
1884 DeoptimizeIfSmi(value, instr->environment());
1887 Register map = NoReg;
1888 Register scratch = NoReg;
1890 if (expected.NeedsMap()) {
1891 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
1892 map = ToRegister(instr->temp1());
1893 scratch = ToRegister(instr->temp2());
1895 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1897 if (expected.CanBeUndetectable()) {
1898 // Undetectable -> false.
1899 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1900 __ TestAndBranchIfAnySet(
1901 scratch, 1 << Map::kIsUndetectable, false_label);
1905 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1906 // spec object -> true.
1907 __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1908 __ B(ge, true_label);
1911 if (expected.Contains(ToBooleanStub::STRING)) {
1912 // String value -> false iff empty.
1914 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1915 __ B(ge, ¬_string);
1916 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1917 __ Cbz(scratch, false_label);
1919 __ Bind(¬_string);
1922 if (expected.Contains(ToBooleanStub::SYMBOL)) {
1923 // Symbol value -> true.
1924 __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1925 __ B(eq, true_label);
1928 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1929 Label not_heap_number;
1930 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
1932 __ Ldr(double_scratch(),
1933 FieldMemOperand(value, HeapNumber::kValueOffset));
1934 __ Fcmp(double_scratch(), 0.0);
1935 // If we got a NaN (overflow bit is set), jump to the false branch.
1936 __ B(vs, false_label);
1937 __ B(eq, false_label);
1938 __ B(true_label);
1939 __ Bind(&not_heap_number);
1942 if (!expected.IsGeneric()) {
1943 // We've seen something for the first time -> deopt.
1944 // This can only happen if we are not generic already.
1945 Deoptimize(instr->environment());
1952 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
1953 int formal_parameter_count,
1954 int arity,
1955 LInstruction* instr,
1956 Register function_reg) {
1957 bool dont_adapt_arguments =
1958 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1959 bool can_invoke_directly =
1960 dont_adapt_arguments || formal_parameter_count == arity;
1962 // The function interface relies on the following register assignments.
1963 ASSERT(function_reg.Is(x1) || function_reg.IsNone());
1964 Register arity_reg = x0;
1966 LPointerMap* pointers = instr->pointer_map();
1968 // If necessary, load the function object.
1969 if (function_reg.IsNone()) {
1970 function_reg = x1;
1971 __ LoadObject(function_reg, function);
1974 if (FLAG_debug_code) {
1975 Label is_not_smi;
1976 // Try to confirm that function_reg (x1) is a tagged pointer.
1977 __ JumpIfNotSmi(function_reg, &is_not_smi);
1978 __ Abort(kExpectedFunctionObject);
1979 __ Bind(&is_not_smi);
1982 if (can_invoke_directly) {
1984 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
1986 // Set the arguments count if adaptation is not needed. Assumes that x0 is
1987 // available to write to at this point.
1988 if (dont_adapt_arguments) {
1989 __ Mov(arity_reg, arity);
1993 __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
1994 __ Call(x10);
1996 // Set up deoptimization.
1997 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
1999 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2000 ParameterCount count(arity);
2001 ParameterCount expected(formal_parameter_count);
2002 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
2007 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
2008 ASSERT(instr->IsMarkedAsCall());
2009 ASSERT(ToRegister(instr->result()).Is(x0));
2011 LPointerMap* pointers = instr->pointer_map();
2012 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2014 if (instr->target()->IsConstantOperand()) {
2015 LConstantOperand* target = LConstantOperand::cast(instr->target());
2016 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2017 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
2018 // TODO(all): on ARM we use a call descriptor to specify a storage mode
2019 // but on ARM64 we only have one storage mode so it isn't necessary. Check
2020 // this understanding is correct.
2021 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
2022 } else {
2023 ASSERT(instr->target()->IsRegister());
2024 Register target = ToRegister(instr->target());
2025 generator.BeforeCall(__ CallSize(target));
2026 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2027 __ Call(target);
2028 }
2029 generator.AfterCall();
2033 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
2034 ASSERT(instr->IsMarkedAsCall());
2035 ASSERT(ToRegister(instr->function()).is(x1));
2037 if (instr->hydrogen()->pass_argument_count()) {
2038 __ Mov(x0, Operand(instr->arity()));
2042 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2044 // Load the code entry address.
2045 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
2047 __ Call(x10);
2048 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2052 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2053 CallRuntime(instr->function(), instr->arity(), instr);
2057 void LCodeGen::DoCallStub(LCallStub* instr) {
2058 ASSERT(ToRegister(instr->context()).is(cp));
2059 ASSERT(ToRegister(instr->result()).is(x0));
2060 switch (instr->hydrogen()->major_key()) {
2061 case CodeStub::RegExpExec: {
2062 RegExpExecStub stub;
2063 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2064 break;
2065 }
2066 case CodeStub::SubString: {
2067 SubStringStub stub;
2068 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2069 break;
2070 }
2071 case CodeStub::StringCompare: {
2072 StringCompareStub stub;
2073 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2074 break;
2075 }
2076 default:
2077 UNREACHABLE();
2082 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
2083 GenerateOsrPrologue();
2087 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
2088 Register temp = ToRegister(instr->temp());
2090 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2091 __ Push(object);
2092 __ Mov(cp, 0);
2093 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2094 RecordSafepointWithRegisters(
2095 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2096 __ StoreToSafepointRegisterSlot(x0, temp);
2098 DeoptimizeIfSmi(temp, instr->environment());
2102 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2103 class DeferredCheckMaps: public LDeferredCode {
2104 public:
2105 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2106 : LDeferredCode(codegen), instr_(instr), object_(object) {
2107 SetExit(check_maps());
2109 virtual void Generate() {
2110 codegen()->DoDeferredInstanceMigration(instr_, object_);
2112 Label* check_maps() { return &check_maps_; }
2113 virtual LInstruction* instr() { return instr_; }
2114 private:
2115 LCheckMaps* instr_;
2116 Label check_maps_;
2117 Register object_;
2118 };
2120 if (instr->hydrogen()->CanOmitMapChecks()) {
2121 ASSERT(instr->value() == NULL);
2122 ASSERT(instr->temp() == NULL);
2123 return;
2124 }
2126 Register object = ToRegister(instr->value());
2127 Register map_reg = ToRegister(instr->temp());
2129 __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
2131 DeferredCheckMaps* deferred = NULL;
2132 if (instr->hydrogen()->has_migration_target()) {
2133 deferred = new(zone()) DeferredCheckMaps(this, instr, object);
2134 __ Bind(deferred->check_maps());
2137 UniqueSet<Map> map_set = instr->hydrogen()->map_set();
2138 Label success;
2139 for (int i = 0; i < map_set.size(); i++) {
2140 Handle<Map> map = map_set.at(i).handle();
2141 __ CompareMap(map_reg, map);
2142 __ B(eq, &success);
2145 // We didn't match a map.
2146 if (instr->hydrogen()->has_migration_target()) {
2147 __ B(deferred->entry());
2149 Deoptimize(instr->environment());
2150 }
2152 __ Bind(&success);
2156 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2157 if (!instr->hydrogen()->value()->IsHeapObject()) {
2158 DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
2163 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2164 Register value = ToRegister(instr->value());
2165 ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
2166 DeoptimizeIfNotSmi(value, instr->environment());
2170 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2171 Register input = ToRegister(instr->value());
2172 Register scratch = ToRegister(instr->temp());
2174 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2175 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2177 if (instr->hydrogen()->is_interval_check()) {
2178 InstanceType first, last;
2179 instr->hydrogen()->GetCheckInterval(&first, &last);
2181 __ Cmp(scratch, first);
2182 if (first == last) {
2183 // If there is only one type in the interval check for equality.
2184 DeoptimizeIf(ne, instr->environment());
2185 } else if (last == LAST_TYPE) {
2186 // We don't need to compare with the higher bound of the interval.
2187 DeoptimizeIf(lo, instr->environment());
2189 // If we are below the lower bound, set the C flag and clear the Z flag
2190 // to force a deopt.
2191 __ Ccmp(scratch, last, CFlag, hs);
2192 DeoptimizeIf(hi, instr->environment());
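// Example of the Ccmp idiom above: the compare against 'last' is only
// performed when scratch >= first (hs). When scratch is below 'first',
// NZCV is instead set to CFlag (C set, Z clear), which also satisfies
// 'hi', so both out-of-range cases fall into the single deopt.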
2193 }
2194 } else {
2195 uint8_t mask;
2196 uint8_t tag;
2197 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2199 if (IsPowerOf2(mask)) {
2200 ASSERT((tag == 0) || (tag == mask));
2201 if (tag == 0) {
2202 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
2203 } else {
2204 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
2205 }
2206 } else {
2207 if (tag == 0) {
2208 __ Tst(scratch, mask);
2209 } else {
2210 __ And(scratch, scratch, mask);
2211 __ Cmp(scratch, tag);
2212 }
2213 DeoptimizeIf(ne, instr->environment());
2219 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2220 DoubleRegister input = ToDoubleRegister(instr->unclamped());
2221 Register result = ToRegister32(instr->result());
2222 __ ClampDoubleToUint8(result, input, double_scratch());
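// ClampDoubleToUint8 performs the standard uint8-clamped conversion:
// the result is clamped to the [0, 255] range, and NaN inputs produce 0.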
2226 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
2227 Register input = ToRegister32(instr->unclamped());
2228 Register result = ToRegister32(instr->result());
2229 __ ClampInt32ToUint8(result, input);
2233 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
2234 Register input = ToRegister(instr->unclamped());
2235 Register result = ToRegister32(instr->result());
2236 Register scratch = ToRegister(instr->temp1());
2239 // Both smi and heap number cases are handled.
2240 Label done, is_not_smi;
2241 __ JumpIfNotSmi(input, &is_not_smi);
2242 __ SmiUntag(result.X(), input);
2243 __ ClampInt32ToUint8(result);
2244 __ B(&done);
2246 __ Bind(&is_not_smi);
2248 // Check for heap number.
2249 Label is_heap_number;
2250 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2251 __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
2253 // Check for undefined. Undefined is converted to zero for clamping conversions.
2254 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
2255 instr->environment());
2256 __ Mov(result, 0);
2257 __ B(&done);
2259 // Heap number case.
2260 __ Bind(&is_heap_number);
2261 DoubleRegister dbl_scratch = double_scratch();
2262 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
2263 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2264 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2266 __ Bind(&done);
2270 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
2271 DoubleRegister value_reg = ToDoubleRegister(instr->value());
2272 Register result_reg = ToRegister(instr->result());
2273 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
2274 __ Fmov(result_reg, value_reg);
2275 __ Mov(result_reg, Operand(result_reg, LSR, 32));
2277 __ Fmov(result_reg.W(), value_reg.S());
2282 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
2283 Register hi_reg = ToRegister(instr->hi());
2284 Register lo_reg = ToRegister(instr->lo());
2285 Register temp = ToRegister(instr->temp());
2286 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2288 __ And(temp, lo_reg, Operand(0xffffffff));
2289 __ Orr(temp, temp, Operand(hi_reg, LSL, 32));
2290 __ Fmov(result_reg, temp);
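// The And/Orr/Fmov sequence above assembles the raw IEEE-754 bit pattern
// (hi:lo) in a core register before moving it to the FP register. For
// example, hi = 0x40090000 and lo = 0 produce the double 3.125.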
2294 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2295 Handle<String> class_name = instr->hydrogen()->class_name();
2296 Label* true_label = instr->TrueLabel(chunk_);
2297 Label* false_label = instr->FalseLabel(chunk_);
2298 Register input = ToRegister(instr->value());
2299 Register scratch1 = ToRegister(instr->temp1());
2300 Register scratch2 = ToRegister(instr->temp2());
2302 __ JumpIfSmi(input, false_label);
2304 Register map = scratch2;
2305 if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
2306 // Assuming the following assertions, we can use the same compares to test
2307 // for both being a function type and being in the object type range.
2308 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2309 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2310 FIRST_SPEC_OBJECT_TYPE + 1);
2311 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2312 LAST_SPEC_OBJECT_TYPE - 1);
2313 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2315 // We expect CompareObjectType to load the object instance type in scratch1.
2316 __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
2317 __ B(lt, false_label);
2318 __ B(eq, true_label);
2319 __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
2320 __ B(eq, true_label);
2322 __ IsObjectJSObjectType(input, map, scratch1, false_label);
2325 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2326 // Check if the constructor in the map is a function.
2327 __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
2329 // Objects with a non-function constructor have class 'Object'.
2330 if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
2331 __ JumpIfNotObjectType(
2332 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
2334 __ JumpIfNotObjectType(
2335 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
2338 // The constructor function is in scratch1. Get its instance class name.
2340 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
2342 FieldMemOperand(scratch1,
2343 SharedFunctionInfo::kInstanceClassNameOffset));
2345 // The class name we are testing against is internalized since it's a literal.
2346 // The name in the constructor is internalized because of the way the context
2347 // is booted. This routine isn't expected to work for random API-created
2348 // classes and it doesn't have to because you can't access it with natives
2349 // syntax. Since both sides are internalized it is sufficient to use an
2350 // identity comparison.
2351 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2355 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
2356 ASSERT(instr->hydrogen()->representation().IsDouble());
2357 FPRegister object = ToDoubleRegister(instr->object());
2358 Register temp = ToRegister(instr->temp());
2360 // If we don't have a NaN, we don't have the hole, so branch now to avoid the
2361 // (relatively expensive) hole-NaN check.
2362 __ Fcmp(object, object);
2363 __ B(vc, instr->FalseLabel(chunk_));
2365 // We have a NaN, but is it the hole?
2366 __ Fmov(temp, object);
2367 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
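// kHoleNanInt64 is the exact bit pattern used for the hole NaN. A floating
// point compare cannot distinguish it from other NaNs (all NaN compares
// are unordered), hence the move to a core register and the integer compare.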
2371 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
2372 ASSERT(instr->hydrogen()->representation().IsTagged());
2373 Register object = ToRegister(instr->object());
2375 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
2379 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2380 Register value = ToRegister(instr->value());
2381 Register map = ToRegister(instr->temp());
2383 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2384 EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2388 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2389 Representation rep = instr->hydrogen()->value()->representation();
2390 ASSERT(!rep.IsInteger32());
2391 Register scratch = ToRegister(instr->temp());
2393 if (rep.IsDouble()) {
2394 __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
2395 instr->TrueLabel(chunk()));
2397 Register value = ToRegister(instr->value());
2398 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2399 instr->FalseLabel(chunk()), DO_SMI_CHECK);
2400 __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
2401 __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
2403 EmitGoto(instr->FalseDestination(chunk()));
2407 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2408 LOperand* left = instr->left();
2409 LOperand* right = instr->right();
2410 Condition cond = TokenToCondition(instr->op(), false);
2412 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2413 // We can statically evaluate the comparison.
2414 double left_val = ToDouble(LConstantOperand::cast(left));
2415 double right_val = ToDouble(LConstantOperand::cast(right));
2416 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2417 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2418 EmitGoto(next_block);
2419 } else {
2420 if (instr->is_double()) {
2421 if (right->IsConstantOperand()) {
2422 __ Fcmp(ToDoubleRegister(left),
2423 ToDouble(LConstantOperand::cast(right)));
2424 } else if (left->IsConstantOperand()) {
2425 // Transpose the operands and reverse the condition.
2426 __ Fcmp(ToDoubleRegister(right),
2427 ToDouble(LConstantOperand::cast(left)));
2428 cond = ReverseConditionForCmp(cond);
2429 } else {
2430 __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
2433 // If a NaN is involved, i.e. the result is unordered (V set),
2434 // jump to false block label.
2435 __ B(vs, instr->FalseLabel(chunk_));
2436 EmitBranch(instr, cond);
2437 } else {
2438 if (instr->hydrogen_value()->representation().IsInteger32()) {
2439 if (right->IsConstantOperand()) {
2440 EmitCompareAndBranch(instr,
2441 cond,
2442 ToRegister32(left),
2443 ToOperand32I(right));
2444 } else {
2445 // Transpose the operands and reverse the condition.
2446 EmitCompareAndBranch(instr,
2447 ReverseConditionForCmp(cond),
2448 ToRegister32(right),
2449 ToOperand32I(left));
2452 ASSERT(instr->hydrogen_value()->representation().IsSmi());
2453 if (right->IsConstantOperand()) {
2454 int32_t value = ToInteger32(LConstantOperand::cast(right));
2455 EmitCompareAndBranch(instr,
2456 cond,
2457 ToRegister(left),
2458 Operand(Smi::FromInt(value)));
2459 } else if (left->IsConstantOperand()) {
2460 // Transpose the operands and reverse the condition.
2461 int32_t value = ToInteger32(LConstantOperand::cast(left));
2462 EmitCompareAndBranch(instr,
2463 ReverseConditionForCmp(cond),
2464 ToRegister(right),
2465 Operand(Smi::FromInt(value)));
2466 } else {
2467 EmitCompareAndBranch(instr,
2468 cond,
2469 ToRegister(left),
2470 ToRegister(right));
2478 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2479 Register left = ToRegister(instr->left());
2480 Register right = ToRegister(instr->right());
2481 EmitCompareAndBranch(instr, eq, left, right);
2485 void LCodeGen::DoCmpT(LCmpT* instr) {
2486 ASSERT(ToRegister(instr->context()).is(cp));
2487 Token::Value op = instr->op();
2488 Condition cond = TokenToCondition(op, false);
2490 ASSERT(ToRegister(instr->left()).Is(x1));
2491 ASSERT(ToRegister(instr->right()).Is(x0));
2492 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2493 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2494 // Signal that we don't inline smi code before this stub.
2495 InlineSmiCheckInfo::EmitNotInlined(masm());
2497 // Return true or false depending on CompareIC result.
2498 // This instruction is marked as call. We can clobber any register.
2499 ASSERT(instr->IsMarkedAsCall());
2500 __ LoadTrueFalseRoots(x1, x2);
2502 __ Csel(ToRegister(instr->result()), x1, x2, cond);
2506 void LCodeGen::DoConstantD(LConstantD* instr) {
2507 ASSERT(instr->result()->IsDoubleRegister());
2508 DoubleRegister result = ToDoubleRegister(instr->result());
2509 __ Fmov(result, instr->value());
2513 void LCodeGen::DoConstantE(LConstantE* instr) {
2514 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2518 void LCodeGen::DoConstantI(LConstantI* instr) {
2519 ASSERT(is_int32(instr->value()));
2520 // Cast the value here to ensure that the value isn't sign extended by the
2521 // implicit Operand constructor.
2522 __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
2526 void LCodeGen::DoConstantS(LConstantS* instr) {
2527 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2531 void LCodeGen::DoConstantT(LConstantT* instr) {
2532 Handle<Object> value = instr->value(isolate());
2533 AllowDeferredHandleDereference smi_check;
2534 __ LoadObject(ToRegister(instr->result()), value);
2538 void LCodeGen::DoContext(LContext* instr) {
2539 // If there is a non-return use, the context must be moved to a register.
2540 Register result = ToRegister(instr->result());
2541 if (info()->IsOptimizing()) {
2542 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
2544 // If there is no frame, the context must be in cp.
2545 ASSERT(result.is(cp));
2550 void LCodeGen::DoCheckValue(LCheckValue* instr) {
2551 Register reg = ToRegister(instr->value());
2552 Handle<HeapObject> object = instr->hydrogen()->object().handle();
2553 AllowDeferredHandleDereference smi_check;
2554 if (isolate()->heap()->InNewSpace(*object)) {
2555 UseScratchRegisterScope temps(masm());
2556 Register temp = temps.AcquireX();
2557 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2558 __ Mov(temp, Operand(Handle<Object>(cell)));
2559 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2562 __ Cmp(reg, Operand(object));
2564 DeoptimizeIf(ne, instr->environment());
2568 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2569 last_lazy_deopt_pc_ = masm()->pc_offset();
2570 ASSERT(instr->HasEnvironment());
2571 LEnvironment* env = instr->environment();
2572 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2573 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2577 void LCodeGen::DoDateField(LDateField* instr) {
2578 Register object = ToRegister(instr->date());
2579 Register result = ToRegister(instr->result());
2580 Register temp1 = x10;
2581 Register temp2 = x11;
2582 Smi* index = instr->index();
2583 Label runtime, done, deopt, obj_ok;
2585 ASSERT(object.is(result) && object.Is(x0));
2586 ASSERT(instr->IsMarkedAsCall());
2588 __ JumpIfSmi(object, &deopt);
2589 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2590 __ B(eq, &obj_ok);
2592 __ Bind(&deopt);
2593 Deoptimize(instr->environment());
2595 __ Bind(&obj_ok);
2596 if (index->value() == 0) {
2597 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2599 if (index->value() < JSDate::kFirstUncachedField) {
2600 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2601 __ Mov(temp1, Operand(stamp));
2602 __ Ldr(temp1, MemOperand(temp1));
2603 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2604 __ Cmp(temp1, temp2);
2605 __ B(ne, &runtime);
2606 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
2607 kPointerSize * index->value()));
2608 __ B(&done);
2609 }
2610 }
2611 __ Bind(&runtime);
2612 __ Mov(x1, Operand(index));
2613 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2615 __ Bind(&done);
2620 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
2621 Deoptimizer::BailoutType type = instr->hydrogen()->type();
2622 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
2623 // needed return address), even though the implementation of LAZY and EAGER is
2624 // now identical. When LAZY is eventually completely folded into EAGER, remove
2625 // the special case below.
2626 if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
2627 type = Deoptimizer::LAZY;
2630 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
2631 Deoptimize(instr->environment(), &type);
2635 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2636 Register dividend = ToRegister32(instr->dividend());
2637 int32_t divisor = instr->divisor();
2638 Register result = ToRegister32(instr->result());
2639 ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
2640 ASSERT(!result.is(dividend));
2642 // Check for (0 / -x) that will produce negative zero.
2643 HDiv* hdiv = instr->hydrogen();
2644 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2645 __ Cmp(dividend, 0);
2646 DeoptimizeIf(eq, instr->environment());
2648 // Check for (kMinInt / -1).
2649 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2650 __ Cmp(dividend, kMinInt);
2651 DeoptimizeIf(eq, instr->environment());
2653 // Deoptimize if remainder will not be 0.
2654 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2655 divisor != 1 && divisor != -1) {
2656 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2657 __ Tst(dividend, mask);
2658 DeoptimizeIf(ne, instr->environment());
2661 if (divisor == -1) { // Nice shortcut, not needed for correctness.
2662 __ Neg(result, dividend);
2663 return;
2664 }
2665 int32_t shift = WhichPowerOf2Abs(divisor);
2666 if (shift == 0) {
2667 __ Mov(result, dividend);
2668 } else if (shift == 1) {
2669 __ Add(result, dividend, Operand(dividend, LSR, 31));
2670 } else {
2671 __ Mov(result, Operand(dividend, ASR, 31));
2672 __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2674 if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2675 if (divisor < 0) __ Neg(result, result);
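// Worked example for the bias computation above: a negative dividend needs
// (2^shift - 1) added before the arithmetic shift so the result truncates
// towards zero. For shift == 1 the bias is just the sign bit (LSR 31); for
// larger shifts the sign mask (ASR 31) is shifted right by (32 - shift).
// E.g. dividend = -7, shift = 2: bias = 3, (-7 + 3) >> 2 = -1 = -7 / 4.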
2679 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
2680 Register dividend = ToRegister32(instr->dividend());
2681 int32_t divisor = instr->divisor();
2682 Register result = ToRegister32(instr->result());
2683 ASSERT(!AreAliased(dividend, result));
2685 if (divisor == 0) {
2686 Deoptimize(instr->environment());
2687 return;
2688 }
2690 // Check for (0 / -x) that will produce negative zero.
2691 HDiv* hdiv = instr->hydrogen();
2692 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2693 DeoptimizeIfZero(dividend, instr->environment());
2696 __ TruncatingDiv(result, dividend, Abs(divisor));
2697 if (divisor < 0) __ Neg(result, result);
2699 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2700 Register temp = ToRegister32(instr->temp());
2701 ASSERT(!AreAliased(dividend, result, temp));
2702 __ Sxtw(dividend.X(), dividend);
2703 __ Mov(temp, divisor);
2704 __ Smsubl(temp.X(), result, temp, dividend.X());
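// Smsubl computes temp.X = dividend.X - (result * divisor), i.e. the exact
// remainder of the truncating division, using the sign-extended 32-bit
// inputs; a non-zero remainder means the quotient was inexact.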
2705 DeoptimizeIfNotZero(temp, instr->environment());
2710 void LCodeGen::DoDivI(LDivI* instr) {
2711 HBinaryOperation* hdiv = instr->hydrogen();
2712 Register dividend = ToRegister32(instr->left());
2713 Register divisor = ToRegister32(instr->right());
2714 Register result = ToRegister32(instr->result());
2716 // Issue the division first, and then check for any deopt cases whilst the
2717 // result is computed.
2718 __ Sdiv(result, dividend, divisor);
2720 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
2721 ASSERT_EQ(NULL, instr->temp());
2722 return;
2723 }
2725 Label deopt;
2726 // Check for x / 0.
2727 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
2728 __ Cbz(divisor, &deopt);
2731 // Check for (0 / -x) as that will produce negative zero.
2732 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
2735 // If the divisor < 0 (mi), compare the dividend and deopt if it is
2736 // zero, i.e. a zero dividend with a negative divisor deopts.
2737 // If the divisor >= 0 (pl, the opposite of mi), set the flags to
2738 // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
2739 __ Ccmp(dividend, 0, NoFlag, mi);
2740 __ B(eq, &deopt);
2743 // Check for (kMinInt / -1).
2744 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
2745 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2746 // overflow.
2747 __ Cmp(dividend, 1);
2748 // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
2749 // -1. If overflow is clear, set the flags for condition ne, as the
2750 // dividend isn't -1, and thus we shouldn't deopt.
2751 __ Ccmp(divisor, -1, NoFlag, vs);
2752 __ B(eq, &deopt);
2755 // Compute remainder and deopt if it's not zero.
2756 Register remainder = ToRegister32(instr->temp());
2757 __ Msub(remainder, result, divisor, dividend);
2758 __ Cbnz(remainder, &deopt);
2763 Deoptimize(instr->environment());
2768 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
2769 DoubleRegister input = ToDoubleRegister(instr->value());
2770 Register result = ToRegister32(instr->result());
2772 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2773 DeoptimizeIfMinusZero(input, instr->environment());
2776 __ TryConvertDoubleToInt32(result, input, double_scratch());
2777 DeoptimizeIf(ne, instr->environment());
2779 if (instr->tag_result()) {
2780 __ SmiTag(result.X());
2785 void LCodeGen::DoDrop(LDrop* instr) {
2786 __ Drop(instr->count());
2790 void LCodeGen::DoDummy(LDummy* instr) {
2791 // Nothing to see here, move on!
2795 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2796 // Nothing to see here, move on!
2800 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2801 ASSERT(ToRegister(instr->context()).is(cp));
2802 // FunctionLiteral instruction is marked as call, we can trash any register.
2803 ASSERT(instr->IsMarkedAsCall());
2805 // Use the fast case closure allocation code that allocates in new
2806 // space for nested functions that don't need literals cloning.
2807 bool pretenure = instr->hydrogen()->pretenure();
2808 if (!pretenure && instr->hydrogen()->has_no_literals()) {
2809 FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
2810 instr->hydrogen()->is_generator());
2811 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2812 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2814 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2815 __ Mov(x1, Operand(pretenure ? factory()->true_value()
2816 : factory()->false_value()));
2817 __ Push(cp, x2, x1);
2818 CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
2823 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
2824 Register map = ToRegister(instr->map());
2825 Register result = ToRegister(instr->result());
2826 Label load_cache, done;
2828 __ EnumLengthUntagged(result, map);
2829 __ Cbnz(result, &load_cache);
2831 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2832 __ B(&done);
2834 __ Bind(&load_cache);
2835 __ LoadInstanceDescriptors(map, result);
2836 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2837 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2838 DeoptimizeIfZero(result, instr->environment());
2840 __ Bind(&done);
2844 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2845 Register object = ToRegister(instr->object());
2846 Register null_value = x5;
2848 ASSERT(instr->IsMarkedAsCall());
2849 ASSERT(object.Is(x0));
2851 Label deopt;
2853 __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
2855 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2856 __ Cmp(object, null_value);
2857 __ B(eq, &deopt);
2859 __ JumpIfSmi(object, &deopt);
2861 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
2862 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
2863 __ B(le, &deopt);
2865 Label use_cache, call_runtime;
2866 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
2868 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2869 __ B(&use_cache);
2871 __ Bind(&deopt);
2872 Deoptimize(instr->environment());
2874 // Get the set of properties to enumerate.
2875 __ Bind(&call_runtime);
2876 __ Push(object);
2877 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
2879 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
2880 __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
2882 __ Bind(&use_cache);
2886 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2887 Register input = ToRegister(instr->value());
2888 Register result = ToRegister(instr->result());
2890 __ AssertString(input);
2892 // Assert that we can use a W register load to get the hash.
2893 ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
2894 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
2895 __ IndexFromHash(result, result);
2899 void LCodeGen::EmitGoto(int block) {
2900 // Do not emit jump if we are emitting a goto to the next block.
2901 if (!IsNextEmittedBlock(block)) {
2902 __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
2907 void LCodeGen::DoGoto(LGoto* instr) {
2908 EmitGoto(instr->block_id());
2912 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2913 LHasCachedArrayIndexAndBranch* instr) {
2914 Register input = ToRegister(instr->value());
2915 Register temp = ToRegister32(instr->temp());
2917 // Assert that the cache status bits fit in a W register.
2918 ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
2919 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
2920 __ Tst(temp, String::kContainsCachedArrayIndexMask);
2921 EmitBranch(instr, eq);
2925 // HHasInstanceTypeAndBranch instruction is built with an interval of type
2926 // to test but is only used in very restricted ways. The only possible kinds
2927 // of intervals are:
2928 // - [ FIRST_TYPE, instr->to() ]
2929 // - [ instr->from(), LAST_TYPE ]
2930 // - instr->from() == instr->to()
2932 // These kinds of intervals can be checked with only one compare instruction,
2933 // provided the correct value and test condition are used.
2935 // TestType() will return the value to use in the compare instruction and
2936 // BranchCondition() will return the condition to use depending on the kind
2937 // of interval actually specified in the instruction.
2938 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2939 InstanceType from = instr->from();
2940 InstanceType to = instr->to();
2941 if (from == FIRST_TYPE) return to;
2942 ASSERT((from == to) || (to == LAST_TYPE));
2943 return from;
2947 // See comment above TestType function for what this function does.
2948 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2949 InstanceType from = instr->from();
2950 InstanceType to = instr->to();
2951 if (from == to) return eq;
2952 if (to == LAST_TYPE) return hs;
2953 if (from == FIRST_TYPE) return ls;
2954 UNREACHABLE();
2955 return eq;
2956 }
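// For example, the interval [FIRST_TYPE, instr->to()] is tested with a
// single Cmp against instr->to() and a branch on 'ls', while
// [instr->from(), LAST_TYPE] compares against instr->from() and branches
// on 'hs'. A single-type interval compares for equality.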
2959 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2960 Register input = ToRegister(instr->value());
2961 Register scratch = ToRegister(instr->temp());
2963 if (!instr->hydrogen()->value()->IsHeapObject()) {
2964 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2966 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2967 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2971 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
2972 Register result = ToRegister(instr->result());
2973 Register base = ToRegister(instr->base_object());
2974 if (instr->offset()->IsConstantOperand()) {
2975 __ Add(result, base, ToOperand32I(instr->offset()));
2977 __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
2982 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2983 ASSERT(ToRegister(instr->context()).is(cp));
2984 // Assert that the arguments are in the registers expected by InstanceofStub.
2985 ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
2986 ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
2988 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2989 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2991 // InstanceofStub returns a result in x0:
2992 // 0 => not an instance
2993 // smi 1 => instance.
2994 __ Cmp(x0, 0);
2995 __ LoadTrueFalseRoots(x0, x1);
2996 __ Csel(x0, x0, x1, eq);
3000 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3001 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
3002 public:
3003 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
3004 LInstanceOfKnownGlobal* instr)
3005 : LDeferredCode(codegen), instr_(instr) { }
3006 virtual void Generate() {
3007 codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
3009 virtual LInstruction* instr() { return instr_; }
3010 private:
3011 LInstanceOfKnownGlobal* instr_;
3012 };
3014 DeferredInstanceOfKnownGlobal* deferred =
3015 new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
3017 Label map_check, return_false, cache_miss, done;
3018 Register object = ToRegister(instr->value());
3019 Register result = ToRegister(instr->result());
3020 // x4 is expected in the associated deferred code and stub.
3021 Register map_check_site = x4;
3024 // This instruction is marked as call. We can clobber any register.
3025 ASSERT(instr->IsMarkedAsCall());
3027 // We must take into account that object is in x11.
3028 ASSERT(object.Is(x11));
3029 Register scratch = x10;
3031 // A Smi is not instance of anything.
3032 __ JumpIfSmi(object, &return_false);
3034 // This is the inlined call site instanceof cache. The two occurrences of the
3035 // hole value will be patched to the last map/result pair generated by the
3036 // instanceof stub.
3037 __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3039 // Below we use Factory::the_hole_value() on purpose instead of loading from
3040 // the root array to force relocation and later be able to patch with a
3041 // new value.
3042 InstructionAccurateScope scope(masm(), 5);
3043 __ bind(&map_check);
3044 // Will be patched with the cached map.
3045 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
3046 __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
3047 __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
3048 __ cmp(map, scratch);
3049 __ b(&cache_miss, ne);
3050 // The address of this instruction is computed relative to the map check
3051 // above, so check the size of the code generated.
3052 ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
3053 // Will be patched with the cached result.
3054 __ LoadRelocated(result, Operand(factory()->the_hole_value()));
3056 __ B(&done);
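// Note: the InstructionAccurateScope above is sized for exactly five
// instructions, so the patching code can rely on a fixed-size, fixed-layout
// sequence when it later rewrites the cached map and the cached result.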
3058 // The inlined call site cache did not match.
3059 // Check null and string before calling the deferred code.
3060 __ Bind(&cache_miss);
3061 // Compute the address of the map check. It must not be clobbered until the
3062 // InstanceOfStub has used it.
3063 __ Adr(map_check_site, &map_check);
3064 // Null is not instance of anything.
3065 __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
3067 // String values are not instances of anything.
3068 // Return false if the object is a string. Otherwise, jump to the deferred
3070 // Note that we can't jump directly to deferred code from
3071 // IsObjectJSStringType, because it uses tbz for the jump and the deferred
3072 // code can be out of range.
3073 __ IsObjectJSStringType(object, scratch, NULL, &return_false);
3074 __ B(deferred->entry());
3076 __ Bind(&return_false);
3077 __ LoadRoot(result, Heap::kFalseValueRootIndex);
3079 // Here result is either true or false.
3080 __ Bind(deferred->exit());
3081 __ Bind(&done);
3085 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3086 Register result = ToRegister(instr->result());
3087 ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
3088 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3089 flags = static_cast<InstanceofStub::Flags>(
3090 flags | InstanceofStub::kArgsInRegisters);
3091 flags = static_cast<InstanceofStub::Flags>(
3092 flags | InstanceofStub::kReturnTrueFalseObject);
3093 flags = static_cast<InstanceofStub::Flags>(
3094 flags | InstanceofStub::kCallSiteInlineCheck);
3096 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3097 LoadContextFromDeferred(instr->context());
3099 // Prepare InstanceofStub arguments.
3100 ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
3101 __ LoadObject(InstanceofStub::right(), instr->function());
3103 InstanceofStub stub(flags);
3104 CallCodeGeneric(stub.GetCode(isolate()),
3105 RelocInfo::CODE_TARGET,
3107 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3108 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3109 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3111 // Put the result value into the result register slot.
3112 __ StoreToSafepointRegisterSlot(result, result);
3116 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3117 DoGap(instr);
3121 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3122 Register value = ToRegister32(instr->value());
3123 DoubleRegister result = ToDoubleRegister(instr->result());
3124 __ Scvtf(result, value);
3128 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3129 ASSERT(ToRegister(instr->context()).is(cp));
3130 // The function is required to be in x1.
3131 ASSERT(ToRegister(instr->function()).is(x1));
3132 ASSERT(instr->HasPointerMap());
3134 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3135 if (known_function.is_null()) {
3136 LPointerMap* pointers = instr->pointer_map();
3137 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3138 ParameterCount count(instr->arity());
3139 __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3141 CallKnownFunction(known_function,
3142 instr->hydrogen()->formal_parameter_count(),
3143 instr->arity(),
3144 instr,
3145 x1);
3150 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
3151 Register temp1 = ToRegister(instr->temp1());
3152 Register temp2 = ToRegister(instr->temp2());
3154 // Get the frame pointer for the calling frame.
3155 __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3157 // Skip the arguments adaptor frame if it exists.
3158 Label check_frame_marker;
3159 __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
3160 __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3161 __ B(ne, &check_frame_marker);
3162 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
3164 // Check the marker in the calling frame.
3165 __ Bind(&check_frame_marker);
3166 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
3168 EmitCompareAndBranch(
3169 instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3173 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
3174 Label* is_object = instr->TrueLabel(chunk_);
3175 Label* is_not_object = instr->FalseLabel(chunk_);
3176 Register value = ToRegister(instr->value());
3177 Register map = ToRegister(instr->temp1());
3178 Register scratch = ToRegister(instr->temp2());
3180 __ JumpIfSmi(value, is_not_object);
3181 __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
3183 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3185 // Check for undetectable objects.
3186 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
3187 __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
3189 // Check that instance type is in object type range.
3190 __ IsInstanceJSObjectType(map, scratch, NULL);
3191 // Flags have been updated by IsInstanceJSObjectType. We can now test the
3192 // flags for "le" condition to check if the object's type is a valid
3193 // JS object type.
3194 EmitBranch(instr, le);
3198 Condition LCodeGen::EmitIsString(Register input,
3199 Register temp1,
3200 Label* is_not_string,
3201 SmiCheck check_needed = INLINE_SMI_CHECK) {
3202 if (check_needed == INLINE_SMI_CHECK) {
3203 __ JumpIfSmi(input, is_not_string);
3205 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
3207 return lt;
3208 }
3211 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
3212 Register val = ToRegister(instr->value());
3213 Register scratch = ToRegister(instr->temp());
3215 SmiCheck check_needed =
3216 instr->hydrogen()->value()->IsHeapObject()
3217 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3218 Condition true_cond =
3219 EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
3221 EmitBranch(instr, true_cond);
3225 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
3226 Register value = ToRegister(instr->value());
3227 STATIC_ASSERT(kSmiTag == 0);
3228 EmitTestAndBranch(instr, eq, value, kSmiTagMask);
3232 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
3233 Register input = ToRegister(instr->value());
3234 Register temp = ToRegister(instr->temp());
3236 if (!instr->hydrogen()->value()->IsHeapObject()) {
3237 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3239 __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
3240 __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3242 EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
3246 static const char* LabelType(LLabel* label) {
3247 if (label->is_loop_header()) return " (loop header)";
3248 if (label->is_osr_entry()) return " (OSR entry)";
3249 return "";
3250 }
3253 void LCodeGen::DoLabel(LLabel* label) {
3254 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
3255 current_instruction_,
3256 label->hydrogen_value()->id(),
3257 label->block_id(),
3258 LabelType(label));
3260 __ Bind(label->label());
3261 current_block_ = label->block_id();
3266 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3267 Register context = ToRegister(instr->context());
3268 Register result = ToRegister(instr->result());
3269 __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
3270 if (instr->hydrogen()->RequiresHoleCheck()) {
3271 if (instr->hydrogen()->DeoptimizesOnHole()) {
3272 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3273 instr->environment());
3274 } else {
3275 Label not_the_hole;
3276 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
3277 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3278 __ Bind(&not_the_hole);
3284 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3285 Register function = ToRegister(instr->function());
3286 Register result = ToRegister(instr->result());
3287 Register temp = ToRegister(instr->temp());
3288 Label deopt;
3290 // Check that the function really is a function. Leaves map in the result
3291 // register.
3292 __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
3294 // Make sure that the function has an instance prototype.
3295 Label non_instance;
3296 __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
3297 __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
3299 // Get the prototype or initial map from the function.
3300 __ Ldr(result, FieldMemOperand(function,
3301 JSFunction::kPrototypeOrInitialMapOffset));
3303 // Check that the function has a prototype or an initial map.
3304 __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
3306 // If the function does not have an initial map, we're done.
3307 Label done;
3308 __ CompareObjectType(result, temp, temp, MAP_TYPE);
3309 __ B(ne, &done);
3311 // Get the prototype from the initial map.
3312 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3313 __ B(&done);
3315 // Non-instance prototype: fetch prototype from constructor field in initial
3316 // map.
3317 __ Bind(&non_instance);
3318 __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3319 __ B(&done);
3321 __ Bind(&deopt);
3323 Deoptimize(instr->environment());
3325 __ Bind(&done);
3330 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3331 Register result = ToRegister(instr->result());
3332 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
3333 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3334 if (instr->hydrogen()->RequiresHoleCheck()) {
3335 DeoptimizeIfRoot(
3336 result, Heap::kTheHoleValueRootIndex, instr->environment());
3341 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3342 ASSERT(ToRegister(instr->context()).is(cp));
3343 ASSERT(ToRegister(instr->global_object()).Is(x0));
3344 ASSERT(ToRegister(instr->result()).Is(x0));
3345 __ Mov(x2, Operand(instr->name()));
3346 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3347 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
3348 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3352 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
3353 Register key,
3354 Register base,
3355 Register scratch,
3356 bool key_is_smi,
3357 bool key_is_constant,
3358 int constant_key,
3359 ElementsKind elements_kind,
3360 int additional_index) {
3361 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3362 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
3363 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3364 : 0;
3366 if (key_is_constant) {
3367 int base_offset = ((constant_key + additional_index) << element_size_shift);
3368 return MemOperand(base, base_offset + additional_offset);
3371 if (additional_index == 0) {
3372 if (key_is_smi) {
3373 // Key is smi: untag, and scale by element size.
3374 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3375 return MemOperand(scratch, additional_offset);
3377 // Key is not smi, and element size is not byte: scale by element size.
3378 if (additional_offset == 0) {
3379 return MemOperand(base, key, SXTW, element_size_shift);
3380 } else {
3381 __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
3382 return MemOperand(scratch, additional_offset);
3386 // TODO(all): Try to combine these cases a bit more intelligently.
3387 if (additional_offset == 0) {
3388 if (key_is_smi) {
3389 __ SmiUntag(scratch, key);
3390 __ Add(scratch.W(), scratch.W(), additional_index);
3391 } else {
3392 __ Add(scratch.W(), key.W(), additional_index);
3393 }
3394 return MemOperand(base, scratch, LSL, element_size_shift);
3395 } else {
3396 if (key_is_smi) {
3397 __ Add(scratch, base,
3398 Operand::UntagSmiAndScale(key, element_size_shift));
3399 } else {
3400 __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
3401 }
3403 return MemOperand(scratch,
3404 (additional_index << element_size_shift) + additional_offset);
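// For example, a constant key collapses to a single immediate-offset
// operand: base + ((constant_key + additional_index) << element_size_shift)
// + additional_offset. Variable keys are untagged (if smi) and scaled into
// either a register-offset operand or a precomputed scratch base.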
3410 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3411 Register ext_ptr = ToRegister(instr->elements());
3412 Register scratch = no_reg;
3413 ElementsKind elements_kind = instr->elements_kind();
3415 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3416 bool key_is_constant = instr->key()->IsConstantOperand();
3417 Register key = no_reg;
3418 int constant_key = 0;
3419 if (key_is_constant) {
3420 ASSERT(instr->temp() == NULL);
3421 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3422 if (constant_key & 0xf0000000) {
3423 Abort(kArrayIndexConstantValueTooBig);
3424 }
3425 } else {
3426 scratch = ToRegister(instr->temp());
3427 key = ToRegister(instr->key());
3428 }
3430 MemOperand mem_op =
3431 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3432 key_is_constant, constant_key,
3433 elements_kind,
3434 instr->additional_index());
3436 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
3437 (elements_kind == FLOAT32_ELEMENTS)) {
3438 DoubleRegister result = ToDoubleRegister(instr->result());
3439 __ Ldr(result.S(), mem_op);
3440 __ Fcvt(result, result.S());
3441 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
3442 (elements_kind == FLOAT64_ELEMENTS)) {
3443 DoubleRegister result = ToDoubleRegister(instr->result());
3444 __ Ldr(result, mem_op);
3446 Register result = ToRegister(instr->result());
3448 switch (elements_kind) {
3449 case EXTERNAL_INT8_ELEMENTS:
3451 __ Ldrsb(result, mem_op);
3453 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3454 case EXTERNAL_UINT8_ELEMENTS:
3455 case UINT8_ELEMENTS:
3456 case UINT8_CLAMPED_ELEMENTS:
3457 __ Ldrb(result, mem_op);
3459 case EXTERNAL_INT16_ELEMENTS:
3460 case INT16_ELEMENTS:
3461 __ Ldrsh(result, mem_op);
3463 case EXTERNAL_UINT16_ELEMENTS:
3464 case UINT16_ELEMENTS:
3465 __ Ldrh(result, mem_op);
3467 case EXTERNAL_INT32_ELEMENTS:
3468 case INT32_ELEMENTS:
3469 __ Ldrsw(result, mem_op);
3471 case EXTERNAL_UINT32_ELEMENTS:
3472 case UINT32_ELEMENTS:
3473 __ Ldr(result.W(), mem_op);
3474 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3475 // Deopt if value >= 0x80000000.
3476 __ Tst(result, 0xFFFFFFFF80000000);
3477 DeoptimizeIf(ne, instr->environment());
3480 case FLOAT32_ELEMENTS:
3481 case FLOAT64_ELEMENTS:
3482 case EXTERNAL_FLOAT32_ELEMENTS:
3483 case EXTERNAL_FLOAT64_ELEMENTS:
3484 case FAST_HOLEY_DOUBLE_ELEMENTS:
3485 case FAST_HOLEY_ELEMENTS:
3486 case FAST_HOLEY_SMI_ELEMENTS:
3487 case FAST_DOUBLE_ELEMENTS:
3489 case FAST_SMI_ELEMENTS:
3490 case DICTIONARY_ELEMENTS:
3491 case SLOPPY_ARGUMENTS_ELEMENTS:
3499 void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
3503 ElementsKind elements_kind) {
3504 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3506 // Even though the HLoad/StoreKeyed instructions force the input
3507 // representation for the key to be an integer, the input gets replaced during
3508 // bounds check elimination with the index argument to the bounds check, which
3509 // can be tagged, so that case must be handled here, too.
3510 if (key_is_tagged) {
3511 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3513 // Sign extend key because it could be a 32-bit negative value or contain
3514 // garbage in the top 32-bits. The address computation happens in 64-bit.
3515 ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
3516 __ Add(base, elements, Operand(key, SXTW, element_size_shift));
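// Both paths scale the key by the element size: a tagged (smi) key is
// untagged and scaled in one UntagSmiAndScale operand, while an untagged
// key is sign-extended from 32 bits (SXTW), as noted above, before being
// scaled.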
3521 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3522 Register elements = ToRegister(instr->elements());
3523 DoubleRegister result = ToDoubleRegister(instr->result());
3524 Register load_base;
3525 int offset = 0;
3527 if (instr->key()->IsConstantOperand()) {
3528 ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
3529 (instr->temp() == NULL));
3531 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3532 if (constant_key & 0xf0000000) {
3533 Abort(kArrayIndexConstantValueTooBig);
3535 offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
3536 instr->additional_index());
3537 load_base = elements;
3538 } else {
3539 load_base = ToRegister(instr->temp());
3540 Register key = ToRegister(instr->key());
3541 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3542 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3543 instr->hydrogen()->elements_kind());
3544 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
3546 __ Ldr(result, FieldMemOperand(load_base, offset));
3548 if (instr->hydrogen()->RequiresHoleCheck()) {
3549 Register scratch = ToRegister(instr->temp());
3551 // TODO(all): Is it faster to reload this value to an integer register, or
3552 // move from fp to integer?
3553 __ Fmov(scratch, result);
3554 __ Cmp(scratch, kHoleNanInt64);
3555 DeoptimizeIf(eq, instr->environment());
3560 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3561 Register elements = ToRegister(instr->elements());
3562 Register result = ToRegister(instr->result());
3563 Register load_base;
3564 int offset = 0;
3566 if (instr->key()->IsConstantOperand()) {
3567 ASSERT(instr->temp() == NULL);
3568 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3569 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3570 instr->additional_index());
3571 load_base = elements;
3572 } else {
3573 load_base = ToRegister(instr->temp());
3574 Register key = ToRegister(instr->key());
3575 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3576 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3577 instr->hydrogen()->elements_kind());
3578 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3580 Representation representation = instr->hydrogen()->representation();
3582 if (representation.IsInteger32() &&
3583 instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
3584 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
3585 __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
3586 Representation::Integer32());
3587 } else {
3588 __ Load(result, FieldMemOperand(load_base, offset),
3589 representation);
3590 }
3592 if (instr->hydrogen()->RequiresHoleCheck()) {
3593 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3594 DeoptimizeIfNotSmi(result, instr->environment());
3596 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3597 instr->environment());
3603 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3604 ASSERT(ToRegister(instr->context()).is(cp));
3605 ASSERT(ToRegister(instr->object()).Is(x1));
3606 ASSERT(ToRegister(instr->key()).Is(x0));
3608 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3609 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3611 ASSERT(ToRegister(instr->result()).Is(x0));
3615 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3616 HObjectAccess access = instr->hydrogen()->access();
3617 int offset = access.offset();
3618 Register object = ToRegister(instr->object());
3620 if (access.IsExternalMemory()) {
3621 Register result = ToRegister(instr->result());
3622 __ Load(result, MemOperand(object, offset), access.representation());
3623 return;
3624 }
3626 if (instr->hydrogen()->representation().IsDouble()) {
3627 FPRegister result = ToDoubleRegister(instr->result());
3628 __ Ldr(result, FieldMemOperand(object, offset));
3629 return;
3630 }
3632 Register result = ToRegister(instr->result());
3633 Register source;
3634 if (access.IsInobject()) {
3635 source = object;
3636 } else {
3637 // Load the properties array, using result as a scratch register.
3638 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3639 source = result;
3640 }
3642 if (access.representation().IsSmi() &&
3643 instr->hydrogen()->representation().IsInteger32()) {
3644 // Read int value directly from upper half of the smi.
3645 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
3646 __ Load(result, UntagSmiFieldMemOperand(source, offset),
3647 Representation::Integer32());
3649 __ Load(result, FieldMemOperand(source, offset), access.representation());
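// With the smi layout asserted above (untagged value in bits 63:32, tag in
// the low word), UntagSmiFieldMemOperand addresses the upper half of the
// field directly, so a single 32-bit load produces the untagged integer
// without a separate SmiUntag step.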
3654 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3655 ASSERT(ToRegister(instr->context()).is(cp));
3656 // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
3657 ASSERT(ToRegister(instr->object()).is(x0));
3658 __ Mov(x2, Operand(instr->name()));
3660 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3661 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3663 ASSERT(ToRegister(instr->result()).is(x0));
3667 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3668 Register result = ToRegister(instr->result());
3669 __ LoadRoot(result, instr->index());
3673 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
3674 Register result = ToRegister(instr->result());
3675 Register map = ToRegister(instr->value());
3676 __ EnumLengthSmi(result, map);
3680 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3681 Representation r = instr->hydrogen()->value()->representation();
3682 if (r.IsDouble()) {
3683 DoubleRegister input = ToDoubleRegister(instr->value());
3684 DoubleRegister result = ToDoubleRegister(instr->result());
3685 __ Fabs(result, input);
3686 } else if (r.IsSmi() || r.IsInteger32()) {
3687 Register input = r.IsSmi() ? ToRegister(instr->value())
3688 : ToRegister32(instr->value());
3689 Register result = r.IsSmi() ? ToRegister(instr->result())
3690 : ToRegister32(instr->result());
3691 Label done;
3692 __ Abs(result, input, NULL, &done);
3693 Deoptimize(instr->environment());
3694 __ Bind(&done);
3699 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3701 Label* allocation_entry) {
3702 // Handle the tricky cases of MathAbsTagged:
3703 // - HeapNumber inputs.
3704 // - Negative inputs produce a positive result, so a new HeapNumber is
3705 // allocated to hold it.
3706 // - Positive inputs are returned as-is, since there is no need to allocate
3707 // a new HeapNumber for the result.
3708 // - The (smi) input -0x80000000 produces +0x80000000, which does not fit
3709 // in a smi. In this case, the inline code sets the result and jumps directly
3710 // to the allocation_entry label.
3711 ASSERT(instr->context() != NULL);
3712 ASSERT(ToRegister(instr->context()).is(cp));
3713 Register input = ToRegister(instr->value());
3714 Register temp1 = ToRegister(instr->temp1());
3715 Register temp2 = ToRegister(instr->temp2());
3716 Register result_bits = ToRegister(instr->temp3());
3717 Register result = ToRegister(instr->result());
3719 Label runtime_allocation;
3721 // Deoptimize if the input is not a HeapNumber.
3722 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
3723 DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
3724 instr->environment());
3726 // If the argument is positive, we can return it as-is, without any need to
3727 // allocate a new HeapNumber for the result. We have to do this in integer
3728 // registers (rather than with fabs) because we need to be able to distinguish
3730 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
3731 __ Mov(result, input);
3732 __ Tbz(result_bits, kXSignBit, exit);
3734 // Calculate abs(input) by clearing the sign bit.
3735 __ Bic(result_bits, result_bits, kXSignMask);
3737 // Allocate a new HeapNumber to hold the result.
3738 // result_bits The bit representation of the (double) result.
3739 __ Bind(allocation_entry);
3740 __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
3741 // The inline (non-deferred) code will store result_bits into result.
3744 __ Bind(&runtime_allocation);
3745 if (FLAG_debug_code) {
3746 // Because result is in the pointer map, we need to make sure it has a valid
3747 // tagged value before we call the runtime. We speculatively set it to the
3748 // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
3751 Register input = ToRegister(instr->value());
3752 __ JumpIfSmi(result, &result_ok);
3753 __ Cmp(input, result);
3754 __ Assert(eq, kUnexpectedValue);
3755 __ Bind(&result_ok);
3758 { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3759 CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
3761 __ StoreToSafepointRegisterSlot(x0, result);
3763 // The inline (non-deferred) code will store result_bits into result.
void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
  // Class for deferred case.
  class DeferredMathAbsTagged: public LDeferredCode {
   public:
    DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTagged(instr_, exit(),
                                         allocation_entry());
    }
    virtual LInstruction* instr() { return instr_; }
    Label* allocation_entry() { return &allocation; }
   private:
    LMathAbsTagged* instr_;
    Label allocation;
  };

  // TODO(jbramley): The early-exit mechanism would skip the new frame handling
  // in GenerateDeferredCode. Tidy this up.
  ASSERT(!NeedsDeferredFrame());

  DeferredMathAbsTagged* deferred =
      new(zone()) DeferredMathAbsTagged(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
         instr->hydrogen()->value()->representation().IsSmi());
  Register input = ToRegister(instr->value());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());
  Label done;

  // Handle smis inline.
  // We can treat smis as 64-bit integers, since the (low-order) tag bits will
  // never get set by the negation. This is therefore the same as the Integer32
  // case in DoMathAbs, except that it operates on 64-bit values.
  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));

  __ JumpIfNotSmi(input, deferred->entry());

  __ Abs(result, input, NULL, &done);

  // The result is the magnitude (abs) of the smallest value a smi can
  // represent, encoded as a double.
  __ Mov(result_bits, double_to_rawbits(0x80000000));
  __ B(deferred->allocation_entry());

  __ Bind(deferred->exit());
  __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));

  __ Bind(&done);
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
  DoubleRegister double_temp2 = double_scratch();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register temp3 = ToRegister(instr->temp3());

  MathExpGenerator::EmitMathExp(masm(), input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  // TODO(jbramley): If we could provide a double result, we could use frintm
  // and produce a valid double result in a single instruction.
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr->environment());
  }

  __ Fcvtms(result, input);

  // Check that the result fits into a 32-bit integer.
  //  - The result did not overflow.
  __ Cmp(result, Operand(result, SXTW));
  //  - The input was not NaN.
  __ Fccmp(input, input, NoFlag, eq);
  DeoptimizeIf(ne, instr->environment());
}


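// Worked example for the range check above: for an input of 2147483648.0
// (2^31), Fcvtms produces the 64-bit value 0x0000000080000000. Sign-extending
// its low word (SXTW) gives 0xFFFFFFFF80000000, so the Cmp reports "ne" and
// we deoptimize, since the result does not fit in a 32-bit integer. NaN
// converts to 0 (which fits), so it is caught separately by the Fccmp.

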
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  Label not_kmin_int, done;
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr->environment());
  }
  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    // Note that we could emit branch-free code, but that would need one more
    // register.
    if (divisor == -1) {
      DeoptimizeIf(vs, instr->environment());
    } else {
      __ B(vc, &not_kmin_int);
      __ Mov(result, kMinInt / divisor);
      __ B(&done);
    }
  }
  __ bind(&not_kmin_int);
  __ Mov(result, Operand(result, ASR, shift));
  __ bind(&done);
}


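// Worked example for the negative-divisor path above: for dividend == 5 and
// divisor == -2 (shift == 1), Negs produces -5 and the arithmetic shift gives
// -5 >> 1 == -3, which is Math.floor(5 / -2) == Math.floor(-2.5). An
// arithmetic right shift is a flooring (not truncating) division by a power
// of two, which is exactly what is needed here.

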
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  ASSERT(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp(dividend, 0);
    DeoptimizeIf(eq, instr->environment());
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister32(instr->temp());
  ASSERT(!AreAliased(temp, dividend, result));
  Label needs_adjustment, done;
  __ Cmp(dividend, 0);
  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ B(&done);
  __ bind(&needs_adjustment);
  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ Sub(result, result, Operand(1));
  __ bind(&done);
}


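// Worked example for the adjustment above: for dividend == -7 and
// divisor == 2, the truncating division would give -3, but the flooring
// result is -4. Since dividend < 0 we take the needs_adjustment path:
// temp = -7 + 1 = -6, TruncatingDiv gives -6 / 2 = -3, and the final Sub
// yields -3 - 1 = -4 == Math.floor(-3.5).

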
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register remainder = ToRegister32(instr->temp());
  Register result = ToRegister32(instr->result());

  // This can't cause an exception on ARM, so we can speculatively
  // execute it now.
  __ Sdiv(result, dividend, divisor);

  // Check for x / 0.
  DeoptimizeIfZero(divisor, instr->environment());

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // The V flag will be set iff dividend == kMinInt.
    __ Cmp(dividend, 1);
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);
    __ Ccmp(dividend, 0, ZFlag, mi);
    // "divisor" can't be zero here because the code would have already been
    // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
    // In this case we need to deoptimize to produce a -0.
    DeoptimizeIf(eq, instr->environment());
  }

  Label done;
  // If both operands have the same sign then we are done.
  __ Eor(remainder, dividend, divisor);
  __ Tbz(remainder, kWSignBit, &done);

  // Check if the result needs to be corrected.
  __ Msub(remainder, result, divisor, dividend);
  __ Cbz(remainder, &done);
  __ Sub(result, result, 1);

  __ Bind(&done);
}


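// Worked example for the correction above: for dividend == -7 and
// divisor == 2, Sdiv truncates towards zero and produces -3. The operands
// have different signs, and Msub leaves a non-zero remainder
// (-7 - (-3 * 2) == -1), so the result is decremented to -4, the flooring
// result Math.floor(-3.5).

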
void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToDoubleRegister(instr->value()).is(d0));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  ASSERT(ToDoubleRegister(instr->result()).Is(d0));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister32(instr->value());
  Register result = ToRegister32(instr->result());
  __ Clz(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  Label done;

  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
  //  Math.pow(-Infinity, 0.5) == +Infinity
  //  Math.pow(-0.0, 0.5) == +0.0

  // Catch -infinity inputs first.
  // TODO(jbramley): A constant infinity register would be helpful here.
  __ Fmov(double_scratch(), kFP64NegativeInfinity);
  __ Fcmp(double_scratch(), input);
  __ Fabs(result, input);
  __ B(&done, eq);

  // Add +0.0 to convert -0.0 to +0.0.
  __ Fadd(double_scratch(), input, fp_zero);
  __ Fsqrt(result, double_scratch());

  __ Bind(&done);
}


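// Note on the Fadd above: under IEEE-754 round-to-nearest, (-0.0) + (+0.0)
// is +0.0, so the addition canonicalizes a -0.0 input to +0.0 before the
// square root. Without it, Fsqrt(-0.0) would return -0.0, but
// Math.pow(-0.0, 0.5) must be +0.0.

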
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(x11));
  ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
  ASSERT(ToDoubleRegister(instr->left()).is(d0));
  ASSERT(ToDoubleRegister(instr->result()).is(d0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(x11, &no_deopt);
    __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
    DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
                        instr->environment());
    __ Bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
    // supports large integer exponents.
    Register exponent = ToRegister(instr->right());
    __ Sxtw(exponent, exponent);
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  // TODO(jbramley): We could provide a double result here using frint.
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
  Register result = ToRegister(instr->result());
  Label try_rounding;
  Label done;

  // Math.round() rounds to the nearest integer, with ties going towards
  // +infinity. This does not match any IEEE-754 rounding mode.
  //  - Infinities and NaNs are propagated unchanged, but cause deopts because
  //    they can't be represented as integers.
  //  - The sign of the result is the same as the sign of the input. This means
  //    that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
  //    result of -0.0.
  DoubleRegister dot_five = double_scratch();
  __ Fmov(dot_five, 0.5);
  __ Fabs(temp1, input);
  __ Fcmp(temp1, dot_five);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ B(hi, &try_rounding);  // hi so NaN will also branch.

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Fmov(result, input);
    DeoptimizeIfNegative(result, instr->environment());  // [-0.5, -0.0].
  }
  __ Fcmp(input, dot_five);
  __ Mov(result, 1);  // +0.5.
  // The remaining cases, [+0, +0.5[ (or [-0.5, +0.5[ when
  // kBailoutOnMinusZero is not set), return 0 (xzr).
  __ Csel(result, result, xzr, eq);
  __ B(&done);

  __ Bind(&try_rounding);
  // Since we're providing a 32-bit result, we can implement ties-to-infinity
  // by adding 0.5 to the input, then taking the floor of the result. This does
  // not work for very large positive doubles because adding 0.5 would cause an
  // intermediate rounding stage, so a different approach will be necessary if
  // a double result is needed.
  __ Fadd(temp1, input, dot_five);
  __ Fcvtms(result, temp1);

  // Deopt if
  //  * the input was NaN, or
  //  * the result is not representable using a 32-bit integer.
  __ Fcmp(input, 0.0);
  __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
  DeoptimizeIf(ne, instr->environment());

  __ Bind(&done);
}


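// Worked examples for the rounding above: for input == 2.5, the Fadd gives
// 3.0 and Fcvtms returns 3. For input == -2.5, the Fadd gives -2.0 and
// Fcvtms returns -2, matching Math.round's ties-towards-+infinity behaviour
// (Math.round(-2.5) == -2). Inputs with a magnitude of at most 0.5, where
// the Fadd itself could round incorrectly, never reach this path: they are
// handled by the early-out code before try_rounding.

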
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fsqrt(result, input);
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  HMathMinMax::Operation op = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsInteger32()) {
    Register result = ToRegister32(instr->result());
    Register left = ToRegister32(instr->left());
    Operand right = ToOperand32I(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else if (instr->hydrogen()->representation().IsSmi()) {
    Register result = ToRegister(instr->result());
    Register left = ToRegister(instr->left());
    Operand right = ToOperand(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    DoubleRegister result = ToDoubleRegister(instr->result());
    DoubleRegister left = ToDoubleRegister(instr->left());
    DoubleRegister right = ToDoubleRegister(instr->right());

    if (op == HMathMinMax::kMathMax) {
      __ Fmax(result, left, right);
    } else {
      ASSERT(op == HMathMinMax::kMathMin);
      __ Fmin(result, left, right);
    }
  }
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister32(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Cmp(dividend, 0);
    __ B(pl, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ Neg(dividend, dividend);
    __ And(dividend, dividend, mask);
    __ Negs(dividend, dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment());
    }
    __ B(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, mask);
  __ bind(&done);
}


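// Worked example for the branching version above: for divisor == +/-8 the
// mask is 7. With dividend == -13, the negative path computes
// Neg(-13) == 13, then 13 & 7 == 5, then Negs(5) == -5, which matches
// JavaScript's -13 % 8 == -5 (the result takes the sign of the dividend).

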
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  Register temp = ToRegister32(instr->temp());
  ASSERT(!AreAliased(dividend, result, temp));

  if (divisor == 0) {
    Deoptimize(instr->environment());
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Sxtw(dividend.X(), dividend);
  __ Mov(temp, Abs(divisor));
  __ Smsubl(result.X(), result, temp, dividend.X());

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Cbnz(result, &remainder_not_zero);
    DeoptimizeIfNegative(dividend, instr->environment());
    __ bind(&remainder_not_zero);
  }
}


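// Worked example for the sequence above: for dividend == -7 and divisor == 3,
// TruncatingDiv computes trunc(-7 / 3) == -2 via a magic-number multiply, and
// Smsubl then forms the remainder -7 - (-2 * 3) == -1, matching JavaScript's
// -7 % 3 == -1.

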
void LCodeGen::DoModI(LModI* instr) {
  Register dividend = ToRegister32(instr->left());
  Register divisor = ToRegister32(instr->right());
  Register result = ToRegister32(instr->result());

  Label deopt, done;
  // modulo = dividend - quotient * divisor
  __ Sdiv(result, dividend, divisor);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    // Combine the deoptimization sites.
    Label ok;
    __ Cbnz(divisor, &ok);
    __ Bind(&deopt);
    Deoptimize(instr->environment());
    __ Bind(&ok);
  }
  __ Msub(result, result, divisor, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cbnz(result, &done);
    if (deopt.is_bound()) {  // TODO(all) This is a hack, remove this...
      __ Tbnz(dividend, kWSignBit, &deopt);
    } else {
      DeoptimizeIfNegative(dividend, instr->environment());
    }
  }
  __ Bind(&done);
}


void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
  ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
  bool is_smi = instr->hydrogen()->representation().IsSmi();
  Register result =
      is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
  Register left =
      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
  int32_t right = ToInteger32(instr->right());
  ASSERT((right > -kMaxInt) || (right < kMaxInt));

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero) {
    if (right < 0) {
      // The result is -0 if right is negative and left is zero.
      DeoptimizeIfZero(left, instr->environment());
    } else if (right == 0) {
      // The result is -0 if the right is zero and the left is negative.
      DeoptimizeIfNegative(left, instr->environment());
    }
  }

  switch (right) {
    // Cases which can detect overflow.
    case -1:
      if (can_overflow) {
        // Only 0x80000000 can overflow here.
        __ Negs(result, left);
        DeoptimizeIf(vs, instr->environment());
      } else {
        __ Neg(result, left);
      }
      break;
    case 0:
      // This case can never overflow.
      __ Mov(result, 0);
      break;
    case 1:
      // This case can never overflow.
      __ Mov(result, left, kDiscardForSameWReg);
      break;
    case 2:
      if (can_overflow) {
        __ Adds(result, left, left);
        DeoptimizeIf(vs, instr->environment());
      } else {
        __ Add(result, left, left);
      }
      break;

    default:
      // Multiplication by constant powers of two (and some related values)
      // can be done efficiently with shifted operands.
      int32_t right_abs = Abs(right);

      if (IsPowerOf2(right_abs)) {
        int right_log2 = WhichPowerOf2(right_abs);

        if (can_overflow) {
          Register scratch = result;
          ASSERT(!AreAliased(scratch, left));
          __ Cls(scratch, left);
          __ Cmp(scratch, right_log2);
          DeoptimizeIf(lt, instr->environment());
        }

        if (right >= 0) {
          // result = left << log2(right)
          __ Lsl(result, left, right_log2);
        } else {
          // result = -left << log2(-right)
          if (can_overflow) {
            __ Negs(result, Operand(left, LSL, right_log2));
            DeoptimizeIf(vs, instr->environment());
          } else {
            __ Neg(result, Operand(left, LSL, right_log2));
          }
        }
        return;
      }

      // For the following cases, we could perform a conservative overflow check
      // with CLS as above. However the few cycles saved are likely not worth
      // the risk of deoptimizing more often than required.
      ASSERT(!can_overflow);

      if (right >= 0) {
        if (IsPowerOf2(right - 1)) {
          // result = left + left << log2(right - 1)
          __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
        } else if (IsPowerOf2(right + 1)) {
          // result = -left + left << log2(right + 1)
          __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
          __ Neg(result, result);
        } else {
          UNREACHABLE();
        }
      } else {
        if (IsPowerOf2(-right + 1)) {
          // result = left - left << log2(-right + 1)
          __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
        } else if (IsPowerOf2(-right - 1)) {
          // result = -left - left << log2(-right - 1)
          __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
          __ Neg(result, result);
        } else {
          UNREACHABLE();
        }
      }
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Register right = ToRegister32(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr->environment());
  }

  if (can_overflow) {
    __ Smull(result.X(), left, right);
    __ Cmp(result.X(), Operand(result, SXTW));
    DeoptimizeIf(ne, instr->environment());
  } else {
    __ Mul(result, left, right);
  }
}


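// Flag walkthrough for the -0 check above, with left == 0 and right == -5:
// Cmp(left, 0) sets Z. Because "ne" does not hold, the Ccmp skips its compare
// and forces the flags to ZFlag, so "eq" still holds; in general "eq" holds
// here whenever at least one operand is zero. The Ccmn then computes
// 0 + (-5), setting N, so "mi" holds and we deoptimize: 0 * -5 is -0, which
// cannot be represented as an integer.

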
void LCodeGen::DoMulS(LMulS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr->environment());
  }

  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
  if (can_overflow) {
    __ Smulh(result, left, right);
    __ Cmp(result, Operand(result.W(), SXTW));
    __ SmiTag(result);
    DeoptimizeIf(ne, instr->environment());
  } else {
    if (AreAliased(result, left, right)) {
      // All three registers are the same: half untag the input and then
      // multiply, giving a tagged result.
      STATIC_ASSERT((kSmiShift % 2) == 0);
      __ Asr(result, left, kSmiShift / 2);
      __ Mul(result, result, result);
    } else if (result.Is(left) && !left.Is(right)) {
      // Registers result and left alias, right is distinct: untag left into
      // result, and then multiply by right, giving a tagged result.
      __ SmiUntag(result, left);
      __ Mul(result, result, right);
    } else {
      ASSERT(!left.Is(result));
      // Registers result and right alias, left is distinct, or all registers
      // are distinct: untag right into result, and then multiply by left,
      // giving a tagged result.
      __ SmiUntag(result, right);
      __ Mul(result, left, result);
    }
  }
}


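// Worked example for the aliased case above: with kSmiShift == 32, the tagged
// value of 3 is 3 << 32. Asr by kSmiShift / 2 == 16 gives 3 << 16, and
// squaring that gives 9 << 32, which is exactly the tagged value of 9. The
// half-untagged multiply therefore produces a correctly tagged result with
// no separate retagging step.

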
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = ToRegister(instr->result());
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagU and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kHiddenAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
  } else {
    __ B(deferred->entry());
  }
  __ Bind(deferred->exit());
  __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
                                    LOperand* value,
                                    LOperand* temp1,
                                    LOperand* temp2) {
  Label slow, convert_and_store;
  Register src = ToRegister32(value);
  Register dst = ToRegister(instr->result());
  Register scratch1 = ToRegister(temp1);

  if (FLAG_inline_new) {
    Register scratch2 = ToRegister(temp2);
    __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
    __ B(&convert_and_store);
  }

  // Slow case: call the runtime system to do the number allocation.
  __ Bind(&slow);
  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ Mov(dst, 0);
  {
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // NumberTagU and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kHiddenAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, dst);
  }

  // Convert number to floating point and store in the newly allocated heap
  // number.
  __ Bind(&convert_and_store);
  DoubleRegister dbl_scratch = double_scratch();
  __ Ucvtf(dbl_scratch, src);
  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU: public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagU(instr_,
                                      instr_->value(),
                                      instr_->temp1(),
                                      instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register value = ToRegister32(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Cmp(value, Smi::kMaxValue);
  __ B(hi, deferred->entry());
  __ SmiTag(result, value.X());
  __ Bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  DoubleRegister result = ToDoubleRegister(instr->result());
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();

  Label done, load_smi;

  // Work out what untag mode we're working with.
  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    __ JumpIfSmi(input, &load_smi);

    Label convert_undefined;

    // Heap number map check.
    __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    if (can_convert_undefined_to_nan) {
      __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
                       &convert_undefined);
    } else {
      DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
                          instr->environment());
    }

    // Load heap number.
    __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
    if (instr->hydrogen()->deoptimize_on_minus_zero()) {
      DeoptimizeIfMinusZero(result, instr->environment());
    }
    __ B(&done);

    if (can_convert_undefined_to_nan) {
      __ Bind(&convert_undefined);
      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                          instr->environment());

      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ B(&done);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
    // Fall through to load_smi.
  }

  // Smi to double register conversion.
  __ Bind(&load_smi);
  __ SmiUntagToDouble(result, input);

  __ Bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    __ Push(ToRegister(argument));
  }
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
    // code managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ Push(x0);
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }

  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }

  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    Register stack_pointer = masm()->StackPointer();
    __ Mov(stack_pointer, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(fp, lr);
  }

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Drop(parameter_count + 1);
  } else {
    Register parameter_count = ToRegister(instr->parameter_count());
    __ DropBySMI(parameter_count);
  }
  __ Ret();

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           Register temp,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }

  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Add(temp, string, Operand(ToRegister32(index), SXTW));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1));
  }
  return FieldMemOperand(temp, SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    // Even though this lithium instruction comes with a temp register, we
    // can't use it here because we want to use "AtStart" constraints on the
    // inputs and the debug code here needs a scratch register.
    UseScratchRegisterScope temps(masm());
    Register dbg_temp = temps.AcquireX();

    __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
    __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));

    __ And(dbg_temp, dbg_temp,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
                             ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Ldrb(result, operand);
  } else {
    __ Ldrh(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    ASSERT(ToRegister(instr->context()).is(cp));
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
                                 encoding_mask);
  }
  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Strb(value, operand);
  } else {
    __ Strh(value, operand);
  }
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIfNegative(input.W(), instr->environment());
  }
  __ SmiTag(output, input);
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->needs_check()) {
    DeoptimizeIfNotSmi(input, instr->environment());
  }

  __ SmiUntag(result, input);
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister32(instr->left());
  Register result = ToRegister32(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister32(instr->right());
    switch (instr->op()) {
      case Token::ROR: __ Ror(result, left, right); break;
      case Token::SAR: __ Asr(result, left, right); break;
      case Token::SHL: __ Lsl(result, left, right); break;
      case Token::SHR:
        if (instr->can_deopt()) {
          Label right_not_zero;
          __ Cbnz(right, &right_not_zero);
          DeoptimizeIfNegative(left, instr->environment());
          __ Bind(&right_not_zero);
        }
        __ Lsr(result, left, right);
        break;
      default: UNREACHABLE();
    }
  } else {
    ASSERT(right_op->IsConstantOperand());
    int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr->environment());
      }
      __ Mov(result, left, kDiscardForSameWReg);
    } else {
      switch (instr->op()) {
        case Token::ROR: __ Ror(result, left, shift_count); break;
        case Token::SAR: __ Asr(result, left, shift_count); break;
        case Token::SHL: __ Lsl(result, left, shift_count); break;
        case Token::SHR: __ Lsr(result, left, shift_count); break;
        default: UNREACHABLE();
      }
    }
  }
}


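// Note on the SHR deoptimizations above: SHR implements JavaScript's unsigned
// shift (>>>), whose result is a uint32. A zero shift count leaves the value
// unchanged, so an input with the sign bit set (for example 0x80000000, i.e.
// 2147483648 as a uint32) would exceed kMaxInt and cannot be represented as
// an int32; in that case the instruction must deoptimize.

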
void LCodeGen::DoShiftS(LShiftS* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  // Only ROR by register needs a temp.
  ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
         (instr->temp() == NULL));

  if (right_op->IsRegister()) {
    Register right = ToRegister(instr->right());
    switch (instr->op()) {
      case Token::ROR: {
        Register temp = ToRegister(instr->temp());
        __ Ubfx(temp, right, kSmiShift, 5);
        __ SmiUntag(result, left);
        __ Ror(result.W(), result.W(), temp.W());
        __ SmiTag(result);
        break;
      }
      case Token::SAR:
        __ Ubfx(result, right, kSmiShift, 5);
        __ Asr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      case Token::SHL:
        __ Ubfx(result, right, kSmiShift, 5);
        __ Lsl(result, left, result);
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          Label right_not_zero;
          __ Cbnz(right, &right_not_zero);
          DeoptimizeIfNegative(left, instr->environment());
          __ Bind(&right_not_zero);
        }
        __ Ubfx(result, right, kSmiShift, 5);
        __ Lsr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      default: UNREACHABLE();
    }
  } else {
    ASSERT(right_op->IsConstantOperand());
    int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr->environment());
      }
      __ Mov(result, left);
    } else {
      switch (instr->op()) {
        case Token::ROR:
          __ SmiUntag(result, left);
          __ Ror(result.W(), result.W(), shift_count);
          __ SmiTag(result);
          break;
        case Token::SAR:
          __ Asr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        case Token::SHL:
          __ Lsl(result, left, shift_count);
          break;
        case Token::SHR:
          __ Lsr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        default: UNREACHABLE();
      }
    }
  }
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ Debug("LDebugBreak", 0, BREAK);
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Register scratch1 = x5;
  Register scratch2 = x6;
  ASSERT(instr->IsMarkedAsCall());

  ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
  // TODO(all): if Mov could handle objects in new space then it could be used
  // here.
  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
  __ Push(cp, scratch1, scratch2);  // The context is the first argument.
  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(hs, &done);

    PredictableCodeSizeScope predictable(masm_,
                                         Assembler::kCallSizeWithRelocation);
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ Bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(lo, deferred_stack_check->entry());

    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ Bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  Register temp = ToRegister(instr->temp());
  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ Ldr(scratch, target);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
                       instr->environment());
    } else {
      __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
    }
  }

  __ Str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetLinkRegisterState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }
  __ Bind(&skip_assignment);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = ToRegister(instr->temp1());

  // Load the cell.
  __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register payload = ToRegister(instr->temp2());
    __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
    DeoptimizeIfRoot(
        payload, Heap::kTheHoleValueRootIndex, instr->environment());
  }

  // Store the value.
  __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = no_reg;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    ASSERT(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
    scratch = ToRegister(instr->temp());
  }

  MemOperand dst =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->additional_index());

  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
      (elements_kind == FLOAT32_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    DoubleRegister dbl_scratch = double_scratch();
    __ Fcvt(dbl_scratch.S(), value);
    __ Str(dbl_scratch.S(), dst);
  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
             (elements_kind == FLOAT64_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ Str(value, dst);
  } else {
    Register value = ToRegister(instr->value());

    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ Strb(value, dst);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Strh(value, dst);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Str(value.W(), dst);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register store_base = no_reg;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
                                                 instr->additional_index());
    store_base = elements;
  } else {
    store_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
                               instr->hydrogen()->elements_kind());
    offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
  }

  if (instr->NeedsCanonicalization()) {
    DoubleRegister dbl_scratch = double_scratch();
    __ Fmov(dbl_scratch,
            FixedDoubleArray::canonical_not_the_hole_nan_as_double());
    __ Fmaxnm(dbl_scratch, dbl_scratch, value);
    __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
  } else {
    __ Str(value, FieldMemOperand(store_base, offset));
  }
}


void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = no_reg;
  Register store_base = no_reg;
  Register key = no_reg;
  int offset = 0;

  if (!instr->key()->IsConstantOperand() ||
      instr->hydrogen()->NeedsWriteBarrier()) {
    scratch = ToRegister(instr->temp());
  }

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    store_base = scratch;
    key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
                               instr->hydrogen()->elements_kind());
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  Representation representation = instr->hydrogen()->value()->representation();
  if (representation.IsInteger32()) {
    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
    __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
             Representation::Integer32());
  } else {
    __ Store(value, FieldMemOperand(store_base, offset), representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    ASSERT(representation.IsTagged());
    // This assignment may cause element_addr to alias store_base.
    Register element_addr = scratch;
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the modified element and store it into the key
    // register.
    __ Add(element_addr, store_base, offset - kHeapObjectTag);
    __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
                   kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).Is(x2));
  ASSERT(ToRegister(instr->key()).Is(x1));
  ASSERT(ToRegister(instr->value()).Is(x0));

  Handle<Code> ic = instr->strict_mode() == STRICT
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  HObjectAccess access = instr->hydrogen()->access();
  Handle<Map> transition = instr->transition();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    ASSERT(transition.is_null());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    __ Store(value, MemOperand(object, offset), representation);
    return;
  } else if (representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
    return;
  }

  Register value = ToRegister(instr->value());

  SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
      ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  ASSERT(!(representation.IsSmi() &&
           instr->value()->IsConstantOperand() &&
           !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
  if (representation.IsHeapObject() &&
      !instr->hydrogen()->value()->type().IsHeapObject()) {
    DeoptimizeIfSmi(value, instr->environment());

    // We know that value is not a smi now, so we can omit the check below.
    check_needed = OMIT_SMI_CHECK;
  }

  if (!transition.is_null()) {
    // Store the new map value.
    Register new_map_value = ToRegister(instr->temp0());
    __ Mov(new_map_value, Operand(transition));
    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          new_map_value,
                          ToRegister(instr->temp1()),
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  Register destination;
  if (access.IsInobject()) {
    destination = object;
  } else {
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
    destination = temp0;
  }

  if (representation.IsSmi() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(destination, offset));
    __ AssertSmi(temp0);
    // If destination aliased temp0, restore it to the address calculated
    // earlier.
    if (destination.Is(temp0)) {
      ASSERT(!access.IsInobject());
      __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
    }
#endif
    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
             Representation::Integer32());
  } else {
    __ Store(value, FieldMemOperand(destination, offset), representation);
  }
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    __ RecordWriteField(destination,
                        offset,
                        value,                       // Clobbered.
                        ToRegister(instr->temp1()),  // Clobbered.
                        GetLinkRegisterState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        check_needed);
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->value()).is(x0));
  ASSERT(ToRegister(instr->object()).is(x1));

  // Name must be in x2.
  __ Mov(x2, Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).Is(x1));
  ASSERT(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTagAndPush(index);

  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());

  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, Operand(char_code, SXTW, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ Push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  InlineSmiCheckInfo::EmitNotInlined(masm());

  Condition condition = TokenToCondition(op, false);

  EmitCompareAndBranch(instr, condition, x0, 0);
}

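// Integer subtraction. If the operation can overflow, use the flag-setting
// Subs and deoptimize on overflow (the vs condition); otherwise a plain Sub
// suffices. DoSubI works on 32-bit W registers, DoSubS on full X registers.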
void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToOperand32I(instr->right());

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}

void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}

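// Deferred path for tagged-to-int32 conversion when the input is not a smi.
// In truncating mode, heap numbers are truncated modulo 2^32 and the oddballs
// true, false and undefined become 1, 0 and 0 respectively; anything else
// deoptimizes. In non-truncating mode, only heap numbers whose value converts
// exactly to int32 (with an optional -0 check) are accepted.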
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  // Load heap object map.
  __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to undefined check.
    __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);

    // A heap number: load value and convert to int32 using truncating
    // function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);

    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output contains zero; undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                        instr->environment());
  } else {
    Register output = ToRegister32(instr->result());

    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    // Deoptimize if it's not a heap number.
    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
                        instr->environment());

    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr->environment());

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      // The result is zero: check the sign of the input double to catch -0.
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr->environment());
    }
  }
  __ Bind(&done);
}

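// Convert a tagged value to an untagged integer. Smis are untagged inline;
// heap objects are handled by DoDeferredTaggedToI above.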
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}

void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}

void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).Is(x0));
  ASSERT(ToRegister(instr->result()).Is(x0));
  __ Push(x0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}

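// Materialize a regexp literal. If the literal's slot in the literals array
// is still undefined, call the runtime to create the boilerplate object;
// then make a shallow, fixed-size copy of the boilerplate, falling back to a
// runtime allocation if new space is exhausted.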
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create regexp literal using runtime function.
  // Result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

  __ Bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);

  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}

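// Change an object's elements kind. A simple map change (where the backing
// store layout is unchanged) just stores the new map with a write barrier;
// other transitions call TransitionElementsKindStub to rewrite the backing
// store.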
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map = ToRegister(instr->temp2());
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
                        GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ Mov(x0, object);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ Bind(&not_applicable);
}

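// Deoptimize if the JSArray is immediately followed by an AllocationMemento,
// so that elements-kind transitions driven by allocation-site tracking are
// not missed by the optimized code.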
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
  Deoptimize(instr->environment());
  __ Bind(&no_memento_found);
}

void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}

void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ Push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}

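// Branch on the result of comparing 'typeof value' against a string literal.
// Each recognized literal gets a specialized inline check; a literal that
// can never match branches straight to the false label.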
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  if (type_name->Equals(heap()->number_string())) {
    ASSERT(instr->temp1() != NULL);
    Register map = ToRegister(instr->temp1());

    __ JumpIfSmi(value, true_label);
    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
    EmitBranch(instr, eq);

  } else if (type_name->Equals(heap()->string_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(
        value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else if (type_name->Equals(heap()->symbol_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(value, Heap::kNullValueRootIndex);
    EmitBranch(instr, eq);

  } else if (type_name->Equals(heap()->undefined_string())) {
    ASSERT(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    ASSERT(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // HeapObject's type has been loaded into type register by JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (type_name->Equals(heap()->object_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    if (!FLAG_harmony_typeof) {
      __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    }
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else {
    __ B(false_label);
  }
}

void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}

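// Deoptimize if the object's map does not match the expected map value.
// (This guards for-in loops against the map changing mid-iteration.)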
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr->environment());
}

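// Implement receiver wrapping for sloppy-mode calls: null and undefined are
// replaced by the global receiver, receivers that are not JS objects cause a
// deoptimization, and values are passed unchanged to strict-mode functions
// and builtins.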
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, deopt;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &done);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  __ JumpIfSmi(receiver, &deopt);
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ Mov(result, receiver);
  __ B(ge, &done);
  // Otherwise, fall through to deopt.

  __ Bind(&deopt);
  Deoptimize(instr->environment());

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));

  __ Bind(&done);
}

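// Load a field by smi index: a non-negative index addresses an in-object
// property, a negative index addresses the out-of-object properties array.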
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  Label out_of_object, done;
  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(&done);
}

} }  // namespace v8::internal