// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "v8.h"

#include "arm64/lithium-codegen-arm64.h"
#include "arm64/lithium-gap-resolver-arm64.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {

#define __ masm()->


class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags and they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen), cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ B(InvertCondition(cond_), label);
  }

 private:
  Condition cond_;
};


// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen, Condition cond,
                   const Register& lhs, const Operand& rhs)
      : BranchGenerator(codegen), cond_(cond), lhs_(lhs), rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};


// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen, Condition cond,
                const Register& value, uint64_t mask)
      : BranchGenerator(codegen), cond_(cond), value_(value), mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq: __ TestAndBranchIfAllClear(value_, mask_, label); break;
      case ne: __ TestAndBranchIfAnySet(value_, mask_, label); break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq: __ TestAndBranchIfAnySet(value_, mask_, label); break;
      case ne: __ TestAndBranchIfAllClear(value_, mask_, label); break;
      default:
        __ Tst(value_, mask_);
        __ B(InvertCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};


// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
      : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};


// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};


// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};


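// Illustrative sketch (not part of the original file): how these generators
// are consumed. EmitBranchGeneric(), defined further down, calls Emit() or
// EmitInverted() depending on which successor block falls through, so every
// generator has to provide both directions. Assuming an LCodeGen* "codegen"
// and labels "if_true"/"if_false":
//
//   BranchOnCondition branch(codegen, eq);
//   branch.Emit(&if_true);           // emits "b.eq if_true"
//   branch.EmitInverted(&if_false);  // emits "b.ne if_false"

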
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);

    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


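// Illustrative sketch (not part of the original file): for an environment with
// one inlined JS frame, the recursion above emits commands outermost-first,
// roughly:
//
//   BeginJSFrame(outer_ast_id, closure_id, outer_height)
//     Store* ...   // one command per outer-frame value
//   BeginJSFrame(inner_ast_id, inner_closure_id, inner_height)
//     Store* ...   // one command per inner-frame value
//
// The deoptimizer later replays this stream to materialize both frames.

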
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


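// Illustrative sketch (not part of the original file): the linear scan above
// deduplicates literals, so registering the same handle twice returns the same
// index. With hypothetical handles "a" and "b":
//
//   int i0 = DefineDeoptimizationLiteral(a);  // 0, appended
//   int i1 = DefineDeoptimizationLiteral(b);  // 1, appended
//   int i2 = DefineDeoptimizationLiteral(a);  // 0 again, nothing appended

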
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).Is(x1));
  ASSERT(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  after_push_argument_ = false;
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  after_push_argument_ = false;

  ASSERT(ToRegister(instr->result()).is(x0));
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
  after_push_argument_ = false;

  ASSERT(ToRegister(instr->result()).is(x0));
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }

  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp, zone());
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


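// Illustrative sketch (not part of the original file): SaveCallerDoubles() and
// RestoreCallerDoubles() mirror each other. With two allocated double
// registers at allocation indices 0 and 1, the save loop is equivalent to:
//
//   __ Poke(FPRegister::FromAllocationIndex(0), 0 * kDoubleSize);
//   __ Poke(FPRegister::FromAllocationIndex(1), 1 * kDoubleSize);
//
// and the restore loop issues the matching Peek sequence in the same order.

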
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t FLAG in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
      __ Poke(x10, receiver_offset);

      __ Bind(&ok);
    }
  }

  ASSERT(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ Claim(slots, kPointerSize);
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in x1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
                                  GetLinkRegisterState(), kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }

  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ Claim(slots);
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp, cp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ Bind(&deopt_jump_table_[i]->label);
    Address entry = deopt_jump_table_[i]->address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (deopt_jump_table_[i]->needs_frame) {
      ASSERT(!info()->saves_caller_doubles());

      UseScratchRegisterScope temps(masm());
      Register stub_deopt_entry = temps.AcquireX();
      Register stub_marker = temps.AcquireX();

      __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ B(&needs_frame);
      } else {
        __ Bind(&needs_frame);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
        __ Push(lr, fp, cp, stub_marker);
        __ Add(fp, __ StackPointer(), 2 * kPointerSize);
        __ Call(stub_deopt_entry);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    }
    masm()->CheckConstPool(false, false);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::DeoptimizeBranch(
    LEnvironment* environment,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  ASSERT(environment->HasBeenRegistered());
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(x1, x1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    ASSERT(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to build frame, or restore caller doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last()->address != entry) ||
        (deopt_jump_table_.last()->bailout_type != bailout_type) ||
        (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry* table_entry =
          new(zone()) Deoptimizer::JumpTableEntry(entry,
                                                  bailout_type,
                                                  !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ B(&deopt_jump_table_.last()->label,
         branch_type, reg, bit);
  }
}


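// Illustrative sketch (not part of the original file): the BranchType argument
// lets the wrappers below reach the deopt target with a single A64 branch:
//
//   DeoptimizeIfZero(x0, env);       // "cbz x0, <deopt>"      (reg_zero)
//   DeoptimizeIfBitSet(x0, 0, env);  // "tbnz x0, #0, <deopt>" (reg_bit_set)
//   DeoptimizeIf(eq, env);           // "b.eq <deopt>"
//
// where <deopt> is the shared jump-table entry emitted by
// GenerateDeoptJumpTable(), or the deopt entry itself for the "always" case.

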
void LCodeGen::Deoptimize(LEnvironment* environment,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
}


void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
  DeoptimizeBranch(environment, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, environment);
}


void LCodeGen::DeoptimizeIfSmi(Register rt,
                               LEnvironment* environment) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
}


void LCodeGen::DeoptimizeIfRoot(Register rt,
                                Heap::RootListIndex index,
                                LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, environment);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt,
                                   Heap::RootListIndex index,
                                   LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, environment);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
                                     LEnvironment* environment) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, environment);
}


void LCodeGen::DeoptimizeIfBitSet(Register rt,
                                  int bit,
                                  LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_set, rt, bit);
}


void LCodeGen::DeoptimizeIfBitClear(Register rt,
                                    int bit,
                                    LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      ASSERT((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


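// Illustrative sketch (not part of the original file): with hypothetical
// values last_lazy_deopt_pc_ == 40, space_needed == 16 and current_pc == 48,
// the loop above emits 40 + 16 - 48 = 8 bytes of padding, i.e. two 4-byte A64
// nops, so that lazy-deopt patching can never overwrite the previous patch
// site.

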
Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  ASSERT((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  ASSERT(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());
}


Operand LCodeGen::ToOperand(LOperand* op) {
  ASSERT(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


Operand LCodeGen::ToOperand32I(LOperand* op) {
  return ToOperand32(op, SIGNED_INT32);
}


Operand LCodeGen::ToOperand32U(LOperand* op) {
  return ToOperand32(op, UNSIGNED_INT32);
}


Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
  ASSERT(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return (signedness == SIGNED_INT32)
          ? Operand(constant->Integer32Value())
          : Operand(static_cast<uint32_t>(constant->Integer32Value()));
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}


static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize;
}


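// Illustrative sketch (not part of the original file): parameter indices are
// negative when there is no eager frame, so the formula above maps index -1
// to offset 0, index -2 to kPointerSize, and so on, walking away from the
// stack pointer one slot per parameter.

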
MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
  ASSERT(op != NULL);
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    int fp_offset = StackSlotOffset(op->index());
    if (op->index() >= 0) {
      // Loads and stores have a bigger reach in positive offset than negative.
      // When the load or the store can't be done in one instruction via fp
      // (too big negative offset), we try to access via jssp (positive offset).
      // We can reference a stack slot from jssp only if jssp references the end
      // of the stack slots. It's not the case when:
      //  - stack_mode != kCanUseStackPointer: this is the case when deferred
      //    code has saved the registers.
      //  - after_push_argument_: arguments have been pushed for a call.
      //  - inlined_arguments_: inlined arguments have been pushed once. All the
      //    remainder of the function cannot trust jssp any longer.
      //  - saves_caller_doubles: some double registers have been pushed, jssp
      //    references the end of the double registers and not the end of the
      //    stack slots.
      // Also, if the offset from fp is small enough to make a load/store in
      // one instruction, we use a fp access.
      if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
          !inlined_arguments_ && !is_int9(fp_offset) &&
          !info()->saves_caller_doubles()) {
        int jssp_offset =
            (GetStackSlotCount() - op->index() - 1) * kPointerSize;
        return MemOperand(masm()->StackPointer(), jssp_offset);
      }
    }
    return MemOperand(fp, fp_offset);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}


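// Illustrative sketch (not part of the original file): suppose op->index() is
// 20 in a function with 24 stack slots, and the fp-relative offset does not
// fit in the signed 9-bit immediate of a single load/store. The jssp path
// then computes (24 - 20 - 1) * kPointerSize = 24, so the slot is addressed
// as [jssp, #24] in one instruction.

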
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT: cond = eq; break;
    case Token::NE:
    case Token::NE_STRICT: cond = ne; break;
    case Token::LT: cond = is_unsigned ? lo : lt; break;
    case Token::GT: cond = is_unsigned ? hi : gt; break;
    case Token::LTE: cond = is_unsigned ? ls : le; break;
    case Token::GTE: cond = is_unsigned ? hs : ge; break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


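// Illustrative sketch (not part of the original file): the is_unsigned flag
// selects between the signed and unsigned A64 condition sets, e.g.
//
//   TokenToCondition(Token::LT, false);  // lt (signed less than)
//   TokenToCondition(Token::LT, true);   // lo (unsigned lower)

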
template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    __ B(chunk_->GetAssemblyLabel(right_block));
  }
}


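// Illustrative sketch (not part of the original file): if the false block is
// emitted immediately after the current one, a single "branch.Emit(true)" is
// enough and the false case falls through; if the true block is next, one
// inverted branch suffices. Only when neither successor is adjacent do we pay
// for the extra unconditional "b" above.

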
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  ASSERT((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  ASSERT((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  ASSERT((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    if (loc != 0) {
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
    } else {
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32I(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
  }
}


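// Illustrative sketch (not part of the original file): with constant operands
// length == 3 and index == 1, the offset above is
// ((3 - 1) + 1) * kPointerSize = 3 * kPointerSize. The "+ 1" covers the
// second of the two extra words described in the comment; the first is
// absorbed by the (length - index) subtraction.

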
void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = (instr->right()->IsConstantOperand())
      ? ToInteger32(LConstantOperand::cast(instr->right()))
      : Operand(ToRegister32(instr->right()), SXTW);

  ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);
}


void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToOperand32I(instr->right());
  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
    } else {
      __ B(deferred->entry());
    }
  } else {
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
  }

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register filler_count = temp1;
    Register filler = temp2;
    Register untagged_result = ToRegister(instr->temp3());

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Mov(filler_count, size / kPointerSize);
    } else {
      __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
    }

    __ Sub(untagged_result, result, kHeapObjectTag);
    __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ FillFields(untagged_result, filler_count, filler);
  } else {
    ASSERT(instr->temp3() == NULL);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // We're in a SafepointRegistersScope so we can use any scratch registers.
  Register size = x0;
  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
  } else {
    __ SmiTag(size, ToRegister32(instr->size()).X());
  }
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Mov(x10, Smi::FromInt(flags));
  __ Push(size, x10);

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister32(instr->length());

  Register elements = ToRegister(instr->elements());
  Register scratch = x5;
  ASSERT(receiver.Is(x0));  // Used for parameter count.
  ASSERT(function.Is(x1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).Is(x0));
  ASSERT(instr->IsMarkedAsCall());

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ Push(receiver);
  Register argc = receiver;
  receiver = NoReg;
  __ Sxtw(argc, length);
  // The arguments are at a one pointer size offset from elements.
  __ Add(elements, elements, 1 * kPointerSize);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Cbz(length, &invoke);
  __ Bind(&loop);
  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
  __ Push(scratch);
  __ Subs(length, length, 1);
  __ B(ne, &loop);

  __ Bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in argc (receiver) which is x0, as
  // expected by InvokeFunction.
  ParameterCount actual(argc);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  // We push some arguments and they will be popped in another block. We can't
  // trust that jssp references the end of the stack slots until the end of
  // the function.
  inlined_arguments_ = true;
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // When we are inside an inlined function, the arguments are the last things
    // that have been pushed on the stack. Therefore the arguments array can be
    // accessed directly from jssp.
    // However in the normal case, it is accessed via fp but there are two words
    // on the stack between fp and the arguments (the saved lr and fp) and the
    // LAccessArgumentsAt implementation takes that into account.
    // In the inlined case we need to subtract the size of 2 words to jssp to
    // get a pointer which will work well with LAccessArgumentsAt.
    ASSERT(masm()->StackPointer().Is(jssp));
    __ Sub(result, jssp, 2 * kPointerSize);
  } else {
    ASSERT(instr->temp() != NULL);
    Register previous_fp = ToRegister(instr->temp());

    __ Ldr(previous_fp,
           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ Ldr(result,
           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ Csel(result, fp, previous_fp, ne);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister32(instr->result());
  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());
  __ B(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ Ldr(result,
         UntagSmiMemOperand(result.X(),
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ Bind(&done);
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());

  switch (instr->op()) {
    case Token::ADD: __ Fadd(result, left, right); break;
    case Token::SUB: __ Fsub(result, left, right); break;
    case Token::MUL: __ Fmul(result, left, right); break;
    case Token::DIV: __ Fdiv(result, left, right); break;
    case Token::MOD: {
      // The ECMA-262 remainder operator is the remainder from a truncating
      // (round-towards-zero) division. Note that this differs from IEEE-754.
      //
      // TODO(jbramley): See if it's possible to do this inline, rather than by
      // calling a helper function. With frintz (to produce the intermediate
      // quotient) and fmsub (to calculate the remainder without loss of
      // precision), it should be possible. However, we would need support for
      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
      // support that yet.
      ASSERT(left.Is(d0));
      ASSERT(right.Is(d1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      ASSERT(result.Is(d0));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(x1));
  ASSERT(ToRegister(instr->right()).is(x0));
  ASSERT(ToRegister(instr->result()).is(x0));

  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBitI(LBitI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToOperand32U(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
  ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
  ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand32I(instr->index());
    Register length = ToRegister32(instr->length());
    __ Cmp(length, index);
    cond = ReverseConditionForCmp(cond);
  } else {
    Register index = ToRegister32(instr->index());
    Operand length = ToOperand32I(instr->length());
    __ Cmp(index, length);
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    __ Assert(InvertCondition(cond), kEliminatedBoundsCheckFailed);
  } else {
    DeoptimizeIf(cond, instr->environment());
  }
}


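// Illustrative sketch (not part of the original file): when the index is a
// constant, the Cmp operands above are swapped (Cmp(length, index) instead of
// Cmp(index, length)), so the condition is mirrored rather than inverted:
// "index hs length" becomes "length ls index", i.e.
// ReverseConditionForCmp(hs) == ls.

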
1804 void LCodeGen::DoBranch(LBranch* instr) {
1805 Representation r = instr->hydrogen()->value()->representation();
1806 Label* true_label = instr->TrueLabel(chunk_);
1807 Label* false_label = instr->FalseLabel(chunk_);
1809 if (r.IsInteger32()) {
1810 ASSERT(!info()->IsStub());
1811 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1812 } else if (r.IsSmi()) {
1813 ASSERT(!info()->IsStub());
1814 STATIC_ASSERT(kSmiTag == 0);
1815 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1816 } else if (r.IsDouble()) {
1817 DoubleRegister value = ToDoubleRegister(instr->value());
1818 // Test the double value. Zero and NaN are false.
1819 EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1821 ASSERT(r.IsTagged());
1822 Register value = ToRegister(instr->value());
1823 HType type = instr->hydrogen()->value()->type();
1825 if (type.IsBoolean()) {
1826 ASSERT(!info()->IsStub());
1827 __ CompareRoot(value, Heap::kTrueValueRootIndex);
1828 EmitBranch(instr, eq);
1829 } else if (type.IsSmi()) {
1830 ASSERT(!info()->IsStub());
1831 EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
1832 } else if (type.IsJSArray()) {
1833 ASSERT(!info()->IsStub());
1834 EmitGoto(instr->TrueDestination(chunk()));
1835 } else if (type.IsHeapNumber()) {
1836 ASSERT(!info()->IsStub());
1837 __ Ldr(double_scratch(), FieldMemOperand(value,
1838 HeapNumber::kValueOffset));
1839 // Test the double value. Zero and NaN are false.
1840 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1841 } else if (type.IsString()) {
1842 ASSERT(!info()->IsStub());
1843 Register temp = ToRegister(instr->temp1());
1844 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1845 EmitCompareAndBranch(instr, ne, temp, 0);
1847 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1848 // Avoid deopts in the case where we've never executed this path before.
1849 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ JumpIfRoot(
            value, Heap::kUndefinedValueRootIndex, false_label);
      }

      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ JumpIfRoot(
            value, Heap::kTrueValueRootIndex, true_label);
        __ JumpIfRoot(
            value, Heap::kFalseValueRootIndex, false_label);
      }

      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ JumpIfRoot(
            value, Heap::kNullValueRootIndex, false_label);
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        ASSERT(Smi::FromInt(0) == 0);
        __ Cbz(value, false_label);
        __ JumpIfSmi(value, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a smi, deopt.
        DeoptimizeIfSmi(value, instr->environment());
      }

      Register map = NoReg;
      Register scratch = NoReg;

      if (expected.NeedsMap()) {
        ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
        map = ToRegister(instr->temp1());
        scratch = ToRegister(instr->temp2());

        __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
          __ TestAndBranchIfAnySet(
              scratch, 1 << Map::kIsUndetectable, false_label);
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
        __ B(ge, true_label);
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
        __ B(ge, &not_string);
        __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
        __ Cbz(scratch, false_label);
        __ B(true_label);
        __ Bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
        __ B(eq, true_label);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        Label not_heap_number;
        __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);

        __ Ldr(double_scratch(),
               FieldMemOperand(value, HeapNumber::kValueOffset));
        __ Fcmp(double_scratch(), 0.0);
        // If we got a NaN (overflow bit is set), jump to the false branch.
        __ B(vs, false_label);
        __ B(eq, false_label);
        __ B(true_label);
        __ Bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        Deoptimize(instr->environment());
      }
    }
  }
}

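
// Invoke a function known at compile time. When the callee's formal parameter
// count matches the call-site arity, or the callee opts out of argument
// adaption with kDontAdaptArgumentsSentinel, its code entry is loaded and
// called directly; otherwise the generic InvokeFunction path performs the
// adapted invocation.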
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 Register function_reg) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  // The function interface relies on the following register assignments.
  ASSERT(function_reg.Is(x1) || function_reg.IsNone());
  Register arity_reg = x0;

  LPointerMap* pointers = instr->pointer_map();

  // If necessary, load the function object.
  if (function_reg.IsNone()) {
    function_reg = x1;
    __ LoadObject(function_reg, function);
  }

  if (FLAG_debug_code) {
    Label is_not_smi;
    // Try to confirm that function_reg (x1) is a tagged pointer.
    __ JumpIfNotSmi(function_reg, &is_not_smi);
    __ Abort(kExpectedFunctionObject);
    __ Bind(&is_not_smi);
  }

  if (can_invoke_directly) {
    // Change context.
    __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Set the arguments count if adaption is not needed. Assumes that x0 is
    // available to write to at this point.
    if (dont_adapt_arguments) {
      __ Mov(arity_reg, arity);
    }

    // Invoke function.
    __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
    __ Call(x10);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
  }
}

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->result()).Is(x0));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    // TODO(all): on ARM we use a call descriptor to specify a storage mode
    // but on ARM64 we only have one storage mode so it isn't necessary. Check
    // this understanding is correct.
    __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
  } else {
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
    __ Call(target);
  }
  generator.AfterCall();
  after_push_argument_ = false;
}

void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->function()).is(x1));

  if (instr->hydrogen()->pass_argument_count()) {
    __ Mov(x0, Operand(instr->arity()));
  }

  // Change context.
  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));

  // Load the code entry address and call it.
  __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
  __ Call(x10);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  after_push_argument_ = false;
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
  after_push_argument_ = false;
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(x0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
  after_push_argument_ = false;
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ Push(object);
    __ Mov(cp, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, temp);
  }
  DeoptimizeIfSmi(temp, instr->environment());
}

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps: public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register object = ToRegister(instr->value());
  Register map_reg = ToRegister(instr->temp());

  __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, object);
    __ Bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map);
    __ B(eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map);

  // We didn't match a map.
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ B(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr->environment());
  }

  __ Bind(&success);
}

void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  Register value = ToRegister(instr->value());
  ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
  DeoptimizeIfNotSmi(value, instr->environment());
}

void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first, last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ Cmp(scratch, first);
    if (first == last) {
      // If there is only one type in the interval, check for equality.
      DeoptimizeIf(ne, instr->environment());
    } else if (last == LAST_TYPE) {
      // We don't need to compare with the higher bound of the interval.
      DeoptimizeIf(lo, instr->environment());
    } else {
      // If we are below the lower bound, set the C flag and clear the Z flag
      // to force a deopt.
      __ Ccmp(scratch, last, CFlag, hs);
      DeoptimizeIf(hi, instr->environment());
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT((tag == 0) || (tag == mask));
      if (tag == 0) {
        DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
      } else {
        DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
      }
    } else {
      if (tag == 0) {
        __ Tst(scratch, mask);
      } else {
        __ And(scratch, scratch, mask);
        __ Cmp(scratch, tag);
      }
      DeoptimizeIf(ne, instr->environment());
    }
  }
}

void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister input = ToDoubleRegister(instr->unclamped());
  Register result = ToRegister32(instr->result());
  __ ClampDoubleToUint8(result, input, double_scratch());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register input = ToRegister32(instr->unclamped());
  Register result = ToRegister32(instr->result());
  __ ClampInt32ToUint8(result, input);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register input = ToRegister(instr->unclamped());
  Register result = ToRegister32(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Label done;

  // Both smi and heap number cases are handled.
  Label is_not_smi;
  __ JumpIfNotSmi(input, &is_not_smi);
  __ SmiUntag(result.X(), input);
  __ ClampInt32ToUint8(result);
  __ B(&done);

  __ Bind(&is_not_smi);

  // Check for heap number.
  Label is_heap_number;
  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                      instr->environment());
  __ Mov(result, 0);
  __ B(&done);

  // Heap number case.
  __ Bind(&is_heap_number);
  DoubleRegister dbl_scratch = double_scratch();
  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
  __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);

  __ Bind(&done);
}

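
// Extract one 32-bit half of a double's bit pattern. For example, 1.0 is
// 0x3FF0000000000000 in IEEE-754, so HIGH yields 0x3FF00000 and the low half
// yields 0x00000000.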
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ Fmov(result_reg, value_reg);
    __ Mov(result_reg, Operand(result_reg, LSR, 32));
  } else {
    __ Fmov(result_reg.W(), value_reg.S());
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  Register temp = ToRegister(instr->temp());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());

  __ And(temp, lo_reg, Operand(0xffffffff));
  __ Orr(temp, temp, Operand(hi_reg, LSL, 32));
  __ Fmov(result_reg, temp);
}

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Handle<String> class_name = instr->hydrogen()->class_name();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register input = ToRegister(instr->value());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  __ JumpIfSmi(input, false_label);

  Register map = scratch2;
  if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    // We expect CompareObjectType to load the object instance type in scratch1.
    __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
    __ B(lt, false_label);
    __ B(eq, true_label);
    __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
    __ B(eq, true_label);
  } else {
    __ IsObjectJSObjectType(input, map, scratch1, false_label);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
    __ JumpIfNotObjectType(
        scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
  } else {
    __ JumpIfNotObjectType(
        scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
  }

  // The constructor function is in scratch1. Get its instance class name.
  __ Ldr(scratch1,
         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(scratch1,
         FieldMemOperand(scratch1,
                         SharedFunctionInfo::kInstanceClassNameOffset));

  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
}

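
// The "hole" marking a missing element in a FAST_HOLEY_DOUBLE_ELEMENTS
// backing store is one specific NaN bit pattern (kHoleNanInt64). An ordinary
// NaN compares unordered with everything, including itself, so the code below
// first does a cheap self-compare to rule out non-NaN values, and only then
// compares the raw bits against the hole pattern.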
void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
  ASSERT(instr->hydrogen()->representation().IsDouble());
  FPRegister object = ToDoubleRegister(instr->object());
  Register temp = ToRegister(instr->temp());

  // If we don't have a NaN, we don't have the hole, so branch now to avoid the
  // (relatively expensive) hole-NaN check.
  __ Fcmp(object, object);
  __ B(vc, instr->FalseLabel(chunk_));

  // We have a NaN, but is it the hole?
  __ Fmov(temp, object);
  EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
}


void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
  ASSERT(instr->hydrogen()->representation().IsTagged());
  Register object = ToRegister(instr->object());

  EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register value = ToRegister(instr->value());
  Register map = ToRegister(instr->temp());

  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
}

void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
                       instr->TrueLabel(chunk()));
  } else {
    Register value = ToRegister(instr->value());
    __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
                instr->FalseLabel(chunk()), DO_SMI_CHECK);
    __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
    __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
  }
  EmitGoto(instr->FalseDestination(chunk()));
}

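
// When only the left operand is a constant, the operands are transposed and
// the condition reversed so that the constant can be encoded as the immediate
// of the compare; e.g. (1 < x) is emitted as (x > 1).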
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      if (right->IsConstantOperand()) {
        __ Fcmp(ToDoubleRegister(left),
                ToDouble(LConstantOperand::cast(right)));
      } else if (left->IsConstantOperand()) {
        // Transpose the operands and reverse the condition.
        __ Fcmp(ToDoubleRegister(right),
                ToDouble(LConstantOperand::cast(left)));
        cond = ReverseConditionForCmp(cond);
      } else {
        __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
      }

      // If a NaN is involved, i.e. the result is unordered (V set),
      // jump to the false block label.
      __ B(vs, instr->FalseLabel(chunk_));
      EmitBranch(instr, cond);
    } else {
      if (instr->hydrogen_value()->representation().IsInteger32()) {
        if (right->IsConstantOperand()) {
          EmitCompareAndBranch(instr,
                               cond,
                               ToRegister32(left),
                               ToOperand32I(right));
        } else {
          // Transpose the operands and reverse the condition.
          EmitCompareAndBranch(instr,
                               ReverseConditionForCmp(cond),
                               ToRegister32(right),
                               ToOperand32I(left));
        }
      } else {
        ASSERT(instr->hydrogen_value()->representation().IsSmi());
        if (right->IsConstantOperand()) {
          int32_t value = ToInteger32(LConstantOperand::cast(right));
          EmitCompareAndBranch(instr,
                               cond,
                               ToRegister(left),
                               Operand(Smi::FromInt(value)));
        } else if (left->IsConstantOperand()) {
          // Transpose the operands and reverse the condition.
          int32_t value = ToInteger32(LConstantOperand::cast(left));
          EmitCompareAndBranch(instr,
                               ReverseConditionForCmp(cond),
                               ToRegister(right),
                               Operand(Smi::FromInt(value)));
        } else {
          EmitCompareAndBranch(instr,
                               cond,
                               ToRegister(left),
                               ToRegister(right));
        }
      }
    }
  }
}

void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());
  EmitCompareAndBranch(instr, eq, left, right);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();
  Condition cond = TokenToCondition(op, false);

  ASSERT(ToRegister(instr->left()).Is(x1));
  ASSERT(ToRegister(instr->right()).Is(x0));
  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // Signal that we don't inline smi code before this stub.
  InlineSmiCheckInfo::EmitNotInlined(masm());

  // Return true or false depending on CompareIC result.
  // This instruction is marked as call. We can clobber any register.
  ASSERT(instr->IsMarkedAsCall());
  __ LoadTrueFalseRoots(x1, x2);
  __ Cmp(x0, 0);
  __ Csel(ToRegister(instr->result()), x1, x2, cond);
}

void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fmov(result, instr->value());
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(is_int32(instr->value()));
  // Cast the value here to ensure that the value isn't sign extended by the
  // implicit Operand constructor.
  __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(ToRegister(instr->result()), value);
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    ASSERT(result.is(cp));
  }
}

void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    UseScratchRegisterScope temps(masm());
    Register temp = temps.AcquireX();
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ Mov(temp, Operand(Handle<Object>(cell)));
    __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
    __ Cmp(reg, temp);
  } else {
    __ Cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register temp1 = x10;
  Register temp2 = x11;
  Smi* index = instr->index();
  Label runtime, done;

  ASSERT(object.is(result) && object.Is(x0));
  ASSERT(instr->IsMarkedAsCall());

  DeoptimizeIfSmi(object, instr->environment());
  __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
  DeoptimizeIf(ne, instr->environment());

  if (index->value() == 0) {
    __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ Mov(temp1, Operand(stamp));
      __ Ldr(temp1, MemOperand(temp1));
      __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Cmp(temp1, temp2);
      __ B(ne, &runtime);
      __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                             kPointerSize * index->value()));
      __ B(&done);
    }

    __ Bind(&runtime);
    __ Mov(x1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ Bind(&done);
  }
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  Deoptimize(instr->environment(), &type);
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
  ASSERT(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp(dividend, 0);
    DeoptimizeIf(eq, instr->environment());
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ Cmp(dividend, kMinInt);
    DeoptimizeIf(eq, instr->environment());
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ Tst(dividend, mask);
    DeoptimizeIf(ne, instr->environment());
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Neg(result, dividend);
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
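  // An arithmetic shift alone rounds towards minus infinity, so for negative
  // dividends a bias of (2^shift - 1) is added first to get truncating
  // (round-towards-zero) division. Worked example with dividend == -5 and
  // divisor == 4 (shift == 2): -5 >> 31 gives 0xFFFFFFFF, the LSR by
  // 32 - shift leaves the bias 3, and (-5 + 3) >> 2 == -1 == trunc(-5 / 4).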
  if (shift == 0) {
    __ Mov(result, dividend);
  } else if (shift == 1) {
    __ Add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    __ Mov(result, Operand(dividend, ASR, 31));
    __ Add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ Neg(result, result);
}

void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  ASSERT(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr->environment());
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register temp = ToRegister32(instr->temp());
    ASSERT(!AreAliased(dividend, result, temp));
    __ Sxtw(dividend.X(), dividend);
    __ Mov(temp, divisor);
    __ Smsubl(temp.X(), result, temp, dividend.X());
    DeoptimizeIfNotZero(temp, instr->environment());
  }
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
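//
// DoDivI checks its deopt conditions by pairing an ordinary compare with a
// conditional compare (Ccmp). Ccmp(lhs, rhs, nzcv, cond) performs the compare
// only if 'cond' holds for the current flags; otherwise it sets the flags
// directly to the 'nzcv' immediate. Passing NoFlag clears Z, so the
// subsequent "deopt if eq" is skipped whenever the guarding condition fails.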
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register result = ToRegister32(instr->result());

  // Issue the division first, and then check for any deopt cases whilst the
  // result is computed.
  __ Sdiv(result, dividend, divisor);

  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    ASSERT_EQ(NULL, instr->temp());
    return;
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr->environment());
  }

  // Check for (0 / -x) as that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);

    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
    // zero, ie. zero dividend with negative divisor deopts.
    // If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
    __ Ccmp(dividend, 0, NoFlag, mi);
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    // If overflow is set, ie. dividend = kMinInt, compare the divisor with
    // -1. If overflow is clear, set the flags for condition ne, as the
    // dividend isn't -1, and thus we shouldn't deopt.
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr->environment());
  }

  // Compute the remainder and deopt if it's not zero:
  // remainder = dividend - result * divisor (Msub).
  Register remainder = ToRegister32(instr->temp());
  __ Msub(remainder, result, divisor, dividend);
  DeoptimizeIfNotZero(remainder, instr->environment());
}

void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister32(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr->environment());
  }

  __ TryRepresentDoubleAsInt32(result, input, double_scratch());
  DeoptimizeIf(ne, instr->environment());

  if (instr->tag_result()) {
    __ SmiTag(result.X());
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // The FunctionLiteral instruction is marked as call, we can trash any
  // register.
  ASSERT(instr->IsMarkedAsCall());

  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(),
                            instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    __ Mov(x1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, x2, x1);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}

void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;

  __ EnumLengthUntagged(result, map);
  __ Cbnz(result, &load_cache);

  __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ B(&done);

  __ Bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIfZero(result, instr->environment());

  __ Bind(&done);
}

void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register object = ToRegister(instr->object());
  Register null_value = x5;

  ASSERT(instr->IsMarkedAsCall());
  ASSERT(object.Is(x0));

  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
                   instr->environment());

  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ Cmp(object, null_value);
  DeoptimizeIf(eq, instr->environment());

  DeoptimizeIfSmi(object, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);

  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
  __ B(&use_cache);

  // Get the set of properties to enumerate.
  __ Bind(&call_runtime);
  __ Push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment());

  __ Bind(&use_cache);
}

void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  // Assert that we can use a W register load to get the hash.
  ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
  __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}

void LCodeGen::EmitGoto(int block) {
  // Do not emit a jump if we are emitting a goto to the next block.
  if (!IsNextEmittedBlock(block)) {
    __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister32(instr->temp());

  // Assert that the cache status bits fit in a W register.
  ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
  __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
  __ Tst(temp, String::kContainsCachedArrayIndexMask);
  EmitBranch(instr, eq);
}

// The HHasInstanceTypeAndBranch instruction is built with an interval of types
// to test, but is only used in very restricted ways. The only possible kinds
// of intervals are:
//  - [ FIRST_TYPE, instr->to() ]
//  - [ instr->from(), LAST_TYPE ]
//  - instr->from() == instr->to()
//
// These kinds of intervals can be checked with only one compare instruction,
// provided that the correct value and test condition are used.
//
// TestType() will return the value to use in the compare instruction and
// BranchCondition() will return the condition to use depending on the kind
// of interval actually specified in the instruction.
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT((from == to) || (to == LAST_TYPE));
  return from;
}


// See the comment above TestType for what this function does.
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    __ Add(result, base, ToOperand32I(instr->offset()));
  } else {
    __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
  }
}

void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Assert that the arguments are in the registers expected by InstanceofStub.
  ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
  ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));

  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  // InstanceofStub returns a result in x0:
  //   0     => not an instance
  //   smi 1 => instance.
  __ Cmp(x0, 0);
  __ LoadTrueFalseRoots(x0, x1);
  __ Csel(x0, x0, x1, eq);
}

void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LInstanceOfKnownGlobal* instr_;
  };

  DeferredInstanceOfKnownGlobal* deferred =
      new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label map_check, return_false, cache_miss, done;
  Register object = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  // x4 is expected in the associated deferred code and stub.
  Register map_check_site = x4;
  Register map = x5;

  // This instruction is marked as call. We can clobber any register.
  ASSERT(instr->IsMarkedAsCall());

  // We must take into account that object is in x11.
  ASSERT(object.Is(x11));
  Register scratch = x10;

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &return_false);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // stub.
  __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  {
    // Below we use Factory::the_hole_value() on purpose instead of loading
    // from the root array to force relocation and later be able to patch with
    // a custom value.
    InstructionAccurateScope scope(masm(), 5);
    __ bind(&map_check);
    // Will be patched with the cached map.
    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
    __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
    __ cmp(map, scratch);
    __ b(&cache_miss, ne);
    // The address of this instruction is computed relative to the map check
    // above, so check the size of the code generated.
    ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
    // Will be patched with the cached result.
    __ LoadRelocated(result, Operand(factory()->the_hole_value()));
  }
  __ B(&done);

  // The inlined call site cache did not match.
  // Check null and string before calling the deferred code.
  __ Bind(&cache_miss);
  // Compute the address of the map check. It must not be clobbered until the
  // InstanceOfStub has used it.
  __ Adr(map_check_site, &map_check);
  // Null is not instance of anything.
  __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);

  // String values are not instances of anything.
  // Return false if the object is a string. Otherwise, jump to the deferred
  // code.
  // Note that we can't jump directly to deferred code from
  // IsObjectJSStringType, because it uses tbz for the jump and the deferred
  // code can be out of range.
  __ IsObjectJSStringType(object, scratch, NULL, &return_false);
  __ B(deferred->entry());

  __ Bind(&return_false);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result is either true or false.
  __ Bind(deferred->exit());
  __ Bind(&done);
}

void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  Register result = ToRegister(instr->result());
  ASSERT(result.Is(x0));  // InstanceofStub returns its result in x0.
  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());

  // Prepare InstanceofStub arguments.
  ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
  __ LoadObject(InstanceofStub::right(), instr->function());

  InstanceofStub stub(isolate(), flags);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

  // Put the result value into the result register slot.
  __ StoreToSafepointRegisterSlot(result, result);
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}

void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  Register value = ToRegister32(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Scvtf(result, value);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // The function is required to be in x1.
  ASSERT(ToRegister(instr->function()).is(x1));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      x1);
  }
  after_push_argument_ = false;
}

void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Get the frame pointer for the calling frame.
  __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ B(ne, &check_frame_marker);
  __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ Bind(&check_frame_marker);
  __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));

  EmitCompareAndBranch(
      instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}

void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Label* is_object = instr->TrueLabel(chunk_);
  Label* is_not_object = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());
  Register map = ToRegister(instr->temp1());
  Register scratch = ToRegister(instr->temp2());

  __ JumpIfSmi(value, is_not_object);
  __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);

  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));

  // Check for undetectable objects.
  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);

  // Check that the instance type is in the object type range.
  __ IsInstanceJSObjectType(map, scratch, NULL);
  // Flags have been updated by IsInstanceJSObjectType. We can now test the
  // flags for the "le" condition to check if the object's type is a valid
  // JS object type.
  EmitBranch(instr, le);
}

Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register val = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register value = ToRegister(instr->value());
  STATIC_ASSERT(kSmiTag == 0);
  EmitTestAndBranch(instr, eq, value, kSmiTagMask);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));

  EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));

  __ Bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
                       instr->environment());
    } else {
      Label not_the_hole;
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ Bind(&not_the_hole);
    }
  }
}

void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Check that the function really is a function. Leaves map in the result
  // register.
  __ CompareObjectType(function, result, temp, JS_FUNCTION_TYPE);
  DeoptimizeIf(ne, instr->environment());

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
  __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);

  // Get the prototype or initial map from the function.
  __ Ldr(result, FieldMemOperand(function,
                                 JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
                   instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, temp, temp, MAP_TYPE);
  __ B(ne, &done);

  // Get the prototype from the initial map.
  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ B(&done);

  // Non-instance prototype: fetch prototype from constructor field in initial
  // map.
  __ Bind(&non_instance);
  __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ Bind(&done);
}

void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    DeoptimizeIfRoot(
        result, Heap::kTheHoleValueRootIndex, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).Is(x0));
  ASSERT(ToRegister(instr->result()).Is(x0));
  __ Mov(x2, Operand(instr->name()));
  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

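
// Build the MemOperand for a keyed external/typed array access, using the
// cheapest ARM64 addressing mode available: a constant key folds into an
// immediate offset; a smi key is untagged and scaled into a scratch base
// register; an untagged int32 key is used as a sign-extended (SXTW), scaled
// register offset.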
MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
    Register key,
    Register base,
    Register scratch,
    bool key_is_smi,
    bool key_is_constant,
    int constant_key,
    ElementsKind elements_kind,
    int additional_index) {
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int additional_offset = additional_index << element_size_shift;
  if (IsFixedTypedArrayElementsKind(elements_kind)) {
    additional_offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
  }

  if (key_is_constant) {
    int key_offset = constant_key << element_size_shift;
    return MemOperand(base, key_offset + additional_offset);
  }

  if (key_is_smi) {
    __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
    return MemOperand(scratch, additional_offset);
  }

  if (additional_offset == 0) {
    return MemOperand(base, key, SXTW, element_size_shift);
  }

  ASSERT(!AreAliased(scratch, key));
  __ Add(scratch, base, additional_offset);
  return MemOperand(scratch, key, SXTW, element_size_shift);
}

void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register scratch;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  int constant_key = 0;
  if (key_is_constant) {
    ASSERT(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    scratch = ToRegister(instr->temp());
    key = ToRegister(instr->key());
  }

  MemOperand mem_op =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->additional_index());

  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
      (elements_kind == FLOAT32_ELEMENTS)) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result.S(), mem_op);
    __ Fcvt(result, result.S());
  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
             (elements_kind == FLOAT64_ELEMENTS)) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result, mem_op);
  } else {
    Register result = ToRegister(instr->result());

    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ Ldrsb(result, mem_op);
        break;
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ Ldrb(result, mem_op);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ Ldrsh(result, mem_op);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Ldrh(result, mem_op);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ Ldrsw(result, mem_op);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Ldr(result.W(), mem_op);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          // Deopt if the value doesn't fit in an int32, i.e. the sign bit is
          // set.
          __ Tst(result, 0xFFFFFFFF80000000);
          DeoptimizeIf(ne, instr->environment());
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
    }
  }
}

void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
                                          Register elements,
                                          Register key,
                                          bool key_is_tagged,
                                          ElementsKind elements_kind) {
  int element_size_shift = ElementsKindToShiftSize(elements_kind);

  // Even though the HLoad/StoreKeyed instructions force the input
  // representation for the key to be an integer, the input gets replaced during
  // bounds check elimination with the index argument to the bounds check, which
  // can be tagged, so that case must be handled here, too.
  if (key_is_tagged) {
    __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
  } else {
    // Sign extend key because it could be a 32-bit negative value or contain
    // garbage in the top 32-bits. The address computation happens in 64-bit.
    ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
    __ Add(base, elements, Operand(key, SXTW, element_size_shift));
  }
}

void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register load_base;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
           (instr->temp() == NULL));

    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
                                                 instr->additional_index());
    load_base = elements;
  } else {
    load_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
                               instr->hydrogen()->elements_kind());
    offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
  }
  __ Ldr(result, FieldMemOperand(load_base, offset));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register scratch = ToRegister(instr->temp());

    // TODO(all): Is it faster to reload this value to an integer register, or
    // move from fp to integer?
    __ Fmov(scratch, result);
    __ Cmp(scratch, kHoleNanInt64);
    DeoptimizeIf(eq, instr->environment());
  }
}

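
// With 64-bit smis (kSmiValueSize == 32, kSmiShift == 32), the untagged int32
// value occupies the upper word of the tagged value, so a load that only
// needs the int32 can use a 32-bit access to that half of the field
// (UntagSmiFieldMemOperand) instead of loading and shifting the full tagged
// word.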
void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register load_base;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    ASSERT(instr->temp() == NULL);
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    load_base = elements;
  } else {
    load_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
                               instr->hydrogen()->elements_kind());
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  Representation representation = instr->hydrogen()->representation();

  if (representation.IsInteger32() &&
      instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
    __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
            Representation::Integer32());
  } else {
    __ Load(result, FieldMemOperand(load_base, offset),
            representation);
  }

  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      DeoptimizeIfNotSmi(result, instr->environment());
    } else {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
                       instr->environment());
    }
  }
}

void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).Is(x1));
  ASSERT(ToRegister(instr->key()).Is(x0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  ASSERT(ToRegister(instr->result()).Is(x0));
}

void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    __ Load(result, MemOperand(object, offset), access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    FPRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  Register source;
  if (access.IsInobject()) {
    source = object;
  } else {
    // Load the properties array, using result as a scratch register.
    __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    source = result;
  }

  if (access.representation().IsSmi() &&
      instr->hydrogen()->representation().IsInteger32()) {
    // Read the int value directly from the upper half of the smi.
    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
    __ Load(result, UntagSmiFieldMemOperand(source, offset),
            Representation::Integer32());
  } else {
    __ Load(result, FieldMemOperand(source, offset), access.representation());
  }
}

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
  ASSERT(ToRegister(instr->object()).is(x0));
  __ Mov(x2, Operand(instr->name()));

  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  ASSERT(ToRegister(instr->result()).is(x0));
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLengthSmi(result, map);
}

void LCodeGen::DoMathAbs(LMathAbs* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Fabs(result, input);
  } else if (r.IsSmi() || r.IsInteger32()) {
    Register input = r.IsSmi() ? ToRegister(instr->value())
                               : ToRegister32(instr->value());
    Register result = r.IsSmi() ? ToRegister(instr->result())
                                : ToRegister32(instr->result());
    __ Abs(result, input);
    DeoptimizeIf(vs, instr->environment());
  }
}

3660 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3662 Label* allocation_entry) {
3663 // Handle the tricky cases of MathAbsTagged:
3664 // - HeapNumber inputs.
3665 // - Negative inputs produce a positive result, so a new HeapNumber is
3666 // allocated to hold it.
3667 // - Positive inputs are returned as-is, since there is no need to allocate
3668 // a new HeapNumber for the result.
3669 // - The (smi) input -0x80000000, produces +0x80000000, which does not fit
3670 // a smi. In this case, the inline code sets the result and jumps directly
3671 // to the allocation_entry label.
  ASSERT(instr->context() != NULL);
  ASSERT(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());

  Label runtime_allocation;

  // Deoptimize if the input is not a HeapNumber.
  __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
                      instr->environment());

  // If the argument is positive, we can return it as-is, without any need to
  // allocate a new HeapNumber for the result. We have to do this in integer
  // registers (rather than with fabs) because we need to be able to
  // distinguish the two zeroes.
  __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
  __ Mov(result, input);
  __ Tbz(result_bits, kXSignBit, exit);

  // Calculate abs(input) by clearing the sign bit.
  __ Bic(result_bits, result_bits, kXSignMask);
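  // For example, -1.5 has the bit pattern 0xbff8000000000000; clearing bit 63
  // (kXSignMask) leaves 0x3ff8000000000000, which is +1.5.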

  // Allocate a new HeapNumber to hold the result.
  //  result_bits   The bit representation of the (double) result.
  __ Bind(allocation_entry);
  __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
  // The inline (non-deferred) code will store result_bits into result.
  __ B(exit);

  __ Bind(&runtime_allocation);
  if (FLAG_debug_code) {
    // Because result is in the pointer map, we need to make sure it has a
    // valid tagged value before we call the runtime. We speculatively set it
    // to the input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it
    // should already be valid.
    Label result_ok;
    Register input = ToRegister(instr->value());
    __ JumpIfSmi(result, &result_ok);
    __ Cmp(input, result);
    __ Assert(eq, kUnexpectedValue);
    __ Bind(&result_ok);
  }

  { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
                            instr->context());
    __ StoreToSafepointRegisterSlot(x0, result);
  }
  // The inline (non-deferred) code will store result_bits into result.
}

void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
  // Class for deferred case.
  class DeferredMathAbsTagged: public LDeferredCode {
   public:
    DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTagged(instr_, exit(),
                                         allocation_entry());
    }
    virtual LInstruction* instr() { return instr_; }
    Label* allocation_entry() { return &allocation; }
   private:
    LMathAbsTagged* instr_;
    Label allocation;
  };

  // TODO(jbramley): The early-exit mechanism would skip the new frame handling
  // in GenerateDeferredCode. Tidy this up.
  ASSERT(!NeedsDeferredFrame());

  DeferredMathAbsTagged* deferred =
      new(zone()) DeferredMathAbsTagged(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
         instr->hydrogen()->value()->representation().IsSmi());
  Register input = ToRegister(instr->value());
  Register result_bits = ToRegister(instr->temp3());
  Register result = ToRegister(instr->result());
  Label done;

  // Handle smis inline.
  // We can treat smis as 64-bit integers, since the (low-order) tag bits will
  // never get set by the negation. This is therefore the same as the Integer32
  // case in DoMathAbs, except that it operates on 64-bit values.
  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
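  // For example, the smi -5 is 0xfffffffb00000000; negating it gives
  // 0x0000000500000000, which is exactly the smi +5, with the low-order tag
  // bits still clear.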

  __ JumpIfNotSmi(input, deferred->entry());

  __ Abs(result, input, NULL, &done);

  // The result is the magnitude (abs) of the smallest value a smi can
  // represent, encoded as a double.
  __ Mov(result_bits, double_to_rawbits(0x80000000));
  __ B(deferred->allocation_entry());

  __ Bind(deferred->exit());
  __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));

  __ Bind(&done);
}

void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
  DoubleRegister double_temp2 = double_scratch();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Register temp3 = ToRegister(instr->temp3());

  MathExpGenerator::EmitMathExp(masm(), input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
}

void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ Frintm(result, input);
}

void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr->environment());
  }

  __ Fcvtms(result, input);

  // Check that the result fits into a 32-bit integer.
  //  - The result did not overflow.
  __ Cmp(result, Operand(result, SXTW));
  //  - The input was not NaN.
  __ Fccmp(input, input, NoFlag, eq);
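  // For example, an input of 2147483648.0 (2^31) converts to the 64-bit value
  // 0x0000000080000000; its low word sign-extends to 0xffffffff80000000, so
  // the Cmp above sets ne and the deopt below fires.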
  DeoptimizeIf(ne, instr->environment());
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is positive, things are easy: there can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
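    // For a positive power-of-two divisor the arithmetic shift alone already
    // floors: e.g. -7 ASR 1 == -4 == floor(-7 / 2).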
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr->environment());
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(vs, instr->environment());
    return;
  }

  // Using a conditional data processing instruction would need 1 more
  // register.
  Label not_kmin_int, done;
  __ B(vc, &not_kmin_int);
  __ Mov(result, kMinInt / divisor);
  __ B(&done);
  __ bind(&not_kmin_int);
  __ Mov(result, Operand(dividend, ASR, shift));
  __ bind(&done);
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  ASSERT(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp(dividend, 0);
    DeoptimizeIf(eq, instr->environment());
  }

  // Easy case: we need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
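  // For example, floor(-7 / 2) is -4 but truncation gives -3. When the
  // dividend and the divisor have opposite signs we compute
  // (dividend + 1) / divisor - 1 instead: (-7 + 1) / 2 - 1 == -4. The
  // adjustment is also exact for exact quotients: (-6 + 1) / 2 truncates
  // to -2, and -2 - 1 == -3 == floor(-6 / 2).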
  Register temp = ToRegister32(instr->temp());
  ASSERT(!AreAliased(temp, dividend, result));
  Label needs_adjustment, done;
  __ Cmp(dividend, 0);
  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ B(&done);
  __ bind(&needs_adjustment);
  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);
  __ Sub(result, result, Operand(1));
  __ bind(&done);
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register remainder = ToRegister32(instr->temp());
  Register result = ToRegister32(instr->result());

  // This can't cause an exception on ARM, so we can speculatively
  // execute it already now.
  __ Sdiv(result, dividend, divisor);

  // Check for x / 0.
  DeoptimizeIfZero(divisor, instr->environment());

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // The V flag will be set iff dividend == kMinInt.
    __ Cmp(dividend, 1);
    __ Ccmp(divisor, -1, NoFlag, vs);
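    // Ccmp performs the (divisor == -1) comparison only when V is set, i.e.
    // only when the dividend is kMinInt; otherwise it loads NoFlag, which
    // clears Z. The eq deopt below therefore fires exactly for kMinInt / -1.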
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);
    __ Ccmp(dividend, 0, ZFlag, mi);
    // "divisor" can't be null because the code would have already been
    // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
    // In this case we need to deoptimize to produce a -0.
    DeoptimizeIf(eq, instr->environment());
  }

  Label done;
  // If both operands have the same sign then we are done.
  __ Eor(remainder, dividend, divisor);
  __ Tbz(remainder, kWSignBit, &done);

  // Check if the result needs to be corrected.
  __ Msub(remainder, result, divisor, dividend);
  __ Cbz(remainder, &done);
  __ Sub(result, result, 1);

  __ Bind(&done);
}

void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToDoubleRegister(instr->value()).is(d0));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  ASSERT(ToDoubleRegister(instr->result()).Is(d0));
}

void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister32(instr->value());
  Register result = ToRegister32(instr->result());
  __ Clz(result, input);
}

void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  Label done;

  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
  //  Math.pow(-Infinity, 0.5) == +Infinity
  //  Math.pow(-0.0, 0.5) == +0.0

  // Catch -infinity inputs first.
  // TODO(jbramley): A constant infinity register would be helpful here.
  __ Fmov(double_scratch(), kFP64NegativeInfinity);
  __ Fcmp(double_scratch(), input);
  __ Fabs(result, input);
  __ B(&done, eq);

  // Add +0.0 to convert -0.0 to +0.0.
  __ Fadd(double_scratch(), input, fp_zero);
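  // In IEEE-754 arithmetic, (-0.0) + (+0.0) is +0.0 under round-to-nearest,
  // and Fsqrt(+0.0) is +0.0, so the -0.0 input comes out with the right sign.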
  __ Fsqrt(result, double_scratch());

  __ Bind(&done);
}

void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(x11));
  ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
  ASSERT(ToDoubleRegister(instr->left()).is(d0));
  ASSERT(ToDoubleRegister(instr->result()).is(d0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(x11, &no_deopt);
    __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
    DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
                        instr->environment());
    __ Bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    // Ensure the integer exponent has no garbage in its top 32 bits, as
    // MathPowStub supports large integer exponents.
    Register exponent = ToRegister(instr->right());
    __ Sxtw(exponent, exponent);
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister scratch_d = double_scratch();

  ASSERT(!AreAliased(input, result, scratch_d));

  Label done;

  __ Frinta(result, input);
  __ Fcmp(input, 0.0);
  __ Fccmp(result, input, ZFlag, lt);
  // The result is correct if the input was in [-0, +infinity], or was a
  // negative integral value.
  __ B(eq, &done);

  // Here the input is negative, non-integral, with an exponent lower than 52.
  // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
  // case, so we can safely add 0.5.
  __ Fmov(scratch_d, 0.5);
  __ Fadd(result, input, scratch_d);
  __ Frintm(result, result);
  // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
  __ Fabs(result, result);
  __ Fneg(result, result);

  __ Bind(&done);
}

void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
  Register result = ToRegister(instr->result());
  Label try_rounding;
  Label done;

  // Math.round() rounds to the nearest integer, with ties going towards
  // +infinity. This does not match any IEEE-754 rounding mode.
  //  - Infinities and NaNs are propagated unchanged, but cause deopts because
  //    they can't be represented as integers.
  //  - The sign of the result is the same as the sign of the input. This means
  //    that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
  //    result of -0.0.

  DoubleRegister dot_five = double_scratch();
  __ Fmov(dot_five, 0.5);
  __ Fabs(temp1, input);
  __ Fcmp(temp1, dot_five);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ B(hi, &try_rounding);  // hi so that NaN will also branch.

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Fmov(result, input);
    DeoptimizeIfNegative(result, instr->environment());  // [-0.5, -0.0].
  }
  __ Fcmp(input, dot_five);
  __ Mov(result, 1);  // +0.5.
  // The remaining cases, [+0, +0.5[ (or [-0.5, +0.5[ depending on the
  // kBailoutOnMinusZero flag), return 0 (xzr).
  __ Csel(result, result, xzr, eq);
  __ B(&done);

  __ Bind(&try_rounding);
  // Since we're providing a 32-bit result, we can implement ties-to-infinity
  // by adding 0.5 to the input, then taking the floor of the result. This does
  // not work for very large positive doubles because adding 0.5 would cause an
  // intermediate rounding stage, so a different approach is necessary when a
  // double result is needed.
  __ Fadd(temp1, input, dot_five);
  __ Fcvtms(result, temp1);
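  // For example, -2.5 + 0.5 == -2.0 and floor(-2.0) == -2, which matches
  // Math.round(-2.5) == -2 (ties go towards +infinity).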

  // Deopt if
  //  * the input was NaN, or
  //  * the result is not representable using a 32-bit integer.
  __ Fcmp(input, 0.0);
  __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
  DeoptimizeIf(ne, instr->environment());

  __ Bind(&done);
}

void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Fsqrt(result, input);
}

void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  HMathMinMax::Operation op = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsInteger32()) {
    Register result = ToRegister32(instr->result());
    Register left = ToRegister32(instr->left());
    Operand right = ToOperand32I(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else if (instr->hydrogen()->representation().IsSmi()) {
    Register result = ToRegister(instr->result());
    Register left = ToRegister(instr->left());
    Operand right = ToOperand(instr->right());

    __ Cmp(left, right);
    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    DoubleRegister result = ToDoubleRegister(instr->result());
    DoubleRegister left = ToDoubleRegister(instr->left());
    DoubleRegister right = ToDoubleRegister(instr->right());

    if (op == HMathMinMax::kMathMax) {
      __ Fmax(result, left, right);
    } else {
      ASSERT(op == HMathMinMax::kMathMin);
      __ Fmin(result, left, right);
    }
  }
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister32(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
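  // For example, with divisor -8 the mask below is 7; for dividend -13 the
  // negative path computes -(13 & 7) == -5, matching the truncated remainder
  // -13 % -8 == -5 (the sign follows the dividend).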
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Cmp(dividend, 0);
    __ B(pl, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ Neg(dividend, dividend);
    __ And(dividend, dividend, mask);
    __ Negs(dividend, dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment());
    }
    __ B(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, mask);
  __ bind(&done);
}

void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  Register temp = ToRegister32(instr->temp());
  ASSERT(!AreAliased(dividend, result, temp));

  if (divisor == 0) {
    Deoptimize(instr->environment());
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Sxtw(dividend.X(), dividend);
  __ Mov(temp, Abs(divisor));
  __ Smsubl(result.X(), result, temp, dividend.X());
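  // In C terms the sequence above computes, roughly:
  //   int32_t quotient = dividend / Abs(divisor);   // TruncatingDiv
  //   result = dividend - quotient * Abs(divisor);  // Smsubl
  // (a sketch; Sxtw/Smsubl perform the arithmetic with 64-bit intermediates).
  // The remainder keeps the sign of the dividend.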

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Cbnz(result, &remainder_not_zero);
    DeoptimizeIfNegative(dividend, instr->environment());
    __ bind(&remainder_not_zero);
  }
}

void LCodeGen::DoModI(LModI* instr) {
  Register dividend = ToRegister32(instr->left());
  Register divisor = ToRegister32(instr->right());
  Register result = ToRegister32(instr->result());
  Label done;

  // modulo = dividend - quotient * divisor
  __ Sdiv(result, dividend, divisor);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr->environment());
  }
  __ Msub(result, result, divisor, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cbnz(result, &done);
    DeoptimizeIfNegative(dividend, instr->environment());
  }
  __ Bind(&done);
}

void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
  ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
  bool is_smi = instr->hydrogen()->representation().IsSmi();
  Register result =
      is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
  Register left =
      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
  int32_t right = ToInteger32(instr->right());
  ASSERT((right > -kMaxInt) || (right < kMaxInt));

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero) {
    if (right < 0) {
      // The result is -0 if right is negative and left is zero.
      DeoptimizeIfZero(left, instr->environment());
    } else if (right == 0) {
      // The result is -0 if the right is zero and the left is negative.
      DeoptimizeIfNegative(left, instr->environment());
    }
  }

  switch (right) {
    // Cases which can detect overflow.
    case -1:
      if (can_overflow) {
        // Only 0x80000000 can overflow here.
        __ Negs(result, left);
        DeoptimizeIf(vs, instr->environment());
      } else {
        __ Neg(result, left);
      }
      break;
    case 0:
      // This case can never overflow.
      __ Mov(result, 0);
      break;
    case 1:
      // This case can never overflow.
      __ Mov(result, left, kDiscardForSameWReg);
      break;
    case 2:
      if (can_overflow) {
        __ Adds(result, left, left);
        DeoptimizeIf(vs, instr->environment());
      } else {
        __ Add(result, left, left);
      }
      break;

    default:
      // Multiplication by constant powers of two (and some related values)
      // can be done efficiently with shifted operands.
      int32_t right_abs = Abs(right);

      if (IsPowerOf2(right_abs)) {
        int right_log2 = WhichPowerOf2(right_abs);

        if (can_overflow) {
          Register scratch = result;
          ASSERT(!AreAliased(scratch, left));
          __ Cls(scratch, left);
          __ Cmp(scratch, right_log2);
          DeoptimizeIf(lt, instr->environment());
        }

        if (right >= 0) {
          // result = left << log2(right)
          __ Lsl(result, left, right_log2);
        } else {
          // result = -left << log2(-right)
          if (can_overflow) {
            __ Negs(result, Operand(left, LSL, right_log2));
            DeoptimizeIf(vs, instr->environment());
          } else {
            __ Neg(result, Operand(left, LSL, right_log2));
          }
        }
      } else {
        // For the following cases, we could perform a conservative overflow
        // check with CLS as above. However the few cycles saved are likely not
        // worth the risk of deoptimizing more often than required.
        ASSERT(!can_overflow);

        if (right >= 0) {
          if (IsPowerOf2(right - 1)) {
            // result = left + left << log2(right - 1)
            __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
          } else if (IsPowerOf2(right + 1)) {
            // result = -left + left << log2(right + 1)
            __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
            __ Neg(result, result);
          } else {
            UNREACHABLE();
          }
        } else {
          if (IsPowerOf2(-right + 1)) {
            // result = left - left << log2(-right + 1)
            __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
          } else if (IsPowerOf2(-right - 1)) {
            // result = -left - left << log2(-right - 1)
            __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
            __ Neg(result, result);
          } else {
            UNREACHABLE();
          }
        }
      }
  }
}

void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Register right = ToRegister32(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
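    // For example, with left == 0 and right == -5 the Cmp/Ccmp pair leaves
    // eq set, so the Ccmn computes 0 + (-5) and sets N; the mi deopt below
    // then fires, because 0 * -5 would have to produce -0.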
    DeoptimizeIf(mi, instr->environment());
  }

  if (can_overflow) {
    __ Smull(result.X(), left, right);
    __ Cmp(result.X(), Operand(result, SXTW));
    DeoptimizeIf(ne, instr->environment());
  } else {
    __ Mul(result, left, right);
  }
}

void LCodeGen::DoMulS(LMulS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr->environment());
  }

  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
  if (can_overflow) {
    __ Smulh(result, left, right);
    __ Cmp(result, Operand(result.W(), SXTW));
    __ SmiTag(result);
    DeoptimizeIf(ne, instr->environment());
  } else {
    if (AreAliased(result, left, right)) {
      // All three registers are the same: half untag the input and then
      // multiply, giving a tagged result.
      STATIC_ASSERT((kSmiShift % 2) == 0);
      __ Asr(result, left, kSmiShift / 2);
      __ Mul(result, result, result);
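      // For example, the smi 3 is 3 << 32. Shifting right by 16 gives
      // 3 << 16, and (3 << 16) * (3 << 16) == 9 << 32, which is already the
      // smi 9; no re-tagging is needed.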
    } else if (result.Is(left) && !left.Is(right)) {
      // Registers result and left alias, right is distinct: untag left into
      // result, and then multiply by right, giving a tagged result.
      __ SmiUntag(result, left);
      __ Mul(result, result, right);
    } else {
      ASSERT(!left.Is(result));
      // Registers result and right alias, left is distinct, or all registers
      // are distinct: untag right into result, and then multiply by left,
      // giving a tagged result.
      __ SmiUntag(result, right);
      __ Mul(result, left, result);
    }
  }
}

void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = ToRegister(instr->result());
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagU and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kHiddenAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}

void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
  } else {
    __ B(deferred->entry());
  }

  __ Bind(deferred->exit());
  __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
}

void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
                                    LOperand* value,
                                    LOperand* temp1,
                                    LOperand* temp2) {
  Label slow, convert_and_store;
  Register src = ToRegister32(value);
  Register dst = ToRegister(instr->result());
  Register scratch1 = ToRegister(temp1);

  if (FLAG_inline_new) {
    Register scratch2 = ToRegister(temp2);
    __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
    __ B(&convert_and_store);
  }

  // Slow case: call the runtime system to do the number allocation.
  __ Bind(&slow);
  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ Mov(dst, 0);
  {
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // NumberTagU and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kHiddenAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, dst);
  }

  // Convert the number to floating point and store it in the newly allocated
  // heap number.
  __ Bind(&convert_and_store);
  DoubleRegister dbl_scratch = double_scratch();
  __ Ucvtf(dbl_scratch, src);
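  // Ucvtf treats the W register as unsigned, so e.g. 0xffffffff converts to
  // 4294967295.0 rather than -1.0.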
  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}

void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU: public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagU(instr_,
                                      instr_->value(),
                                      instr_->temp1(),
                                      instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register value = ToRegister32(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Cmp(value, Smi::kMaxValue);
  __ B(hi, deferred->entry());
  __ SmiTag(result, value.X());
  __ Bind(deferred->exit());
}

void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  DoubleRegister result = ToDoubleRegister(instr->result());
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();

  Label done, load_smi;

  // Work out what untag mode we're working with.
  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    __ JumpIfSmi(input, &load_smi);

    Label convert_undefined;

    // Heap number map check.
    __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    if (can_convert_undefined_to_nan) {
      __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
                       &convert_undefined);
    } else {
      DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
                          instr->environment());
    }

    // Load the heap number.
    __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
    if (instr->hydrogen()->deoptimize_on_minus_zero()) {
      DeoptimizeIfMinusZero(result, instr->environment());
    }
    __ B(&done);

    if (can_convert_undefined_to_nan) {
      __ Bind(&convert_undefined);
      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                          instr->environment());

      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ B(&done);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
    // Fall through to load_smi.
  }

  // Smi to double register conversion.
  __ Bind(&load_smi);
  __ SmiUntagToDouble(result, input);

  __ Bind(&done);
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    __ Push(ToRegister(argument));
    after_push_argument_ = true;
  }
}

void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
    // code managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ Push(x0);
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }

  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }

  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    Register stack_pointer = masm()->StackPointer();
    __ Mov(stack_pointer, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(fp, lr);
  }

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Drop(parameter_count + 1);
  } else {
    Register parameter_count = ToRegister(instr->parameter_count());
    __ DropBySMI(parameter_count);
  }
  __ Ret();

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}

MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           Register temp,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }

  __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
  if (encoding == String::ONE_BYTE_ENCODING) {
    return MemOperand(temp, ToRegister32(index), SXTW);
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    return MemOperand(temp, ToRegister32(index), SXTW, 1);
  }
}

void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    // Even though this lithium instruction comes with a temp register, we
    // can't use it here because we want to use "AtStart" constraints on the
    // inputs and the debug code here needs a scratch register.
    UseScratchRegisterScope temps(masm());
    Register dbg_temp = temps.AcquireX();

    __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
    __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));

    __ And(dbg_temp, dbg_temp,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
                             ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Ldrb(result, operand);
  } else {
    __ Ldrh(result, operand);
  }
}

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    ASSERT(ToRegister(instr->context()).is(cp));
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
                                 encoding_mask);
  }
  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Strb(value, operand);
  } else {
    __ Strh(value, operand);
  }
}

void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIfNegative(input.W(), instr->environment());
  }
  __ SmiTag(output, input);
}

void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->needs_check()) {
    DeoptimizeIfNotSmi(input, instr->environment());
  }

  __ SmiUntag(result, input);
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister32(instr->left());
  Register result = ToRegister32(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister32(instr->right());
    switch (instr->op()) {
      case Token::ROR: __ Ror(result, left, right); break;
      case Token::SAR: __ Asr(result, left, right); break;
      case Token::SHL: __ Lsl(result, left, right); break;
      case Token::SHR:
        if (instr->can_deopt()) {
          Label right_not_zero;
          __ Cbnz(right, &right_not_zero);
          DeoptimizeIfNegative(left, instr->environment());
          __ Bind(&right_not_zero);
        }
        __ Lsr(result, left, right);
        break;
      default: UNREACHABLE();
    }
  } else {
    ASSERT(right_op->IsConstantOperand());
    int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
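        // A zero shift leaves the value unchanged, but the result of SHR is
        // interpreted as uint32: a negative int32 input would map to a value
        // above kMaxInt, which is not representable, hence the deopt.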
        DeoptimizeIfNegative(left, instr->environment());
      }
      __ Mov(result, left, kDiscardForSameWReg);
    } else {
      switch (instr->op()) {
        case Token::ROR: __ Ror(result, left, shift_count); break;
        case Token::SAR: __ Asr(result, left, shift_count); break;
        case Token::SHL: __ Lsl(result, left, shift_count); break;
        case Token::SHR: __ Lsr(result, left, shift_count); break;
        default: UNREACHABLE();
      }
    }
  }
}

void LCodeGen::DoShiftS(LShiftS* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  // Only ROR by register needs a temp.
  ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
         (instr->temp() == NULL));

  if (right_op->IsRegister()) {
    Register right = ToRegister(instr->right());
    switch (instr->op()) {
      case Token::ROR: {
        Register temp = ToRegister(instr->temp());
        __ Ubfx(temp, right, kSmiShift, 5);
        __ SmiUntag(result, left);
        __ Ror(result.W(), result.W(), temp.W());
        __ SmiTag(result);
        break;
      }
      case Token::SAR:
        __ Ubfx(result, right, kSmiShift, 5);
        __ Asr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      case Token::SHL:
        __ Ubfx(result, right, kSmiShift, 5);
        __ Lsl(result, left, result);
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          Label right_not_zero;
          __ Cbnz(right, &right_not_zero);
          DeoptimizeIfNegative(left, instr->environment());
          __ Bind(&right_not_zero);
        }
        __ Ubfx(result, right, kSmiShift, 5);
        __ Lsr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      default: UNREACHABLE();
    }
  } else {
    ASSERT(right_op->IsConstantOperand());
    int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr->environment());
      }
      __ Mov(result, left);
    } else {
      switch (instr->op()) {
        case Token::ROR:
          __ SmiUntag(result, left);
          __ Ror(result.W(), result.W(), shift_count);
          __ SmiTag(result);
          break;
        case Token::SAR:
          __ Asr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        case Token::SHL:
          __ Lsl(result, left, shift_count);
          break;
        case Token::SHR:
          __ Lsr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        default: UNREACHABLE();
      }
    }
  }
}

void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ Debug("LDebugBreak", 0, BREAK);
}

void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Register scratch1 = x5;
  Register scratch2 = x6;
  ASSERT(instr->IsMarkedAsCall());

  ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
  // TODO(all): if Mov could handle object in new space then it could be used
  // here.
  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
  __ Push(cp, scratch1, scratch2);  // The context is the first argument.
  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}

void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(hs, &done);

    PredictableCodeSizeScope predictable(masm_,
                                         Assembler::kCallSizeWithRelocation);
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ Bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(lo, deferred_stack_check->entry());

    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ Bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}

void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  Register temp = ToRegister(instr->temp());
  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}

void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ Ldr(scratch, target);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
                       instr->environment());
    } else {
      __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
    }
  }

  __ Str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetLinkRegisterState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }
  __ Bind(&skip_assignment);
}

void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = ToRegister(instr->temp1());

  // Load the cell.
  __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register payload = ToRegister(instr->temp2());
    __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
    DeoptimizeIfRoot(
        payload, Heap::kTheHoleValueRootIndex, instr->environment());
  }

  // Store the value.
  __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}

void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = no_reg;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    ASSERT(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
    scratch = ToRegister(instr->temp());
  }

  MemOperand dst =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->additional_index());

  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
      (elements_kind == FLOAT32_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    DoubleRegister dbl_scratch = double_scratch();
    __ Fcvt(dbl_scratch.S(), value);
    __ Str(dbl_scratch.S(), dst);
  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
             (elements_kind == FLOAT64_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ Str(value, dst);
  } else {
    Register value = ToRegister(instr->value());

    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ Strb(value, dst);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Strh(value, dst);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Str(value.W(), dst);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
    }
  }
}

void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register store_base = no_reg;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
                                                 instr->additional_index());
    store_base = elements;
  } else {
    store_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
                               instr->hydrogen()->elements_kind());
    offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
  }

  if (instr->NeedsCanonicalization()) {
    DoubleRegister dbl_scratch = double_scratch();
    __ Fmov(dbl_scratch,
            FixedDoubleArray::canonical_not_the_hole_nan_as_double());
    __ Fmaxnm(dbl_scratch, dbl_scratch, value);
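    // Fmaxnm selects the numeric operand when exactly one input is NaN, so
    // non-NaN values pass through unchanged, while a NaN input should come
    // out as a quiet (canonical) NaN rather than an arbitrary NaN payload.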
    __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
  } else {
    __ Str(value, FieldMemOperand(store_base, offset));
  }
}

void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = no_reg;
  Register store_base = no_reg;
  Register key = no_reg;
  int offset = 0;

  if (!instr->key()->IsConstantOperand() ||
      instr->hydrogen()->NeedsWriteBarrier()) {
    scratch = ToRegister(instr->temp());
  }

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    store_base = scratch;
    key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
                               instr->hydrogen()->elements_kind());
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (representation.IsInteger32()) {
    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
    __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
             Representation::Integer32());
  } else {
    __ Store(value, FieldMemOperand(store_base, offset), representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    ASSERT(representation.IsTagged());
    // This assignment may cause element_addr to alias store_base.
    Register element_addr = scratch;
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute the address of the modified element and store it into the key
    // register.
    __ Add(element_addr, store_base, offset - kHeapObjectTag);
    __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
                   kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
  }
}

void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).Is(x2));
  ASSERT(ToRegister(instr->key()).Is(x1));
  ASSERT(ToRegister(instr->value()).Is(x0));

  Handle<Code> ic = instr->strict_mode() == STRICT
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    ASSERT(!instr->hydrogen()->has_transition());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    __ Store(value, MemOperand(object, offset), representation);
    return;
  } else if (representation.IsDouble()) {
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->has_transition());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
    return;
  }

  Register value = ToRegister(instr->value());

  SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
      ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  ASSERT(!(representation.IsSmi() &&
           instr->value()->IsConstantOperand() &&
           !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
  if (representation.IsHeapObject() &&
      !instr->hydrogen()->value()->type().IsHeapObject()) {
    DeoptimizeIfSmi(value, instr->environment());

    // We know now that value is not a smi, so we can omit the check below.
    check_needed = OMIT_SMI_CHECK;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    // Store the new map value.
    Register new_map_value = ToRegister(instr->temp0());
    __ Mov(new_map_value, Operand(transition));
    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          new_map_value,
                          ToRegister(instr->temp1()),
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  Register destination;
  if (access.IsInobject()) {
    destination = object;
  } else {
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
    destination = temp0;
  }

  if (representation.IsSmi() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(destination, offset));
    __ AssertSmi(temp0);
    // If destination aliased temp0, restore it to the address calculated
    // earlier.
    if (destination.Is(temp0)) {
      ASSERT(!access.IsInobject());
      __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
    }
#endif
    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
             Representation::Integer32());
  } else {
    __ Store(value, FieldMemOperand(destination, offset), representation);
  }
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    __ RecordWriteField(destination,
                        offset,
                        value,                        // Clobbered.
                        ToRegister(instr->temp1()),   // Clobbered.
                        GetLinkRegisterState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        check_needed);
  }
}

void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->value()).is(x0));
  ASSERT(ToRegister(instr->object()).is(x1));

  // The name must be in x2.
  __ Mov(x2, Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).Is(x1));
  ASSERT(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTagAndPush(index);

  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}

5419 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
5420 class DeferredStringCharFromCode: public LDeferredCode {
5422 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
5423 : LDeferredCode(codegen), instr_(instr) { }
5424 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
5425 virtual LInstruction* instr() { return instr_; }
5427 LStringCharFromCode* instr_;
5430 DeferredStringCharFromCode* deferred =
5431 new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());

  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
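  // Fast case: look the code unit up in the single-character string cache.
  // An undefined entry means the string is not cached yet, so fall back to
  // the deferred runtime call.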
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ Push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  InlineSmiCheckInfo::EmitNotInlined(masm());
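
  // The compare IC leaves its result in x0; compare it against zero with
  // the condition derived from the token to materialize the branch.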
  Condition condition = TokenToCondition(op, false);
  EmitCompareAndBranch(instr, condition, x0, 0);
}


void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToOperand32I(instr->right());

  if (can_overflow) {
    // Subs sets the flags, so overflow (vs) can trigger a deoptimization.
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  // Load heap object map.
  __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to undefined check.
    __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);

    // A heap number: load value and convert to int32 using truncating
    // function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);

    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
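    // Fold both boolean checks into one branch: Cset makes output 1 if the
    // input is true, and Ccmp only compares against false when the input was
    // not true (otherwise it forces the Z flag). Afterwards, eq means the
    // input was a boolean and output already holds its integer value.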
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output contains zero; undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                        instr->environment());
  } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    // Deoptimize if it's not a heap number.
    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
                        instr->environment());

    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr->environment());

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Only a zero result can hide -0.0; check the sign bit of the input
      // double in that case and deoptimize if it is set.
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr->environment());
    }
  }
  __ Bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // A smi untags inline; anything else takes the deferred path.
    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).Is(x0));
  ASSERT(ToRegister(instr->result()).Is(x0));

  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create the regexp literal using a runtime function.
  // Result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

  __ Bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);
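
  // Allocation in new space failed: call into the runtime instead. The
  // boilerplate in x1 must survive the call, so it is pushed alongside the
  // allocation size and restored afterwards.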
  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
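
  // A simple map change transition does not change the representation of the
  // elements, so only the map pointer has to be swapped in place; everything
  // else goes through the TransitionElementsKindStub.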
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
                        GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen checks
      // that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
    ASSERT(object.is(x0));
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
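  // TestJSArrayForAllocationMemento sets the condition flags; eq means an
  // allocation memento follows the array, in which case we deoptimize.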
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr->environment());
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ Push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
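
  // Dispatch on the type literal so that each case emits only the checks
  // needed to decide that one typeof result.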
  if (String::Equals(type_name, factory->number_string())) {
    ASSERT(instr->temp1() != NULL);
    Register map = ToRegister(instr->temp1());

    __ JumpIfSmi(value, true_label);
    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->string_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(
        value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (FLAG_harmony_typeof &&
             String::Equals(type_name, factory->null_string())) {
    __ CompareRoot(value, Heap::kNullValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    ASSERT(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    ASSERT(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // HeapObject's type has been loaded into type register by JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (String::Equals(type_name, factory->object_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    if (!FLAG_harmony_typeof) {
      __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    }
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false. The bit field must be read
    // from the map, not from the value itself.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr->environment());
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr->environment());

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;
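
  // The index is a smi whose payload carries extra information: bit 0 of the
  // payload flags a mutable heap-number field (handled by the deferred code),
  // and once that bit is shifted out, a negative index selects the
  // out-of-object properties array.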
  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to the negated out-of-object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}

} }  // namespace v8::internal