// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
#include "src/stub-cache.h"


class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
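
// SafepointGenerator is handed to the macro assembler wherever a call needs
// a safepoint recorded as soon as it returns. A minimal usage sketch,
// mirroring how InvokeFunction consumes a CallWrapper later in this file
// (operand names illustrative):
//
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
//   __ InvokeFunction(x1, actual_count, CALL_FUNCTION, generator);
//   // BeforeCall(call_size) runs before the call is emitted (a no-op here);
//   // AfterCall() runs after it, recording the safepoint for pointers_.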


// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags, and they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
    : BranchGenerator(codegen),
      cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};


// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
      : BranchGenerator(codegen),
        cond_(cond),
        lhs_(lhs),
        rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};


// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
      : BranchGenerator(codegen),
        cond_(cond),
        value_(value),
        mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};
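
// For instance, testing a single-bit mask under 'eq' lets the macro
// assembler emit one Tbz instead of a Tst/B pair. An illustrative lowering
// sketch (mask value borrowed from DoBranch further down):
//
//   TestAndBranch branch(this, eq, scratch, 1 << Map::kIsUndetectable);
//   branch.Emit(label);        // may assemble to: Tbz scratch, #bit, label
//   // A multi-bit mask falls back to: Tst scratch, #mask; B eq, label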


// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
      : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};


// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};


// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};
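
// Each generator pairs with EmitBranchGeneric (defined later in this file),
// which chooses Emit or EmitInverted based on block layout. A usage sketch
// with illustrative operands:
//
//   BranchIfRoot branch(this, value, Heap::kTrueValueRootIndex);
//   EmitBranchGeneric(instr, branch);
//   // If the true block is next in emission order, only the inverted
//   // branch to the false block is emitted; the true block falls through.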


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);

    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}
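
// The result is a flat stream of commands the deoptimizer replays to rebuild
// frames: one Begin* record per frame, outermost first (via the recursive
// call above), each followed by one Store* command per value in that frame.
// An illustrative two-frame stream:
//
//   BeginJSFrame ast_id, closure_id, height
//     StoreStackSlot ...   StoreRegister ...
//   BeginArgumentsAdaptorFrame closure_id, size
//     StoreLiteral ...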


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
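
// frame_count counts every environment in the outer() chain while
// jsframe_count counts only JS_FUNCTION frames; the deoptimizer needs both
// to size its output. For instance, a call inlined through an arguments
// adaptor registers frame_count == 3 (outer JS frame, adaptor frame, inlined
// JS frame) but jsframe_count == 2.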


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).Is(x1));
  DCHECK(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  after_push_argument_ = false;
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  after_push_argument_ = false;

  DCHECK(ToRegister(instr->result()).is(x0));
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    {
      ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    }
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
  after_push_argument_ = false;

  DCHECK(ToRegister(instr->result()).is(x0));
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }

  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp, zone());
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}
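
// Both helpers address the save area with Poke/Peek at offsets from the
// current stack pointer, so the restore must run with jssp exactly where the
// save left it. A sketch of the resulting layout for two allocated double
// registers (allocation indices illustrative):
//
//   jssp + 0 * kDoubleSize : FPRegister::FromAllocationIndex(first index)
//   jssp + 1 * kDoubleSize : FPRegister::FromAllocationIndex(second index)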


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t FLAG in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Poke(x10, receiver_offset);

      __ Bind(&ok);
    }
  }

  DCHECK(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ Claim(slots, kPointerSize);
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ Bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }

  return !is_aborted();
}
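
// After this prologue an eager frame has the standard layout, sketched
// roughly below (stack growing downward; see StandardFrameConstants for the
// exact offsets):
//
//   [ incoming arguments ]   <- fp + StandardFrameConstants::kCallerSPOffset
//   [ saved lr, saved fp ]   <- fp points at the saved fp
//   [ context, function  ]      context also kept live in cp
//   [ spill slots        ]      GetStackSlotCount() slots from __ Claim()
//   [ saved fp registers ]   <- jssp, if SaveCallerDoubles() ran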


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Claim(slots);
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp, cp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  Label needs_frame, restore_caller_doubles, call_deopt_entry;

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = deopt_jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = deopt_jump_table_.length();
    for (int i = 0; i < length; i++) {
      __ Bind(&deopt_jump_table_[i]->label);

      Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
      Address entry = deopt_jump_table_[i]->address;
      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
      if (id == Deoptimizer::kNotDeoptimizationEntry) {
        Comment(";;; jump table entry %d.", i);
      } else {
        Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
      }

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      __ Mov(entry_offset, entry - base);

      // The last entry can fall through into `call_deopt_entry`, avoiding a
      // branch.
      bool last_entry = (i + 1) == length;

      if (deopt_jump_table_[i]->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        if (!needs_frame.is_bound()) {
          // This variant of deopt can only be used with stubs. Since we don't
          // have a function pointer to install in the stack frame that we're
          // building, install a special marker there instead.
          DCHECK(info()->IsStub());

          UseScratchRegisterScope temps(masm());
          Register stub_marker = temps.AcquireX();
          __ Bind(&needs_frame);
          __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
          __ Push(lr, fp, cp, stub_marker);
          __ Add(fp, __ StackPointer(), 2 * kPointerSize);
          if (!last_entry) __ B(&call_deopt_entry);
        } else {
          // Reuse the existing needs_frame code.
          __ B(&needs_frame);
        }
      } else if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        if (!restore_caller_doubles.is_bound()) {
          __ Bind(&restore_caller_doubles);
          RestoreCallerDoubles();
          if (!last_entry) __ B(&call_deopt_entry);
        } else {
          // Reuse the existing restore_caller_doubles code.
          __ B(&restore_caller_doubles);
        }
      } else {
        // There is nothing special to do, so just continue to the second-level
        // table.
        if (!last_entry) __ B(&call_deopt_entry);
      }

      masm()->CheckConstPool(false, last_entry);
    }

    // Generate common code for calling the second-level deopt table.
    Register deopt_entry = temps.AcquireX();
    __ Bind(&call_deopt_entry);
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Call(deopt_entry);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
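
// The payoff of the two-level scheme: each table entry is one short
// immediate Mov plus a branch, and the 64-bit absolute base address is
// materialized exactly once. A sketch of what a three-entry table assembles
// to (offsets illustrative):
//
//   entry0: Mov entry_offset, #0           ; entry - base
//           B call_deopt_entry
//   entry1: Mov entry_offset, #8
//           B call_deopt_entry
//   entry2: Mov entry_offset, #16          ; last entry falls through
//   call_deopt_entry:
//           Mov deopt_entry, base          ; RUNTIME_ENTRY relocation
//           Add deopt_entry, deopt_entry, entry_offset
//           Call deopt_entry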


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, 0, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::DeoptimizeBranch(
    LEnvironment* environment,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  DCHECK(environment->HasBeenRegistered());
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(x1, x1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    DCHECK(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to build frame, or restore caller doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last()->address != entry) ||
        (deopt_jump_table_.last()->bailout_type != bailout_type) ||
        (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry* table_entry =
          new(zone()) Deoptimizer::JumpTableEntry(entry,
                                                  bailout_type,
                                                  !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ B(&deopt_jump_table_.last()->label,
         branch_type, reg, bit);
  }
}


void LCodeGen::Deoptimize(LEnvironment* environment,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
}


void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
  DeoptimizeBranch(environment, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, environment);
}


void LCodeGen::DeoptimizeIfSmi(Register rt,
                               LEnvironment* environment) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
}


void LCodeGen::DeoptimizeIfRoot(Register rt,
                                Heap::RootListIndex index,
                                LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, environment);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt,
                                   Heap::RootListIndex index,
                                   LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, environment);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
                                     LEnvironment* environment) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, environment);
}


void LCodeGen::DeoptimizeIfBitSet(Register rt,
                                  int bit,
                                  LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_set, rt, bit);
}


void LCodeGen::DeoptimizeIfBitClear(Register rt,
                                    int bit,
                                    LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
}
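
// All of the helpers above funnel into DeoptimizeBranch, which either calls
// the deopt entry directly or branches to a shared jump table entry. Typical
// lowerings, roughly (BranchType in parentheses):
//
//   DeoptimizeIfZero(rt, env);    // (reg_zero)      ~ Cbz rt, <deopt>
//   DeoptimizeIfSmi(rt, env);     // (reg_bit_clear) ~ Tbz rt, #0, <deopt>
//                                 //   since kSmiTag == 0 lives in bit 0
//   DeoptimizeIf(eq, env);        // (condition)     ~ B eq, <deopt>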


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
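
// Worked example, values illustrative: if the previous lazy deopt ended at
// pc offset 40, space_needed is 12 (three instructions) and the current pc
// offset is 44, then padding_size = 40 + 12 - 44 = 8 bytes, i.e. two nops,
// guaranteeing the patcher can later overwrite a full call sequence.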


Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  DCHECK((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());
}


Operand LCodeGen::ToOperand(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


Operand LCodeGen::ToOperand32I(LOperand* op) {
  return ToOperand32(op, SIGNED_INT32);
}


Operand LCodeGen::ToOperand32U(LOperand* op) {
  return ToOperand32(op, UNSIGNED_INT32);
}


Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
  DCHECK(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return (signedness == SIGNED_INT32)
          ? Operand(constant->Integer32Value())
          : Operand(static_cast<uint32_t>(constant->Integer32Value()));
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}


static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}
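
// Worked example: parameter slots carry negative indices, so index -1 maps
// to offset 0 from the stack pointer, index -2 to kPointerSize, and so on:
// -(-1 + 1) * kPointerSize == 0 and -(-2 + 1) * kPointerSize == kPointerSize.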


MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
  DCHECK(op != NULL);
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    int fp_offset = StackSlotOffset(op->index());
    if (op->index() >= 0) {
      // Loads and stores have a bigger reach in positive offset than negative.
      // When the load or the store can't be done in one instruction via fp
      // (too big negative offset), we try to access via jssp (positive offset).
      // We can reference a stack slot from jssp only if jssp references the
      // end of the stack slots. It's not the case when:
      //  - stack_mode != kCanUseStackPointer: this is the case when deferred
      //    code has saved the registers.
      //  - after_push_argument_: arguments have been pushed for a call.
      //  - inlined_arguments_: inlined arguments have been pushed once. All
      //    the remainder of the function cannot trust jssp any longer.
      //  - saves_caller_doubles: some double registers have been pushed, jssp
      //    references the end of the double registers and not the end of the
      //    stack slots.
      // Also, if the offset from fp is small enough to make a load/store in
      // one instruction, we use a fp access.
      if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
          !inlined_arguments_ && !is_int9(fp_offset) &&
          !info()->saves_caller_doubles()) {
        int jssp_offset =
            (GetStackSlotCount() - op->index() - 1) * kPointerSize;
        return MemOperand(masm()->StackPointer(), jssp_offset);
      }
    }
    return MemOperand(fp, fp_offset);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}
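
// Worked example, values illustrative: with GetStackSlotCount() == 10 and
// op->index() == 2, if the fp-relative offset fails is_int9 (the signed
// 9-bit unscaled load/store immediate), the slot can instead be addressed as
// jssp + (10 - 2 - 1) * kPointerSize, a positive offset within reach.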


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


template<class LI>
Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info,
                                          IntegerSignedness signedness) {
  if (shift_info->shift() == NO_SHIFT) {
    return (signedness == SIGNED_INT32) ? ToOperand32I(right)
                                        : ToOperand32U(right);
  } else {
    return Operand(
        ToRegister32(right),
        shift_info->shift(),
        JSShiftAmountFromLConstant(shift_info->shift_amount()));
  }
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}
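
// For example, Token::LT maps to lo when is_unsigned is set and lt
// otherwise; callers pick is_unsigned from the operands' representation,
// since the same flag settings read differently under signed and unsigned
// condition codes.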


template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    __ B(chunk_->GetAssemblyLabel(right_block));
  }
}
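
// The three cases avoid redundant jumps: identical successors collapse to a
// goto; if the true block is emitted next, the inverted branch targets the
// false block and the true block is reached by fallthrough; symmetrically
// when the false block is next. Only when neither successor follows do we
// pay for both a conditional branch and an unconditional jump.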


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  DCHECK((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  DCHECK((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  DCHECK((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    if (loc != 0) {
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
    } else {
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32I(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
  }
}
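
// Worked example: with constant length == 3 and index == 0, offset is
// ((3 - 0) + 1) * kPointerSize, so the first of three arguments is loaded
// four words above the arguments pointer; the length - index subtraction
// covers one word of frame overhead and the +1 covers the other.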


void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = (instr->right()->IsConstantOperand())
      ? ToInteger32(LConstantOperand::cast(instr->right()))
      : Operand(ToRegister32(instr->right()), SXTW);

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);
}


void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32I(instr->right(), instr);

  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Add(result, left, right);
  }
}
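
// DoAddI and DoAddS share the overflow idiom: when the hydrogen value can
// overflow, use the flag-setting Adds and deoptimize on the overflow flag;
// otherwise emit the cheaper non-flag-setting Add. A sketch:
//
//   if (can_overflow) {
//     __ Adds(result, left, right);
//     DeoptimizeIf(vs, instr->environment());  // vs: V (overflow) flag set
//   } else {
//     __ Add(result, left, right);
//   }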


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
    } else {
      __ B(deferred->entry());
    }
  } else {
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
  }

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register filler_count = temp1;
    Register filler = temp2;
    Register untagged_result = ToRegister(instr->temp3());

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Mov(filler_count, size / kPointerSize);
    } else {
      __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
    }

    __ Sub(untagged_result, result, kHeapObjectTag);
    __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ FillFields(untagged_result, filler_count, filler);
  } else {
    DCHECK(instr->temp3() == NULL);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  // We're in a SafepointRegistersScope so we can use any scratch registers.
  Register size = x0;
  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
  } else {
    __ SmiTag(size, ToRegister32(instr->size()).X());
  }
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Mov(x10, Smi::FromInt(flags));
  __ Push(size, x10);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister32(instr->length());

  Register elements = ToRegister(instr->elements());
  Register scratch = x5;
  DCHECK(receiver.Is(x0));  // Used for parameter count.
  DCHECK(function.Is(x1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ Push(receiver);
  Register argc = receiver;
  receiver = NoReg;
  __ Sxtw(argc, length);
  // The arguments are at a one pointer size offset from elements.
  __ Add(elements, elements, 1 * kPointerSize);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Cbz(length, &invoke);
  __ Bind(&loop);
  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
  __ Push(scratch);
  __ Subs(length, length, 1);
  __ B(ne, &loop);

  __ Bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in argc (receiver) which is x0, as
  // expected by InvokeFunction.
  ParameterCount actual(argc);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  // We push some arguments and they will be popped in another block. We can't
  // trust that jssp references the end of the stack slots until the end of
  // the function.
  inlined_arguments_ = true;
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // When we are inside an inlined function, the arguments are the last
    // things that have been pushed on the stack. Therefore the arguments
    // array can be accessed directly from jssp.
    // However in the normal case, it is accessed via fp but there are two
    // words on the stack between fp and the arguments (the saved lr and fp)
    // and the LAccessArgumentsAt implementation takes that into account.
    // In the inlined case we need to subtract the size of 2 words to jssp to
    // get a pointer which will work well with LAccessArgumentsAt.
    DCHECK(masm()->StackPointer().Is(jssp));
    __ Sub(result, jssp, 2 * kPointerSize);
  } else {
    DCHECK(instr->temp() != NULL);
    Register previous_fp = ToRegister(instr->temp());

    __ Ldr(previous_fp,
           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ Ldr(result,
           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ Csel(result, fp, previous_fp, ne);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister32(instr->result());
  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());
  __ B(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ Ldr(result,
         UntagSmiMemOperand(result.X(),
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ Bind(&done);
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());

  switch (instr->op()) {
    case Token::ADD: __ Fadd(result, left, right); break;
    case Token::SUB: __ Fsub(result, left, right); break;
    case Token::MUL: __ Fmul(result, left, right); break;
    case Token::DIV: __ Fdiv(result, left, right); break;
    case Token::MOD: {
      // The ECMA-262 remainder operator is the remainder from a truncating
      // (round-towards-zero) division. Note that this differs from IEEE-754.
      //
      // TODO(jbramley): See if it's possible to do this inline, rather than by
      // calling a helper function. With frintz (to produce the intermediate
      // quotient) and fmsub (to calculate the remainder without loss of
      // precision), it should be possible. However, we would need support for
      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
      // support that yet.
      DCHECK(left.Is(d0));
      DCHECK(right.Is(d1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      DCHECK(result.Is(d0));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
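
// The MOD case calls out to C because ECMA-262 '%' is a truncating
// remainder: 5 % 3 == 2 and -5 % 3 == -2, whereas the IEEE-754 remainder
// operation rounds the quotient to nearest even and gives
// remainder(5, 3) == -1.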


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(x1));
  DCHECK(ToRegister(instr->right()).is(x0));
  DCHECK(ToRegister(instr->result()).is(x0));

  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBitI(LBitI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32U(instr->right(), instr);

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand32I(instr->index());
    Register length = ToRegister32(instr->length());
    __ Cmp(length, index);
    cond = CommuteCondition(cond);
  } else {
    Register index = ToRegister32(instr->index());
    Operand length = ToOperand32I(instr->length());
    __ Cmp(index, length);
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
  } else {
    DeoptimizeIf(cond, instr->environment());
  }
}
1858 void LCodeGen::DoBranch(LBranch* instr) {
1859 Representation r = instr->hydrogen()->value()->representation();
1860 Label* true_label = instr->TrueLabel(chunk_);
1861 Label* false_label = instr->FalseLabel(chunk_);
1863 if (r.IsInteger32()) {
1864 DCHECK(!info()->IsStub());
1865 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1866 } else if (r.IsSmi()) {
1867 DCHECK(!info()->IsStub());
1868 STATIC_ASSERT(kSmiTag == 0);
1869 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1870 } else if (r.IsDouble()) {
1871 DoubleRegister value = ToDoubleRegister(instr->value());
1872 // Test the double value. Zero and NaN are false.
1873 EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1875 DCHECK(r.IsTagged());
1876 Register value = ToRegister(instr->value());
1877 HType type = instr->hydrogen()->value()->type();
1879 if (type.IsBoolean()) {
1880 DCHECK(!info()->IsStub());
1881 __ CompareRoot(value, Heap::kTrueValueRootIndex);
1882 EmitBranch(instr, eq);
1883 } else if (type.IsSmi()) {
1884 DCHECK(!info()->IsStub());
1885 EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
1886 } else if (type.IsJSArray()) {
1887 DCHECK(!info()->IsStub());
1888 EmitGoto(instr->TrueDestination(chunk()));
1889 } else if (type.IsHeapNumber()) {
1890 DCHECK(!info()->IsStub());
1891 __ Ldr(double_scratch(), FieldMemOperand(value,
1892 HeapNumber::kValueOffset));
1893 // Test the double value. Zero and NaN are false.
1894 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1895 } else if (type.IsString()) {
1896 DCHECK(!info()->IsStub());
1897 Register temp = ToRegister(instr->temp1());
1898 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1899 EmitCompareAndBranch(instr, ne, temp, 0);
1901 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1902 // Avoid deopts in the case where we've never executed this path before.
1903 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
1905 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1906 // undefined -> false.
1908 value, Heap::kUndefinedValueRootIndex, false_label);
1911 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1912 // Boolean -> its value.
1914 value, Heap::kTrueValueRootIndex, true_label);
1916 value, Heap::kFalseValueRootIndex, false_label);
1919 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1922 value, Heap::kNullValueRootIndex, false_label);
1925 if (expected.Contains(ToBooleanStub::SMI)) {
1926 // Smis: 0 -> false, all other -> true.
1927 DCHECK(Smi::FromInt(0) == 0);
1928 __ Cbz(value, false_label);
1929 __ JumpIfSmi(value, true_label);
1930 } else if (expected.NeedsMap()) {
1931 // If we need a map later and have a smi, deopt.
1932 DeoptimizeIfSmi(value, instr->environment());
1933 }
1935 Register map = NoReg;
1936 Register scratch = NoReg;
1938 if (expected.NeedsMap()) {
1939 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
1940 map = ToRegister(instr->temp1());
1941 scratch = ToRegister(instr->temp2());
1943 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1945 if (expected.CanBeUndetectable()) {
1946 // Undetectable -> false.
1947 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1948 __ TestAndBranchIfAnySet(
1949 scratch, 1 << Map::kIsUndetectable, false_label);
1950 }
1953 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1954 // spec object -> true.
1955 __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1956 __ B(ge, true_label);
1957 }
1959 if (expected.Contains(ToBooleanStub::STRING)) {
1960 // String value -> false iff empty.
1961 Label not_string;
1962 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1963 __ B(ge, &not_string);
1964 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1965 __ Cbz(scratch, false_label);
1966 __ B(true_label);
1967 __ Bind(&not_string);
1968 }
1970 if (expected.Contains(ToBooleanStub::SYMBOL)) {
1971 // Symbol value -> true.
1972 __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1973 __ B(eq, true_label);
1974 }
1976 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1977 Label not_heap_number;
1978 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
1980 __ Ldr(double_scratch(),
1981 FieldMemOperand(value, HeapNumber::kValueOffset));
1982 __ Fcmp(double_scratch(), 0.0);
1983 // If we got a NaN (overflow bit is set), jump to the false branch.
1984 __ B(vs, false_label);
1985 __ B(eq, false_label);
1986 __ B(true_label);
1987 __ Bind(&not_heap_number);
1988 }
1990 if (!expected.IsGeneric()) {
1991 // We've seen something for the first time -> deopt.
1992 // This can only happen if we are not generic already.
1993 Deoptimize(instr->environment());
1994 }
1995 }
1996 }
1997 }
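// A minimal scalar sketch of the truthiness rule the heap-number arm above
// encodes (illustrative, not V8 code; assumes <cmath>). Each compiled case
// mirrors one arm of the dispatch; any input kind the ToBooleanStub has not
// yet observed deoptimizes instead of being handled inline.
static bool DoubleIsTruthy(double v) {
  return !(v == 0.0 || std::isnan(v));  // zero and NaN are false
}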
2000 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2001 int formal_parameter_count,
2002 int arity,
2003 LInstruction* instr,
2004 Register function_reg) {
2005 bool dont_adapt_arguments =
2006 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2007 bool can_invoke_directly =
2008 dont_adapt_arguments || formal_parameter_count == arity;
2010 // The function interface relies on the following register assignments.
2011 DCHECK(function_reg.Is(x1) || function_reg.IsNone());
2012 Register arity_reg = x0;
2014 LPointerMap* pointers = instr->pointer_map();
2016 // If necessary, load the function object.
2017 if (function_reg.IsNone()) {
2018 function_reg = x1;
2019 __ LoadObject(function_reg, function);
2020 }
2022 if (FLAG_debug_code) {
2023 Label is_not_smi;
2024 // Try to confirm that function_reg (x1) is a tagged pointer.
2025 __ JumpIfNotSmi(function_reg, &is_not_smi);
2026 __ Abort(kExpectedFunctionObject);
2027 __ Bind(&is_not_smi);
2030 if (can_invoke_directly) {
2032 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
2034 // Set the arguments count if adaptation is not needed. Assumes that x0 is
2035 // available to write to at this point.
2036 if (dont_adapt_arguments) {
2037 __ Mov(arity_reg, arity);
2038 }
2041 __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
2042 __ Call(x10);
2044 // Set up deoptimization.
2045 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2046 } else {
2047 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2048 ParameterCount count(arity);
2049 ParameterCount expected(formal_parameter_count);
2050 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
2055 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
2056 DCHECK(instr->IsMarkedAsCall());
2057 DCHECK(ToRegister(instr->result()).Is(x0));
2059 LPointerMap* pointers = instr->pointer_map();
2060 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2062 if (instr->target()->IsConstantOperand()) {
2063 LConstantOperand* target = LConstantOperand::cast(instr->target());
2064 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2065 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
2066 // TODO(all): on ARM we use a call descriptor to specify a storage mode
2067 // but on ARM64 we only have one storage mode so it isn't necessary. Check
2068 // this understanding is correct.
2069 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
2070 } else {
2071 DCHECK(instr->target()->IsRegister());
2072 Register target = ToRegister(instr->target());
2073 generator.BeforeCall(__ CallSize(target));
2074 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2075 __ Call(target);
2076 }
2077 generator.AfterCall();
2078 after_push_argument_ = false;
2082 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
2083 DCHECK(instr->IsMarkedAsCall());
2084 DCHECK(ToRegister(instr->function()).is(x1));
2086 if (instr->hydrogen()->pass_argument_count()) {
2087 __ Mov(x0, Operand(instr->arity()));
2088 }
2091 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2093 // Load the code entry address.
2094 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
2095 __ Call(x10);
2097 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2098 after_push_argument_ = false;
2102 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2103 CallRuntime(instr->function(), instr->arity(), instr);
2104 after_push_argument_ = false;
2108 void LCodeGen::DoCallStub(LCallStub* instr) {
2109 DCHECK(ToRegister(instr->context()).is(cp));
2110 DCHECK(ToRegister(instr->result()).is(x0));
2111 switch (instr->hydrogen()->major_key()) {
2112 case CodeStub::RegExpExec: {
2113 RegExpExecStub stub(isolate());
2114 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2115 break;
2116 }
2117 case CodeStub::SubString: {
2118 SubStringStub stub(isolate());
2119 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2120 break;
2121 }
2122 case CodeStub::StringCompare: {
2123 StringCompareStub stub(isolate());
2124 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2125 break;
2126 }
2127 default:
2128 UNREACHABLE();
2129 }
2130 after_push_argument_ = false;
2134 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
2135 GenerateOsrPrologue();
2139 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
2140 Register temp = ToRegister(instr->temp());
2141 {
2142 PushSafepointRegistersScope scope(this);
2143 __ Push(object);
2144 __ Mov(cp, 0);
2145 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2146 RecordSafepointWithRegisters(
2147 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2148 __ StoreToSafepointRegisterSlot(x0, temp);
2149 }
2150 DeoptimizeIfSmi(temp, instr->environment());
2154 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2155 class DeferredCheckMaps: public LDeferredCode {
2156 public:
2157 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2158 : LDeferredCode(codegen), instr_(instr), object_(object) {
2159 SetExit(check_maps());
2161 virtual void Generate() {
2162 codegen()->DoDeferredInstanceMigration(instr_, object_);
2164 Label* check_maps() { return &check_maps_; }
2165 virtual LInstruction* instr() { return instr_; }
2167 private:
2168 LCheckMaps* instr_;
2169 Register object_;
2170 };
2172 if (instr->hydrogen()->IsStabilityCheck()) {
2173 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2174 for (int i = 0; i < maps->size(); ++i) {
2175 AddStabilityDependency(maps->at(i).handle());
2176 }
2177 return;
2178 }
2180 Register object = ToRegister(instr->value());
2181 Register map_reg = ToRegister(instr->temp());
2183 __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
2185 DeferredCheckMaps* deferred = NULL;
2186 if (instr->hydrogen()->HasMigrationTarget()) {
2187 deferred = new(zone()) DeferredCheckMaps(this, instr, object);
2188 __ Bind(deferred->check_maps());
2191 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2192 Label success;
2193 for (int i = 0; i < maps->size() - 1; i++) {
2194 Handle<Map> map = maps->at(i).handle();
2195 __ CompareMap(map_reg, map);
2196 __ B(eq, &success);
2197 }
2198 Handle<Map> map = maps->at(maps->size() - 1).handle();
2199 __ CompareMap(map_reg, map);
2201 // We didn't match a map.
2202 if (instr->hydrogen()->HasMigrationTarget()) {
2203 __ B(ne, deferred->entry());
2204 } else {
2205 DeoptimizeIf(ne, instr->environment());
2206 }
2208 __ Bind(&success);
2212 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2213 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2214 DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
2219 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2220 Register value = ToRegister(instr->value());
2221 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
2222 DeoptimizeIfNotSmi(value, instr->environment());
2226 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2227 Register input = ToRegister(instr->value());
2228 Register scratch = ToRegister(instr->temp());
2230 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2231 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2233 if (instr->hydrogen()->is_interval_check()) {
2234 InstanceType first, last;
2235 instr->hydrogen()->GetCheckInterval(&first, &last);
2237 __ Cmp(scratch, first);
2238 if (first == last) {
2239 // If there is only one type in the interval, check for equality.
2240 DeoptimizeIf(ne, instr->environment());
2241 } else if (last == LAST_TYPE) {
2242 // We don't need to compare with the higher bound of the interval.
2243 DeoptimizeIf(lo, instr->environment());
2244 } else {
2245 // If we are below the lower bound, set the C flag and clear the Z flag
2246 // to force a deopt.
2247 __ Ccmp(scratch, last, CFlag, hs);
2248 DeoptimizeIf(hi, instr->environment());
2249 }
2250 } else {
2251 uint8_t mask;
2252 uint8_t tag;
2253 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2255 if (IsPowerOf2(mask)) {
2256 DCHECK((tag == 0) || (tag == mask));
2257 if (tag == 0) {
2258 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
2259 } else {
2260 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
2261 }
2262 } else {
2263 if (tag == 0) {
2264 __ Tst(scratch, mask);
2265 } else {
2266 __ And(scratch, scratch, mask);
2267 __ Cmp(scratch, tag);
2268 }
2269 DeoptimizeIf(ne, instr->environment());
2270 }
2271 }
2272 }
2275 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2276 DoubleRegister input = ToDoubleRegister(instr->unclamped());
2277 Register result = ToRegister32(instr->result());
2278 __ ClampDoubleToUint8(result, input, double_scratch());
2282 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
2283 Register input = ToRegister32(instr->unclamped());
2284 Register result = ToRegister32(instr->result());
2285 __ ClampInt32ToUint8(result, input);
2289 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
2290 Register input = ToRegister(instr->unclamped());
2291 Register result = ToRegister32(instr->result());
2292 Register scratch = ToRegister(instr->temp1());
2293 Label done;
2295 // Both smi and heap number cases are handled.
2296 Label is_not_smi;
2297 __ JumpIfNotSmi(input, &is_not_smi);
2298 __ SmiUntag(result.X(), input);
2299 __ ClampInt32ToUint8(result);
2300 __ B(&done);
2302 __ Bind(&is_not_smi);
2304 // Check for heap number.
2305 Label is_heap_number;
2306 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2307 __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
2309 // Check for undefined. Undefined is converted to zero for the clamping conversion.
2310 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
2311 instr->environment());
2312 __ Mov(result, 0);
2313 __ B(&done);
2315 // Heap number case.
2316 __ Bind(&is_heap_number);
2317 DoubleRegister dbl_scratch = double_scratch();
2318 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
2319 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2320 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2322 __ Bind(&done);
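// A minimal scalar sketch of the clamp performed by the three paths above
// (illustrative, not V8 code; assumes <cstdint>, and assumes the Clamp*
// macros round to the nearest integer; the exact tie-breaking mode is a
// detail of the macro assembler).
static uint8_t ClampToUint8(double value) {
  if (!(value > 0.0)) return 0;    // NaN, +/-0 and negatives clamp to 0
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(value + 0.5);
}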
2326 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
2327 DoubleRegister value_reg = ToDoubleRegister(instr->value());
2328 Register result_reg = ToRegister(instr->result());
2329 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
2330 __ Fmov(result_reg, value_reg);
2331 __ Lsr(result_reg, result_reg, 32);
2332 } else {
2333 __ Fmov(result_reg.W(), value_reg.S());
2338 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
2339 Register hi_reg = ToRegister(instr->hi());
2340 Register lo_reg = ToRegister(instr->lo());
2341 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2343 // Insert the least significant 32 bits of hi_reg into the most significant
2344 // 32 bits of lo_reg, and move to a floating point register.
2345 __ Bfi(lo_reg, hi_reg, 32, 32);
2346 __ Fmov(result_reg, lo_reg);
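// A minimal scalar sketch of what DoDoubleBits and DoConstructDouble compute
// (illustrative, not V8 code; assumes <cstdint> and <cstring>).
static double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;  // the Bfi above
  double result;
  memcpy(&result, &bits, sizeof(result));                  // the Fmov above
  return result;
}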
2350 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2351 Handle<String> class_name = instr->hydrogen()->class_name();
2352 Label* true_label = instr->TrueLabel(chunk_);
2353 Label* false_label = instr->FalseLabel(chunk_);
2354 Register input = ToRegister(instr->value());
2355 Register scratch1 = ToRegister(instr->temp1());
2356 Register scratch2 = ToRegister(instr->temp2());
2358 __ JumpIfSmi(input, false_label);
2360 Register map = scratch2;
2361 if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
2362 // Assuming the following assertions, we can use the same compares to test
2363 // for both being a function type and being in the object type range.
2364 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2365 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2366 FIRST_SPEC_OBJECT_TYPE + 1);
2367 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2368 LAST_SPEC_OBJECT_TYPE - 1);
2369 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2371 // We expect CompareObjectType to load the object instance type in scratch1.
2372 __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
2373 __ B(lt, false_label);
2374 __ B(eq, true_label);
2375 __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
2376 __ B(eq, true_label);
2377 } else {
2378 __ IsObjectJSObjectType(input, map, scratch1, false_label);
2379 }
2381 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2382 // Check if the constructor in the map is a function.
2383 __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
2385 // Objects with a non-function constructor have class 'Object'.
2386 if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
2387 __ JumpIfNotObjectType(
2388 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
2389 } else {
2390 __ JumpIfNotObjectType(
2391 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
2392 }
2394 // The constructor function is in scratch1. Get its instance class name.
2395 __ Ldr(scratch1,
2396 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
2397 __ Ldr(scratch1,
2398 FieldMemOperand(scratch1,
2399 SharedFunctionInfo::kInstanceClassNameOffset));
2401 // The class name we are testing against is internalized since it's a literal.
2402 // The name in the constructor is internalized because of the way the context
2403 // is booted. This routine isn't expected to work for random API-created
2404 // classes and it doesn't have to because you can't access it with natives
2405 // syntax. Since both sides are internalized it is sufficient to use an
2406 // identity comparison.
2407 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2411 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
2412 DCHECK(instr->hydrogen()->representation().IsDouble());
2413 FPRegister object = ToDoubleRegister(instr->object());
2414 Register temp = ToRegister(instr->temp());
2416 // If we don't have a NaN, we don't have the hole, so branch now to avoid the
2417 // (relatively expensive) hole-NaN check.
2418 __ Fcmp(object, object);
2419 __ B(vc, instr->FalseLabel(chunk_));
2421 // We have a NaN, but is it the hole?
2422 __ Fmov(temp, object);
2423 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
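// Why the bits are moved to an integer register: the hole is a NaN with one
// specific payload, and NaN payloads are invisible to FP compares. A minimal
// scalar sketch (illustrative, not V8 code; assumes <cstdint> and <cstring>):
static bool IsHoleNan(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  return bits == UINT64_C(0x7fffffffffffffff);  // kHoleNanInt64, per the
}                                               // STATIC_ASSERT further down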
2427 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
2428 DCHECK(instr->hydrogen()->representation().IsTagged());
2429 Register object = ToRegister(instr->object());
2431 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
2435 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2436 Register value = ToRegister(instr->value());
2437 Register map = ToRegister(instr->temp());
2439 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2440 EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2444 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2445 Representation rep = instr->hydrogen()->value()->representation();
2446 DCHECK(!rep.IsInteger32());
2447 Register scratch = ToRegister(instr->temp());
2449 if (rep.IsDouble()) {
2450 __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
2451 instr->TrueLabel(chunk()));
2452 } else {
2453 Register value = ToRegister(instr->value());
2454 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2455 instr->FalseLabel(chunk()), DO_SMI_CHECK);
2456 __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2457 __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
2458 }
2459 EmitGoto(instr->FalseDestination(chunk()));
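// -0.0 compares equal to +0.0, so JumpIfMinusZero must inspect the raw
// encoding. A minimal scalar sketch (illustrative, not V8 code; assumes
// <cstdint> and <cstring>):
static bool IsMinusZero(double v) {
  uint64_t bits;
  memcpy(&bits, &v, sizeof(bits));
  return bits == (UINT64_C(1) << 63);  // only the sign bit set
}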
2463 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2464 LOperand* left = instr->left();
2465 LOperand* right = instr->right();
2466 bool is_unsigned =
2467 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2468 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2469 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2471 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2472 // We can statically evaluate the comparison.
2473 double left_val = ToDouble(LConstantOperand::cast(left));
2474 double right_val = ToDouble(LConstantOperand::cast(right));
2475 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2476 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2477 EmitGoto(next_block);
2478 } else {
2479 if (instr->is_double()) {
2480 __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
2482 // If a NaN is involved, i.e. the result is unordered (V set),
2483 // jump to false block label.
2484 __ B(vs, instr->FalseLabel(chunk_));
2485 EmitBranch(instr, cond);
2486 } else {
2487 if (instr->hydrogen_value()->representation().IsInteger32()) {
2488 if (right->IsConstantOperand()) {
2489 EmitCompareAndBranch(instr,
2490 cond,
2491 ToRegister32(left),
2492 ToOperand32I(right));
2493 } else if (left->IsConstantOperand()) {
2494 // Commute the operands and the condition.
2495 EmitCompareAndBranch(instr,
2496 CommuteCondition(cond),
2497 ToRegister32(right),
2498 ToOperand32I(left));
2499 }
2500 } else {
2501 DCHECK(instr->hydrogen_value()->representation().IsSmi());
2502 if (right->IsConstantOperand()) {
2503 int32_t value = ToInteger32(LConstantOperand::cast(right));
2504 EmitCompareAndBranch(instr,
2505 cond,
2506 ToRegister(left),
2507 Operand(Smi::FromInt(value)));
2508 } else if (left->IsConstantOperand()) {
2509 // Commute the operands and the condition.
2510 int32_t value = ToInteger32(LConstantOperand::cast(left));
2511 EmitCompareAndBranch(instr,
2512 CommuteCondition(cond),
2513 ToRegister(right),
2514 Operand(Smi::FromInt(value)));
2515 } else {
2516 EmitCompareAndBranch(instr,
2517 cond,
2518 ToRegister(left),
2519 ToRegister(right));
2520 }
2521 }
2522 }
2523 }
2527 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2528 Register left = ToRegister(instr->left());
2529 Register right = ToRegister(instr->right());
2530 EmitCompareAndBranch(instr, eq, left, right);
2534 void LCodeGen::DoCmpT(LCmpT* instr) {
2535 DCHECK(ToRegister(instr->context()).is(cp));
2536 Token::Value op = instr->op();
2537 Condition cond = TokenToCondition(op, false);
2539 DCHECK(ToRegister(instr->left()).Is(x1));
2540 DCHECK(ToRegister(instr->right()).Is(x0));
2541 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2542 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2543 // Signal that we don't inline smi code before this stub.
2544 InlineSmiCheckInfo::EmitNotInlined(masm());
2546 // Return true or false depending on CompareIC result.
2547 // This instruction is marked as call. We can clobber any register.
2548 DCHECK(instr->IsMarkedAsCall());
2549 __ LoadTrueFalseRoots(x1, x2);
2550 __ Cmp(x0, 0);
2551 __ Csel(ToRegister(instr->result()), x1, x2, cond);
2555 void LCodeGen::DoConstantD(LConstantD* instr) {
2556 DCHECK(instr->result()->IsDoubleRegister());
2557 DoubleRegister result = ToDoubleRegister(instr->result());
2558 if (instr->value() == 0) {
2559 if (copysign(1.0, instr->value()) == 1.0) {
2560 __ Fmov(result, fp_zero);
2561 } else {
2562 __ Fneg(result, fp_zero);
2563 }
2564 } else {
2565 __ Fmov(result, instr->value());
2566 }
2570 void LCodeGen::DoConstantE(LConstantE* instr) {
2571 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2575 void LCodeGen::DoConstantI(LConstantI* instr) {
2576 DCHECK(is_int32(instr->value()));
2577 // Cast the value here to ensure that the value isn't sign extended by the
2578 // implicit Operand constructor.
2579 __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
2583 void LCodeGen::DoConstantS(LConstantS* instr) {
2584 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2588 void LCodeGen::DoConstantT(LConstantT* instr) {
2589 Handle<Object> object = instr->value(isolate());
2590 AllowDeferredHandleDereference smi_check;
2591 __ LoadObject(ToRegister(instr->result()), object);
2595 void LCodeGen::DoContext(LContext* instr) {
2596 // If there is a non-return use, the context must be moved to a register.
2597 Register result = ToRegister(instr->result());
2598 if (info()->IsOptimizing()) {
2599 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
2600 } else {
2601 // If there is no frame, the context must be in cp.
2602 DCHECK(result.is(cp));
2607 void LCodeGen::DoCheckValue(LCheckValue* instr) {
2608 Register reg = ToRegister(instr->value());
2609 Handle<HeapObject> object = instr->hydrogen()->object().handle();
2610 AllowDeferredHandleDereference smi_check;
2611 if (isolate()->heap()->InNewSpace(*object)) {
2612 UseScratchRegisterScope temps(masm());
2613 Register temp = temps.AcquireX();
2614 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2615 __ Mov(temp, Operand(Handle<Object>(cell)));
2616 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2617 __ Cmp(reg, temp);
2618 } else {
2619 __ Cmp(reg, Operand(object));
2620 }
2621 DeoptimizeIf(ne, instr->environment());
2625 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2626 last_lazy_deopt_pc_ = masm()->pc_offset();
2627 DCHECK(instr->HasEnvironment());
2628 LEnvironment* env = instr->environment();
2629 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2630 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2634 void LCodeGen::DoDateField(LDateField* instr) {
2635 Register object = ToRegister(instr->date());
2636 Register result = ToRegister(instr->result());
2637 Register temp1 = x10;
2638 Register temp2 = x11;
2639 Smi* index = instr->index();
2640 Label runtime, done;
2642 DCHECK(object.is(result) && object.Is(x0));
2643 DCHECK(instr->IsMarkedAsCall());
2645 DeoptimizeIfSmi(object, instr->environment());
2646 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2647 DeoptimizeIf(ne, instr->environment());
2649 if (index->value() == 0) {
2650 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2651 } else {
2652 if (index->value() < JSDate::kFirstUncachedField) {
2653 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2654 __ Mov(temp1, Operand(stamp));
2655 __ Ldr(temp1, MemOperand(temp1));
2656 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2657 __ Cmp(temp1, temp2);
2658 __ B(ne, &runtime);
2659 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
2660 kPointerSize * index->value()));
2661 __ B(&done);
2662 }
2664 __ Bind(&runtime);
2665 __ Mov(x1, Operand(index));
2666 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2667 __ Bind(&done);
2673 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
2674 Deoptimizer::BailoutType type = instr->hydrogen()->type();
2675 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
2676 // needed return address), even though the implementation of LAZY and EAGER is
2677 // now identical. When LAZY is eventually completely folded into EAGER, remove
2678 // the special case below.
2679 if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
2680 type = Deoptimizer::LAZY;
2683 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
2684 Deoptimize(instr->environment(), &type);
2688 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2689 Register dividend = ToRegister32(instr->dividend());
2690 int32_t divisor = instr->divisor();
2691 Register result = ToRegister32(instr->result());
2692 DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
2693 DCHECK(!result.is(dividend));
2695 // Check for (0 / -x) that will produce negative zero.
2696 HDiv* hdiv = instr->hydrogen();
2697 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2698 DeoptimizeIfZero(dividend, instr->environment());
2700 // Check for (kMinInt / -1).
2701 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2702 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2703 // overflow.
2704 __ Cmp(dividend, 1);
2705 DeoptimizeIf(vs, instr->environment());
2707 // Deoptimize if remainder will not be 0.
2708 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2709 divisor != 1 && divisor != -1) {
2710 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2711 __ Tst(dividend, mask);
2712 DeoptimizeIf(ne, instr->environment());
2713 }
2715 if (divisor == -1) { // Nice shortcut, not needed for correctness.
2716 __ Neg(result, dividend);
2717 return;
2718 }
2719 int32_t shift = WhichPowerOf2Abs(divisor);
2720 if (shift == 0) {
2721 __ Mov(result, dividend);
2722 } else if (shift == 1) {
2723 __ Add(result, dividend, Operand(dividend, LSR, 31));
2724 } else {
2725 __ Mov(result, Operand(dividend, ASR, 31));
2726 __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2727 }
2728 if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2729 if (divisor < 0) __ Neg(result, result);
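// A minimal scalar sketch of the branchless sequence above (illustrative,
// not V8 code; assumes <cstdint> and arithmetic right shift on negatives).
// For negative dividends a bias of 2^shift - 1 is added first so that the
// arithmetic shift truncates toward zero, matching what Sdiv would produce.
static int32_t DivByPowerOf2(int32_t dividend, int shift, bool neg_divisor) {
  uint32_t bias = 0;
  if (shift == 1) {
    bias = static_cast<uint32_t>(dividend) >> 31;           // +1 if negative
  } else if (shift > 1) {
    uint32_t sign = static_cast<uint32_t>(dividend >> 31);  // 0 or 0xffffffff
    bias = sign >> (32 - shift);                            // 2^shift - 1 if negative
  }
  int32_t result = static_cast<int32_t>(dividend + bias) >> shift;
  return neg_divisor ? -result : result;
}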
2733 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
2734 Register dividend = ToRegister32(instr->dividend());
2735 int32_t divisor = instr->divisor();
2736 Register result = ToRegister32(instr->result());
2737 DCHECK(!AreAliased(dividend, result));
2739 if (divisor == 0) {
2740 Deoptimize(instr->environment());
2741 return;
2742 }
2744 // Check for (0 / -x) that will produce negative zero.
2745 HDiv* hdiv = instr->hydrogen();
2746 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2747 DeoptimizeIfZero(dividend, instr->environment());
2750 __ TruncatingDiv(result, dividend, Abs(divisor));
2751 if (divisor < 0) __ Neg(result, result);
2753 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2754 Register temp = ToRegister32(instr->temp());
2755 DCHECK(!AreAliased(dividend, result, temp));
2756 __ Sxtw(dividend.X(), dividend);
2757 __ Mov(temp, divisor);
2758 __ Smsubl(temp.X(), result, temp, dividend.X());
2759 DeoptimizeIfNotZero(temp, instr->environment());
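// A minimal scalar sketch of the deopt test above (illustrative, not V8
// code). TruncatingDiv is assumed to produce trunc(dividend / |divisor|) via
// a multiply-and-shift; the Smsubl then reconstructs the remainder.
static bool DivByConstNeedsDeopt(int32_t dividend, int32_t divisor) {
  int32_t result = dividend / (divisor < 0 ? -divisor : divisor);
  if (divisor < 0) result = -result;
  return (dividend - result * divisor) != 0;  // nonzero remainder -> deopt
}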
2764 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
2765 void LCodeGen::DoDivI(LDivI* instr) {
2766 HBinaryOperation* hdiv = instr->hydrogen();
2767 Register dividend = ToRegister32(instr->dividend());
2768 Register divisor = ToRegister32(instr->divisor());
2769 Register result = ToRegister32(instr->result());
2771 // Issue the division first, and then check for any deopt cases whilst the
2772 // result is computed.
2773 __ Sdiv(result, dividend, divisor);
2775 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
2776 DCHECK_EQ(NULL, instr->temp());
2777 return;
2778 }
2781 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
2782 DeoptimizeIfZero(divisor, instr->environment());
2783 }
2785 // Check for (0 / -x) as that will produce negative zero.
2786 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
2787 __ Cmp(divisor, 0);
2789 // If the divisor < 0 (mi), compare the dividend, and deopt if it is
2790 // zero, i.e. zero dividend with negative divisor deopts.
2791 // If the divisor >= 0 (pl, the opposite of mi) set the flags to
2792 // condition ne, so we don't deopt, i.e. positive divisor doesn't deopt.
2793 __ Ccmp(dividend, 0, NoFlag, mi);
2794 DeoptimizeIf(eq, instr->environment());
2795 }
2797 // Check for (kMinInt / -1).
2798 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
2799 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2800 // overflow.
2801 __ Cmp(dividend, 1);
2802 // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
2803 // -1. If overflow is clear, set the flags for condition ne, as the
2804 // dividend isn't kMinInt, and thus we shouldn't deopt.
2805 __ Ccmp(divisor, -1, NoFlag, vs);
2806 DeoptimizeIf(eq, instr->environment());
2807 }
2809 // Compute remainder and deopt if it's not zero.
2810 Register remainder = ToRegister32(instr->temp());
2811 __ Msub(remainder, result, divisor, dividend);
2812 DeoptimizeIfNotZero(remainder, instr->environment());
2816 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
2817 DoubleRegister input = ToDoubleRegister(instr->value());
2818 Register result = ToRegister32(instr->result());
2820 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2821 DeoptimizeIfMinusZero(input, instr->environment());
2822 }
2824 __ TryRepresentDoubleAsInt32(result, input, double_scratch());
2825 DeoptimizeIf(ne, instr->environment());
2827 if (instr->tag_result()) {
2828 __ SmiTag(result.X());
2829 }
2833 void LCodeGen::DoDrop(LDrop* instr) {
2834 __ Drop(instr->count());
2838 void LCodeGen::DoDummy(LDummy* instr) {
2839 // Nothing to see here, move on!
2843 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2844 // Nothing to see here, move on!
2848 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2849 DCHECK(ToRegister(instr->context()).is(cp));
2850 // FunctionLiteral instruction is marked as call, we can trash any register.
2851 DCHECK(instr->IsMarkedAsCall());
2853 // Use the fast case closure allocation code that allocates in new
2854 // space for nested functions that don't need literals cloning.
2855 bool pretenure = instr->hydrogen()->pretenure();
2856 if (!pretenure && instr->hydrogen()->has_no_literals()) {
2857 FastNewClosureStub stub(isolate(),
2858 instr->hydrogen()->strict_mode(),
2859 instr->hydrogen()->is_generator());
2860 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2861 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2862 } else {
2863 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2864 __ Mov(x1, Operand(pretenure ? factory()->true_value()
2865 : factory()->false_value()));
2866 __ Push(cp, x2, x1);
2867 CallRuntime(Runtime::kNewClosure, 3, instr);
2872 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
2873 Register map = ToRegister(instr->map());
2874 Register result = ToRegister(instr->result());
2875 Label load_cache, done;
2877 __ EnumLengthUntagged(result, map);
2878 __ Cbnz(result, &load_cache);
2880 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2881 __ B(&done);
2883 __ Bind(&load_cache);
2884 __ LoadInstanceDescriptors(map, result);
2885 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2886 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2887 DeoptimizeIfZero(result, instr->environment());
2889 __ Bind(&done);
2893 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2894 Register object = ToRegister(instr->object());
2895 Register null_value = x5;
2897 DCHECK(instr->IsMarkedAsCall());
2898 DCHECK(object.Is(x0));
2900 DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
2901 instr->environment());
2903 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2904 __ Cmp(object, null_value);
2905 DeoptimizeIf(eq, instr->environment());
2907 DeoptimizeIfSmi(object, instr->environment());
2909 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
2910 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
2911 DeoptimizeIf(le, instr->environment());
2913 Label use_cache, call_runtime;
2914 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
2916 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2917 __ B(&use_cache);
2919 // Get the set of properties to enumerate.
2920 __ Bind(&call_runtime);
2921 __ Push(object);
2922 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
2924 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
2925 DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment());
2927 __ Bind(&use_cache);
2931 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2932 Register input = ToRegister(instr->value());
2933 Register result = ToRegister(instr->result());
2935 __ AssertString(input);
2937 // Assert that we can use a W register load to get the hash.
2938 DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
2939 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
2940 __ IndexFromHash(result, result);
2944 void LCodeGen::EmitGoto(int block) {
2945 // Do not emit jump if we are emitting a goto to the next block.
2946 if (!IsNextEmittedBlock(block)) {
2947 __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
2952 void LCodeGen::DoGoto(LGoto* instr) {
2953 EmitGoto(instr->block_id());
2957 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2958 LHasCachedArrayIndexAndBranch* instr) {
2959 Register input = ToRegister(instr->value());
2960 Register temp = ToRegister32(instr->temp());
2962 // Assert that the cache status bits fit in a W register.
2963 DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
2964 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
2965 __ Tst(temp, String::kContainsCachedArrayIndexMask);
2966 EmitBranch(instr, eq);
2970 // HHasInstanceTypeAndBranch instruction is built with an interval of type
2971 // to test but is only used in very restricted ways. The only possible kinds
2972 // of intervals are:
2973 // - [ FIRST_TYPE, instr->to() ]
2974 // - [ instr->from(), LAST_TYPE ]
2975 // - instr->from() == instr->to()
2977 // These kinds of intervals can be checked with only one compare instruction
2978 // provided the correct value and test condition are used.
2980 // TestType() will return the value to use in the compare instruction and
2981 // BranchCondition() will return the condition to use depending on the kind
2982 // of interval actually specified in the instruction.
2983 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2984 InstanceType from = instr->from();
2985 InstanceType to = instr->to();
2986 if (from == FIRST_TYPE) return to;
2987 DCHECK((from == to) || (to == LAST_TYPE));
2988 return from;
2989 }
2992 // See comment above TestType function for what this function does.
2993 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2994 InstanceType from = instr->from();
2995 InstanceType to = instr->to();
2996 if (from == to) return eq;
2997 if (to == LAST_TYPE) return hs;
2998 if (from == FIRST_TYPE) return ls;
2999 UNREACHABLE();
3000 return eq;
3001 }
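// A minimal scalar sketch of the three interval shapes reduced to a single
// compare, mirroring TestType()/BranchCondition() (illustrative, not V8
// code; 'first'/'last' stand in for FIRST_TYPE/LAST_TYPE).
static bool InInterval(int t, int from, int to, int first, int last) {
  if (from == to) return t == to;      // eq against TestType() == to
  if (to == last) return t >= from;    // hs against TestType() == from
  /* from == first */ return t <= to;  // ls against TestType() == to
}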
3004 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
3005 Register input = ToRegister(instr->value());
3006 Register scratch = ToRegister(instr->temp());
3008 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
3009 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3010 }
3011 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
3012 EmitBranch(instr, BranchCondition(instr->hydrogen()));
3016 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3017 Register result = ToRegister(instr->result());
3018 Register base = ToRegister(instr->base_object());
3019 if (instr->offset()->IsConstantOperand()) {
3020 __ Add(result, base, ToOperand32I(instr->offset()));
3021 } else {
3022 __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
3023 }
3027 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
3028 DCHECK(ToRegister(instr->context()).is(cp));
3029 // Assert that the arguments are in the registers expected by InstanceofStub.
3030 DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
3031 DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));
3033 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
3034 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3036 // InstanceofStub returns a result in x0:
3037 // 0 => not an instance
3038 // smi 1 => instance.
3039 __ Cmp(x0, 0);
3040 __ LoadTrueFalseRoots(x0, x1);
3041 __ Csel(x0, x0, x1, eq);
3045 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3046 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
3047 public:
3048 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
3049 LInstanceOfKnownGlobal* instr)
3050 : LDeferredCode(codegen), instr_(instr) { }
3051 virtual void Generate() {
3052 codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
3054 virtual LInstruction* instr() { return instr_; }
3055 private:
3056 LInstanceOfKnownGlobal* instr_;
3057 };
3059 DeferredInstanceOfKnownGlobal* deferred =
3060 new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
3062 Label map_check, return_false, cache_miss, done;
3063 Register object = ToRegister(instr->value());
3064 Register result = ToRegister(instr->result());
3065 // x4 is expected in the associated deferred code and stub.
3066 Register map_check_site = x4;
3069 // This instruction is marked as call. We can clobber any register.
3070 DCHECK(instr->IsMarkedAsCall());
3072 // We must take into account that object is in x11.
3073 DCHECK(object.Is(x11));
3074 Register scratch = x10;
3076 // A Smi is not instance of anything.
3077 __ JumpIfSmi(object, &return_false);
3079 // This is the inlined call site instanceof cache. The two occurrences of the
3080 // hole value will be patched to the last map/result pair generated by the
3081 // stub.
3082 __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3084 // Below we use Factory::the_hole_value() on purpose instead of loading from
3085 // the root array to force relocation and later be able to patch with a
3087 InstructionAccurateScope scope(masm(), 5);
3088 __ bind(&map_check);
3089 // Will be patched with the cached map.
3090 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
3091 __ ldr(scratch, Immediate(Handle<Object>(cell)));
3092 __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
3093 __ cmp(map, scratch);
3094 __ b(&cache_miss, ne);
3095 // The address of this instruction is computed relative to the map check
3096 // above, so check the size of the code generated.
3097 DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
3098 // Will be patched with the cached result.
3099 __ ldr(result, Immediate(factory()->the_hole_value()));
3100 }
3101 __ B(&done);
3103 // The inlined call site cache did not match.
3104 // Check null and string before calling the deferred code.
3105 __ Bind(&cache_miss);
3106 // Compute the address of the map check. It must not be clobbered until the
3107 // InstanceOfStub has used it.
3108 __ Adr(map_check_site, &map_check);
3109 // Null is not instance of anything.
3110 __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
3112 // String values are not instances of anything.
3113 // Return false if the object is a string. Otherwise, jump to the deferred
3114 // code.
3115 // Note that we can't jump directly to deferred code from
3116 // IsObjectJSStringType, because it uses tbz for the jump and the deferred
3117 // code can be out of range.
3118 __ IsObjectJSStringType(object, scratch, NULL, &return_false);
3119 __ B(deferred->entry());
3121 __ Bind(&return_false);
3122 __ LoadRoot(result, Heap::kFalseValueRootIndex);
3124 // Here result is either true or false.
3125 __ Bind(deferred->exit());
3126 __ Bind(&done);
3130 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3131 Register result = ToRegister(instr->result());
3132 DCHECK(result.Is(x0)); // InstanceofStub returns its result in x0.
3133 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3134 flags = static_cast<InstanceofStub::Flags>(
3135 flags | InstanceofStub::kArgsInRegisters);
3136 flags = static_cast<InstanceofStub::Flags>(
3137 flags | InstanceofStub::kReturnTrueFalseObject);
3138 flags = static_cast<InstanceofStub::Flags>(
3139 flags | InstanceofStub::kCallSiteInlineCheck);
3141 PushSafepointRegistersScope scope(this);
3142 LoadContextFromDeferred(instr->context());
3144 // Prepare InstanceofStub arguments.
3145 DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
3146 __ LoadObject(InstanceofStub::right(), instr->function());
3148 InstanceofStub stub(isolate(), flags);
3149 CallCodeGeneric(stub.GetCode(),
3150 RelocInfo::CODE_TARGET,
3151 instr,
3152 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3153 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3154 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3156 // Put the result value into the result register slot.
3157 __ StoreToSafepointRegisterSlot(result, result);
3161 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3162 DoGap(instr);
3163 }
3166 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3167 Register value = ToRegister32(instr->value());
3168 DoubleRegister result = ToDoubleRegister(instr->result());
3169 __ Scvtf(result, value);
3173 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3174 DCHECK(ToRegister(instr->context()).is(cp));
3175 // The function is required to be in x1.
3176 DCHECK(ToRegister(instr->function()).is(x1));
3177 DCHECK(instr->HasPointerMap());
3179 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3180 if (known_function.is_null()) {
3181 LPointerMap* pointers = instr->pointer_map();
3182 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3183 ParameterCount count(instr->arity());
3184 __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3185 } else {
3186 CallKnownFunction(known_function,
3187 instr->hydrogen()->formal_parameter_count(),
3188 instr->arity(),
3189 instr,
3190 x1);
3191 }
3192 after_push_argument_ = false;
3196 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
3197 Register temp1 = ToRegister(instr->temp1());
3198 Register temp2 = ToRegister(instr->temp2());
3200 // Get the frame pointer for the calling frame.
3201 __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3203 // Skip the arguments adaptor frame if it exists.
3204 Label check_frame_marker;
3205 __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
3206 __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3207 __ B(ne, &check_frame_marker);
3208 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
3210 // Check the marker in the calling frame.
3211 __ Bind(&check_frame_marker);
3212 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
3214 EmitCompareAndBranch(
3215 instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3219 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
3220 Label* is_object = instr->TrueLabel(chunk_);
3221 Label* is_not_object = instr->FalseLabel(chunk_);
3222 Register value = ToRegister(instr->value());
3223 Register map = ToRegister(instr->temp1());
3224 Register scratch = ToRegister(instr->temp2());
3226 __ JumpIfSmi(value, is_not_object);
3227 __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
3229 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3231 // Check for undetectable objects.
3232 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
3233 __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
3235 // Check that instance type is in object type range.
3236 __ IsInstanceJSObjectType(map, scratch, NULL);
3237 // Flags have been updated by IsInstanceJSObjectType. We can now test the
3238 // flags for "le" condition to check if the object's type is a valid
3239 // JS object type.
3240 EmitBranch(instr, le);
3244 Condition LCodeGen::EmitIsString(Register input,
3245 Register temp1,
3246 Label* is_not_string,
3247 SmiCheck check_needed = INLINE_SMI_CHECK) {
3248 if (check_needed == INLINE_SMI_CHECK) {
3249 __ JumpIfSmi(input, is_not_string);
3250 }
3251 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
3253 return lt;
3254 }
3257 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
3258 Register val = ToRegister(instr->value());
3259 Register scratch = ToRegister(instr->temp());
3261 SmiCheck check_needed =
3262 instr->hydrogen()->value()->type().IsHeapObject()
3263 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3264 Condition true_cond =
3265 EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
3267 EmitBranch(instr, true_cond);
3271 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
3272 Register value = ToRegister(instr->value());
3273 STATIC_ASSERT(kSmiTag == 0);
3274 EmitTestAndBranch(instr, eq, value, kSmiTagMask);
3278 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
3279 Register input = ToRegister(instr->value());
3280 Register temp = ToRegister(instr->temp());
3282 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
3283 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3285 __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
3286 __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3288 EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
3292 static const char* LabelType(LLabel* label) {
3293 if (label->is_loop_header()) return " (loop header)";
3294 if (label->is_osr_entry()) return " (OSR entry)";
3295 return "";
3296 }
3299 void LCodeGen::DoLabel(LLabel* label) {
3300 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
3301 current_instruction_,
3302 label->hydrogen_value()->id(),
3303 label->block_id(),
3304 LabelType(label));
3306 __ Bind(label->label());
3307 current_block_ = label->block_id();
3312 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3313 Register context = ToRegister(instr->context());
3314 Register result = ToRegister(instr->result());
3315 __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
3316 if (instr->hydrogen()->RequiresHoleCheck()) {
3317 if (instr->hydrogen()->DeoptimizesOnHole()) {
3318 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3319 instr->environment());
3320 } else {
3321 Label not_the_hole;
3322 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
3323 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3324 __ Bind(&not_the_hole);
3325 }
3330 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3331 Register function = ToRegister(instr->function());
3332 Register result = ToRegister(instr->result());
3333 Register temp = ToRegister(instr->temp());
3335 // Get the prototype or initial map from the function.
3336 __ Ldr(result, FieldMemOperand(function,
3337 JSFunction::kPrototypeOrInitialMapOffset));
3339 // Check that the function has a prototype or an initial map.
3340 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3341 instr->environment());
3343 // If the function does not have an initial map, we're done.
3344 Label done;
3345 __ CompareObjectType(result, temp, temp, MAP_TYPE);
3346 __ B(ne, &done);
3348 // Get the prototype from the initial map.
3349 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3351 // All done.
3352 __ Bind(&done);
3356 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3357 Register result = ToRegister(instr->result());
3358 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
3359 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3360 if (instr->hydrogen()->RequiresHoleCheck()) {
3361 DeoptimizeIfRoot(
3362 result, Heap::kTheHoleValueRootIndex, instr->environment());
3367 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3368 DCHECK(ToRegister(instr->context()).is(cp));
3369 DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
3370 DCHECK(ToRegister(instr->result()).Is(x0));
3371 __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
3372 if (FLAG_vector_ics) {
3373 Register vector = ToRegister(instr->temp_vector());
3374 DCHECK(vector.is(LoadIC::VectorRegister()));
3375 __ Mov(vector, instr->hydrogen()->feedback_vector());
3376 // No need to allocate this register.
3377 DCHECK(LoadIC::SlotRegister().is(x0));
3378 __ Mov(LoadIC::SlotRegister(),
3379 Smi::FromInt(instr->hydrogen()->slot()));
3380 }
3381 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3382 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
3383 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3387 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
3388 Register key,
3389 Register base,
3390 Register scratch,
3391 bool key_is_smi,
3392 bool key_is_constant,
3393 int constant_key,
3394 ElementsKind elements_kind,
3395 int base_offset) {
3396 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3398 if (key_is_constant) {
3399 int key_offset = constant_key << element_size_shift;
3400 return MemOperand(base, key_offset + base_offset);
3401 }
3403 if (key_is_smi) {
3404 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3405 return MemOperand(scratch, base_offset);
3406 }
3408 if (base_offset == 0) {
3409 return MemOperand(base, key, SXTW, element_size_shift);
3410 }
3412 DCHECK(!AreAliased(scratch, key));
3413 __ Add(scratch, base, base_offset);
3414 return MemOperand(scratch, key, SXTW, element_size_shift);
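// A minimal scalar sketch of the address these MemOperands describe
// (illustrative, not V8 code; assumes <cstdint>, and assumes a smi keeps its
// 32-bit payload in the upper half of the word, so untag-and-scale is a
// single shift).
static uint64_t ElementAddress(uint64_t base, int64_t key, bool key_is_smi,
                               int shift, int base_offset) {
  int64_t index = key_is_smi ? (key >> 32)                 // smi untag
                             : static_cast<int32_t>(key);  // SXTW
  return base + base_offset + (static_cast<uint64_t>(index) << shift);
}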
3418 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3419 Register ext_ptr = ToRegister(instr->elements());
3420 Register scratch;
3421 ElementsKind elements_kind = instr->elements_kind();
3423 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3424 bool key_is_constant = instr->key()->IsConstantOperand();
3425 Register key = no_reg;
3426 int constant_key = 0;
3427 if (key_is_constant) {
3428 DCHECK(instr->temp() == NULL);
3429 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3430 if (constant_key & 0xf0000000) {
3431 Abort(kArrayIndexConstantValueTooBig);
3432 }
3433 } else {
3434 scratch = ToRegister(instr->temp());
3435 key = ToRegister(instr->key());
3436 }
3438 MemOperand mem_op =
3439 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3440 key_is_constant, constant_key,
3441 elements_kind,
3442 instr->base_offset());
3444 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
3445 (elements_kind == FLOAT32_ELEMENTS)) {
3446 DoubleRegister result = ToDoubleRegister(instr->result());
3447 __ Ldr(result.S(), mem_op);
3448 __ Fcvt(result, result.S());
3449 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
3450 (elements_kind == FLOAT64_ELEMENTS)) {
3451 DoubleRegister result = ToDoubleRegister(instr->result());
3452 __ Ldr(result, mem_op);
3454 Register result = ToRegister(instr->result());
3456 switch (elements_kind) {
3457 case EXTERNAL_INT8_ELEMENTS:
3458 case INT8_ELEMENTS:
3459 __ Ldrsb(result, mem_op);
3460 break;
3461 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3462 case EXTERNAL_UINT8_ELEMENTS:
3463 case UINT8_ELEMENTS:
3464 case UINT8_CLAMPED_ELEMENTS:
3465 __ Ldrb(result, mem_op);
3466 break;
3467 case EXTERNAL_INT16_ELEMENTS:
3468 case INT16_ELEMENTS:
3469 __ Ldrsh(result, mem_op);
3470 break;
3471 case EXTERNAL_UINT16_ELEMENTS:
3472 case UINT16_ELEMENTS:
3473 __ Ldrh(result, mem_op);
3474 break;
3475 case EXTERNAL_INT32_ELEMENTS:
3476 case INT32_ELEMENTS:
3477 __ Ldrsw(result, mem_op);
3478 break;
3479 case EXTERNAL_UINT32_ELEMENTS:
3480 case UINT32_ELEMENTS:
3481 __ Ldr(result.W(), mem_op);
3482 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3483 // Deopt if value > 0x80000000.
3484 __ Tst(result, 0xFFFFFFFF80000000);
3485 DeoptimizeIf(ne, instr->environment());
3486 }
3487 break;
3488 case FLOAT32_ELEMENTS:
3489 case FLOAT64_ELEMENTS:
3490 case EXTERNAL_FLOAT32_ELEMENTS:
3491 case EXTERNAL_FLOAT64_ELEMENTS:
3492 case FAST_HOLEY_DOUBLE_ELEMENTS:
3493 case FAST_HOLEY_ELEMENTS:
3494 case FAST_HOLEY_SMI_ELEMENTS:
3495 case FAST_DOUBLE_ELEMENTS:
3496 case FAST_ELEMENTS:
3497 case FAST_SMI_ELEMENTS:
3498 case DICTIONARY_ELEMENTS:
3499 case SLOPPY_ARGUMENTS_ELEMENTS:
3500 UNREACHABLE();
3501 }
3502 }
3503 }
3507 MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
3508 Register elements,
3509 Register key,
3510 bool key_is_tagged,
3511 ElementsKind elements_kind,
3512 Representation representation,
3513 int base_offset) {
3514 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3515 STATIC_ASSERT(kSmiTag == 0);
3516 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3518 // Even though the HLoad/StoreKeyed instructions force the input
3519 // representation for the key to be an integer, the input gets replaced during
3520 // bounds check elimination with the index argument to the bounds check, which
3521 // can be tagged, so that case must be handled here, too.
3522 if (key_is_tagged) {
3523 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3524 if (representation.IsInteger32()) {
3525 DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3526 // Read or write only the smi payload in the case of fast smi arrays.
3527 return UntagSmiMemOperand(base, base_offset);
3528 }
3529 return MemOperand(base, base_offset);
3530 }
3532 // Sign extend key because it could be a 32-bit negative value or contain
3533 // garbage in the top 32-bits. The address computation happens in 64-bit.
3534 DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
3535 if (representation.IsInteger32()) {
3536 DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3537 // Read or write only the smi payload in the case of fast smi arrays.
3538 __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3539 return UntagSmiMemOperand(base, base_offset);
3540 }
3541 __ Add(base, elements, base_offset);
3542 return MemOperand(base, key, SXTW, element_size_shift);
3548 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3549 Register elements = ToRegister(instr->elements());
3550 DoubleRegister result = ToDoubleRegister(instr->result());
3551 MemOperand mem_op;
3553 if (instr->key()->IsConstantOperand()) {
3554 DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
3555 (instr->temp() == NULL));
3557 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3558 if (constant_key & 0xf0000000) {
3559 Abort(kArrayIndexConstantValueTooBig);
3560 }
3561 int offset = instr->base_offset() + constant_key * kDoubleSize;
3562 mem_op = MemOperand(elements, offset);
3563 } else {
3564 Register load_base = ToRegister(instr->temp());
3565 Register key = ToRegister(instr->key());
3566 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3567 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3568 instr->hydrogen()->elements_kind(),
3569 instr->hydrogen()->representation(),
3570 instr->base_offset());
3571 }
3573 __ Ldr(result, mem_op);
3575 if (instr->hydrogen()->RequiresHoleCheck()) {
3576 Register scratch = ToRegister(instr->temp());
3577 // Detect the hole NaN by adding one to the integer representation of the
3578 // result, and checking for overflow.
3579 STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
3580 __ Ldr(scratch, mem_op);
3581 __ Cmn(scratch, 1);
3582 DeoptimizeIf(vs, instr->environment());
3583 }
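// Why Cmn(scratch, 1) finds the hole: kHoleNanInt64 is INT64_MAX (see the
// STATIC_ASSERT above), the only 64-bit value whose increment overflows and
// sets the V flag. A minimal scalar sketch (illustrative, not V8 code;
// assumes <cstdint>):
static bool IsHoleBits(int64_t bits) {
  return bits == INT64_MAX;  // what the 'vs' branch above detects
}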
3587 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3588 Register elements = ToRegister(instr->elements());
3589 Register result = ToRegister(instr->result());
3590 MemOperand mem_op;
3592 Representation representation = instr->hydrogen()->representation();
3593 if (instr->key()->IsConstantOperand()) {
3594 DCHECK(instr->temp() == NULL);
3595 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3596 int offset = instr->base_offset() +
3597 ToInteger32(const_operand) * kPointerSize;
3598 if (representation.IsInteger32()) {
3599 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
3600 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3601 STATIC_ASSERT(kSmiTag == 0);
3602 mem_op = UntagSmiMemOperand(elements, offset);
3604 mem_op = MemOperand(elements, offset);
3605 }
3606 } else {
3607 Register load_base = ToRegister(instr->temp());
3608 Register key = ToRegister(instr->key());
3609 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3611 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3612 instr->hydrogen()->elements_kind(),
3613 representation, instr->base_offset());
3614 }
3616 __ Load(result, mem_op, representation);
3618 if (instr->hydrogen()->RequiresHoleCheck()) {
3619 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3620 DeoptimizeIfNotSmi(result, instr->environment());
3621 } else {
3622 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3623 instr->environment());
3629 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3630 DCHECK(ToRegister(instr->context()).is(cp));
3631 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3632 DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
3633 if (FLAG_vector_ics) {
3634 Register vector = ToRegister(instr->temp_vector());
3635 DCHECK(vector.is(LoadIC::VectorRegister()));
3636 __ Mov(vector, instr->hydrogen()->feedback_vector());
3637 // No need to allocate this register.
3638 DCHECK(LoadIC::SlotRegister().is(x0));
3639 __ Mov(LoadIC::SlotRegister(),
3640 Smi::FromInt(instr->hydrogen()->slot()));
3641 }
3643 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3644 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3646 DCHECK(ToRegister(instr->result()).Is(x0));
3650 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3651 HObjectAccess access = instr->hydrogen()->access();
3652 int offset = access.offset();
3653 Register object = ToRegister(instr->object());
3655 if (access.IsExternalMemory()) {
3656 Register result = ToRegister(instr->result());
3657 __ Load(result, MemOperand(object, offset), access.representation());
3658 return;
3659 }
3661 if (instr->hydrogen()->representation().IsDouble()) {
3662 FPRegister result = ToDoubleRegister(instr->result());
3663 __ Ldr(result, FieldMemOperand(object, offset));
3664 return;
3665 }
3667 Register result = ToRegister(instr->result());
3668 Register source;
3669 if (access.IsInobject()) {
3670 source = object;
3671 } else {
3672 // Load the properties array, using result as a scratch register.
3673 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3674 source = result;
3675 }
3677 if (access.representation().IsSmi() &&
3678 instr->hydrogen()->representation().IsInteger32()) {
3679 // Read int value directly from upper half of the smi.
3680 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3681 STATIC_ASSERT(kSmiTag == 0);
3682 __ Load(result, UntagSmiFieldMemOperand(source, offset),
3683 Representation::Integer32());
3684 } else {
3685 __ Load(result, FieldMemOperand(source, offset), access.representation());
3686 }
3690 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3691 DCHECK(ToRegister(instr->context()).is(cp));
3692 // LoadIC expects name and receiver in registers.
3693 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3694 __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
3695 if (FLAG_vector_ics) {
3696 Register vector = ToRegister(instr->temp_vector());
3697 DCHECK(vector.is(LoadIC::VectorRegister()));
3698 __ Mov(vector, instr->hydrogen()->feedback_vector());
3699 // No need to allocate this register.
3700 DCHECK(LoadIC::SlotRegister().is(x0));
3701 __ Mov(LoadIC::SlotRegister(),
3702 Smi::FromInt(instr->hydrogen()->slot()));
3703 }
3705 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3706 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3708 DCHECK(ToRegister(instr->result()).is(x0));
3712 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3713 Register result = ToRegister(instr->result());
3714 __ LoadRoot(result, instr->index());
3718 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
3719 Register result = ToRegister(instr->result());
3720 Register map = ToRegister(instr->value());
3721 __ EnumLengthSmi(result, map);
3725 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3726 Representation r = instr->hydrogen()->value()->representation();
3727 if (r.IsDouble()) {
3728 DoubleRegister input = ToDoubleRegister(instr->value());
3729 DoubleRegister result = ToDoubleRegister(instr->result());
3730 __ Fabs(result, input);
3731 } else if (r.IsSmi() || r.IsInteger32()) {
3732 Register input = r.IsSmi() ? ToRegister(instr->value())
3733 : ToRegister32(instr->value());
3734 Register result = r.IsSmi() ? ToRegister(instr->result())
3735 : ToRegister32(instr->result());
3736 __ Abs(result, input);
3737 DeoptimizeIf(vs, instr->environment());
3738 }
3739 }
3742 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3743 Label* exit,
3744 Label* allocation_entry) {
3745 // Handle the tricky cases of MathAbsTagged:
3746 // - HeapNumber inputs.
3747 // - Negative inputs produce a positive result, so a new HeapNumber is
3748 // allocated to hold it.
3749 // - Positive inputs are returned as-is, since there is no need to allocate
3750 // a new HeapNumber for the result.
3751 // - The (smi) input -0x80000000 produces +0x80000000, which does not fit
3752 // a smi. In this case, the inline code sets the result and jumps directly
3753 // to the allocation_entry label.
3754 DCHECK(instr->context() != NULL);
3755 DCHECK(ToRegister(instr->context()).is(cp));
3756 Register input = ToRegister(instr->value());
3757 Register temp1 = ToRegister(instr->temp1());
3758 Register temp2 = ToRegister(instr->temp2());
3759 Register result_bits = ToRegister(instr->temp3());
3760 Register result = ToRegister(instr->result());
3762 Label runtime_allocation;
3764 // Deoptimize if the input is not a HeapNumber.
3765 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
3766 DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
3767 instr->environment());
3769 // If the argument is positive, we can return it as-is, without any need to
3770 // allocate a new HeapNumber for the result. We have to do this in integer
3771 // registers (rather than with fabs) because we need to be able to distinguish
3772 // the two zeroes.
3773 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
3774 __ Mov(result, input);
3775 __ Tbz(result_bits, kXSignBit, exit);
3777 // Calculate abs(input) by clearing the sign bit.
3778 __ Bic(result_bits, result_bits, kXSignMask);
3780 // Allocate a new HeapNumber to hold the result.
3781 // result_bits The bit representation of the (double) result.
3782 __ Bind(allocation_entry);
3783 __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
3784 // The inline (non-deferred) code will store result_bits into result.
3787 __ Bind(&runtime_allocation);
3788 if (FLAG_debug_code) {
3789 // Because result is in the pointer map, we need to make sure it has a valid
3790 // tagged value before we call the runtime. We speculatively set it to the
3791 // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
3792 // be valid.
3793 Label result_ok;
3794 Register input = ToRegister(instr->value());
3795 __ JumpIfSmi(result, &result_ok);
3796 __ Cmp(input, result);
3797 __ Assert(eq, kUnexpectedValue);
3798 __ Bind(&result_ok);
3799 }
3801 { PushSafepointRegistersScope scope(this);
3802 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3803 instr->context());
3804 __ StoreToSafepointRegisterSlot(x0, result);
3805 }
3806 // The inline (non-deferred) code will store result_bits into result.
3810 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
3811 // Class for deferred case.
3812 class DeferredMathAbsTagged: public LDeferredCode {
3813 public:
3814 DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
3815 : LDeferredCode(codegen), instr_(instr) { }
3816 virtual void Generate() {
3817 codegen()->DoDeferredMathAbsTagged(instr_, exit(),
3818 allocation_entry());
3819 }
3820 virtual LInstruction* instr() { return instr_; }
3821 Label* allocation_entry() { return &allocation; }
3822 private:
3823 LMathAbsTagged* instr_;
3824 Label allocation;
3825 };
3827 // TODO(jbramley): The early-exit mechanism would skip the new frame handling
3828 // in GenerateDeferredCode. Tidy this up.
3829 DCHECK(!NeedsDeferredFrame());
3831 DeferredMathAbsTagged* deferred =
3832 new(zone()) DeferredMathAbsTagged(this, instr);
3834 DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
3835 instr->hydrogen()->value()->representation().IsSmi());
3836 Register input = ToRegister(instr->value());
3837 Register result_bits = ToRegister(instr->temp3());
3838 Register result = ToRegister(instr->result());
3841 // Handle smis inline.
3842 // We can treat smis as 64-bit integers, since the (low-order) tag bits will
3843 // never get set by the negation. This is therefore the same as the Integer32
3844 // case in DoMathAbs, except that it operates on 64-bit values.
3845 STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
3846 Label done;
3847 __ JumpIfNotSmi(input, deferred->entry());
3849 __ Abs(result, input, NULL, &done);
3851 // The result is the magnitude (abs) of the smallest value a smi can
3852 // represent, encoded as a double.
3853 __ Mov(result_bits, double_to_rawbits(0x80000000));
3854 __ B(deferred->allocation_entry());
3856 __ Bind(deferred->exit());
3857 __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
3858 __ Bind(&done);
3859 }
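// Illustrative walk-through (added for exposition, not from the original
// source), assuming 32-bit smi values tagged in the upper word:
//   abs(5)           -> Abs on the 64-bit tagged value; tag bits stay 0.
//   abs(-5)          -> same; the result is returned as a smi via &done.
//   abs(-0x80000000) -> +0x80000000 does not fit a smi, so result_bits is
//                       set to the double encoding of 2^31 and control
//                       falls through to the HeapNumber allocation entry.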
3863 void LCodeGen::DoMathExp(LMathExp* instr) {
3864 DoubleRegister input = ToDoubleRegister(instr->value());
3865 DoubleRegister result = ToDoubleRegister(instr->result());
3866 DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
3867 DoubleRegister double_temp2 = double_scratch();
3868 Register temp1 = ToRegister(instr->temp1());
3869 Register temp2 = ToRegister(instr->temp2());
3870 Register temp3 = ToRegister(instr->temp3());
3872 MathExpGenerator::EmitMathExp(masm(), input, result,
3873 double_temp1, double_temp2,
3874 temp1, temp2, temp3);
3878 void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
3879 DoubleRegister input = ToDoubleRegister(instr->value());
3880 DoubleRegister result = ToDoubleRegister(instr->result());
3882 __ Frintm(result, input);
3886 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
3887 DoubleRegister input = ToDoubleRegister(instr->value());
3888 Register result = ToRegister(instr->result());
3890 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3891 DeoptimizeIfMinusZero(input, instr->environment());
3892 }
3894 __ Fcvtms(result, input);
3896 // Check that the result fits into a 32-bit integer.
3897 // - The result did not overflow.
3898 __ Cmp(result, Operand(result, SXTW));
3899 // - The input was not NaN.
3900 __ Fccmp(input, input, NoFlag, eq);
3901 DeoptimizeIf(ne, instr->environment());
3905 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
3906 Register dividend = ToRegister32(instr->dividend());
3907 Register result = ToRegister32(instr->result());
3908 int32_t divisor = instr->divisor();
3910 // If the divisor is 1, return the dividend.
3911 if (divisor == 1) {
3912 __ Mov(result, dividend, kDiscardForSameWReg);
3913 return;
3914 }
3916 // If the divisor is positive, things are easy: There can be no deopts and we
3917 // can simply do an arithmetic right shift.
3918 int32_t shift = WhichPowerOf2Abs(divisor);
3919 if (divisor > 1) {
3920 __ Mov(result, Operand(dividend, ASR, shift));
3921 return;
3922 }
3924 // If the divisor is negative, we have to negate and handle edge cases.
3925 __ Negs(result, dividend);
3926 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3927 DeoptimizeIf(eq, instr->environment());
3928 }
3930 // Dividing by -1 is basically negation, unless we overflow.
3931 if (divisor == -1) {
3932 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3933 DeoptimizeIf(vs, instr->environment());
3934 }
3935 return;
3936 }
3938 // If the negation could not overflow, simply shifting is OK.
3939 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3940 __ Mov(result, Operand(dividend, ASR, shift));
3941 return;
3942 }
3944 __ Asr(result, result, shift);
3945 __ Csel(result, result, kMinInt / divisor, vc);
3946 }
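// Worked example (added for exposition, not from the original source):
// divisor == -4, so shift == 2.
//   dividend = 9:  Negs -> -9, Asr 2 -> -3   (floor(9 / -4) == -3)
//   dividend = 8:  Negs -> -8, Asr 2 -> -2   (floor(8 / -4) == -2)
// Arithmetically shifting the negated dividend rounds towards -infinity,
// which is exactly the flooring semantics. Only kMinInt needs the final
// Csel: Negs(kMinInt) overflows (V set), and the quotient is then
// materialized as the constant kMinInt / divisor instead.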
3949 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3950 Register dividend = ToRegister32(instr->dividend());
3951 int32_t divisor = instr->divisor();
3952 Register result = ToRegister32(instr->result());
3953 DCHECK(!AreAliased(dividend, result));
3955 if (divisor == 0) {
3956 Deoptimize(instr->environment());
3957 return;
3958 }
3960 // Check for (0 / -x) that will produce negative zero.
3961 HMathFloorOfDiv* hdiv = instr->hydrogen();
3962 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
3963 DeoptimizeIfZero(dividend, instr->environment());
3964 }
3966 // Easy case: We need no dynamic check for the dividend and the flooring
3967 // division is the same as the truncating division.
3968 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
3969 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
3970 __ TruncatingDiv(result, dividend, Abs(divisor));
3971 if (divisor < 0) __ Neg(result, result);
3972 return;
3973 }
3975 // In the general case we may need to adjust before and after the truncating
3976 // division to get a flooring division.
3977 Register temp = ToRegister32(instr->temp());
3978 DCHECK(!AreAliased(temp, dividend, result));
3979 Label needs_adjustment, done;
3980 __ Cmp(dividend, 0);
3981 __ B(divisor > 0 ? lt : gt, &needs_adjustment);
3982 __ TruncatingDiv(result, dividend, Abs(divisor));
3983 if (divisor < 0) __ Neg(result, result);
3984 __ B(&done);
3985 __ Bind(&needs_adjustment);
3986 __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
3987 __ TruncatingDiv(result, temp, Abs(divisor));
3988 if (divisor < 0) __ Neg(result, result);
3989 __ Sub(result, result, Operand(1));
3990 __ Bind(&done);
3991 }
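// Worked example (added for exposition, not from the original source):
// divisor == 5.
//   dividend = -7: signs differ, so the adjustment path is taken:
//     temp = -7 + 1 = -6; trunc(-6 / 5) = -1; result = -1 - 1 = -2,
//     which is floor(-7 / 5).
//   dividend = 13: same sign, so plain truncating division gives 2 directly.
// The +/-1 nudge makes truncation and flooring agree before the final
// subtraction, so the remainder never needs to be examined.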
3994 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
3995 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
3996 Register dividend = ToRegister32(instr->dividend());
3997 Register divisor = ToRegister32(instr->divisor());
3998 Register remainder = ToRegister32(instr->temp());
3999 Register result = ToRegister32(instr->result());
4001 // This can't cause an exception on ARM, so we can speculatively
4002 // execute it now.
4003 __ Sdiv(result, dividend, divisor);
4006 DeoptimizeIfZero(divisor, instr->environment());
4008 // Check for (kMinInt / -1).
4009 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
4010 // The V flag will be set iff dividend == kMinInt.
4011 __ Cmp(dividend, 1);
4012 __ Ccmp(divisor, -1, NoFlag, vs);
4013 DeoptimizeIf(eq, instr->environment());
4014 }
4016 // Check for (0 / -x) that will produce negative zero.
4017 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4018 __ Cmp(divisor, 0);
4019 __ Ccmp(dividend, 0, ZFlag, mi);
4020 // "divisor" can't be null because the code would have already been
4021 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
4022 // In this case we need to deoptimize to produce a -0.
4023 DeoptimizeIf(eq, instr->environment());
4024 }
4026 Label done;
4027 // If both operands have the same sign then we are done.
4028 __ Eor(remainder, dividend, divisor);
4029 __ Tbz(remainder, kWSignBit, &done);
4031 // Check if the result needs to be corrected.
4032 __ Msub(remainder, result, divisor, dividend);
4033 __ Cbz(remainder, &done);
4034 __ Sub(result, result, 1);
4036 __ Bind(&done);
4037 }
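// Worked example (added for exposition, not from the original source):
// dividend = -7, divisor = 2.
//   Sdiv truncates: result = -3 (trunc(-7 / 2)).
//   The signs differ (the Eor sign bit is set), so check the remainder:
//     Msub: remainder = -7 - (-3 * 2) = -1, which is non-zero,
//     so Sub corrects result to -4 == floor(-7 / 2).
// When the division is exact, the Cbz skips the correction.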
4040 void LCodeGen::DoMathLog(LMathLog* instr) {
4041 DCHECK(instr->IsMarkedAsCall());
4042 DCHECK(ToDoubleRegister(instr->value()).is(d0));
4043 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
4044 0, 1);
4045 DCHECK(ToDoubleRegister(instr->result()).Is(d0));
4049 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4050 Register input = ToRegister32(instr->value());
4051 Register result = ToRegister32(instr->result());
4052 __ Clz(result, input);
4056 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4057 DoubleRegister input = ToDoubleRegister(instr->value());
4058 DoubleRegister result = ToDoubleRegister(instr->result());
4059 Label done;
4061 // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
4062 // Math.pow(-Infinity, 0.5) == +Infinity
4063 // Math.pow(-0.0, 0.5) == +0.0
4065 // Catch -infinity inputs first.
4066 // TODO(jbramley): A constant infinity register would be helpful here.
4067 __ Fmov(double_scratch(), kFP64NegativeInfinity);
4068 __ Fcmp(double_scratch(), input);
4069 __ Fabs(result, input);
4070 __ B(eq, &done);
4072 // Add +0.0 to convert -0.0 to +0.0.
4073 __ Fadd(double_scratch(), input, fp_zero);
4074 __ Fsqrt(result, double_scratch());
4076 __ Bind(&done);
4077 }
4080 void LCodeGen::DoPower(LPower* instr) {
4081 Representation exponent_type = instr->hydrogen()->right()->representation();
4082 // Having marked this as a call, we can use any registers.
4083 // Just make sure that the input/output registers are the expected ones.
4084 DCHECK(!instr->right()->IsDoubleRegister() ||
4085 ToDoubleRegister(instr->right()).is(d1));
4086 DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
4087 ToRegister(instr->right()).is(x11));
4088 DCHECK(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
4089 DCHECK(ToDoubleRegister(instr->left()).is(d0));
4090 DCHECK(ToDoubleRegister(instr->result()).is(d0));
4092 if (exponent_type.IsSmi()) {
4093 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4094 __ CallStub(&stub);
4095 } else if (exponent_type.IsTagged()) {
4096 Label no_deopt;
4097 __ JumpIfSmi(x11, &no_deopt);
4098 __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
4099 DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
4100 instr->environment());
4101 __ Bind(&no_deopt);
4102 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4103 __ CallStub(&stub);
4104 } else if (exponent_type.IsInteger32()) {
4105 // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
4106 // supports large integer exponents.
4107 Register exponent = ToRegister(instr->right());
4108 __ Sxtw(exponent, exponent);
4109 MathPowStub stub(isolate(), MathPowStub::INTEGER);
4110 __ CallStub(&stub);
4111 } else {
4112 DCHECK(exponent_type.IsDouble());
4113 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
4114 __ CallStub(&stub);
4115 }
4116 }
4119 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
4120 DoubleRegister input = ToDoubleRegister(instr->value());
4121 DoubleRegister result = ToDoubleRegister(instr->result());
4122 DoubleRegister scratch_d = double_scratch();
4124 DCHECK(!AreAliased(input, result, scratch_d));
4126 Label done;
4128 __ Frinta(result, input);
4129 __ Fcmp(input, 0.0);
4130 __ Fccmp(result, input, ZFlag, lt);
4131 // The result is correct if the input was in [-0, +infinity], or was a
4132 // negative integral value.
4133 __ B(eq, &done);
4135 // Here the input is negative, non integral, with an exponent lower than 52.
4136 // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
4137 // case. So we can safely add 0.5.
4138 __ Fmov(scratch_d, 0.5);
4139 __ Fadd(result, input, scratch_d);
4140 __ Frintm(result, result);
4141 // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
4142 __ Fabs(result, result);
4143 __ Fneg(result, result);
4145 __ Bind(&done);
4146 }
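// Worked examples (added for exposition, not from the original source):
//   input = 2.5  -> Frinta gives 3.0; input >= 0, so the result is kept.
//   input = -2.5 -> Frinta gives -3.0 (ties away from zero), but
//                   Math.round wants -2: the slow path adds 0.5 (-2.0)
//                   and Frintm keeps -2.0.
//   input = -0.3 -> -0.3 + 0.5 = 0.2, Frintm gives +0.0, and the
//                   Fabs/Fneg pair forces the required -0.0.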
4149 void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
4150 DoubleRegister input = ToDoubleRegister(instr->value());
4151 DoubleRegister temp = ToDoubleRegister(instr->temp1());
4152 DoubleRegister dot_five = double_scratch();
4153 Register result = ToRegister(instr->result());
4154 Label done;
4156 // Math.round() rounds to the nearest integer, with ties going towards
4157 // +infinity. This does not match any IEEE-754 rounding mode.
4158 // - Infinities and NaNs are propagated unchanged, but cause deopts because
4159 // they can't be represented as integers.
4160 // - The sign of the result is the same as the sign of the input. This means
4161 // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
4162 // result of -0.0.
4164 // Add 0.5 and round towards -infinity.
4165 __ Fmov(dot_five, 0.5);
4166 __ Fadd(temp, input, dot_five);
4167 __ Fcvtms(result, temp);
4169 // The result is correct if:
4170 // result is not 0, as the input could be NaN or [-0.5, -0.0].
4171 // result is not 1, as 0.499...94 will wrongly map to 1.
4172 // result fits in 32 bits.
4173 __ Cmp(result, Operand(result.W(), SXTW));
4174 __ Ccmp(result, 1, ZFlag, eq);
4175 __ B(hi, &done);
4177 // At this point, we have to handle possible inputs of NaN or numbers in the
4178 // range [-0.5, 1.5[, or numbers larger than 32 bits.
4180 // Deoptimize if the result > 1, as it must be larger than 32 bits.
4181 __ Cmp(result, 1);
4182 DeoptimizeIf(hi, instr->environment());
4184 // Deoptimize for negative inputs, which at this point are only numbers in
4185 // the range [-0.5, -0.0]
4186 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4187 __ Fmov(result, input);
4188 DeoptimizeIfNegative(result, instr->environment());
4189 }
4191 // Deoptimize if the input was NaN.
4192 __ Fcmp(input, dot_five);
4193 DeoptimizeIf(vs, instr->environment());
4195 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
4196 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
4197 // else 0; we avoid dealing with 0.499...94 directly.
4198 __ Cset(result, ge);
4199 __ Bind(&done);
4200 }
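// Boundary note (added for exposition, not from the original source):
// 0.49999999999999994 is the largest double below 0.5, yet adding 0.5 to it
// rounds up to exactly 1.0, so the Fcvtms result would wrongly be 1. That is
// why result == 1 is sent to the slow path above, where the final Fcmp
// against dot_five and Cset(ge) returns 1 only for genuine inputs >= 0.5.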
4203 void LCodeGen::DoMathFround(LMathFround* instr) {
4204 DoubleRegister input = ToDoubleRegister(instr->value());
4205 DoubleRegister result = ToDoubleRegister(instr->result());
4206 __ Fcvt(result.S(), input);
4207 __ Fcvt(result, result.S());
4211 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4212 DoubleRegister input = ToDoubleRegister(instr->value());
4213 DoubleRegister result = ToDoubleRegister(instr->result());
4214 __ Fsqrt(result, input);
4218 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
4219 HMathMinMax::Operation op = instr->hydrogen()->operation();
4220 if (instr->hydrogen()->representation().IsInteger32()) {
4221 Register result = ToRegister32(instr->result());
4222 Register left = ToRegister32(instr->left());
4223 Operand right = ToOperand32I(instr->right());
4225 __ Cmp(left, right);
4226 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4227 } else if (instr->hydrogen()->representation().IsSmi()) {
4228 Register result = ToRegister(instr->result());
4229 Register left = ToRegister(instr->left());
4230 Operand right = ToOperand(instr->right());
4232 __ Cmp(left, right);
4233 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4235 DCHECK(instr->hydrogen()->representation().IsDouble());
4236 DoubleRegister result = ToDoubleRegister(instr->result());
4237 DoubleRegister left = ToDoubleRegister(instr->left());
4238 DoubleRegister right = ToDoubleRegister(instr->right());
4240 if (op == HMathMinMax::kMathMax) {
4241 __ Fmax(result, left, right);
4242 } else {
4243 DCHECK(op == HMathMinMax::kMathMin);
4244 __ Fmin(result, left, right);
4250 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
4251 Register dividend = ToRegister32(instr->dividend());
4252 int32_t divisor = instr->divisor();
4253 DCHECK(dividend.is(ToRegister32(instr->result())));
4255 // Theoretically, a variation of the branch-free code for integer division by
4256 // a power of 2 (calculating the remainder via an additional multiplication
4257 // (which gets simplified to an 'and') and subtraction) should be faster, and
4258 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
4259 // indicate that positive dividends are heavily favored, so the branching
4260 // version performs better.
4261 HMod* hmod = instr->hydrogen();
4262 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
4263 Label dividend_is_not_negative, done;
4264 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
4265 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
4266 // Note that this is correct even for kMinInt operands.
4267 __ Neg(dividend, dividend);
4268 __ And(dividend, dividend, mask);
4269 __ Negs(dividend, dividend);
4270 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4271 DeoptimizeIf(eq, instr->environment());
4272 }
4273 __ B(&done);
4274 }
4276 __ bind(&dividend_is_not_negative);
4277 __ And(dividend, dividend, mask);
4278 __ bind(&done);
4279 }
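// Worked example (added for exposition, not from the original source):
// divisor = 4 or -4, so mask = 3.
//   dividend = 7:  non-negative path, 7 & 3 = 3         (7 % 4 == 3).
//   dividend = -7: Neg -> 7, And -> 3, Negs -> -3       (-7 % 4 == -3).
//   dividend = -8: Neg -> 8, And -> 0, Negs sets Z, deopting when a -0
//                  result must be distinguished from +0.
// JavaScript's % takes the sign of the dividend, hence the sign round-trip.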
4282 void LCodeGen::DoModByConstI(LModByConstI* instr) {
4283 Register dividend = ToRegister32(instr->dividend());
4284 int32_t divisor = instr->divisor();
4285 Register result = ToRegister32(instr->result());
4286 Register temp = ToRegister32(instr->temp());
4287 DCHECK(!AreAliased(dividend, result, temp));
4289 if (divisor == 0) {
4290 Deoptimize(instr->environment());
4291 return;
4292 }
4294 __ TruncatingDiv(result, dividend, Abs(divisor));
4295 __ Sxtw(dividend.X(), dividend);
4296 __ Mov(temp, Abs(divisor));
4297 __ Smsubl(result.X(), result, temp, dividend.X());
4299 // Check for negative zero.
4300 HMod* hmod = instr->hydrogen();
4301 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4302 Label remainder_not_zero;
4303 __ Cbnz(result, &remainder_not_zero);
4304 DeoptimizeIfNegative(dividend, instr->environment());
4305 __ bind(&remainder_not_zero);
4306 }
4307 }
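// Sketch of the arithmetic (added for exposition, not from the original
// source): TruncatingDiv computes result = trunc(dividend / |divisor|) with
// a multiply-high sequence, and Smsubl then forms the remainder in one
// instruction:
//   remainder = dividend - result * |divisor|
// e.g. dividend = -7, divisor = 5: result = -1, remainder = -7 - (-5) = -2,
// matching JavaScript's -7 % 5 == -2. The divisor's sign is irrelevant to %,
// which is why Abs(divisor) suffices.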
4310 void LCodeGen::DoModI(LModI* instr) {
4311 Register dividend = ToRegister32(instr->left());
4312 Register divisor = ToRegister32(instr->right());
4313 Register result = ToRegister32(instr->result());
4315 Label done;
4316 // modulo = dividend - quotient * divisor
4317 __ Sdiv(result, dividend, divisor);
4318 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
4319 DeoptimizeIfZero(divisor, instr->environment());
4320 }
4321 __ Msub(result, result, divisor, dividend);
4322 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4323 __ Cbnz(result, &done);
4324 DeoptimizeIfNegative(dividend, instr->environment());
4325 }
4326 __ Bind(&done);
4327 }
4330 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4331 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
4332 bool is_smi = instr->hydrogen()->representation().IsSmi();
4333 Register result =
4334 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
4335 Register left =
4336 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
4337 int32_t right = ToInteger32(instr->right());
4338 DCHECK((right > -kMaxInt) && (right < kMaxInt));
4340 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4341 bool bailout_on_minus_zero =
4342 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4344 if (bailout_on_minus_zero) {
4345 if (right < 0) {
4346 // The result is -0 if right is negative and left is zero.
4347 DeoptimizeIfZero(left, instr->environment());
4348 } else if (right == 0) {
4349 // The result is -0 if the right is zero and the left is negative.
4350 DeoptimizeIfNegative(left, instr->environment());
4351 }
4352 }
4354 switch (right) {
4355 // Cases which can detect overflow.
4356 case -1:
4357 if (can_overflow) {
4358 // Only 0x80000000 can overflow here.
4359 __ Negs(result, left);
4360 DeoptimizeIf(vs, instr->environment());
4361 } else {
4362 __ Neg(result, left);
4363 }
4364 break;
4365 case 0:
4366 // This case can never overflow.
4367 __ Mov(result, 0, kDiscardForSameWReg);
4368 break;
4369 case 1:
4370 // This case can never overflow.
4371 __ Mov(result, left, kDiscardForSameWReg);
4372 break;
4373 case 2:
4374 if (can_overflow) {
4375 __ Adds(result, left, left);
4376 DeoptimizeIf(vs, instr->environment());
4377 } else {
4378 __ Add(result, left, left);
4379 }
4380 break;
4382 default:
4383 // Multiplication by constant powers of two (and some related values)
4384 // can be done efficiently with shifted operands.
4385 int32_t right_abs = Abs(right);
4387 if (IsPowerOf2(right_abs)) {
4388 int right_log2 = WhichPowerOf2(right_abs);
4390 if (can_overflow) {
4391 Register scratch = result;
4392 DCHECK(!AreAliased(scratch, left));
4393 __ Cls(scratch, left);
4394 __ Cmp(scratch, right_log2);
4395 DeoptimizeIf(lt, instr->environment());
4396 }
4398 if (right >= 0) {
4399 // result = left << log2(right)
4400 __ Lsl(result, left, right_log2);
4401 } else {
4402 // result = -left << log2(-right)
4403 if (can_overflow) {
4404 __ Negs(result, Operand(left, LSL, right_log2));
4405 DeoptimizeIf(vs, instr->environment());
4406 } else {
4407 __ Neg(result, Operand(left, LSL, right_log2));
4408 }
4409 }
4410 } else {
4414 // For the following cases, we could perform a conservative overflow check
4415 // with CLS as above. However the few cycles saved are likely not worth
4416 // the risk of deoptimizing more often than required.
4417 DCHECK(!can_overflow);
4419 if (right >= 0) {
4420 if (IsPowerOf2(right - 1)) {
4421 // result = left + left << log2(right - 1)
4422 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
4423 } else if (IsPowerOf2(right + 1)) {
4424 // result = -left + left << log2(right + 1)
4425 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
4426 __ Neg(result, result);
4427 } else {
4428 UNREACHABLE();
4429 }
4430 } else {
4431 if (IsPowerOf2(-right + 1)) {
4432 // result = left - left << log2(-right + 1)
4433 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
4434 } else if (IsPowerOf2(-right - 1)) {
4435 // result = -left - left << log2(-right - 1)
4436 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
4437 __ Neg(result, result);
4438 } else {
4439 UNREACHABLE();
4440 }
4441 }
4442 }
4443 }
4444 }
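// Worked examples (added for exposition, not from the original source) of
// the shift decompositions:
//   x * 8  -> Lsl(x, 3)                          (power of two)
//   x * 5  -> Add(x, x LSL 2)                    (right - 1 == 4)
//   x * 7  -> Sub(x, x LSL 3) = -7x, then Neg    (right + 1 == 8)
//   x * -3 -> Sub(x, x LSL 2) = -3x              (-right + 1 == 4)
//   x * -5 -> Add(x, x LSL 2) = 5x, then Neg     (-right - 1 == 4)
// Only operands of the forms +/-2^k and +/-(2^k +/- 1) reach these branches;
// anything else would hit the UNREACHABLE() guards.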
4446 void LCodeGen::DoMulI(LMulI* instr) {
4447 Register result = ToRegister32(instr->result());
4448 Register left = ToRegister32(instr->left());
4449 Register right = ToRegister32(instr->right());
4451 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4452 bool bailout_on_minus_zero =
4453 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4455 if (bailout_on_minus_zero && !left.Is(right)) {
4456 // If one operand is zero and the other is negative, the result is -0.
4457 // - Set Z (eq) if either left or right, or both, are 0.
4458 __ Cmp(left, 0);
4459 __ Ccmp(right, 0, ZFlag, ne);
4460 // - If so (eq), set N (mi) if left + right is negative.
4461 // - Otherwise, clear N.
4462 __ Ccmn(left, right, NoFlag, eq);
4463 DeoptimizeIf(mi, instr->environment());
4464 }
4466 if (can_overflow) {
4467 __ Smull(result.X(), left, right);
4468 __ Cmp(result.X(), Operand(result, SXTW));
4469 DeoptimizeIf(ne, instr->environment());
4470 } else {
4471 __ Mul(result, left, right);
4472 }
4473 }
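// Overflow-check sketch (added for exposition, not from the original
// source): Smull produces the exact 64-bit product of the two W registers.
// If the product fits in 32 bits, sign-extending its low word (SXTW)
// reproduces the full value and the Cmp sets Z; otherwise the high bits
// disagree and we deopt on ne. For example, 0x10000 * 0x10000 = 2^32:
// the low word is 0, and sign-extended 0 != 2^32, so the check deopts.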
4476 void LCodeGen::DoMulS(LMulS* instr) {
4477 Register result = ToRegister(instr->result());
4478 Register left = ToRegister(instr->left());
4479 Register right = ToRegister(instr->right());
4481 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4482 bool bailout_on_minus_zero =
4483 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4485 if (bailout_on_minus_zero && !left.Is(right)) {
4486 // If one operand is zero and the other is negative, the result is -0.
4487 // - Set Z (eq) if either left or right, or both, are 0.
4488 __ Cmp(left, 0);
4489 __ Ccmp(right, 0, ZFlag, ne);
4490 // - If so (eq), set N (mi) if left + right is negative.
4491 // - Otherwise, clear N.
4492 __ Ccmn(left, right, NoFlag, eq);
4493 DeoptimizeIf(mi, instr->environment());
4494 }
4496 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4497 if (can_overflow) {
4498 __ Smulh(result, left, right);
4499 __ Cmp(result, Operand(result.W(), SXTW));
4500 __ SmiTag(result);
4501 DeoptimizeIf(ne, instr->environment());
4502 } else {
4503 if (AreAliased(result, left, right)) {
4504 // All three registers are the same: half untag the input and then
4505 // multiply, giving a tagged result.
4506 STATIC_ASSERT((kSmiShift % 2) == 0);
4507 __ Asr(result, left, kSmiShift / 2);
4508 __ Mul(result, result, result);
4509 } else if (result.Is(left) && !left.Is(right)) {
4510 // Registers result and left alias, right is distinct: untag left into
4511 // result, and then multiply by right, giving a tagged result.
4512 __ SmiUntag(result, left);
4513 __ Mul(result, result, right);
4515 DCHECK(!left.Is(result));
4516 // Registers result and right alias, left is distinct, or all registers
4517 // are distinct: untag right into result, and then multiply by left,
4518 // giving a tagged result.
4519 __ SmiUntag(result, right);
4520 __ Mul(result, left, result);
4521 }
4522 }
4523 }
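// Tagging arithmetic (added for exposition, not from the original source):
// a smi stores its 32-bit value in the upper word, i.e. tagged = value << 32.
// A correctly tagged product needs value1 * value2 << 32, so exactly one
// operand must be untagged before multiplying:
//   SmiUntag(a) * b  ==  value_a * (value_b << 32)
// When all three registers alias, untagging each factor by half the shift
// achieves the same: (t >> 16) * (t >> 16) == value^2 << 32.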
4526 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4527 // TODO(3095996): Get rid of this. For now, we need to make the
4528 // result register contain a valid pointer because it is already
4529 // contained in the register pointer map.
4530 Register result = ToRegister(instr->result());
4531 __ Mov(result, 0);
4533 PushSafepointRegistersScope scope(this);
4534 // NumberTagU and NumberTagD use the context from the frame, rather than
4535 // the environment's HContext or HInlinedContext value.
4536 // They only call Runtime::kAllocateHeapNumber.
4537 // The corresponding HChange instructions are added in a phase that does
4538 // not have easy access to the local context.
4539 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4540 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4541 RecordSafepointWithRegisters(
4542 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4543 __ StoreToSafepointRegisterSlot(x0, result);
4547 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4548 class DeferredNumberTagD: public LDeferredCode {
4549 public:
4550 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4551 : LDeferredCode(codegen), instr_(instr) { }
4552 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4553 virtual LInstruction* instr() { return instr_; }
4554 private:
4555 LNumberTagD* instr_;
4556 };
4558 DoubleRegister input = ToDoubleRegister(instr->value());
4559 Register result = ToRegister(instr->result());
4560 Register temp1 = ToRegister(instr->temp1());
4561 Register temp2 = ToRegister(instr->temp2());
4563 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4564 if (FLAG_inline_new) {
4565 __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
4566 } else {
4567 __ B(deferred->entry());
4568 }
4570 __ Bind(deferred->exit());
4571 __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
4575 void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
4576 LOperand* value,
4577 LOperand* temp1,
4578 LOperand* temp2) {
4579 Label slow, convert_and_store;
4580 Register src = ToRegister32(value);
4581 Register dst = ToRegister(instr->result());
4582 Register scratch1 = ToRegister(temp1);
4584 if (FLAG_inline_new) {
4585 Register scratch2 = ToRegister(temp2);
4586 __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
4587 __ B(&convert_and_store);
4588 }
4590 // Slow case: call the runtime system to do the number allocation.
4591 __ Bind(&slow);
4592 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4593 // register is stored, as this register is in the pointer map, but contains an
4594 // integer value.
4595 __ Mov(dst, 0);
4597 // Preserve the value of all registers.
4598 PushSafepointRegistersScope scope(this);
4600 // NumberTagU and NumberTagD use the context from the frame, rather than
4601 // the environment's HContext or HInlinedContext value.
4602 // They only call Runtime::kAllocateHeapNumber.
4603 // The corresponding HChange instructions are added in a phase that does
4604 // not have easy access to the local context.
4605 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4606 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4607 RecordSafepointWithRegisters(
4608 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4609 __ StoreToSafepointRegisterSlot(x0, dst);
4612 // Convert number to floating point and store in the newly allocated heap
4613 // number.
4614 __ Bind(&convert_and_store);
4615 DoubleRegister dbl_scratch = double_scratch();
4616 __ Ucvtf(dbl_scratch, src);
4617 __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4621 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4622 class DeferredNumberTagU: public LDeferredCode {
4623 public:
4624 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4625 : LDeferredCode(codegen), instr_(instr) { }
4626 virtual void Generate() {
4627 codegen()->DoDeferredNumberTagU(instr_,
4628 instr_->value(),
4629 instr_->temp1(),
4630 instr_->temp2());
4631 }
4632 virtual LInstruction* instr() { return instr_; }
4633 private:
4634 LNumberTagU* instr_;
4635 };
4637 Register value = ToRegister32(instr->value());
4638 Register result = ToRegister(instr->result());
4640 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4641 __ Cmp(value, Smi::kMaxValue);
4642 __ B(hi, deferred->entry());
4643 __ SmiTag(result, value.X());
4644 __ Bind(deferred->exit());
4648 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4649 Register input = ToRegister(instr->value());
4650 Register scratch = ToRegister(instr->temp());
4651 DoubleRegister result = ToDoubleRegister(instr->result());
4652 bool can_convert_undefined_to_nan =
4653 instr->hydrogen()->can_convert_undefined_to_nan();
4655 Label done, load_smi;
4657 // Work out what untag mode we're working with.
4658 HValue* value = instr->hydrogen()->value();
4659 NumberUntagDMode mode = value->representation().IsSmi()
4660 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4662 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4663 __ JumpIfSmi(input, &load_smi);
4665 Label convert_undefined;
4667 // Heap number map check.
4668 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4669 if (can_convert_undefined_to_nan) {
4670 __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
4671 &convert_undefined);
4672 } else {
4673 DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
4674 instr->environment());
4675 }
4677 // Load heap number.
4678 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4679 if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4680 DeoptimizeIfMinusZero(result, instr->environment());
4681 }
4682 __ B(&done);
4684 if (can_convert_undefined_to_nan) {
4685 __ Bind(&convert_undefined);
4686 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
4687 instr->environment());
4689 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4690 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4691 __ B(&done);
4692 }
4694 } else {
4695 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4696 // Fall through to load_smi.
4697 }
4699 // Smi to double register conversion.
4700 __ Bind(&load_smi);
4701 __ SmiUntagToDouble(result, input);
4703 __ Bind(&done);
4704 }
4707 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4708 // This is a pseudo-instruction that ensures that the environment here is
4709 // properly registered for deoptimization and records the assembler's PC
4710 // offset.
4711 LEnvironment* environment = instr->environment();
4713 // If the environment were already registered, we would have no way of
4714 // backpatching it with the spill slot operands.
4715 DCHECK(!environment->HasBeenRegistered());
4716 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
4718 GenerateOsrPrologue();
4722 void LCodeGen::DoParameter(LParameter* instr) {
4723 // Nothing to do.
4724 }
4727 void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
4728 __ PushPreamble(instr->argc(), kPointerSize);
4732 void LCodeGen::DoPushArguments(LPushArguments* instr) {
4733 MacroAssembler::PushPopQueue args(masm());
4735 for (int i = 0; i < instr->ArgumentCount(); ++i) {
4736 LOperand* arg = instr->argument(i);
4737 if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
4738 Abort(kDoPushArgumentNotImplementedForDoubleType);
4739 return;
4740 }
4741 args.Queue(ToRegister(arg));
4742 }
4744 // The preamble was done by LPreparePushArguments.
4745 args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
4747 after_push_argument_ = true;
4751 void LCodeGen::DoReturn(LReturn* instr) {
4752 if (FLAG_trace && info()->IsOptimizing()) {
4753 // Push the return value on the stack as the parameter.
4754 // Runtime::TraceExit returns its parameter in x0. We're leaving the code
4755 // managed by the register allocator and tearing down the frame, so it's
4756 // safe to write to the context register.
4757 __ Push(x0);
4758 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4759 __ CallRuntime(Runtime::kTraceExit, 1);
4762 if (info()->saves_caller_doubles()) {
4763 RestoreCallerDoubles();
4764 }
4766 int no_frame_start = -1;
4767 if (NeedsEagerFrame()) {
4768 Register stack_pointer = masm()->StackPointer();
4769 __ Mov(stack_pointer, fp);
4770 no_frame_start = masm_->pc_offset();
4771 __ Pop(fp, lr);
4772 }
4774 if (instr->has_constant_parameter_count()) {
4775 int parameter_count = ToInteger32(instr->constant_parameter_count());
4776 __ Drop(parameter_count + 1);
4777 } else {
4778 Register parameter_count = ToRegister(instr->parameter_count());
4779 __ DropBySMI(parameter_count);
4780 }
4783 if (no_frame_start != -1) {
4784 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
4789 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
4790 Register temp,
4791 LOperand* index,
4792 String::Encoding encoding) {
4793 if (index->IsConstantOperand()) {
4794 int offset = ToInteger32(LConstantOperand::cast(index));
4795 if (encoding == String::TWO_BYTE_ENCODING) {
4796 offset *= kUC16Size;
4797 }
4798 STATIC_ASSERT(kCharSize == 1);
4799 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
4800 }
4802 __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
4803 if (encoding == String::ONE_BYTE_ENCODING) {
4804 return MemOperand(temp, ToRegister32(index), SXTW);
4805 } else {
4806 STATIC_ASSERT(kUC16Size == 2);
4807 return MemOperand(temp, ToRegister32(index), SXTW, 1);
4808 }
4809 }
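// Addressing sketch (added for exposition, not from the original source):
// for a constant index the operand is a simple field offset, e.g. index 3
// in a two-byte string gives
//   FieldMemOperand(string, SeqString::kHeaderSize + 6).
// For a variable index the header offset is folded into temp first, and the
// returned operand scales the 32-bit index register by the character size:
// plain SXTW for one-byte strings, "SXTW, 1" (index << 1) for two-byte.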
4812 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
4813 String::Encoding encoding = instr->hydrogen()->encoding();
4814 Register string = ToRegister(instr->string());
4815 Register result = ToRegister(instr->result());
4816 Register temp = ToRegister(instr->temp());
4818 if (FLAG_debug_code) {
4819 // Even though this lithium instruction comes with a temp register, we
4820 // can't use it here because we want to use "AtStart" constraints on the
4821 // inputs and the debug code here needs a scratch register.
4822 UseScratchRegisterScope temps(masm());
4823 Register dbg_temp = temps.AcquireX();
4825 __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
4826 __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
4828 __ And(dbg_temp, dbg_temp,
4829 Operand(kStringRepresentationMask | kStringEncodingMask));
4830 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4831 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4832 __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
4833 ? one_byte_seq_type : two_byte_seq_type));
4834 __ Check(eq, kUnexpectedStringType);
4835 }
4837 MemOperand operand =
4838 BuildSeqStringOperand(string, temp, instr->index(), encoding);
4839 if (encoding == String::ONE_BYTE_ENCODING) {
4840 __ Ldrb(result, operand);
4841 } else {
4842 __ Ldrh(result, operand);
4843 }
4844 }
4847 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
4848 String::Encoding encoding = instr->hydrogen()->encoding();
4849 Register string = ToRegister(instr->string());
4850 Register value = ToRegister(instr->value());
4851 Register temp = ToRegister(instr->temp());
4853 if (FLAG_debug_code) {
4854 DCHECK(ToRegister(instr->context()).is(cp));
4855 Register index = ToRegister(instr->index());
4856 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4857 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4858 int encoding_mask =
4859 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
4860 ? one_byte_seq_type : two_byte_seq_type;
4861 __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
4862 encoding_mask);
4863 }
4864 MemOperand operand =
4865 BuildSeqStringOperand(string, temp, instr->index(), encoding);
4866 if (encoding == String::ONE_BYTE_ENCODING) {
4867 __ Strb(value, operand);
4868 } else {
4869 __ Strh(value, operand);
4870 }
4871 }
4874 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4875 HChange* hchange = instr->hydrogen();
4876 Register input = ToRegister(instr->value());
4877 Register output = ToRegister(instr->result());
4878 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4879 hchange->value()->CheckFlag(HValue::kUint32)) {
4880 DeoptimizeIfNegative(input.W(), instr->environment());
4881 }
4882 __ SmiTag(output, input);
4886 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4887 Register input = ToRegister(instr->value());
4888 Register result = ToRegister(instr->result());
4891 if (instr->needs_check()) {
4892 DeoptimizeIfNotSmi(input, instr->environment());
4893 }
4896 __ SmiUntag(result, input);
4901 void LCodeGen::DoShiftI(LShiftI* instr) {
4902 LOperand* right_op = instr->right();
4903 Register left = ToRegister32(instr->left());
4904 Register result = ToRegister32(instr->result());
4906 if (right_op->IsRegister()) {
4907 Register right = ToRegister32(instr->right());
4908 switch (instr->op()) {
4909 case Token::ROR: __ Ror(result, left, right); break;
4910 case Token::SAR: __ Asr(result, left, right); break;
4911 case Token::SHL: __ Lsl(result, left, right); break;
4912 case Token::SHR:
4913 if (instr->can_deopt()) {
4914 Label right_not_zero;
4915 __ Cbnz(right, &right_not_zero);
4916 DeoptimizeIfNegative(left, instr->environment());
4917 __ Bind(&right_not_zero);
4918 }
4919 __ Lsr(result, left, right);
4920 break;
4921 default: UNREACHABLE();
4922 }
4923 } else {
4924 DCHECK(right_op->IsConstantOperand());
4925 int shift_count = JSShiftAmountFromLConstant(right_op);
4926 if (shift_count == 0) {
4927 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4928 DeoptimizeIfNegative(left, instr->environment());
4929 }
4930 __ Mov(result, left, kDiscardForSameWReg);
4931 } else {
4932 switch (instr->op()) {
4933 case Token::ROR: __ Ror(result, left, shift_count); break;
4934 case Token::SAR: __ Asr(result, left, shift_count); break;
4935 case Token::SHL: __ Lsl(result, left, shift_count); break;
4936 case Token::SHR: __ Lsr(result, left, shift_count); break;
4937 default: UNREACHABLE();
4938 }
4939 }
4940 }
4941 }
4944 void LCodeGen::DoShiftS(LShiftS* instr) {
4945 LOperand* right_op = instr->right();
4946 Register left = ToRegister(instr->left());
4947 Register result = ToRegister(instr->result());
4949 // Only ROR by register needs a temp.
4950 DCHECK(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
4951 (instr->temp() == NULL));
4953 if (right_op->IsRegister()) {
4954 Register right = ToRegister(instr->right());
4955 switch (instr->op()) {
4956 case Token::ROR: {
4957 Register temp = ToRegister(instr->temp());
4958 __ Ubfx(temp, right, kSmiShift, 5);
4959 __ SmiUntag(result, left);
4960 __ Ror(result.W(), result.W(), temp.W());
4961 __ SmiTag(result);
4962 break;
4963 }
4964 case Token::SAR:
4965 __ Ubfx(result, right, kSmiShift, 5);
4966 __ Asr(result, left, result);
4967 __ Bic(result, result, kSmiShiftMask);
4968 break;
4969 case Token::SHL:
4970 __ Ubfx(result, right, kSmiShift, 5);
4971 __ Lsl(result, left, result);
4972 break;
4973 case Token::SHR:
4974 if (instr->can_deopt()) {
4975 Label right_not_zero;
4976 __ Cbnz(right, &right_not_zero);
4977 DeoptimizeIfNegative(left, instr->environment());
4978 __ Bind(&right_not_zero);
4979 }
4980 __ Ubfx(result, right, kSmiShift, 5);
4981 __ Lsr(result, left, result);
4982 __ Bic(result, result, kSmiShiftMask);
4983 break;
4984 default: UNREACHABLE();
4985 }
4986 } else {
4987 DCHECK(right_op->IsConstantOperand());
4988 int shift_count = JSShiftAmountFromLConstant(right_op);
4989 if (shift_count == 0) {
4990 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4991 DeoptimizeIfNegative(left, instr->environment());
4992 }
4993 __ Mov(result, left);
4994 } else {
4995 switch (instr->op()) {
4996 case Token::ROR:
4997 __ SmiUntag(result, left);
4998 __ Ror(result.W(), result.W(), shift_count);
4999 __ SmiTag(result);
5000 break;
5001 case Token::SAR:
5002 __ Asr(result, left, shift_count);
5003 __ Bic(result, result, kSmiShiftMask);
5004 break;
5005 case Token::SHL:
5006 __ Lsl(result, left, shift_count);
5007 break;
5008 case Token::SHR:
5009 __ Lsr(result, left, shift_count);
5010 __ Bic(result, result, kSmiShiftMask);
5011 break;
5012 default: UNREACHABLE();
5013 }
5014 }
5015 }
5016 }
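// Smi shift sketch (added for exposition, not from the original source):
// with the value held in bits [63:32], Ubfx(result, right, kSmiShift, 5)
// extracts the low five bits of the untagged shift amount, mirroring
// JavaScript's "shift & 31" rule. For SAR and SHR the shift is applied to
// the whole tagged word and Bic clears any bits shifted down into the tag
// field; SHL needs no mask because zeros are shifted in from the bottom.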
5019 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
5020 __ Debug("LDebugBreak", 0, BREAK);
5024 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
5025 DCHECK(ToRegister(instr->context()).is(cp));
5026 Register scratch1 = x5;
5027 Register scratch2 = x6;
5028 DCHECK(instr->IsMarkedAsCall());
5030 ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
5031 // TODO(all): if Mov could handle object in new space then it could be used
5032 // here.
5033 __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
5034 __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
5035 __ Push(cp, scratch1, scratch2); // The context is the first argument.
5036 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
5040 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5041 PushSafepointRegistersScope scope(this);
5042 LoadContextFromDeferred(instr->context());
5043 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5044 RecordSafepointWithLazyDeopt(
5045 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5046 DCHECK(instr->HasEnvironment());
5047 LEnvironment* env = instr->environment();
5048 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5052 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5053 class DeferredStackCheck: public LDeferredCode {
5054 public:
5055 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5056 : LDeferredCode(codegen), instr_(instr) { }
5057 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5058 virtual LInstruction* instr() { return instr_; }
5059 private:
5060 LStackCheck* instr_;
5061 };
5063 DCHECK(instr->HasEnvironment());
5064 LEnvironment* env = instr->environment();
5065 // There is no LLazyBailout instruction for stack-checks. We have to
5066 // prepare for lazy deoptimization explicitly here.
5067 if (instr->hydrogen()->is_function_entry()) {
5068 // Perform stack overflow check.
5069 Label done;
5070 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5071 __ B(hs, &done);
5073 PredictableCodeSizeScope predictable(masm_,
5074 Assembler::kCallSizeWithRelocation);
5075 DCHECK(instr->context()->IsRegister());
5076 DCHECK(ToRegister(instr->context()).is(cp));
5077 CallCode(isolate()->builtins()->StackCheck(),
5078 RelocInfo::CODE_TARGET,
5079 instr);
5080 __ Bind(&done);
5081 } else {
5082 DCHECK(instr->hydrogen()->is_backwards_branch());
5083 // Perform stack overflow check if this goto needs it before jumping.
5084 DeferredStackCheck* deferred_stack_check =
5085 new(zone()) DeferredStackCheck(this, instr);
5086 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5087 __ B(lo, deferred_stack_check->entry());
5089 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5090 __ Bind(instr->done_label());
5091 deferred_stack_check->SetExit(instr->done_label());
5092 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5093 // Don't record a deoptimization index for the safepoint here.
5094 // This will be done explicitly when emitting call and the safepoint in
5095 // the deferred code.
5100 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
5101 Register function = ToRegister(instr->function());
5102 Register code_object = ToRegister(instr->code_object());
5103 Register temp = ToRegister(instr->temp());
5104 __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
5105 __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
5109 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
5110 Register context = ToRegister(instr->context());
5111 Register value = ToRegister(instr->value());
5112 Register scratch = ToRegister(instr->temp());
5113 MemOperand target = ContextMemOperand(context, instr->slot_index());
5115 Label skip_assignment;
5117 if (instr->hydrogen()->RequiresHoleCheck()) {
5118 __ Ldr(scratch, target);
5119 if (instr->hydrogen()->DeoptimizesOnHole()) {
5120 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
5121 instr->environment());
5122 } else {
5123 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
5124 }
5125 }
5127 __ Str(value, target);
5128 if (instr->hydrogen()->NeedsWriteBarrier()) {
5129 SmiCheck check_needed =
5130 instr->hydrogen()->value()->type().IsHeapObject()
5131 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5132 __ RecordWriteContextSlot(context,
5133 Context::SlotOffset(instr->slot_index()),
5134 value,
5135 scratch,
5136 GetLinkRegisterState(),
5137 kSaveFPRegs,
5138 EMIT_REMEMBERED_SET,
5139 check_needed);
5140 }
5141 __ Bind(&skip_assignment);
5145 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
5146 Register value = ToRegister(instr->value());
5147 Register cell = ToRegister(instr->temp1());
5150 __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
5152 // If the cell we are storing to contains the hole it could have
5153 // been deleted from the property dictionary. In that case, we need
5154 // to update the property details in the property dictionary to mark
5155 // it as no longer deleted. We deoptimize in that case.
5156 if (instr->hydrogen()->RequiresHoleCheck()) {
5157 Register payload = ToRegister(instr->temp2());
5158 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
5159 DeoptimizeIfRoot(
5160 payload, Heap::kTheHoleValueRootIndex, instr->environment());
5161 }
5164 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
5165 // Cells are always rescanned, so no write barrier here.
5169 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
5170 Register ext_ptr = ToRegister(instr->elements());
5171 Register key = no_reg;
5172 Register scratch = no_reg;
5173 ElementsKind elements_kind = instr->elements_kind();
5175 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
5176 bool key_is_constant = instr->key()->IsConstantOperand();
5177 int constant_key = 0;
5178 if (key_is_constant) {
5179 DCHECK(instr->temp() == NULL);
5180 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5181 if (constant_key & 0xf0000000) {
5182 Abort(kArrayIndexConstantValueTooBig);
5183 }
5184 } else {
5185 key = ToRegister(instr->key());
5186 scratch = ToRegister(instr->temp());
5187 }
5189 MemOperand dst =
5190 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
5191 key_is_constant, constant_key,
5192 elements_kind,
5193 instr->base_offset());
5195 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
5196 (elements_kind == FLOAT32_ELEMENTS)) {
5197 DoubleRegister value = ToDoubleRegister(instr->value());
5198 DoubleRegister dbl_scratch = double_scratch();
5199 __ Fcvt(dbl_scratch.S(), value);
5200 __ Str(dbl_scratch.S(), dst);
5201 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
5202 (elements_kind == FLOAT64_ELEMENTS)) {
5203 DoubleRegister value = ToDoubleRegister(instr->value());
5204 __ Str(value, dst);
5205 } else {
5206 Register value = ToRegister(instr->value());
5208 switch (elements_kind) {
5209 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
5210 case EXTERNAL_INT8_ELEMENTS:
5211 case EXTERNAL_UINT8_ELEMENTS:
5212 case UINT8_ELEMENTS:
5213 case UINT8_CLAMPED_ELEMENTS:
5215 __ Strb(value, dst);
5216 break;
5217 case EXTERNAL_INT16_ELEMENTS:
5218 case EXTERNAL_UINT16_ELEMENTS:
5219 case INT16_ELEMENTS:
5220 case UINT16_ELEMENTS:
5221 __ Strh(value, dst);
5222 break;
5223 case EXTERNAL_INT32_ELEMENTS:
5224 case EXTERNAL_UINT32_ELEMENTS:
5225 case INT32_ELEMENTS:
5226 case UINT32_ELEMENTS:
5227 __ Str(value.W(), dst);
5228 break;
5229 case FLOAT32_ELEMENTS:
5230 case FLOAT64_ELEMENTS:
5231 case EXTERNAL_FLOAT32_ELEMENTS:
5232 case EXTERNAL_FLOAT64_ELEMENTS:
5233 case FAST_DOUBLE_ELEMENTS:
5234 case FAST_ELEMENTS:
5235 case FAST_SMI_ELEMENTS:
5236 case FAST_HOLEY_DOUBLE_ELEMENTS:
5237 case FAST_HOLEY_ELEMENTS:
5238 case FAST_HOLEY_SMI_ELEMENTS:
5239 case DICTIONARY_ELEMENTS:
5240 case SLOPPY_ARGUMENTS_ELEMENTS:
5241 UNREACHABLE();
5242 break;
5243 }
5244 }
5245 }
5248 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
5249 Register elements = ToRegister(instr->elements());
5250 DoubleRegister value = ToDoubleRegister(instr->value());
5251 MemOperand mem_op;
5253 if (instr->key()->IsConstantOperand()) {
5254 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5255 if (constant_key & 0xf0000000) {
5256 Abort(kArrayIndexConstantValueTooBig);
5257 }
5258 int offset = instr->base_offset() + constant_key * kDoubleSize;
5259 mem_op = MemOperand(elements, offset);
5260 } else {
5261 Register store_base = ToRegister(instr->temp());
5262 Register key = ToRegister(instr->key());
5263 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5264 mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
5265 instr->hydrogen()->elements_kind(),
5266 instr->hydrogen()->representation(),
5267 instr->base_offset());
5268 }
5270 if (instr->NeedsCanonicalization()) {
5271 __ CanonicalizeNaN(double_scratch(), value);
5272 __ Str(double_scratch(), mem_op);
5273 } else {
5274 __ Str(value, mem_op);
5275 }
5276 }
5279 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
5280 Register value = ToRegister(instr->value());
5281 Register elements = ToRegister(instr->elements());
5282 Register scratch = no_reg;
5283 Register store_base = no_reg;
5284 Register key = no_reg;
5285 MemOperand mem_op;
5287 if (!instr->key()->IsConstantOperand() ||
5288 instr->hydrogen()->NeedsWriteBarrier()) {
5289 scratch = ToRegister(instr->temp());
5290 }
5292 Representation representation = instr->hydrogen()->value()->representation();
5293 if (instr->key()->IsConstantOperand()) {
5294 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
5295 int offset = instr->base_offset() +
5296 ToInteger32(const_operand) * kPointerSize;
5297 store_base = elements;
5298 if (representation.IsInteger32()) {
5299 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5300 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
5301 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
5302 STATIC_ASSERT(kSmiTag == 0);
5303 mem_op = UntagSmiMemOperand(store_base, offset);
5304 } else {
5305 mem_op = MemOperand(store_base, offset);
5306 }
5307 } else {
5308 store_base = scratch;
5309 key = ToRegister(instr->key());
5310 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5312 mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
5313 instr->hydrogen()->elements_kind(),
5314 representation, instr->base_offset());
5315 }
5317 __ Store(value, mem_op, representation);
5319 if (instr->hydrogen()->NeedsWriteBarrier()) {
5320 DCHECK(representation.IsTagged());
5321 // This assignment may cause element_addr to alias store_base.
5322 Register element_addr = scratch;
5323 SmiCheck check_needed =
5324 instr->hydrogen()->value()->type().IsHeapObject()
5325 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5326 // Compute address of modified element and store it into key register.
5327 __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
5328 __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
5329 kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
5330 instr->hydrogen()->PointersToHereCheckForValue());
5331 }
5332 }
5335 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5336 DCHECK(ToRegister(instr->context()).is(cp));
5337 DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
5338 DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
5339 DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
5341 Handle<Code> ic = instr->strict_mode() == STRICT
5342 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
5343 : isolate()->builtins()->KeyedStoreIC_Initialize();
5344 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5348 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5349 Representation representation = instr->representation();
5351 Register object = ToRegister(instr->object());
5352 HObjectAccess access = instr->hydrogen()->access();
5353 int offset = access.offset();
5355 if (access.IsExternalMemory()) {
5356 DCHECK(!instr->hydrogen()->has_transition());
5357 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
5358 Register value = ToRegister(instr->value());
5359 __ Store(value, MemOperand(object, offset), representation);
5360 return;
5361 }
5363 __ AssertNotSmi(object);
5365 if (representation.IsDouble()) {
5366 DCHECK(access.IsInobject());
5367 DCHECK(!instr->hydrogen()->has_transition());
5368 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
5369 FPRegister value = ToDoubleRegister(instr->value());
5370 __ Str(value, FieldMemOperand(object, offset));
5371 return;
5372 }
5374 Register value = ToRegister(instr->value());
5376 DCHECK(!representation.IsSmi() ||
5377 !instr->value()->IsConstantOperand() ||
5378 IsInteger32Constant(LConstantOperand::cast(instr->value())));
5380 if (instr->hydrogen()->has_transition()) {
5381 Handle<Map> transition = instr->hydrogen()->transition_map();
5382 AddDeprecationDependency(transition);
5383 // Store the new map value.
5384 Register new_map_value = ToRegister(instr->temp0());
5385 __ Mov(new_map_value, Operand(transition));
5386 __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
5387 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
5388 // Update the write barrier for the map field.
5389 __ RecordWriteForMap(object,
5390 new_map_value,
5391 ToRegister(instr->temp1()),
5392 GetLinkRegisterState(),
5393 kSaveFPRegs);
5394 }
5395 }
5398 Register destination;
5399 if (access.IsInobject()) {
5400 destination = object;
5402 Register temp0 = ToRegister(instr->temp0());
5403 __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
5404 destination = temp0;
5405 }
5407 if (representation.IsSmi() &&
5408 instr->hydrogen()->value()->representation().IsInteger32()) {
5409 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5410 #ifdef DEBUG
5411 Register temp0 = ToRegister(instr->temp0());
5412 __ Ldr(temp0, FieldMemOperand(destination, offset));
5413 __ AssertSmi(temp0);
5414 // If destination aliased temp0, restore it to the address calculated
5415 // earlier.
5416 if (destination.Is(temp0)) {
5417 DCHECK(!access.IsInobject());
5418 __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
5419 }
5420 #endif
5421 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
5422 STATIC_ASSERT(kSmiTag == 0);
5423 __ Store(value, UntagSmiFieldMemOperand(destination, offset),
5424 Representation::Integer32());
5425 } else {
5426 __ Store(value, FieldMemOperand(destination, offset), representation);
5427 }
5428 if (instr->hydrogen()->NeedsWriteBarrier()) {
5429 __ RecordWriteField(destination,
5430 offset,
5431 value, // Clobbered.
5432 ToRegister(instr->temp1()), // Clobbered.
5433 GetLinkRegisterState(),
5434 kSaveFPRegs,
5435 EMIT_REMEMBERED_SET,
5436 instr->hydrogen()->SmiCheckForWriteBarrier(),
5437 instr->hydrogen()->PointersToHereCheckForValue());
5438 }
5439 }
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));

  __ Mov(StoreIC::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTagAndPush(index);

  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());

  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
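  // Char codes up to kMaxOneByteCharCode are served from the single
  // character string cache: a FixedArray of pre-created one-character
  // strings indexed by char code. An undefined entry means the string is
  // not cached, so fall back to the deferred runtime path below.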
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ SmiTagAndPush(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  InlineSmiCheckInfo::EmitNotInlined(masm());
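
  // The compare IC leaves its result in x0 as a value whose sign and
  // zero-ness encode the comparison, strcmp-style, so branching on
  // (x0 <cond> 0) with the condition derived from the token yields the
  // string comparison outcome.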
  Condition condition = TokenToCondition(op, false);

  EmitCompareAndBranch(instr, condition, x0, 0);
}


void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32I(instr->right(), instr);

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  // Load heap object map.
  __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to undefined check.
    __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);

    // A heap number: load value and convert to int32 using truncating
    // function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);
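
    // Booleans convert without a branch: Cmp/Cset makes output 1 for true
    // and 0 otherwise, and the conditional compare (Ccmp) re-tests the input
    // against false only when it was not true. The flags end up "eq" exactly
    // when the input was either boolean, with output already holding the
    // converted value.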
    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output contains zero, undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                        instr->environment());
  } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    // Deoptimize if it's not a heap number.
    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
                        instr->environment());

    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr->environment());

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
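      // Only a zero result can conceal a -0.0 input, so in that case inspect
      // the sign bit of the original double through its raw bit pattern.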
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr->environment());
    }
  }
  __ Bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
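
    // Fast path: an actual smi is untagged inline. Anything else (heap
    // numbers and, for truncating conversions, oddballs) is converted out of
    // line in DoDeferredTaggedToI.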
    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).Is(x0));
  DCHECK(ToRegister(instr->result()).Is(x0));
  __ Push(x0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create regexp literal using runtime function.
  // Result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

  __ Bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);

  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
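
  // A simple map change transition only needs to swap in the new map word
  // (the elements representation is unchanged), so it is done inline with a
  // write barrier. Any other transition may have to convert the backing
  // store, so it is delegated to TransitionElementsKindStub.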
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen checks
      // that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
    DCHECK(object.is(x0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr->environment());
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ Push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    DCHECK(instr->temp1() != NULL);
    Register map = ToRegister(instr->temp1());

    __ JumpIfSmi(value, true_label);
    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->string_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(
        value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
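    // Per the STATIC_ASSERT above there are exactly two callable spec object
    // types, JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE, so checking each
    // of them in turn is sufficient.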
    DCHECK(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // HeapObject's type has been loaded into type register by JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (String::Equals(type_name, factory->object_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr->environment());
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr->environment());

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;
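
  // The smi index encodes the lookup result: bit 0 of its payload set means
  // the field holds a mutable heap number that must be loaded through the
  // Runtime::kLoadMutableDouble call in deferred code; after shifting that
  // bit out, a non-negative index selects an in-object field and a negative
  // one a slot in the out-of-object properties array.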
  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


} }  // namespace v8::internal