// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
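// SafepointGenerator is passed as the CallWrapper for generated calls that
// need a safepoint, e.g. (as in DoApplyArguments below):
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
//   __ InvokeFunction(function, actual, CALL_FUNCTION, generator);
// Recording the safepoint in AfterCall() ties the pointer map to the return
// address of the call that was just emitted.
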
#define __ masm()->

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags and they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
    : BranchGenerator(codegen),
      cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};
// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
      : BranchGenerator(codegen),
        cond_(cond),
        lhs_(lhs),
        rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};
// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
      : BranchGenerator(codegen),
        cond_(cond),
        value_(value),
        mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};
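// Illustrative expansion, assuming a single-bit mask (the case that makes
// the eq/ne paths profitable):
//   TestAndBranch(codegen, eq, value, 1 << N)
//     Emit(label)          ->  Tbz value, #N, label
//     EmitInverted(label)  ->  Tbnz value, #N, label
// For multi-bit masks the macro assembler falls back to Tst followed by a
// conditional branch.
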
// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
      : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};
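// Taking the absolute value first collapses +0.0 and -0.0 into one case and
// leaves NaN unordered, so a single condition covers the whole test:
//   Fabs scratch, value    // scratch is non-negative, or NaN.
//   Fcmp scratch, #0.0     // 'gt' holds iff value is non-zero and not NaN.
//   B gt, label
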
// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};
// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}
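// Note that WriteTranslation recurses on environment->outer() before
// emitting its own frame, so for f() inlined into g() the translation lists
// g's frame commands first, then f's, i.e. outermost frame first.
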
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}
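// The linear search above deduplicates literals, so registering the same
// handle twice yields the same index and no new entry:
//   int a = DefineDeoptimizationLiteral(closure);  // e.g. returns 0
//   int b = DefineDeoptimizationLiteral(closure);  // also returns 0
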
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
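// frame_count covers every frame in the environment chain (including
// arguments adaptor and stub frames), while jsframe_count only counts
// JS_FUNCTION frames; the Translation needs both totals up front before any
// frame commands are written.
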
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).Is(x1));
  DCHECK(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(x3));
    DCHECK(vector_register.is(x2));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Mov(vector_register, vector);
    __ Mov(slot_register, Operand(Smi::FromInt(index)));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);

  DCHECK(ToRegister(instr->result()).is(x0));
}
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }

  DCHECK(ToRegister(instr->result()).is(x0));
}
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}
void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}
void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}
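// A sketch of the resulting layout, assuming allocation indices 0 and 2 are
// the only live double registers (count only advances for allocated ones):
//   stack pointer + 0 * kDoubleSize : double for allocation index 0
//   stack pointer + 1 * kDoubleSize : double for allocation index 2
// RestoreCallerDoubles() below walks the same BitVector in the same order,
// so each Peek pairs up with the corresponding Poke here.
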
void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}
bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t FLAG in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Poke(x10, receiver_offset);

      __ Bind(&ok);
    }
  }

  DCHECK(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ Claim(slots, kPointerSize);
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ Bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }

  return !is_aborted();
}
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Claim(slots);
}
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}
bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp, cp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}
bool LCodeGen::GenerateJumpTable() {
  Label needs_frame, call_deopt_entry;

  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ Bind(&table_entry->label);

      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      __ Mov(entry_offset, entry - base);

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        // Save lr before Bl, fp will be adjusted in the needs_frame code.
        __ Push(lr, fp);
        // Reuse the existing needs_frame code.
        __ Bl(&needs_frame);
      } else {
        // There is nothing special to do, so just continue to the second-level
        // deopt table.
        __ Bl(&call_deopt_entry);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);

      masm()->CheckConstPool(false, false);
    }

    if (needs_frame.is_linked()) {
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());

      Comment(";;; needs_frame common code");
      UseScratchRegisterScope temps(masm());
      Register stub_marker = temps.AcquireX();
      __ Bind(&needs_frame);
      __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
      __ Push(cp, stub_marker);
      __ Add(fp, __ StackPointer(), 2 * kPointerSize);
    }

    // Generate common code for calling the second-level deopt table.
    __ Bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    Register deopt_entry = temps.AcquireX();
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Br(deopt_entry);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
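// Keeping each jump-table entry down to a Mov of a small immediate plus a
// Bl is what makes the two-level scheme pay off: only the shared tail at
// call_deopt_entry has to materialize the full 64-bit base address of the
// second-level deopt table.
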
bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}
void LCodeGen::DeoptimizeBranch(
    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  DCHECK(environment->HasBeenRegistered());
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(w1, w1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    DCHECK(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to build frame, or restore caller doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ B(&jump_table_.last()->label, branch_type, reg, bit);
  }
}
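// Example of the reuse above: two consecutive DeoptimizeIf calls with the
// same reason and target share a single jump-table entry (the
// IsEquivalentTo check), unless deopt tracing or CPU profiling is enabled,
// in which case every site keeps its own entry so that each deopt can be
// attributed to a distinct call position.
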
void LCodeGen::Deoptimize(LInstruction* instr,
                          Deoptimizer::DeoptReason deopt_reason,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
                   override_bailout_type);
}


void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
                               Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                                LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                                   LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                                     Deoptimizer::DeoptReason deopt_reason) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
}


void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}


void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
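// Worked example, assuming space_needed is 4 * kInstructionSize: if only
// two instructions (8 bytes) have been emitted since last_lazy_deopt_pc_,
// this pads with two nops so that patching in a full four-instruction call
// sequence cannot overwrite the previous lazy-deopt site.
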
Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  DCHECK((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());
}
Register LCodeGen::ToRegister32(LOperand* op) const {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}
Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());
}
Operand LCodeGen::ToOperand(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}
Operand LCodeGen::ToOperand32(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      return Operand(constant->Integer32Value());
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}
static int64_t ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(op != NULL);
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}
template <class LI>
Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
  if (shift_info->shift() == NO_SHIFT) {
    return ToOperand32(right);
  } else {
    return Operand(
        ToRegister32(right),
        shift_info->shift(),
        JSShiftAmountFromLConstant(shift_info->shift_amount()));
  }
}
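// This lets an instruction fold a shift into its right-hand operand; e.g.
// for an LAddI whose right input was fused with a left shift by 3, the
// generated code can be a single instruction of the form
//   Add w0, w1, Operand(w2, LSL, #3)   // w0 = w1 + (w2 << 3)
// instead of shifting into a scratch register first.
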
bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}
template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    if (right_block != next_block) {
      __ B(chunk_->GetAssemblyLabel(right_block));
    }
  }
}
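// Example of the layout-driven choice above, with blocks emitted in order:
// for a branch whose true target is the very next block and whose false
// target is some later block, a single
//   branch.EmitInverted(false_label)
// suffices and control simply falls through to the true block, saving an
// unconditional branch.
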
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  DCHECK((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  DCHECK((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  DCHECK((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}
void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    if (loc != 0) {
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
    } else {
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
  }
}
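// Worked example for the all-constant case: with length == 3 and
// index == 1, offset == ((3 - 1) + 1) * kPointerSize, i.e. three words
// above 'arguments', which accounts for the two-word gap described in the
// comment above.
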
void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = (instr->right()->IsConstantOperand())
      ? ToInteger32(LConstantOperand::cast(instr->right()))
      : Operand(ToRegister32(instr->right()), SXTW);

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);
}
void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Add(result, left, right);
  }
}
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
    } else {
      __ B(deferred->entry());
    }
  } else {
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
  }

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register filler_count = temp1;
    Register filler = temp2;
    Register untagged_result = ToRegister(instr->temp3());

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Mov(filler_count, size / kPointerSize);
    } else {
      __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
    }

    __ Sub(untagged_result, result, kHeapObjectTag);
    __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ FillFields(untagged_result, filler_count, filler);
  } else {
    DCHECK(instr->temp3() == NULL);
  }
}
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  // We're in a SafepointRegistersScope so we can use any scratch registers.
  Register size = x0;
  if (instr->size()->IsConstantOperand()) {
    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
  } else {
    __ SmiTag(size, ToRegister32(instr->size()).X());
  }
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Mov(x10, Smi::FromInt(flags));
  __ Push(size, x10);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister32(instr->length());

  Register elements = ToRegister(instr->elements());
  Register scratch = x5;
  DCHECK(receiver.Is(x0));  // Used for parameter count.
  DCHECK(function.Is(x1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ Cmp(length, kArgumentsLimit);
  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ Push(receiver);
  Register argc = receiver;
  receiver = NoReg;
  __ Sxtw(argc, length);
  // The arguments are at a one pointer size offset from elements.
  __ Add(elements, elements, 1 * kPointerSize);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Cbz(length, &invoke);
  __ Bind(&loop);
  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
  __ Push(scratch);
  __ Subs(length, length, 1);
  __ B(ne, &loop);

  __ Bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in argc (receiver) which is x0, as
  // expected by InvokeFunction.
  ParameterCount actual(argc);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // When we are inside an inlined function, the arguments are the last
    // things that have been pushed on the stack. Therefore the arguments
    // array can be accessed directly from jssp.
    // However in the normal case, it is accessed via fp but there are two
    // words on the stack between fp and the arguments (the saved lr and fp)
    // and the LAccessArgumentsAt implementation takes that into account.
    // In the inlined case we need to subtract the size of 2 words from jssp
    // to get a pointer which will work well with LAccessArgumentsAt.
    DCHECK(masm()->StackPointer().Is(jssp));
    __ Sub(result, jssp, 2 * kPointerSize);
  } else {
    DCHECK(instr->temp() != NULL);
    Register previous_fp = ToRegister(instr->temp());

    __ Ldr(previous_fp,
           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ Ldr(result,
           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ Csel(result, fp, previous_fp, ne);
  }
}
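// The Csel above makes the adaptor check branch-free: result temporarily
// holds the context slot loaded from the caller's frame, then
//   Csel result, fp, previous_fp, ne
// selects fp when the marker is not ARGUMENTS_ADAPTOR and previous_fp (the
// adaptor frame, which holds the actual arguments) when it is.
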
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister32(instr->result());
  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ Cmp(fp, elements);
  __ Mov(result, scope()->num_parameters());
  __ B(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ Ldr(result,
         UntagSmiMemOperand(result.X(),
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ Bind(&done);
}
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());

  switch (instr->op()) {
    case Token::ADD: __ Fadd(result, left, right); break;
    case Token::SUB: __ Fsub(result, left, right); break;
    case Token::MUL: __ Fmul(result, left, right); break;
    case Token::DIV: __ Fdiv(result, left, right); break;
    case Token::MOD: {
      // The ECMA-262 remainder operator is the remainder from a truncating
      // (round-towards-zero) division. Note that this differs from IEEE-754.
      //
      // TODO(jbramley): See if it's possible to do this inline, rather than by
      // calling a helper function. With frintz (to produce the intermediate
      // quotient) and fmsub (to calculate the remainder without loss of
      // precision), it should be possible. However, we would need support for
      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
      // support that yet.
      DCHECK(left.Is(d0));
      DCHECK(right.Is(d1));
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      DCHECK(result.Is(d0));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(x1));
  DCHECK(ToRegister(instr->right()).is(x0));
  DCHECK(ToRegister(instr->result()).is(x0));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoBitI(LBitI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoBitS(LBitS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  switch (instr->op()) {
    case Token::BIT_AND: __ And(result, left, right); break;
    case Token::BIT_OR:  __ Orr(result, left, right); break;
    case Token::BIT_XOR: __ Eor(result, left, right); break;
    default:
      UNREACHABLE();
      break;
  }
}
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand32(instr->index());
    Register length = ToRegister32(instr->length());
    __ Cmp(length, index);
    cond = CommuteCondition(cond);
  } else {
    Register index = ToRegister32(instr->index());
    Operand length = ToOperand32(instr->length());
    __ Cmp(index, length);
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
  } else {
    DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
  }
}
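// Example of the commuted path above: with a constant index the compare is
// emitted as Cmp(length, #index), so the usual hs condition ("index >=
// length" deopts) becomes ls after CommuteCondition ("length <= index"),
// preserving the same out-of-bounds test.
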
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);

  if (r.IsInteger32()) {
    DCHECK(!info()->IsStub());
    EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
  } else if (r.IsSmi()) {
    DCHECK(!info()->IsStub());
    STATIC_ASSERT(kSmiTag == 0);
    EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
  } else if (r.IsDouble()) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchIfNonZeroNumber(instr, value, double_scratch());
  } else {
    DCHECK(r.IsTagged());
    Register value = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();

    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ CompareRoot(value, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitGoto(instr->TrueDestination(chunk()));
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      __ Ldr(double_scratch(), FieldMemOperand(value,
                                               HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      Register temp = ToRegister(instr->temp1());
      __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
      EmitCompareAndBranch(instr, ne, temp, 0);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ JumpIfRoot(
            value, Heap::kUndefinedValueRootIndex, false_label);
      }

      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ JumpIfRoot(
            value, Heap::kTrueValueRootIndex, true_label);
        __ JumpIfRoot(
            value, Heap::kFalseValueRootIndex, false_label);
      }

      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ JumpIfRoot(
            value, Heap::kNullValueRootIndex, false_label);
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        DCHECK(Smi::FromInt(0) == 0);
        __ Cbz(value, false_label);
        __ JumpIfSmi(value, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a smi, deopt.
        DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
      }

      Register map = NoReg;
      Register scratch = NoReg;

      if (expected.NeedsMap()) {
        DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
        map = ToRegister(instr->temp1());
        scratch = ToRegister(instr->temp2());

        __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
          __ TestAndBranchIfAnySet(
              scratch, 1 << Map::kIsUndetectable, false_label);
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
        __ B(ge, true_label);
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
        __ B(ge, &not_string);
        __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
        __ Cbz(scratch, false_label);
        __ B(true_label);
        __ Bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
        __ B(eq, true_label);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        Label not_heap_number;
        __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);

        __ Ldr(double_scratch(),
               FieldMemOperand(value, HeapNumber::kValueOffset));
        __ Fcmp(double_scratch(), 0.0);
        // If we got a NaN (overflow bit is set), jump to the false branch.
        __ B(vs, false_label);
        __ B(eq, false_label);
        __ B(true_label);
        __ Bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        Deoptimize(instr, Deoptimizer::kUnexpectedObject);
      }
    }
  }
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  // The function interface relies on the following register assignments.
  Register function_reg = x1;
  Register arity_reg = x0;

  LPointerMap* pointers = instr->pointer_map();

  if (FLAG_debug_code) {
    Label is_not_smi;
    // Try to confirm that function_reg (x1) is a tagged pointer.
    __ JumpIfNotSmi(function_reg, &is_not_smi);
    __ Abort(kExpectedFunctionObject);
    __ Bind(&is_not_smi);
  }

  if (can_invoke_directly) {
    // Change context.
    __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Set the arguments count if adaption is not needed. Assumes that x0 is
    // available to write to at this point.
    if (dont_adapt_arguments) {
      __ Mov(arity_reg, arity);
    }

    // Invoke function.
    __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
    __ Call(x10);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
  }
}


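// Illustrative note on the fast path above (example values for exposition):
// with
//   function f(a, b) { }   // formal_parameter_count == 2
// a call site f(1, 2) has arity == 2, so can_invoke_directly holds and the
// code entry is called with the arity in x0, skipping argument adaptation.
// A mismatched call such as f(1) goes through InvokeFunction instead, which
// routes the call via an arguments adaptor frame.

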
void LCodeGen::DoTailCallThroughMegamorphicCache(
    LTailCallThroughMegamorphicCache* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register name = ToRegister(instr->name());
  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(name.is(LoadDescriptor::NameRegister()));
  DCHECK(receiver.is(x1));
  DCHECK(name.is(x2));
  Register scratch = x4;
  Register extra = x5;
  Register extra2 = x6;
  Register extra3 = x7;
  DCHECK(!FLAG_vector_ics ||
         !AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
                     scratch, extra, extra2, extra3));

  // Important for the tail-call.
  bool must_teardown_frame = NeedsEagerFrame();

  if (!instr->hydrogen()->is_just_miss()) {
    DCHECK(!instr->hydrogen()->is_keyed_load());

    // The probe will tail call to a handler if found.
    isolate()->stub_cache()->GenerateProbe(
        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
        receiver, name, scratch, extra, extra2, extra3);
  }

  // Tail call to miss if we ended up here.
  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
  if (instr->hydrogen()->is_keyed_load()) {
    KeyedLoadIC::GenerateMiss(masm());
  } else {
    LoadIC::GenerateMiss(masm());
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->result()).Is(x0));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      // TODO(all): on ARM we use a call descriptor to specify a storage mode
      // but on ARM64 we only have one storage mode so it isn't necessary. Check
      // this understanding is correct.
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
      __ Br(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      // TODO(all): on ARM we use a call descriptor to specify a storage mode
      // but on ARM64 we only have one storage mode so it isn't necessary. Check
      // this understanding is correct.
      __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
      __ Call(target);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->function()).is(x1));

  if (instr->hydrogen()->pass_argument_count()) {
    __ Mov(x0, Operand(instr->arity()));
  }

  // Change context.
  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));

  // Load the code entry address.
  __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
  __ Call(x10);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(x0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this);
    __ Push(object);
    __ Mov(cp, 0);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, temp);
  }
  DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps: public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register object = ToRegister(instr->value());
  Register map_reg = ToRegister(instr->temp());

  __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, object);
    __ Bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map);
    __ B(eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map);

  // We didn't match a map.
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ B(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  }

  __ Bind(&success);
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  Register value = ToRegister(instr->value());
  DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
  DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first, last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ Cmp(scratch, first);
    if (first == last) {
      // If there is only one type in the interval, check for equality.
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else if (last == LAST_TYPE) {
      // We don't need to compare with the higher bound of the interval.
      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
    } else {
      // If we are below the lower bound, set the C flag and clear the Z flag
      // to force a deopt.
      __ Ccmp(scratch, last, CFlag, hs);
      DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK((tag == 0) || (tag == mask));
      if (tag == 0) {
        DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
                           Deoptimizer::kWrongInstanceType);
      } else {
        DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
                             Deoptimizer::kWrongInstanceType);
      }
    } else {
      if (tag == 0) {
        __ Tst(scratch, mask);
      } else {
        __ And(scratch, scratch, mask);
        __ Cmp(scratch, tag);
      }
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}


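// Illustrative sketch of the interval check above, for a hypothetical range
// [first, last] with first < last < LAST_TYPE:
//   Cmp(scratch, first);             // C == 1 iff scratch >= first
//   Ccmp(scratch, last, CFlag, hs);  // if hs holds, compare against last;
//                                    // otherwise force NZCV = CFlag (C=1, Z=0)
//   DeoptimizeIf(hi, ...);           // 'hi' (C && !Z) is true both when
//                                    // scratch > last and in the forced case
// so a single conditional deopt covers both sides of the range test.

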
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister input = ToDoubleRegister(instr->unclamped());
  Register result = ToRegister32(instr->result());
  __ ClampDoubleToUint8(result, input, double_scratch());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register input = ToRegister32(instr->unclamped());
  Register result = ToRegister32(instr->result());
  __ ClampInt32ToUint8(result, input);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register input = ToRegister(instr->unclamped());
  Register result = ToRegister32(instr->result());
  Label done;

  // Both smi and heap number cases are handled.
  Label is_not_smi;
  __ JumpIfNotSmi(input, &is_not_smi);
  __ SmiUntag(result.X(), input);
  __ ClampInt32ToUint8(result);
  __ B(&done);

  __ Bind(&is_not_smi);

  // Check for heap number.
  Label is_heap_number;
  __ JumpIfHeapNumber(input, &is_heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                      Deoptimizer::kNotAHeapNumberUndefined);
  __ Mov(result, 0);
  __ B(&done);

  // Heap number case.
  __ Bind(&is_heap_number);
  DoubleRegister dbl_scratch = double_scratch();
  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
  __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);

  __ Bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ Fmov(result_reg, value_reg);
    __ Lsr(result_reg, result_reg, 32);
  } else {
    __ Fmov(result_reg.W(), value_reg.S());
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());

  // Insert the least significant 32 bits of hi_reg into the most significant
  // 32 bits of lo_reg, and move to a floating point register.
  __ Bfi(lo_reg, hi_reg, 32, 32);
  __ Fmov(result_reg, lo_reg);
}


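// Bit-level sketch of the Bfi/Fmov pairing above (illustrative values):
//   hi_reg = 0x3ff00000, lo_reg = 0x00000000
//   Bfi(lo_reg, hi_reg, 32, 32)  ->  lo_reg = 0x3ff0000000000000
//   Fmov(result_reg, lo_reg)     ->  result_reg = 1.0
// DoDoubleBits above performs the inverse mapping: Fmov plus Lsr #32 extracts
// the high word, and the 32-bit Fmov of the S view extracts the low word.

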
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Handle<String> class_name = instr->hydrogen()->class_name();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register input = ToRegister(instr->value());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  __ JumpIfSmi(input, false_label);

  Register map = scratch2;
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    // We expect CompareObjectType to load the object instance type in
    // scratch1.
    __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
    __ B(lt, false_label);
    __ B(eq, true_label);
    __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
    __ B(eq, true_label);
  } else {
    __ IsObjectJSObjectType(input, map, scratch1, false_label);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  {
    UseScratchRegisterScope temps(masm());
    Register instance_type = temps.AcquireX();
    __ GetMapConstructor(scratch1, map, scratch2, instance_type);
    __ Cmp(instance_type, JS_FUNCTION_TYPE);
  }

  // Objects with a non-function constructor have class 'Object'.
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ B(ne, true_label);
  } else {
    __ B(ne, false_label);
  }

  // The constructor function is in scratch1. Get its instance class name.
  __ Ldr(scratch1,
         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(scratch1,
         FieldMemOperand(scratch1,
                         SharedFunctionInfo::kInstanceClassNameOffset));

  // The class name we are testing against is internalized since it's a
  // literal. The name in the constructor is internalized because of the way
  // the context is booted. This routine isn't expected to work for random
  // API-created classes and it doesn't have to because you can't access it
  // with natives syntax. Since both sides are internalized it is sufficient
  // to use an identity comparison.
  EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
}


void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
  DCHECK(instr->hydrogen()->representation().IsDouble());
  FPRegister object = ToDoubleRegister(instr->object());
  Register temp = ToRegister(instr->temp());

  // If we don't have a NaN, we don't have the hole, so branch now to avoid the
  // (relatively expensive) hole-NaN check.
  __ Fcmp(object, object);
  __ B(vc, instr->FalseLabel(chunk_));

  // We have a NaN, but is it the hole?
  __ Fmov(temp, object);
  EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
}


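// The hole is a NaN with one specific bit pattern (kHoleNanInt64), so it can
// only be recognized by an integer compare of the raw bits: Fcmp, like any
// floating-point comparison, treats all NaNs as unordered and cannot tell the
// hole apart from other NaNs. The Fcmp above is therefore only a cheap
// pre-filter that lets non-NaN values skip the bit comparison entirely.

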
void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
  DCHECK(instr->hydrogen()->representation().IsTagged());
  Register object = ToRegister(instr->object());

  EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register value = ToRegister(instr->value());
  Register map = ToRegister(instr->temp());

  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
}


void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  DCHECK(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
                       instr->TrueLabel(chunk()));
  } else {
    Register value = ToRegister(instr->value());
    __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
    __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
    __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
  }
  EmitGoto(instr->FalseDestination(chunk()));
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));

      // If a NaN is involved, i.e. the result is unordered (V set),
      // jump to the false block label.
      __ B(vs, instr->FalseLabel(chunk_));
      EmitBranch(instr, cond);
    } else {
      if (instr->hydrogen_value()->representation().IsInteger32()) {
        if (right->IsConstantOperand()) {
          EmitCompareAndBranch(instr, cond, ToRegister32(left),
                               ToOperand32(right));
        } else {
          // Commute the operands and the condition.
          EmitCompareAndBranch(instr, CommuteCondition(cond),
                               ToRegister32(right), ToOperand32(left));
        }
      } else {
        DCHECK(instr->hydrogen_value()->representation().IsSmi());
        if (right->IsConstantOperand()) {
          int32_t value = ToInteger32(LConstantOperand::cast(right));
          EmitCompareAndBranch(instr,
                               cond,
                               ToRegister(left),
                               Operand(Smi::FromInt(value)));
        } else if (left->IsConstantOperand()) {
          // Commute the operands and the condition.
          int32_t value = ToInteger32(LConstantOperand::cast(left));
          EmitCompareAndBranch(instr,
                               CommuteCondition(cond),
                               ToRegister(right),
                               Operand(Smi::FromInt(value)));
        } else {
          EmitCompareAndBranch(instr,
                               cond,
                               ToRegister(left),
                               ToRegister(right));
        }
      }
    }
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());
  EmitCompareAndBranch(instr, eq, left, right);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();
  Condition cond = TokenToCondition(op, false);

  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // Signal that we don't inline smi code before this stub.
  InlineSmiCheckInfo::EmitNotInlined(masm());

  // Return true or false depending on the CompareIC result.
  // This instruction is marked as call. We can clobber any register.
  DCHECK(instr->IsMarkedAsCall());
  __ LoadTrueFalseRoots(x1, x2);
  __ Cmp(x0, 0);
  __ Csel(ToRegister(instr->result()), x1, x2, cond);
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  if (instr->value() == 0) {
    if (copysign(1.0, instr->value()) == 1.0) {
      __ Fmov(result, fp_zero);
    } else {
      __ Fneg(result, fp_zero);
    }
  } else {
    __ Fmov(result, instr->value());
  }
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  DCHECK(is_int32(instr->value()));
  // Cast the value here to ensure that the value isn't sign extended by the
  // implicit Operand constructor.
  __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(ToRegister(instr->result()), object);
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    UseScratchRegisterScope temps(masm());
    Register temp = temps.AcquireX();
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ Mov(temp, Operand(cell));
    __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
    __ Cmp(reg, temp);
  } else {
    __ Cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register temp1 = x10;
  Register temp2 = x11;
  Smi* index = instr->index();
  Label runtime, done;

  DCHECK(object.is(result) && object.Is(x0));
  DCHECK(instr->IsMarkedAsCall());

  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
  __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);

  if (index->value() == 0) {
    __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ Mov(temp1, Operand(stamp));
      __ Ldr(temp1, MemOperand(temp1));
      __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Cmp(temp1, temp2);
      __ B(ne, &runtime);
      __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                             kPointerSize * index->value()));
      __ B(&done);
    }

    __ Bind(&runtime);
    __ Mov(x1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ Bind(&done);
  }
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
    type = Deoptimizer::LAZY;
  }

  Deoptimize(instr, instr->hydrogen()->reason(), &type);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ Tst(dividend, mask);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Neg(result, dividend);
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Mov(result, dividend);
  } else if (shift == 1) {
    __ Add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    __ Mov(result, Operand(dividend, ASR, 31));
    __ Add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ Neg(result, result);
}


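// Worked example of the shift-based signed division above (illustrative
// values; shift == 2, i.e. divisor == 4 or -4), showing the truncate-toward-
// zero correction:
//   dividend = -5:
//     Mov(result, dividend ASR 31)          -> result = 0xffffffff (sign mask)
//     Add(result, dividend, result LSR 30)  -> -5 + 3 = -2
//     Mov(result, result ASR 2)             -> -1, matching C semantics (-5/4)
// A bare arithmetic shift would give -2 (rounding toward minus infinity);
// adding 2^shift - 1 to negative dividends first makes the shift truncate.
// The Cmp(dividend, 1) trick earlier works because kMinInt - 1 is the only
// subtraction of 1 that sets the overflow flag (vs).

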
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register temp = ToRegister32(instr->temp());
    DCHECK(!AreAliased(dividend, result, temp));
    __ Sxtw(dividend.X(), dividend);
    __ Mov(temp, divisor);
    __ Smsubl(temp.X(), result, temp, dividend.X());
    DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
  }
}


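// TruncatingDiv above implements division by a constant as a multiply by a
// precomputed magic number plus shifts, so no Sdiv is issued. The Sxtw/Smsubl
// pair then reconstructs the remainder in 64 bits, conceptually
//   temp = dividend - result * divisor
// and a non-zero remainder deopts: without the kAllUsesTruncatingToInt32 flag
// the result must be numerically exact.

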
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register result = ToRegister32(instr->result());

  // Issue the division first, and then check for any deopt cases whilst the
  // result is computed.
  __ Sdiv(result, dividend, divisor);

  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DCHECK(!instr->temp());
    return;
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) as that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);

    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
    // zero, ie. zero dividend with negative divisor deopts.
    // If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
    __ Ccmp(dividend, 0, NoFlag, mi);
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    // If overflow is set, ie. dividend = kMinInt, compare the divisor with
    // -1. If overflow is clear, set the flags for condition ne, as the
    // dividend isn't kMinInt, and thus we shouldn't deopt.
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  // Compute the remainder and deopt if it's not zero.
  Register remainder = ToRegister32(instr->temp());
  __ Msub(remainder, result, divisor, dividend);
  DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
}


void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister32(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
  }

  __ TryRepresentDoubleAsInt32(result, input, double_scratch());
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

  if (instr->tag_result()) {
    __ SmiTag(result.X());
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // The FunctionLiteral instruction is marked as a call; we can trash any
  // register.
  DCHECK(instr->IsMarkedAsCall());

  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
    __ Mov(x1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, x2, x1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;

  __ EnumLengthUntagged(result, map);
  __ Cbnz(result, &load_cache);

  __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ B(&done);

  __ Bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);

  __ Bind(&done);
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register object = ToRegister(instr->object());
  Register null_value = x5;

  DCHECK(instr->IsMarkedAsCall());
  DCHECK(object.Is(x0));

  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr,
                   Deoptimizer::kUndefined);

  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ Cmp(object, null_value);
  DeoptimizeIf(eq, instr, Deoptimizer::kNull);

  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);

  Label use_cache, call_runtime;
  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);

  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
  __ B(&use_cache);

  // Get the set of properties to enumerate.
  __ Bind(&call_runtime);
  __ Push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
                      Deoptimizer::kWrongMap);

  __ Bind(&use_cache);
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  // Assert that we can use a W register load to get the hash.
  DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
  __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::EmitGoto(int block) {
  // Do not emit a jump if we are emitting a goto to the next block.
  if (!IsNextEmittedBlock(block)) {
    __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister32(instr->temp());

  // Assert that the cache status bits fit in a W register.
  DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
  __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
  __ Tst(temp, String::kContainsCachedArrayIndexMask);
  EmitBranch(instr, eq);
}


// The HHasInstanceTypeAndBranch instruction is built with an interval of types
// to test, but is only used in very restricted ways. The only possible kinds
// of intervals are:
//  - [ FIRST_TYPE, instr->to() ]
//  - [ instr->from(), LAST_TYPE ]
//  - instr->from() == instr->to()
//
// These kinds of intervals can be checked with only one compare instruction,
// provided the correct value and test condition are used.
//
// TestType() will return the value to use in the compare instruction and
// BranchCondition() will return the condition to use, depending on the kind
// of interval actually specified in the instruction.
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK((from == to) || (to == LAST_TYPE));
  return from;
}


// See comment above TestType function for what this function does.
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


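// Example pairings produced by TestType/BranchCondition (illustrative):
//   from == to == JS_DATE_TYPE  ->  Cmp(type, JS_DATE_TYPE); branch on eq
//   [from, LAST_TYPE]           ->  Cmp(type, from);         branch on hs
//   [FIRST_TYPE, to]            ->  Cmp(type, to);           branch on ls

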
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    __ Add(result, base, ToOperand32(instr->offset()));
  } else {
    __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
  }
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Assert that the arguments are in the registers expected by InstanceofStub.
  DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
  DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));

  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  // InstanceofStub returns a result in x0:
  //   0     => not an instance
  //   smi 1 => instance.
  __ Cmp(x0, 0);
  __ LoadTrueFalseRoots(x0, x1);
  __ Csel(x0, x0, x1, eq);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
    }
    virtual LInstruction* instr() { return instr_; }

   private:
    LInstanceOfKnownGlobal* instr_;
  };

  DeferredInstanceOfKnownGlobal* deferred =
      new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label map_check, return_false, cache_miss, done;
  Register object = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  // x4 is expected in the associated deferred code and stub.
  Register map_check_site = x4;
  Register map = x5;

  // This instruction is marked as call. We can clobber any register.
  DCHECK(instr->IsMarkedAsCall());

  // We must take into account that object is in x11.
  DCHECK(object.Is(x11));
  Register scratch = x10;

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &return_false);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  {
    // Below we use Factory::the_hole_value() on purpose instead of loading
    // from the root array to force relocation and later be able to patch with
    // a customized value.
    InstructionAccurateScope scope(masm(), 5);
    __ bind(&map_check);
    // Will be patched with the cached map.
    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ ldr(scratch, Immediate(cell));
    __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
    __ cmp(map, scratch);
    __ b(&cache_miss, ne);
    // The address of this instruction is computed relative to the map check
    // above, so check the size of the code generated.
    DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
    // Will be patched with the cached result.
    __ ldr(result, Immediate(factory()->the_hole_value()));
  }
  __ B(&done);

  // The inlined call site cache did not match.
  // Check null and string before calling the deferred code.
  __ Bind(&cache_miss);
  // Compute the address of the map check. It must not be clobbered until the
  // InstanceOfStub has used it.
  __ Adr(map_check_site, &map_check);
  // Null is not instance of anything.
  __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);

  // String values are not instances of anything.
  // Return false if the object is a string. Otherwise, jump to the deferred
  // code.
  // Note that we can't jump directly to deferred code from
  // IsObjectJSStringType, because it uses tbz for the jump and the deferred
  // code can be out of range.
  __ IsObjectJSStringType(object, scratch, NULL, &return_false);
  __ B(deferred->entry());

  __ Bind(&return_false);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result is either true or false.
  __ Bind(deferred->exit());
  __ Bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  Register result = ToRegister(instr->result());
  DCHECK(result.Is(x0));  // InstanceofStub returns its result in x0.
  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);

  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());

  // Prepare InstanceofStub arguments.
  DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
  __ LoadObject(InstanceofStub::right(), instr->function());

  InstanceofStub stub(isolate(), flags);
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

  // Put the result value into the result register slot.
  __ StoreToSafepointRegisterSlot(result, result);
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  Register value = ToRegister32(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Scvtf(result, value);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // The function is required to be in x1.
  DCHECK(ToRegister(instr->function()).is(x1));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Get the frame pointer for the calling frame.
  __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ B(ne, &check_frame_marker);
  __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ Bind(&check_frame_marker);
  __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));

  EmitCompareAndBranch(
      instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Label* is_object = instr->TrueLabel(chunk_);
  Label* is_not_object = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());
  Register map = ToRegister(instr->temp1());
  Register scratch = ToRegister(instr->temp2());

  __ JumpIfSmi(value, is_not_object);
  __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);

  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));

  // Check for undetectable objects.
  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);

  // Check that the instance type is in the object type range.
  __ IsInstanceJSObjectType(map, scratch, NULL);
  // Flags have been updated by IsInstanceJSObjectType. We can now test the
  // flags for the "le" condition to check if the object's type is a valid
  // JS object type.
  EmitBranch(instr, le);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register val = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register value = ToRegister(instr->value());
  STATIC_ASSERT(kSmiTag == 0);
  EmitTestAndBranch(instr, eq, value, kSmiTagMask);
}


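// The smi check above reduces to a single bit test: tagged smis have the low
// bit clear (kSmiTag == 0, kSmiTagMask == 1), so testing value against the
// mask and branching on eq takes the true branch exactly for smis.

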
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));

  EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));

  __ Bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    } else {
      Label not_the_hole;
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ Bind(&not_the_hole);
    }
  }
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Get the prototype or initial map from the function.
  __ Ldr(result, FieldMemOperand(function,
                                 JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                   Deoptimizer::kHole);

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, temp, temp, MAP_TYPE);
  __ B(ne, &done);

  // Get the prototype from the initial map.
  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ Bind(&done);
}


template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  DCHECK(FLAG_vector_ics);
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = VectorLoadICDescriptor::SlotRegister();
  DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
  DCHECK(slot_register.is(x0));

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ Mov(vector_register, vector);
  // No need to allocate this register.
  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ Mov(slot_register, Smi::FromInt(index));
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->global_object())
             .is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).Is(x0));
  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  }
  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
                                                       PREMONOMORPHIC).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
    Register key,
    Register base,
    Register scratch,
    bool key_is_smi,
    bool key_is_constant,
    int constant_key,
    ElementsKind elements_kind,
    int base_offset) {
  int element_size_shift = ElementsKindToShiftSize(elements_kind);

  if (key_is_constant) {
    int key_offset = constant_key << element_size_shift;
    return MemOperand(base, key_offset + base_offset);
  }

  if (key_is_smi) {
    __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
    return MemOperand(scratch, base_offset);
  }

  if (base_offset == 0) {
    return MemOperand(base, key, SXTW, element_size_shift);
  }

  DCHECK(!AreAliased(scratch, key));
  __ Add(scratch, base, base_offset);
  return MemOperand(scratch, key, SXTW, element_size_shift);
}


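// Addressing-mode summary for the helper above (illustrative):
//   constant key            -> [base, (key << size_shift) + base_offset]
//   smi key                 -> untag+scale into scratch, then
//                              [scratch, base_offset]
//   int32 key, offset == 0  -> [base, key, SXTW #size_shift]  (no extra Add)
//   int32 key, offset != 0  -> Add(scratch, base, offset), then scaled index
// The SXTW extension is what makes a possibly-negative 32-bit key safe in the
// 64-bit address computation.

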
void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register scratch;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  int constant_key = 0;
  if (key_is_constant) {
    DCHECK(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    scratch = ToRegister(instr->temp());
    key = ToRegister(instr->key());
  }

  MemOperand mem_op =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->base_offset());

  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
      (elements_kind == FLOAT32_ELEMENTS)) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result.S(), mem_op);
    __ Fcvt(result, result.S());
  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
             (elements_kind == FLOAT64_ELEMENTS)) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ Ldr(result, mem_op);
  } else {
    Register result = ToRegister(instr->result());

    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ Ldrsb(result, mem_op);
        break;
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ Ldrb(result, mem_op);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ Ldrsh(result, mem_op);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Ldrh(result, mem_op);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ Ldrsw(result, mem_op);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Ldr(result.W(), mem_op);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          // Deopt if value > 0x80000000.
          __ Tst(result, 0xFFFFFFFF80000000);
          DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
    }
  }
}


MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
                                              Register elements,
                                              Register key,
                                              bool key_is_tagged,
                                              ElementsKind elements_kind,
                                              Representation representation,
                                              int base_offset) {
  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
  STATIC_ASSERT(kSmiTag == 0);
  int element_size_shift = ElementsKindToShiftSize(elements_kind);

  // Even though the HLoad/StoreKeyed instructions force the input
  // representation for the key to be an integer, the input gets replaced
  // during bounds check elimination with the index argument to the bounds
  // check, which can be tagged, so that case must be handled here, too.
  if (key_is_tagged) {
    __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
    if (representation.IsInteger32()) {
      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
      // Read or write only the smi payload in the case of fast smi arrays.
      return UntagSmiMemOperand(base, base_offset);
    } else {
      return MemOperand(base, base_offset);
    }
  } else {
    // Sign extend key because it could be a 32-bit negative value or contain
    // garbage in the top 32 bits. The address computation happens in 64-bit.
    DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
    if (representation.IsInteger32()) {
      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
      // Read or write only the smi payload in the case of fast smi arrays.
      __ Add(base, elements, Operand(key, SXTW, element_size_shift));
      return UntagSmiMemOperand(base, base_offset);
    } else {
      __ Add(base, elements, base_offset);
      return MemOperand(base, key, SXTW, element_size_shift);
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister result = ToDoubleRegister(instr->result());
  MemOperand mem_op;

  if (instr->key()->IsConstantOperand()) {
    DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
           (instr->temp() == NULL));

    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    int offset = instr->base_offset() + constant_key * kDoubleSize;
    mem_op = MemOperand(elements, offset);
  } else {
    Register load_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      instr->hydrogen()->representation(),
                                      instr->base_offset());
  }

  __ Ldr(result, mem_op);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register scratch = ToRegister(instr->temp());
    __ Fmov(scratch, result);
    __ Eor(scratch, scratch, kHoleNanInt64);
    DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
  }
}


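// As in DoCmpHoleAndBranchD, the hole check above works on raw bits: Fmov
// moves the loaded double's bit pattern into a core register, and Eor with
// kHoleNanInt64 produces zero only on an exact bit match, which no
// floating-point NaN comparison could establish.

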
void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  MemOperand mem_op;

  Representation representation = instr->hydrogen()->representation();
  if (instr->key()->IsConstantOperand()) {
    DCHECK(instr->temp() == NULL);
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset = instr->base_offset() +
        ToInteger32(const_operand) * kPointerSize;
    if (representation.IsInteger32()) {
      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
      STATIC_ASSERT(kSmiTag == 0);
      mem_op = UntagSmiMemOperand(elements, offset);
    } else {
      mem_op = MemOperand(elements, offset);
    }
  } else {
    Register load_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();

    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
                                      instr->hydrogen()->elements_kind(),
                                      representation, instr->base_offset());
  }

  __ Load(result, mem_op, representation);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
    } else {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    }
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic =
      CodeFactory::KeyedLoadICInOptimizedCode(
          isolate(), instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  DCHECK(ToRegister(instr->result()).Is(x0));
}


3658 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3659 HObjectAccess access = instr->hydrogen()->access();
3660 int offset = access.offset();
3661 Register object = ToRegister(instr->object());
3663 if (access.IsExternalMemory()) {
3664 Register result = ToRegister(instr->result());
3665 __ Load(result, MemOperand(object, offset), access.representation());
3669 if (instr->hydrogen()->representation().IsDouble()) {
3670 DCHECK(access.IsInobject());
3671 FPRegister result = ToDoubleRegister(instr->result());
3672 __ Ldr(result, FieldMemOperand(object, offset));
3673 return;
3674 }
3676 Register result = ToRegister(instr->result());
3677 Register source;
3678 if (access.IsInobject()) {
3679 source = object;
3680 } else {
3681 // Load the properties array, using result as a scratch register.
3682 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3683 source = result;
3684 }
3686 if (access.representation().IsSmi() &&
3687 instr->hydrogen()->representation().IsInteger32()) {
3688 // Read int value directly from upper half of the smi.
3689 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3690 STATIC_ASSERT(kSmiTag == 0);
3691 __ Load(result, UntagSmiFieldMemOperand(source, offset),
3692 Representation::Integer32());
3693 } else {
3694 __ Load(result, FieldMemOperand(source, offset), access.representation());
3695 }
3696 }
3699 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3700 DCHECK(ToRegister(instr->context()).is(cp));
3701 // LoadIC expects name and receiver in registers.
3702 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3703 __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3704 if (FLAG_vector_ics) {
3705 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3706 }
3708 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
3709 isolate(), NOT_CONTEXTUAL,
3710 instr->hydrogen()->initialization_state()).code();
3711 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3713 DCHECK(ToRegister(instr->result()).is(x0));
3714 }
3717 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3718 Register result = ToRegister(instr->result());
3719 __ LoadRoot(result, instr->index());
3720 }
3723 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
3724 Register result = ToRegister(instr->result());
3725 Register map = ToRegister(instr->value());
3726 __ EnumLengthSmi(result, map);
3727 }
3730 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3731 Representation r = instr->hydrogen()->value()->representation();
3732 if (r.IsDouble()) {
3733 DoubleRegister input = ToDoubleRegister(instr->value());
3734 DoubleRegister result = ToDoubleRegister(instr->result());
3735 __ Fabs(result, input);
3736 } else if (r.IsSmi() || r.IsInteger32()) {
3737 Register input = r.IsSmi() ? ToRegister(instr->value())
3738 : ToRegister32(instr->value());
3739 Register result = r.IsSmi() ? ToRegister(instr->result())
3740 : ToRegister32(instr->result());
3741 __ Abs(result, input);
3742 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
3743 }
3744 }
3747 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3748 Label* exit,
3749 Label* allocation_entry) {
3750 // Handle the tricky cases of MathAbsTagged:
3751 // - HeapNumber inputs.
3752 // - Negative inputs produce a positive result, so a new HeapNumber is
3753 // allocated to hold it.
3754 // - Positive inputs are returned as-is, since there is no need to allocate
3755 // a new HeapNumber for the result.
3756 // - The (smi) input -0x80000000 produces +0x80000000, which does not fit in
3757 // a smi. In this case, the inline code sets the result and jumps directly
3758 // to the allocation_entry label.
3759 DCHECK(instr->context() != NULL);
3760 DCHECK(ToRegister(instr->context()).is(cp));
3761 Register input = ToRegister(instr->value());
3762 Register temp1 = ToRegister(instr->temp1());
3763 Register temp2 = ToRegister(instr->temp2());
3764 Register result_bits = ToRegister(instr->temp3());
3765 Register result = ToRegister(instr->result());
3767 Label runtime_allocation;
3769 // Deoptimize if the input is not a HeapNumber.
3770 DeoptimizeIfNotHeapNumber(input, instr);
3772 // If the argument is positive, we can return it as-is, without any need to
3773 // allocate a new HeapNumber for the result. We have to do this in integer
3774 // registers (rather than with fabs) because we need to be able to distinguish
3775 // the two cases.
3776 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
3777 __ Mov(result, input);
3778 __ Tbz(result_bits, kXSignBit, exit);
3780 // Calculate abs(input) by clearing the sign bit.
3781 __ Bic(result_bits, result_bits, kXSignMask);
3783 // Allocate a new HeapNumber to hold the result.
3784 // result_bits The bit representation of the (double) result.
3785 __ Bind(allocation_entry);
3786 __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
3787 // The inline (non-deferred) code will store result_bits into result.
3788 __ B(exit);
3790 __ Bind(&runtime_allocation);
3791 if (FLAG_debug_code) {
3792 // Because result is in the pointer map, we need to make sure it has a valid
3793 // tagged value before we call the runtime. We speculatively set it to the
3794 // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
3795 // be valid.
3796 Label result_ok;
3797 Register input = ToRegister(instr->value());
3798 __ JumpIfSmi(result, &result_ok);
3799 __ Cmp(input, result);
3800 __ Assert(eq, kUnexpectedValue);
3801 __ Bind(&result_ok);
3802 }
3804 { PushSafepointRegistersScope scope(this);
3805 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3806 instr->context());
3807 __ StoreToSafepointRegisterSlot(x0, result);
3808 }
3809 // The inline (non-deferred) code will store result_bits into result.
3810 }
3813 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
3814 // Class for deferred case.
3815 class DeferredMathAbsTagged: public LDeferredCode {
3816 public:
3817 DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
3818 : LDeferredCode(codegen), instr_(instr) { }
3819 virtual void Generate() {
3820 codegen()->DoDeferredMathAbsTagged(instr_, exit(),
3821 allocation_entry());
3822 }
3823 virtual LInstruction* instr() { return instr_; }
3824 Label* allocation_entry() { return &allocation_; }
3825 private:
3826 LMathAbsTagged* instr_;
3827 Label allocation_;
3828 };
3830 // TODO(jbramley): The early-exit mechanism would skip the new frame handling
3831 // in GenerateDeferredCode. Tidy this up.
3832 DCHECK(!NeedsDeferredFrame());
3834 DeferredMathAbsTagged* deferred =
3835 new(zone()) DeferredMathAbsTagged(this, instr);
3837 DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
3838 instr->hydrogen()->value()->representation().IsSmi());
3839 Register input = ToRegister(instr->value());
3840 Register result_bits = ToRegister(instr->temp3());
3841 Register result = ToRegister(instr->result());
3842 Label done;
3844 // Handle smis inline.
3845 // We can treat smis as 64-bit integers, since the (low-order) tag bits will
3846 // never get set by the negation. This is therefore the same as the Integer32
3847 // case in DoMathAbs, except that it operates on 64-bit values.
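// For example, the smi -5 is the 64-bit value 0xfffffffb00000000; taking
// its 64-bit absolute value gives 0x0000000500000000, which is the smi 5.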
3848 STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
3850 __ JumpIfNotSmi(input, deferred->entry());
3852 __ Abs(result, input, NULL, &done);
3854 // The result is the magnitude (abs) of the smallest value a smi can
3855 // represent, encoded as a double.
3856 __ Mov(result_bits, double_to_rawbits(0x80000000));
3857 __ B(deferred->allocation_entry());
3859 __ Bind(deferred->exit());
3860 __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
3862 __ Bind(&done);
3863 }
3866 void LCodeGen::DoMathExp(LMathExp* instr) {
3867 DoubleRegister input = ToDoubleRegister(instr->value());
3868 DoubleRegister result = ToDoubleRegister(instr->result());
3869 DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
3870 DoubleRegister double_temp2 = double_scratch();
3871 Register temp1 = ToRegister(instr->temp1());
3872 Register temp2 = ToRegister(instr->temp2());
3873 Register temp3 = ToRegister(instr->temp3());
3875 MathExpGenerator::EmitMathExp(masm(), input, result,
3876 double_temp1, double_temp2,
3877 temp1, temp2, temp3);
3878 }
3881 void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
3882 DoubleRegister input = ToDoubleRegister(instr->value());
3883 DoubleRegister result = ToDoubleRegister(instr->result());
3885 __ Frintm(result, input);
3886 }
3889 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
3890 DoubleRegister input = ToDoubleRegister(instr->value());
3891 Register result = ToRegister(instr->result());
3893 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3894 DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
3895 }
3897 __ Fcvtms(result, input);
3899 // Check that the result fits into a 32-bit integer.
3900 // - The result did not overflow.
3901 __ Cmp(result, Operand(result, SXTW));
3902 // - The input was not NaN.
3903 __ Fccmp(input, input, NoFlag, eq);
3904 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
3905 }
3908 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
3909 Register dividend = ToRegister32(instr->dividend());
3910 Register result = ToRegister32(instr->result());
3911 int32_t divisor = instr->divisor();
3913 // If the divisor is 1, return the dividend.
3914 if (divisor == 1) {
3915 __ Mov(result, dividend, kDiscardForSameWReg);
3916 return;
3917 }
3919 // If the divisor is positive, things are easy: There can be no deopts and we
3920 // can simply do an arithmetic right shift.
3921 int32_t shift = WhichPowerOf2Abs(divisor);
3922 if (divisor > 1) {
3923 __ Mov(result, Operand(dividend, ASR, shift));
3924 return;
3925 }
3927 // If the divisor is negative, we have to negate and handle edge cases.
3928 __ Negs(result, dividend);
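// For example, with divisor == -4 (shift == 2) and dividend == 7: Negs
// yields -7, and the Asr below yields -2 == floor(7 / -4). If dividend is
// kMinInt, Negs overflows (V is set) and the final Csel substitutes the
// precomputed constant kMinInt / divisor.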
3929 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3930 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
3931 }
3933 // Dividing by -1 is basically negation, unless we overflow.
3934 if (divisor == -1) {
3935 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3936 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
3937 }
3938 return;
3939 }
3941 // If the negation could not overflow, simply shifting is OK.
3942 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3943 __ Mov(result, Operand(dividend, ASR, shift));
3944 return;
3945 }
3947 __ Asr(result, result, shift);
3948 __ Csel(result, result, kMinInt / divisor, vc);
3949 }
3952 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3953 Register dividend = ToRegister32(instr->dividend());
3954 int32_t divisor = instr->divisor();
3955 Register result = ToRegister32(instr->result());
3956 DCHECK(!AreAliased(dividend, result));
3958 if (divisor == 0) {
3959 Deoptimize(instr, Deoptimizer::kDivisionByZero);
3960 return;
3961 }
3963 // Check for (0 / -x) that will produce negative zero.
3964 HMathFloorOfDiv* hdiv = instr->hydrogen();
3965 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
3966 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
3967 }
3969 // Easy case: We need no dynamic check for the dividend and the flooring
3970 // division is the same as the truncating division.
3971 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
3972 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
3973 __ TruncatingDiv(result, dividend, Abs(divisor));
3974 if (divisor < 0) __ Neg(result, result);
3975 return;
3976 }
3978 // In the general case we may need to adjust before and after the truncating
3979 // division to get a flooring division.
3980 Register temp = ToRegister32(instr->temp());
3981 DCHECK(!AreAliased(temp, dividend, result));
3982 Label needs_adjustment, done;
3983 __ Cmp(dividend, 0);
3984 __ B(divisor > 0 ? lt : gt, &needs_adjustment);
3985 __ TruncatingDiv(result, dividend, Abs(divisor));
3986 if (divisor < 0) __ Neg(result, result);
3987 __ B(&done);
3988 __ Bind(&needs_adjustment);
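// For example, with divisor == 3 and dividend == -7: temp = -6,
// TruncatingDiv gives -2, and the Sub below produces -3 == floor(-7 / 3).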
3989 __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
3990 __ TruncatingDiv(result, temp, Abs(divisor));
3991 if (divisor < 0) __ Neg(result, result);
3992 __ Sub(result, result, Operand(1));
3993 __ Bind(&done);
3994 }
3997 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
3998 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
3999 Register dividend = ToRegister32(instr->dividend());
4000 Register divisor = ToRegister32(instr->divisor());
4001 Register remainder = ToRegister32(instr->temp());
4002 Register result = ToRegister32(instr->result());
4004 // Sdiv can't cause an exception on ARM64, so we can execute it
4005 // speculatively, ahead of the checks below.
4006 __ Sdiv(result, dividend, divisor);
4008 // Check for x / 0.
4009 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
4011 // Check for (kMinInt / -1).
4012 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
4013 // The V flag will be set iff dividend == kMinInt.
4014 __ Cmp(dividend, 1);
4015 __ Ccmp(divisor, -1, NoFlag, vs);
4016 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
4017 }
4019 // Check for (0 / -x) that will produce negative zero.
4020 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4021 __ Cmp(divisor, 0);
4022 __ Ccmp(dividend, 0, ZFlag, mi);
4023 // "divisor" can't be zero here because the code would have already been
4024 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
4025 // In this case we need to deoptimize to produce a -0.
4026 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
4027 }
4029 Label done;
4030 // If both operands have the same sign then we are done.
4031 __ Eor(remainder, dividend, divisor);
4032 __ Tbz(remainder, kWSignBit, &done);
4034 // Check if the result needs to be corrected.
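// Sdiv truncates towards zero, so when the operand signs differ and the
// division was inexact, the quotient is one too large for flooring
// semantics. For example, -7 / 2: Sdiv gives -3 (remainder -1), and the
// Sub below corrects it to -4 == floor(-7 / 2).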
4035 __ Msub(remainder, result, divisor, dividend);
4036 __ Cbz(remainder, &done);
4037 __ Sub(result, result, 1);
4039 __ Bind(&done);
4040 }
4043 void LCodeGen::DoMathLog(LMathLog* instr) {
4044 DCHECK(instr->IsMarkedAsCall());
4045 DCHECK(ToDoubleRegister(instr->value()).is(d0));
4046 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
4047 0, 1);
4048 DCHECK(ToDoubleRegister(instr->result()).Is(d0));
4049 }
4052 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4053 Register input = ToRegister32(instr->value());
4054 Register result = ToRegister32(instr->result());
4055 __ Clz(result, input);
4056 }
4059 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4060 DoubleRegister input = ToDoubleRegister(instr->value());
4061 DoubleRegister result = ToDoubleRegister(instr->result());
4062 Label done;
4064 // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
4065 // Math.pow(-Infinity, 0.5) == +Infinity
4066 // Math.pow(-0.0, 0.5) == +0.0
4068 // Catch -infinity inputs first.
4069 // TODO(jbramley): A constant infinity register would be helpful here.
4070 __ Fmov(double_scratch(), kFP64NegativeInfinity);
4071 __ Fcmp(double_scratch(), input);
4072 __ Fabs(result, input);
4073 __ B(&done, eq);
4075 // Add +0.0 to convert -0.0 to +0.0.
4076 __ Fadd(double_scratch(), input, fp_zero);
4077 __ Fsqrt(result, double_scratch());
4079 __ Bind(&done);
4080 }
4083 void LCodeGen::DoPower(LPower* instr) {
4084 Representation exponent_type = instr->hydrogen()->right()->representation();
4085 // Having marked this as a call, we can use any registers.
4086 // Just make sure that the input/output registers are the expected ones.
4087 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
4088 Register integer_exponent = MathPowIntegerDescriptor::exponent();
4089 DCHECK(!instr->right()->IsDoubleRegister() ||
4090 ToDoubleRegister(instr->right()).is(d1));
4091 DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
4092 ToRegister(instr->right()).is(tagged_exponent));
4093 DCHECK(!exponent_type.IsInteger32() ||
4094 ToRegister(instr->right()).is(integer_exponent));
4095 DCHECK(ToDoubleRegister(instr->left()).is(d0));
4096 DCHECK(ToDoubleRegister(instr->result()).is(d0));
4098 if (exponent_type.IsSmi()) {
4099 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4100 __ CallStub(&stub);
4101 } else if (exponent_type.IsTagged()) {
4102 Label no_deopt;
4103 __ JumpIfSmi(tagged_exponent, &no_deopt);
4104 DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
4105 __ Bind(&no_deopt);
4106 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4107 __ CallStub(&stub);
4108 } else if (exponent_type.IsInteger32()) {
4109 // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
4110 // supports large integer exponents.
4111 __ Sxtw(integer_exponent, integer_exponent);
4112 MathPowStub stub(isolate(), MathPowStub::INTEGER);
4113 __ CallStub(&stub);
4114 } else {
4115 DCHECK(exponent_type.IsDouble());
4116 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
4117 __ CallStub(&stub);
4118 }
4119 }
4122 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
4123 DoubleRegister input = ToDoubleRegister(instr->value());
4124 DoubleRegister result = ToDoubleRegister(instr->result());
4125 DoubleRegister scratch_d = double_scratch();
4127 DCHECK(!AreAliased(input, result, scratch_d));
4129 Label done;
4131 __ Frinta(result, input);
4132 __ Fcmp(input, 0.0);
4133 __ Fccmp(result, input, ZFlag, lt);
4134 // The result is correct if the input was in [-0, +infinity], or was a
4135 // negative integral value.
4136 __ B(eq, &done);
4138 // Here the input is negative, non integral, with an exponent lower than 52.
4139 // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
4140 // case. So we can safely add 0.5.
4141 __ Fmov(scratch_d, 0.5);
4142 __ Fadd(result, input, scratch_d);
4143 __ Frintm(result, result);
4144 // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
4145 __ Fabs(result, result);
4146 __ Fneg(result, result);
4148 __ Bind(&done);
4149 }
4152 void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
4153 DoubleRegister input = ToDoubleRegister(instr->value());
4154 DoubleRegister temp = ToDoubleRegister(instr->temp1());
4155 DoubleRegister dot_five = double_scratch();
4156 Register result = ToRegister(instr->result());
4157 Label done;
4159 // Math.round() rounds to the nearest integer, with ties going towards
4160 // +infinity. This does not match any IEEE-754 rounding mode.
4161 // - Infinities and NaNs are propagated unchanged, but cause deopts because
4162 // they can't be represented as integers.
4163 // - The sign of the result is the same as the sign of the input. This means
4164 // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
4165 // result of -0.0.
4167 // Add 0.5 and round towards -infinity.
4168 __ Fmov(dot_five, 0.5);
4169 __ Fadd(temp, input, dot_five);
4170 __ Fcvtms(result, temp);
4172 // The result is correct if:
4173 // result is not 0, as the input could be NaN or [-0.5, -0.0].
4174 // result is not 1, as 0.499...94 will wrongly map to 1.
4175 // result fits in 32 bits.
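// For example, input == 2.6: temp == 3.1, Fcvtms gives 3, which is neither
// 0 nor 1 and fits in 32 bits, so it passes both checks below.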
4176 __ Cmp(result, Operand(result.W(), SXTW));
4177 __ Ccmp(result, 1, ZFlag, eq);
4178 __ B(hi, &done);
4180 // At this point, we have to handle possible inputs of NaN or numbers in the
4181 // range [-0.5, 1.5[, or numbers larger than 32 bits.
4183 // Deoptimize if the result > 1, as it must be larger than 32 bits.
4184 __ Cmp(result, 1);
4185 DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
4187 // Deoptimize for negative inputs, which at this point are only numbers in
4188 // the range [-0.5, -0.0]
4189 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4190 __ Fmov(result, input);
4191 DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
4192 }
4194 // Deoptimize if the input was NaN.
4195 __ Fcmp(input, dot_five);
4196 DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
4198 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
4199 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
4200 // else 0; we avoid dealing with 0.499...94 directly.
4201 __ Cset(result, ge);
4202 __ Bind(&done);
4203 }
4206 void LCodeGen::DoMathFround(LMathFround* instr) {
4207 DoubleRegister input = ToDoubleRegister(instr->value());
4208 DoubleRegister result = ToDoubleRegister(instr->result());
4209 __ Fcvt(result.S(), input);
4210 __ Fcvt(result, result.S());
4211 }
4214 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4215 DoubleRegister input = ToDoubleRegister(instr->value());
4216 DoubleRegister result = ToDoubleRegister(instr->result());
4217 __ Fsqrt(result, input);
4218 }
4221 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
4222 HMathMinMax::Operation op = instr->hydrogen()->operation();
4223 if (instr->hydrogen()->representation().IsInteger32()) {
4224 Register result = ToRegister32(instr->result());
4225 Register left = ToRegister32(instr->left());
4226 Operand right = ToOperand32(instr->right());
4228 __ Cmp(left, right);
4229 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4230 } else if (instr->hydrogen()->representation().IsSmi()) {
4231 Register result = ToRegister(instr->result());
4232 Register left = ToRegister(instr->left());
4233 Operand right = ToOperand(instr->right());
4235 __ Cmp(left, right);
4236 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4237 } else {
4238 DCHECK(instr->hydrogen()->representation().IsDouble());
4239 DoubleRegister result = ToDoubleRegister(instr->result());
4240 DoubleRegister left = ToDoubleRegister(instr->left());
4241 DoubleRegister right = ToDoubleRegister(instr->right());
4243 if (op == HMathMinMax::kMathMax) {
4244 __ Fmax(result, left, right);
4245 } else {
4246 DCHECK(op == HMathMinMax::kMathMin);
4247 __ Fmin(result, left, right);
4248 }
4249 }
4250 }
4253 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
4254 Register dividend = ToRegister32(instr->dividend());
4255 int32_t divisor = instr->divisor();
4256 DCHECK(dividend.is(ToRegister32(instr->result())));
4258 // Theoretically, a variation of the branch-free code for integer division by
4259 // a power of 2 (calculating the remainder via an additional multiplication
4260 // (which gets simplified to an 'and') and subtraction) should be faster, and
4261 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
4262 // indicate that positive dividends are heavily favored, so the branching
4263 // version performs better.
4264 HMod* hmod = instr->hydrogen();
4265 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
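// For example, divisor == 8 or -8 gives mask == 7; dividend == -5 then
// takes the negative path: Neg gives 5, And gives 5, and Negs restores the
// sign to produce -5, matching JS (-5 % 8) == -5.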
4266 Label dividend_is_not_negative, done;
4267 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
4268 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
4269 // Note that this is correct even for kMinInt operands.
4270 __ Neg(dividend, dividend);
4271 __ And(dividend, dividend, mask);
4272 __ Negs(dividend, dividend);
4273 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4274 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
4275 }
4276 __ B(&done);
4277 }
4279 __ bind(&dividend_is_not_negative);
4280 __ And(dividend, dividend, mask);
4281 __ bind(&done);
4282 }
4285 void LCodeGen::DoModByConstI(LModByConstI* instr) {
4286 Register dividend = ToRegister32(instr->dividend());
4287 int32_t divisor = instr->divisor();
4288 Register result = ToRegister32(instr->result());
4289 Register temp = ToRegister32(instr->temp());
4290 DCHECK(!AreAliased(dividend, result, temp));
4292 if (divisor == 0) {
4293 Deoptimize(instr, Deoptimizer::kDivisionByZero);
4294 return;
4295 }
4297 __ TruncatingDiv(result, dividend, Abs(divisor));
4298 __ Sxtw(dividend.X(), dividend);
4299 __ Mov(temp, Abs(divisor));
4300 __ Smsubl(result.X(), result, temp, dividend.X());
4302 // Check for negative zero.
4303 HMod* hmod = instr->hydrogen();
4304 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4305 Label remainder_not_zero;
4306 __ Cbnz(result, &remainder_not_zero);
4307 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
4308 __ bind(&remainder_not_zero);
4309 }
4310 }
4313 void LCodeGen::DoModI(LModI* instr) {
4314 Register dividend = ToRegister32(instr->left());
4315 Register divisor = ToRegister32(instr->right());
4316 Register result = ToRegister32(instr->result());
4318 Label done;
4319 // modulo = dividend - quotient * divisor
4320 __ Sdiv(result, dividend, divisor);
4321 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
4322 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
4323 }
4324 __ Msub(result, result, divisor, dividend);
4325 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4326 __ Cbnz(result, &done);
4327 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
4328 }
4329 __ Bind(&done);
4330 }
4333 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4334 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
4335 bool is_smi = instr->hydrogen()->representation().IsSmi();
4336 Register result =
4337 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
4338 Register left =
4339 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
4340 int32_t right = ToInteger32(instr->right());
4341 DCHECK((right > -kMaxInt) && (right < kMaxInt));
4343 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4344 bool bailout_on_minus_zero =
4345 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4347 if (bailout_on_minus_zero) {
4348 if (right < 0) {
4349 // The result is -0 if right is negative and left is zero.
4350 DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
4351 } else if (right == 0) {
4352 // The result is -0 if the right is zero and the left is negative.
4353 DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
4354 }
4355 }
4357 switch (right) {
4358 // Cases which can detect overflow.
4359 case -1:
4360 if (can_overflow) {
4361 // Only 0x80000000 can overflow here.
4362 __ Negs(result, left);
4363 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4364 } else {
4365 __ Neg(result, left);
4366 }
4367 break;
4368 case 0:
4369 // This case can never overflow.
4370 __ Mov(result, 0);
4371 break;
4372 case 1:
4373 // This case can never overflow.
4374 __ Mov(result, left, kDiscardForSameWReg);
4375 break;
4376 case 2:
4377 if (can_overflow) {
4378 __ Adds(result, left, left);
4379 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4380 } else {
4381 __ Add(result, left, left);
4382 }
4383 break;
4385 default:
4386 // Multiplication by constant powers of two (and some related values)
4387 // can be done efficiently with shifted operands.
4388 int32_t right_abs = Abs(right);
4390 if (base::bits::IsPowerOfTwo32(right_abs)) {
4391 int right_log2 = WhichPowerOf2(right_abs);
4393 if (can_overflow) {
4394 Register scratch = result;
4395 DCHECK(!AreAliased(scratch, left));
4396 __ Cls(scratch, left);
4397 __ Cmp(scratch, right_log2);
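// Cls counts the leading sign bits after the sign bit itself; a left shift
// by right_log2 only stays in range if at least that many copies of the
// sign bit are available, hence the deopt on lt below.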
4398 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
4399 }
4401 if (right >= 0) {
4402 // result = left << log2(right)
4403 __ Lsl(result, left, right_log2);
4404 } else {
4405 // result = -left << log2(-right)
4406 if (can_overflow) {
4407 __ Negs(result, Operand(left, LSL, right_log2));
4408 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4409 } else {
4410 __ Neg(result, Operand(left, LSL, right_log2));
4411 }
4412 }
4413 } else {
4417 // For the following cases, we could perform a conservative overflow check
4418 // with CLS as above. However the few cycles saved are likely not worth
4419 // the risk of deoptimizing more often than required.
4420 DCHECK(!can_overflow);
4422 if (right > 0) {
4423 if (base::bits::IsPowerOfTwo32(right - 1)) {
4424 // result = left + left << log2(right - 1)
4425 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
4426 } else if (base::bits::IsPowerOfTwo32(right + 1)) {
4427 // result = -left + left << log2(right + 1)
4428 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
4429 __ Neg(result, result);
4430 } else {
4431 UNREACHABLE();
4432 }
4433 } else if (right < 0) {
4434 if (base::bits::IsPowerOfTwo32(-right + 1)) {
4435 // result = left - left << log2(-right + 1)
4436 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
4437 } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
4438 // result = -left - left << log2(-right - 1)
4439 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
4440 __ Neg(result, result);
4441 } else {
4442 UNREACHABLE();
4443 }
4444 }
4445 }
4446 }
4447 }
4449 void LCodeGen::DoMulI(LMulI* instr) {
4450 Register result = ToRegister32(instr->result());
4451 Register left = ToRegister32(instr->left());
4452 Register right = ToRegister32(instr->right());
4454 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4455 bool bailout_on_minus_zero =
4456 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4458 if (bailout_on_minus_zero && !left.Is(right)) {
4459 // If one operand is zero and the other is negative, the result is -0.
4460 // - Set Z (eq) if either left or right, or both, are 0.
4461 __ Cmp(left, 0);
4462 __ Ccmp(right, 0, ZFlag, ne);
4463 // - If so (eq), set N (mi) if left + right is negative.
4464 // - Otherwise, clear N.
4465 __ Ccmn(left, right, NoFlag, eq);
4466 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
4467 }
4469 if (can_overflow) {
4470 __ Smull(result.X(), left, right);
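// Smull produces the full 64-bit product; comparing it against its own
// sign-extended low 32 bits detects any product outside the int32 range.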
4471 __ Cmp(result.X(), Operand(result, SXTW));
4472 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
4473 } else {
4474 __ Mul(result, left, right);
4475 }
4476 }
4479 void LCodeGen::DoMulS(LMulS* instr) {
4480 Register result = ToRegister(instr->result());
4481 Register left = ToRegister(instr->left());
4482 Register right = ToRegister(instr->right());
4484 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4485 bool bailout_on_minus_zero =
4486 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4488 if (bailout_on_minus_zero && !left.Is(right)) {
4489 // If one operand is zero and the other is negative, the result is -0.
4490 // - Set Z (eq) if either left or right, or both, are 0.
4491 __ Cmp(left, 0);
4492 __ Ccmp(right, 0, ZFlag, ne);
4493 // - If so (eq), set N (mi) if left + right is negative.
4494 // - Otherwise, clear N.
4495 __ Ccmn(left, right, NoFlag, eq);
4496 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
4497 }
4499 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4500 if (can_overflow) {
4501 __ Smulh(result, left, right);
4502 __ Cmp(result, Operand(result.W(), SXTW));
4503 __ SmiTag(result);
4504 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
4505 } else {
4506 if (AreAliased(result, left, right)) {
4507 // All three registers are the same: half untag the input and then
4508 // multiply, giving a tagged result.
4509 STATIC_ASSERT((kSmiShift % 2) == 0);
4510 __ Asr(result, left, kSmiShift / 2);
4511 __ Mul(result, result, result);
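// Shifting the tagged value right by 16 halves the smi shift, so the
// product of two such values carries a full 32-bit shift again, i.e. the
// result is already a correctly tagged smi.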
4512 } else if (result.Is(left) && !left.Is(right)) {
4513 // Registers result and left alias, right is distinct: untag left into
4514 // result, and then multiply by right, giving a tagged result.
4515 __ SmiUntag(result, left);
4516 __ Mul(result, result, right);
4517 } else {
4518 DCHECK(!left.Is(result));
4519 // Registers result and right alias, left is distinct, or all registers
4520 // are distinct: untag right into result, and then multiply by left,
4521 // giving a tagged result.
4522 __ SmiUntag(result, right);
4523 __ Mul(result, left, result);
4524 }
4525 }
4526 }
4529 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4530 // TODO(3095996): Get rid of this. For now, we need to make the
4531 // result register contain a valid pointer because it is already
4532 // contained in the register pointer map.
4533 Register result = ToRegister(instr->result());
4534 __ Mov(result, 0);
4536 PushSafepointRegistersScope scope(this);
4537 // NumberTagU and NumberTagD use the context from the frame, rather than
4538 // the environment's HContext or HInlinedContext value.
4539 // They only call Runtime::kAllocateHeapNumber.
4540 // The corresponding HChange instructions are added in a phase that does
4541 // not have easy access to the local context.
4542 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4543 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4544 RecordSafepointWithRegisters(
4545 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4546 __ StoreToSafepointRegisterSlot(x0, result);
4547 }
4550 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4551 class DeferredNumberTagD: public LDeferredCode {
4552 public:
4553 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4554 : LDeferredCode(codegen), instr_(instr) { }
4555 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4556 virtual LInstruction* instr() { return instr_; }
4557 private:
4558 LNumberTagD* instr_;
4559 };
4561 DoubleRegister input = ToDoubleRegister(instr->value());
4562 Register result = ToRegister(instr->result());
4563 Register temp1 = ToRegister(instr->temp1());
4564 Register temp2 = ToRegister(instr->temp2());
4566 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4567 if (FLAG_inline_new) {
4568 __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
4569 } else {
4570 __ B(deferred->entry());
4571 }
4573 __ Bind(deferred->exit());
4574 __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
4575 }
4578 void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
4579 LOperand* value,
4580 LOperand* temp1,
4581 LOperand* temp2) {
4582 Label slow, convert_and_store;
4583 Register src = ToRegister32(value);
4584 Register dst = ToRegister(instr->result());
4585 Register scratch1 = ToRegister(temp1);
4587 if (FLAG_inline_new) {
4588 Register scratch2 = ToRegister(temp2);
4589 __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
4590 __ B(&convert_and_store);
4591 }
4593 // Slow case: call the runtime system to do the number allocation.
4594 __ Bind(&slow);
4595 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4596 // register is stored, as this register is in the pointer map, but contains an
4597 // integer value.
4598 __ Mov(dst, 0);
4599 {
4600 // Preserve the value of all registers.
4601 PushSafepointRegistersScope scope(this);
4603 // NumberTagU and NumberTagD use the context from the frame, rather than
4604 // the environment's HContext or HInlinedContext value.
4605 // They only call Runtime::kAllocateHeapNumber.
4606 // The corresponding HChange instructions are added in a phase that does
4607 // not have easy access to the local context.
4608 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4609 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4610 RecordSafepointWithRegisters(
4611 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4612 __ StoreToSafepointRegisterSlot(x0, dst);
4613 }
4615 // Convert number to floating point and store in the newly allocated heap
4616 // number.
4617 __ Bind(&convert_and_store);
4618 DoubleRegister dbl_scratch = double_scratch();
4619 __ Ucvtf(dbl_scratch, src);
4620 __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4621 }
4624 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4625 class DeferredNumberTagU: public LDeferredCode {
4626 public:
4627 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4628 : LDeferredCode(codegen), instr_(instr) { }
4629 virtual void Generate() {
4630 codegen()->DoDeferredNumberTagU(instr_,
4631 instr_->value(),
4632 instr_->temp1(),
4633 instr_->temp2());
4634 }
4635 virtual LInstruction* instr() { return instr_; }
4636 private:
4637 LNumberTagU* instr_;
4638 };
4640 Register value = ToRegister32(instr->value());
4641 Register result = ToRegister(instr->result());
4643 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4644 __ Cmp(value, Smi::kMaxValue);
4645 __ B(hi, deferred->entry());
4646 __ SmiTag(result, value.X());
4647 __ Bind(deferred->exit());
4648 }
4651 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4652 Register input = ToRegister(instr->value());
4653 Register scratch = ToRegister(instr->temp());
4654 DoubleRegister result = ToDoubleRegister(instr->result());
4655 bool can_convert_undefined_to_nan =
4656 instr->hydrogen()->can_convert_undefined_to_nan();
4658 Label done, load_smi;
4660 // Work out what untag mode we're working with.
4661 HValue* value = instr->hydrogen()->value();
4662 NumberUntagDMode mode = value->representation().IsSmi()
4663 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4665 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4666 __ JumpIfSmi(input, &load_smi);
4668 Label convert_undefined;
4670 // Heap number map check.
4671 if (can_convert_undefined_to_nan) {
4672 __ JumpIfNotHeapNumber(input, &convert_undefined);
4673 } else {
4674 DeoptimizeIfNotHeapNumber(input, instr);
4675 }
4677 // Load heap number.
4678 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4679 if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4680 DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
4681 }
4682 __ B(&done);
4684 if (can_convert_undefined_to_nan) {
4685 __ Bind(&convert_undefined);
4686 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
4687 Deoptimizer::kNotAHeapNumberUndefined);
4689 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4690 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4691 __ B(&done);
4692 }
4694 } else {
4695 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4696 // Fall through to load_smi.
4697 }
4699 // Smi to double register conversion.
4700 __ Bind(&load_smi);
4701 __ SmiUntagToDouble(result, input);
4703 __ Bind(&done);
4704 }
4707 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4708 // This is a pseudo-instruction that ensures that the environment here is
4709 // properly registered for deoptimization and records the assembler's PC
4710 // offset.
4711 LEnvironment* environment = instr->environment();
4713 // If the environment were already registered, we would have no way of
4714 // backpatching it with the spill slot operands.
4715 DCHECK(!environment->HasBeenRegistered());
4716 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
4718 GenerateOsrPrologue();
4719 }
4722 void LCodeGen::DoParameter(LParameter* instr) {
4723 // Nothing to do.
4724 }
4727 void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
4728 __ PushPreamble(instr->argc(), kPointerSize);
4729 }
4732 void LCodeGen::DoPushArguments(LPushArguments* instr) {
4733 MacroAssembler::PushPopQueue args(masm());
4735 for (int i = 0; i < instr->ArgumentCount(); ++i) {
4736 LOperand* arg = instr->argument(i);
4737 if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
4738 Abort(kDoPushArgumentNotImplementedForDoubleType);
4739 return;
4740 }
4741 args.Queue(ToRegister(arg));
4742 }
4744 // The preamble was done by LPreparePushArguments.
4745 args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
4746 }
4749 void LCodeGen::DoReturn(LReturn* instr) {
4750 if (FLAG_trace && info()->IsOptimizing()) {
4751 // Push the return value on the stack as the parameter.
4752 // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
4753 // code managed by the register allocator and tearing down the frame, it's
4754 // safe to write to the context register.
4755 __ Push(x0);
4756 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4757 __ CallRuntime(Runtime::kTraceExit, 1);
4758 }
4760 if (info()->saves_caller_doubles()) {
4761 RestoreCallerDoubles();
4762 }
4764 int no_frame_start = -1;
4765 if (NeedsEagerFrame()) {
4766 Register stack_pointer = masm()->StackPointer();
4767 __ Mov(stack_pointer, fp);
4768 no_frame_start = masm_->pc_offset();
4769 __ Pop(fp, lr);
4770 }
4772 if (instr->has_constant_parameter_count()) {
4773 int parameter_count = ToInteger32(instr->constant_parameter_count());
4774 __ Drop(parameter_count + 1);
4775 } else {
4776 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
4777 Register parameter_count = ToRegister(instr->parameter_count());
4778 __ DropBySMI(parameter_count);
4779 }
4780 __ Ret();
4782 if (no_frame_start != -1) {
4783 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
4784 }
4785 }
4788 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
4789 Register temp,
4790 LOperand* index,
4791 String::Encoding encoding) {
4792 if (index->IsConstantOperand()) {
4793 int offset = ToInteger32(LConstantOperand::cast(index));
4794 if (encoding == String::TWO_BYTE_ENCODING) {
4795 offset *= kUC16Size;
4796 }
4797 STATIC_ASSERT(kCharSize == 1);
4798 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
4799 }
4801 __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
4802 if (encoding == String::ONE_BYTE_ENCODING) {
4803 return MemOperand(temp, ToRegister32(index), SXTW);
4804 } else {
4805 STATIC_ASSERT(kUC16Size == 2);
4806 return MemOperand(temp, ToRegister32(index), SXTW, 1);
4807 }
4808 }
4811 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
4812 String::Encoding encoding = instr->hydrogen()->encoding();
4813 Register string = ToRegister(instr->string());
4814 Register result = ToRegister(instr->result());
4815 Register temp = ToRegister(instr->temp());
4817 if (FLAG_debug_code) {
4818 // Even though this lithium instruction comes with a temp register, we
4819 // can't use it here because we want to use "AtStart" constraints on the
4820 // inputs and the debug code here needs a scratch register.
4821 UseScratchRegisterScope temps(masm());
4822 Register dbg_temp = temps.AcquireX();
4824 __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
4825 __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
4827 __ And(dbg_temp, dbg_temp,
4828 Operand(kStringRepresentationMask | kStringEncodingMask));
4829 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4830 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4831 __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
4832 ? one_byte_seq_type : two_byte_seq_type));
4833 __ Check(eq, kUnexpectedStringType);
4834 }
4836 MemOperand operand =
4837 BuildSeqStringOperand(string, temp, instr->index(), encoding);
4838 if (encoding == String::ONE_BYTE_ENCODING) {
4839 __ Ldrb(result, operand);
4840 } else {
4841 __ Ldrh(result, operand);
4842 }
4843 }
4846 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
4847 String::Encoding encoding = instr->hydrogen()->encoding();
4848 Register string = ToRegister(instr->string());
4849 Register value = ToRegister(instr->value());
4850 Register temp = ToRegister(instr->temp());
4852 if (FLAG_debug_code) {
4853 DCHECK(ToRegister(instr->context()).is(cp));
4854 Register index = ToRegister(instr->index());
4855 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4856 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4857 int encoding_mask =
4858 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
4859 ? one_byte_seq_type : two_byte_seq_type;
4860 __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
4861 encoding_mask);
4862 }
4863 MemOperand operand =
4864 BuildSeqStringOperand(string, temp, instr->index(), encoding);
4865 if (encoding == String::ONE_BYTE_ENCODING) {
4866 __ Strb(value, operand);
4867 } else {
4868 __ Strh(value, operand);
4869 }
4870 }
4873 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4874 HChange* hchange = instr->hydrogen();
4875 Register input = ToRegister(instr->value());
4876 Register output = ToRegister(instr->result());
4877 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4878 hchange->value()->CheckFlag(HValue::kUint32)) {
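// A uint32 value with bit 31 set cannot be represented as a smi; viewed as
// a signed W register it is negative, so DeoptimizeIfNegative catches it.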
4879 DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
4880 }
4881 __ SmiTag(output, input);
4882 }
4885 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4886 Register input = ToRegister(instr->value());
4887 Register result = ToRegister(instr->result());
4890 if (instr->needs_check()) {
4891 DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
4892 }
4895 __ SmiUntag(result, input);
4896 }
4900 void LCodeGen::DoShiftI(LShiftI* instr) {
4901 LOperand* right_op = instr->right();
4902 Register left = ToRegister32(instr->left());
4903 Register result = ToRegister32(instr->result());
4905 if (right_op->IsRegister()) {
4906 Register right = ToRegister32(instr->right());
4907 switch (instr->op()) {
4908 case Token::ROR: __ Ror(result, left, right); break;
4909 case Token::SAR: __ Asr(result, left, right); break;
4910 case Token::SHL: __ Lsl(result, left, right); break;
4911 case Token::SHR:
4912 __ Lsr(result, left, right);
4913 if (instr->can_deopt()) {
4914 // If `left >>> right` >= 0x80000000, the result is not representable
4915 // in a signed 32-bit smi.
4916 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
4917 }
4918 break;
4919 default: UNREACHABLE();
4920 }
4921 } else {
4922 DCHECK(right_op->IsConstantOperand());
4923 int shift_count = JSShiftAmountFromLConstant(right_op);
4924 if (shift_count == 0) {
4925 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4926 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
4927 }
4928 __ Mov(result, left, kDiscardForSameWReg);
4929 } else {
4930 switch (instr->op()) {
4931 case Token::ROR: __ Ror(result, left, shift_count); break;
4932 case Token::SAR: __ Asr(result, left, shift_count); break;
4933 case Token::SHL: __ Lsl(result, left, shift_count); break;
4934 case Token::SHR: __ Lsr(result, left, shift_count); break;
4935 default: UNREACHABLE();
4936 }
4937 }
4938 }
4939 }
4942 void LCodeGen::DoShiftS(LShiftS* instr) {
4943 LOperand* right_op = instr->right();
4944 Register left = ToRegister(instr->left());
4945 Register result = ToRegister(instr->result());
4947 if (right_op->IsRegister()) {
4948 Register right = ToRegister(instr->right());
4950 // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
4951 // Since we're handling smis in X registers, we have to extract these bits
4952 // explicitly.
4953 __ Ubfx(result, right, kSmiShift, 5);
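// Ubfx extracts five bits starting at kSmiShift, i.e. the low five bits of
// the untagged right-hand operand, which is all a JS shift may use.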
4955 switch (instr->op()) {
4956 case Token::ROR: {
4957 // This is the only case that needs a scratch register. To keep things
4958 // simple for the other cases, borrow a MacroAssembler scratch register.
4959 UseScratchRegisterScope temps(masm());
4960 Register temp = temps.AcquireW();
4961 __ SmiUntag(temp, left);
4962 __ Ror(result.W(), temp.W(), result.W());
4963 __ SmiTag(result);
4964 break;
4965 }
4966 case Token::SAR:
4967 __ Asr(result, left, result);
4968 __ Bic(result, result, kSmiShiftMask);
4969 break;
4970 case Token::SHL:
4971 __ Lsl(result, left, result);
4972 break;
4973 case Token::SHR:
4974 __ Lsr(result, left, result);
4975 __ Bic(result, result, kSmiShiftMask);
4976 if (instr->can_deopt()) {
4977 // If `left >>> right` >= 0x80000000, the result is not representable
4978 // in a signed 32-bit smi.
4979 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
4980 }
4981 break;
4982 default: UNREACHABLE();
4983 }
4984 } else {
4985 DCHECK(right_op->IsConstantOperand());
4986 int shift_count = JSShiftAmountFromLConstant(right_op);
4987 if (shift_count == 0) {
4988 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4989 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
4990 }
4991 __ Mov(result, left);
4992 } else {
4993 switch (instr->op()) {
4994 case Token::ROR:
4995 __ SmiUntag(result, left);
4996 __ Ror(result.W(), result.W(), shift_count);
4997 __ SmiTag(result);
4998 break;
4999 case Token::SAR:
5000 __ Asr(result, left, shift_count);
5001 __ Bic(result, result, kSmiShiftMask);
5002 break;
5003 case Token::SHL:
5004 __ Lsl(result, left, shift_count);
5005 break;
5006 case Token::SHR:
5007 __ Lsr(result, left, shift_count);
5008 __ Bic(result, result, kSmiShiftMask);
5009 break;
5010 default: UNREACHABLE();
5011 }
5012 }
5013 }
5014 }
5017 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
5018 __ Debug("LDebugBreak", 0, BREAK);
5019 }
5022 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
5023 DCHECK(ToRegister(instr->context()).is(cp));
5024 Register scratch1 = x5;
5025 Register scratch2 = x6;
5026 DCHECK(instr->IsMarkedAsCall());
5028 // TODO(all): if Mov could handle object in new space then it could be used
5029 // here.
5030 __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
5031 __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
5032 __ Push(cp, scratch1, scratch2); // The context is the first argument.
5033 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
5034 }
5037 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5038 PushSafepointRegistersScope scope(this);
5039 LoadContextFromDeferred(instr->context());
5040 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5041 RecordSafepointWithLazyDeopt(
5042 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5043 DCHECK(instr->HasEnvironment());
5044 LEnvironment* env = instr->environment();
5045 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5046 }
5049 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5050 class DeferredStackCheck: public LDeferredCode {
5051 public:
5052 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5053 : LDeferredCode(codegen), instr_(instr) { }
5054 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5055 virtual LInstruction* instr() { return instr_; }
5056 private:
5057 LStackCheck* instr_;
5058 };
5060 DCHECK(instr->HasEnvironment());
5061 LEnvironment* env = instr->environment();
5062 // There is no LLazyBailout instruction for stack-checks. We have to
5063 // prepare for lazy deoptimization explicitly here.
5064 if (instr->hydrogen()->is_function_entry()) {
5065 // Perform stack overflow check.
5066 Label done;
5067 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5068 __ B(hs, &done);
5070 PredictableCodeSizeScope predictable(masm_,
5071 Assembler::kCallSizeWithRelocation);
5072 DCHECK(instr->context()->IsRegister());
5073 DCHECK(ToRegister(instr->context()).is(cp));
5074 CallCode(isolate()->builtins()->StackCheck(),
5075 RelocInfo::CODE_TARGET,
5076 instr);
5077 __ Bind(&done);
5078 } else {
5079 DCHECK(instr->hydrogen()->is_backwards_branch());
5080 // Perform stack overflow check if this goto needs it before jumping.
5081 DeferredStackCheck* deferred_stack_check =
5082 new(zone()) DeferredStackCheck(this, instr);
5083 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5084 __ B(lo, deferred_stack_check->entry());
5086 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5087 __ Bind(instr->done_label());
5088 deferred_stack_check->SetExit(instr->done_label());
5089 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5090 // Don't record a deoptimization index for the safepoint here.
5091 // This will be done explicitly when emitting call and the safepoint in
5092 // the deferred code.
5093 }
5094 }
5097 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
5098 Register function = ToRegister(instr->function());
5099 Register code_object = ToRegister(instr->code_object());
5100 Register temp = ToRegister(instr->temp());
5101 __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
5102 __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
5103 }
5106 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
5107 Register context = ToRegister(instr->context());
5108 Register value = ToRegister(instr->value());
5109 Register scratch = ToRegister(instr->temp());
5110 MemOperand target = ContextMemOperand(context, instr->slot_index());
5112 Label skip_assignment;
5114 if (instr->hydrogen()->RequiresHoleCheck()) {
5115 __ Ldr(scratch, target);
5116 if (instr->hydrogen()->DeoptimizesOnHole()) {
5117 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
5118 Deoptimizer::kHole);
5119 } else {
5120 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
5121 }
5122 }
5124 __ Str(value, target);
5125 if (instr->hydrogen()->NeedsWriteBarrier()) {
5126 SmiCheck check_needed =
5127 instr->hydrogen()->value()->type().IsHeapObject()
5128 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5129 __ RecordWriteContextSlot(context,
5130 target.offset(),
5131 value,
5132 scratch,
5133 GetLinkRegisterState(),
5134 kSaveFPRegs,
5135 EMIT_REMEMBERED_SET,
5136 check_needed);
5137 }
5138 __ Bind(&skip_assignment);
5139 }
5142 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
5143 Register ext_ptr = ToRegister(instr->elements());
5144 Register key = no_reg;
5145 Register scratch = no_reg;
5146 ElementsKind elements_kind = instr->elements_kind();
5148 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
5149 bool key_is_constant = instr->key()->IsConstantOperand();
5150 int constant_key = 0;
5151 if (key_is_constant) {
5152 DCHECK(instr->temp() == NULL);
5153 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5154 if (constant_key & 0xf0000000) {
5155 Abort(kArrayIndexConstantValueTooBig);
5156 }
5157 } else {
5158 key = ToRegister(instr->key());
5159 scratch = ToRegister(instr->temp());
5160 }
5162 MemOperand dst =
5163 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
5164 key_is_constant, constant_key,
5165 elements_kind,
5166 instr->base_offset());
5168 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
5169 (elements_kind == FLOAT32_ELEMENTS)) {
5170 DoubleRegister value = ToDoubleRegister(instr->value());
5171 DoubleRegister dbl_scratch = double_scratch();
5172 __ Fcvt(dbl_scratch.S(), value);
5173 __ Str(dbl_scratch.S(), dst);
5174 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
5175 (elements_kind == FLOAT64_ELEMENTS)) {
5176 DoubleRegister value = ToDoubleRegister(instr->value());
5177 __ Str(value, dst);
5178 } else {
5179 Register value = ToRegister(instr->value());
5181 switch (elements_kind) {
5182 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
5183 case EXTERNAL_INT8_ELEMENTS:
5184 case EXTERNAL_UINT8_ELEMENTS:
5185 case UINT8_ELEMENTS:
5186 case UINT8_CLAMPED_ELEMENTS:
5187 case INT8_ELEMENTS:
5188 __ Strb(value, dst);
5189 break;
5190 case EXTERNAL_INT16_ELEMENTS:
5191 case EXTERNAL_UINT16_ELEMENTS:
5192 case INT16_ELEMENTS:
5193 case UINT16_ELEMENTS:
5194 __ Strh(value, dst);
5195 break;
5196 case EXTERNAL_INT32_ELEMENTS:
5197 case EXTERNAL_UINT32_ELEMENTS:
5198 case INT32_ELEMENTS:
5199 case UINT32_ELEMENTS:
5200 __ Str(value.W(), dst);
5201 break;
5202 case FLOAT32_ELEMENTS:
5203 case FLOAT64_ELEMENTS:
5204 case EXTERNAL_FLOAT32_ELEMENTS:
5205 case EXTERNAL_FLOAT64_ELEMENTS:
5206 case FAST_DOUBLE_ELEMENTS:
5207 case FAST_ELEMENTS:
5208 case FAST_SMI_ELEMENTS:
5209 case FAST_HOLEY_DOUBLE_ELEMENTS:
5210 case FAST_HOLEY_ELEMENTS:
5211 case FAST_HOLEY_SMI_ELEMENTS:
5212 case DICTIONARY_ELEMENTS:
5213 case SLOPPY_ARGUMENTS_ELEMENTS:
5214 UNREACHABLE();
5215 break;
5216 }
5217 }
5218 }
5221 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
5222 Register elements = ToRegister(instr->elements());
5223 DoubleRegister value = ToDoubleRegister(instr->value());
5224 MemOperand mem_op;
5226 if (instr->key()->IsConstantOperand()) {
5227 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5228 if (constant_key & 0xf0000000) {
5229 Abort(kArrayIndexConstantValueTooBig);
5230 }
5231 int offset = instr->base_offset() + constant_key * kDoubleSize;
5232 mem_op = MemOperand(elements, offset);
5233 } else {
5234 Register store_base = ToRegister(instr->temp());
5235 Register key = ToRegister(instr->key());
5236 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5237 mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
5238 instr->hydrogen()->elements_kind(),
5239 instr->hydrogen()->representation(),
5240 instr->base_offset());
5241 }
5243 if (instr->NeedsCanonicalization()) {
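// Canonicalizing replaces any NaN payload with the canonical NaN pattern,
// so arbitrary NaN bits (notably ones resembling the hole NaN) never reach
// the FixedDoubleArray backing store.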
5244 __ CanonicalizeNaN(double_scratch(), value);
5245 __ Str(double_scratch(), mem_op);
5246 } else {
5247 __ Str(value, mem_op);
5248 }
5249 }
5252 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
5253 Register value = ToRegister(instr->value());
5254 Register elements = ToRegister(instr->elements());
5255 Register scratch = no_reg;
5256 Register store_base = no_reg;
5257 Register key = no_reg;
5258 MemOperand mem_op;
5260 if (!instr->key()->IsConstantOperand() ||
5261 instr->hydrogen()->NeedsWriteBarrier()) {
5262 scratch = ToRegister(instr->temp());
5263 }
5265 Representation representation = instr->hydrogen()->value()->representation();
5266 if (instr->key()->IsConstantOperand()) {
5267 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
5268 int offset = instr->base_offset() +
5269 ToInteger32(const_operand) * kPointerSize;
5270 store_base = elements;
5271 if (representation.IsInteger32()) {
5272 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5273 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
5274 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
5275 STATIC_ASSERT(kSmiTag == 0);
5276 mem_op = UntagSmiMemOperand(store_base, offset);
5277 } else {
5278 mem_op = MemOperand(store_base, offset);
5279 }
5280 } else {
5281 store_base = scratch;
5282 key = ToRegister(instr->key());
5283 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5285 mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
5286 instr->hydrogen()->elements_kind(),
5287 representation, instr->base_offset());
5288 }
5290 __ Store(value, mem_op, representation);
5292 if (instr->hydrogen()->NeedsWriteBarrier()) {
5293 DCHECK(representation.IsTagged());
5294 // This assignment may cause element_addr to alias store_base.
5295 Register element_addr = scratch;
5296 SmiCheck check_needed =
5297 instr->hydrogen()->value()->type().IsHeapObject()
5298 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5299 // Compute address of modified element and store it into key register.
5300 __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
5301 __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
5302 kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
5303 instr->hydrogen()->PointersToHereCheckForValue());
5304 }
5305 }
5308 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5309 DCHECK(ToRegister(instr->context()).is(cp));
5310 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
5311 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
5312 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
5314 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
5315 isolate(), instr->language_mode(),
5316 instr->hydrogen()->initialization_state()).code();
5317 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5318 }
5321 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5322 Representation representation = instr->representation();
5324 Register object = ToRegister(instr->object());
5325 HObjectAccess access = instr->hydrogen()->access();
5326 int offset = access.offset();
5328 if (access.IsExternalMemory()) {
5329 DCHECK(!instr->hydrogen()->has_transition());
5330 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
5331 Register value = ToRegister(instr->value());
5332 __ Store(value, MemOperand(object, offset), representation);
5333 return;
5334 }
5336 __ AssertNotSmi(object);
5338 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
5339 DCHECK(access.IsInobject());
5340 DCHECK(!instr->hydrogen()->has_transition());
5341 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
5342 FPRegister value = ToDoubleRegister(instr->value());
5343 __ Str(value, FieldMemOperand(object, offset));
5344 return;
5345 }
5347 DCHECK(!representation.IsSmi() ||
5348 !instr->value()->IsConstantOperand() ||
5349 IsInteger32Constant(LConstantOperand::cast(instr->value())));
5351 if (instr->hydrogen()->has_transition()) {
5352 Handle<Map> transition = instr->hydrogen()->transition_map();
5353 AddDeprecationDependency(transition);
5354 // Store the new map value.
5355 Register new_map_value = ToRegister(instr->temp0());
5356 __ Mov(new_map_value, Operand(transition));
5357 __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
5358 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
5359 // Update the write barrier for the map field.
5360 __ RecordWriteForMap(object,
5361 new_map_value,
5362 ToRegister(instr->temp1()),
5363 GetLinkRegisterState(),
5364 kSaveFPRegs);
5365 }
5366 }
5368 // Do the store.
5369 Register destination;
5370 if (access.IsInobject()) {
5371 destination = object;
5372 } else {
5373 Register temp0 = ToRegister(instr->temp0());
5374 __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
5375 destination = temp0;
5376 }
5378 if (FLAG_unbox_double_fields && representation.IsDouble()) {
5379 DCHECK(access.IsInobject());
5380 FPRegister value = ToDoubleRegister(instr->value());
5381 __ Str(value, FieldMemOperand(object, offset));
5382 } else if (representation.IsSmi() &&
5383 instr->hydrogen()->value()->representation().IsInteger32()) {
5384 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5385 #ifdef DEBUG
5386 Register temp0 = ToRegister(instr->temp0());
5387 __ Ldr(temp0, FieldMemOperand(destination, offset));
5388 __ AssertSmi(temp0);
5389 // If destination aliased temp0, restore it to the address calculated
5390 // earlier.
5391 if (destination.Is(temp0)) {
5392 DCHECK(!access.IsInobject());
5393 __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
5394 }
5395 #endif
5396 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
5397 STATIC_ASSERT(kSmiTag == 0);
5398 Register value = ToRegister(instr->value());
5399 __ Store(value, UntagSmiFieldMemOperand(destination, offset),
5400 Representation::Integer32());
5401 } else {
5402 Register value = ToRegister(instr->value());
5403 __ Store(value, FieldMemOperand(destination, offset), representation);
5404 }
5405 if (instr->hydrogen()->NeedsWriteBarrier()) {
5406 Register value = ToRegister(instr->value());
5407 __ RecordWriteField(destination,
5409 value, // Clobbered.
5410 ToRegister(instr->temp1()), // Clobbered.
5411 GetLinkRegisterState(),
5413 EMIT_REMEMBERED_SET,
5414 instr->hydrogen()->SmiCheckForWriteBarrier(),
5415 instr->hydrogen()->PointersToHereCheckForValue());
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic =
      StoreIC::initialize_stub(isolate(), instr->language_mode(),
                               instr->hydrogen()->initialization_state());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).Is(x1));
  DCHECK(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTagAndPush(index);

  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());
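
  // Fast path: index the single character string cache by char code. Codes
  // above kMaxOneByteCharCode, or cache entries that still hold undefined,
  // fall through to the deferred runtime call.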
  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this);
  __ SmiTagAndPush(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
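  // Record that no smi check was inlined for this call site, so nothing is
  // patched here. The CompareIC leaves its result in x0 as a value that is
  // compared against zero under the token's condition below.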
  InlineSmiCheckInfo::EmitNotInlined(masm());

  Condition condition = TokenToCondition(op, false);

  EmitCompareAndBranch(instr, condition, x0, 0);
}


void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());

  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Label check_bools;

    // If it's not a heap number, jump to undefined check.
    __ JumpIfNotHeapNumber(input, &check_bools);

    // A heap number: load value and convert to int32 using truncating
    // function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);

    Register true_root = output;
    Register false_root = scratch1;
    __ LoadTrueFalseRoots(true_root, false_root);
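    // Convert true to 1 and false to 0, and detect anything else: Cset
    // materializes the result of the comparison against true_root, and the
    // conditional compare runs only when the input was not true (otherwise it
    // simply sets the Z flag). After Ccmp, Z is set iff the input was either
    // boolean, so the B(eq) below skips the undefined check in that case.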
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output contains zero, undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                        Deoptimizer::kNotAHeapNumberUndefinedBoolean);
  } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    DeoptimizeIfNotHeapNumber(input, instr);

    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
    }
  }
  __ Bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).Is(x0));
  DCHECK(ToRegister(instr->result()).Is(x0));
  __ Push(x0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create regexp literal using runtime function.
  // Result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

  __ Bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);

  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
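
  // A simple map change transition leaves the elements backing store usable
  // as-is, so the fast path below only rewrites the map word in place; any
  // other transition is handed off to TransitionElementsKindStub.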
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen checks
      // that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
    DCHECK(object.is(x0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ Push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(value, true_label);

    int true_block = instr->TrueDestination(chunk_);
    int false_block = instr->FalseDestination(chunk_);
    int next_block = GetNextEmittedBlock();
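
    // Choose the cheapest branch shape for the current block layout: if both
    // targets are the same block, just jump there; if one target is the next
    // emitted block, emit a single conditional branch and fall through.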
    if (true_block == false_block) {
      EmitGoto(true_block);
    } else if (true_block == next_block) {
      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
    } else {
      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
      if (false_block != next_block) {
        __ B(chunk_->GetAssemblyLabel(false_block));
      }
    }

  } else if (String::Equals(type_name, factory->string_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(
        value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    DCHECK(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // HeapObject's type has been loaded into type register by JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (String::Equals(type_name, factory->object_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;
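
  // The smi index encodes two things: its lowest payload bit flags a mutable
  // heap number field, which must be boxed through the deferred runtime path,
  // and the remaining bits (still a valid smi after the ASR below) give the
  // field index. A non-negative index selects an in-object field; a negative
  // one selects a slot in the out-of-object properties array.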
  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


} }  // namespace v8::internal