// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r1: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if enabled)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
        !info_->is_native() && info_->scope()->has_this_declaration()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ ldr(r2, MemOperand(sp, receiver_offset));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);

      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));

      __ str(r2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ sub(sp, sp, Operand(slots * kPointerSize));
      __ push(r0);
      __ push(r1);
      __ add(r0, sp, Operand(slots * kPointerSize));
      __ mov(r1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ sub(r0, r0, Operand(kPointerSize));
      __ str(r1, MemOperand(r0, 2 * kPointerSize));
      __ cmp(r0, sp);
      __ b(ne, &loop);
      __ pop(r1);
      __ pop(r0);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r1.
    DCHECK(!info()->scope()->is_script_scope());
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ mov(cp, r0);
    __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ str(r0, target);
        // Update the write barrier. This clobbers r3 and r0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp,
              target.offset(),
              r0,
              r3,
              GetLinkRegisterState(),
              kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ sub(sp, sp, Operand(slots * kPointerSize));
}

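// Worked example of the adjustment above (illustrative numbers only, not
// from the original source): if the optimized code needs
// GetStackSlotCount() == 10 spill slots and the unoptimized frame being
// subsumed already accounts for UnoptimizedFrameSlots() == 4 of them, only
// the remaining slots are reserved here:
//
//   int slots = 10 - 4;                             // 6
//   __ sub(sp, sp, Operand(slots * kPointerSize));  // grow frame by 6 slots
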
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ PushFixedFrame();
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ pop(ip);
        __ PopFixedFrame();
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}

bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32-bit data after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
                jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

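  // Rough shape of the bound being checked above (sketch; the factor 7 is a
  // conservative per-entry word estimate, not derived here):
  //
  //   int code_words  = masm()->pc_offset() / Assembler::kInstrSize;
  //   int table_words = jump_table_.length() * 7;
  //   bool reachable  = is_int24(code_words + table_words);  // B/BL offset fits
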
  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushFixedFrame();
        __ bl(&needs_frame);
      } else {
        __ bl(&call_deopt_entry);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
      masm()->CheckConstPool(false, false);
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(ip);
      __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ add(entry_offset, entry_offset,
           Operand(ExternalReference::ForDeoptEntry(base)));
    __ bx(entry_offset);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
  return DwVfpRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                               SwVfpRegister flt_scratch,
                                               DwVfpRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}

int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}

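// Sketch of the Smi re-tagging above, assuming the 32-bit encoding
// (kSmiTag == 0, kSmiTagSize == 1): a Smi is the integer shifted left by one.
//
//   int32_t value = 42;
//   int32_t smi_bits = value << 1;  // 84, the bit pattern of Smi::FromInt(42)
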
Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}

static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}

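// Example of the mapping above: argument slots carry negative indices, and
// the most recently pushed argument sits at sp.
//
//   ArgumentsOffsetWithoutFrame(-1) == 0             // MemOperand(sp, 0)
//   ArgumentsOffsetWithoutFrame(-2) == kPointerSize  // one slot above sp
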
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
  int size = masm()->CallSize(code, mode);
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    size += Assembler::kInstrSize;  // extra nop() added in CallCodeGeneric.
  }
  return size;
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        TargetAddressStorageMode storage_mode) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               TargetAddressStorageMode storage_mode) {
  DCHECK(instr != NULL);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    // Store the condition on the stack if necessary.
    if (condition != al) {
      __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
      __ mov(scratch, Operand(1), LeaveCC, condition);
      __ push(scratch);
    }

    __ push(r1);
    __ mov(scratch, Operand(count));
    __ ldr(r1, MemOperand(scratch));
    __ sub(r1, r1, Operand(1), SetCC);
    __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
    __ str(r1, MemOperand(scratch));
    __ pop(r1);

    if (condition != al) {
      // Clean up the stack before the deoptimizer call.
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);

    // 'Restore' the condition in a slightly hacky way. (It would be better
    // to use 'msr' and 'mrs' instructions here, but they are not supported by
    // our ARM simulator).
    if (condition != al) {
      condition = ne;
      __ cmp(scratch, Operand::Zero());
    }
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", condition);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(condition, &jump_table_.last().label);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
}

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK_EQ(0, deoptimization_literals_.length());
  for (auto function : chunk()->inlined_functions()) {
    DefineDeoptimizationLiteral(function);
  }
  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmp(dividend, Operand::Zero());
    __ b(pl, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ rsb(dividend, dividend, Operand::Zero());
    __ and_(dividend, dividend, Operand(mask));
    __ rsb(dividend, dividend, Operand::Zero(), SetCC);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, dividend, Operand(mask));
  __ bind(&done);
}

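// The branching sequence above computes, in plain C++ terms (sketch, not
// part of the build; unsigned arithmetic stands in for ARM's wrapping rsb):
//
//   int32_t ModByPowerOf2(int32_t n, int32_t divisor) {
//     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
//     if (n < 0) {
//       uint32_t neg = 0u - static_cast<uint32_t>(n);  // wraps for kMinInt
//       return -static_cast<int32_t>(neg & mask);      // remainder <= 0
//     }
//     return n & mask;
//   }
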
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ smull(result, ip, result, ip);
  __ sub(result, dividend, result, SetCC);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ b(ne, &remainder_not_zero);
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}

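// What the three instructions after TruncatingDiv compute, as a sketch:
//
//   q = TruncatingDiv(dividend, |divisor|)   // magic-number division
//   result = dividend - q * |divisor|        // n % d == n - trunc(n/d) * d
//
// The remainder takes the sign of the dividend, which is why |divisor| can
// be used throughout.
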
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    Label done;
    // Check for x % 0, sdiv might signal an exception. We have to deopt in this
    // case because we can't return a NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
    }

    // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
    // want. We have to deopt if we care about -0, because we can't return that.
    if (hmod->CheckFlag(HValue::kCanOverflow)) {
      Label no_overflow_possible;
      __ cmp(left_reg, Operand(kMinInt));
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      } else {
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
        __ jmp(&done);
      }
      __ bind(&no_overflow_possible);
    }

    // For 'r3 = r1 % r2' we can have the following ARM code:
    //   sdiv r3, r1, r2
    //   mls r3, r3, r2, r1

    __ sdiv(result_reg, left_reg, right_reg);
    __ Mls(result_reg, result_reg, right_reg, left_reg);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);
  } else {
    // General case, without any SDIV support.
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    DCHECK(!scratch.is(left_reg));
    DCHECK(!scratch.is(right_reg));
    DCHECK(!scratch.is(result_reg));
    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
    DCHECK(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    DCHECK(!quotient.is(dividend));
    DCHECK(!quotient.is(divisor));

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return a
    // NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
    }

    __ Move(result_reg, left_reg);
    // Load the arguments in VFP registers. The divisor value is preloaded
    // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne) The last comment seems to be wrong nowadays.
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    // We do not care about the sign of the divisor. Note that we still handle
    // the kMinInt % -1 case correctly, though.
    __ vabs(divisor, divisor);
    // Compute the quotient and round it to a 32bit integer.
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    // Compute the remainder in result.
    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);
  }
}

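// C++ shape of the VFP fallback above (sketch only; VFP's round-toward-zero
// conversion is modeled with a plain cast):
//
//   double vl = static_cast<double>(left);
//   double vr = std::fabs(static_cast<double>(right));
//   int32_t quotient = static_cast<int32_t>(vl / vr);   // rounds toward zero
//   int32_t remainder = left - quotient * static_cast<int32_t>(vr);
//
// e.g. left = -7, right = 2: quotient = -3, remainder = -7 - (-6) = -1, which
// carries the sign of the dividend as required.
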
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ tst(dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ rsb(result, dividend, Operand(0));
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ mov(result, dividend);
  } else if (shift == 1) {
    __ add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    __ mov(result, Operand(dividend, ASR, 31));
    __ add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ rsb(result, result, Operand(0));
}

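// The shift sequence above is round-toward-zero division by 2^shift: a plain
// arithmetic shift would round negative dividends toward -infinity, so a
// bias of 2^shift - 1 is added to negative dividends first. Sketch (valid
// for 1 <= shift <= 31; shift == 0 is handled separately above):
//
//   int32_t DivByPowerOf2(int32_t n, int shift) {
//     int32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - shift);
//     return (n + bias) >> shift;  // == trunc(n / 2^shift)
//   }
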
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(ip, Operand(divisor));
    __ smull(scratch0(), ip, result, ip);
    __ sub(scratch0(), scratch0(), dividend, SetCC);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}

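// The kLostPrecision check above verifies the division was exact by
// multiplying the quotient back and comparing, i.e. in sketch form:
//
//   if (result * divisor != dividend) Deoptimize();  // remainder was nonzero
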
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
      // Do the test only if it hadn't been done above.
      __ cmp(divisor, Operand::Zero());
    }
    __ b(pl, &positive);
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    __ bind(&positive);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      (!CpuFeatures::IsSupported(SUDIV) ||
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
    // We don't need to check for overflow when truncating with sdiv
    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
    __ cmp(dividend, Operand(kMinInt));
    __ cmp(divisor, Operand(-1), eq);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, dividend, divisor);
  } else {
    DoubleRegister vleft = ToDoubleRegister(instr->temp());
    DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), dividend);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), divisor);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Compute remainder and deopt if it's not zero.
    Register remainder = scratch0();
    __ Mls(remainder, result, divisor, dividend);
    __ cmp(remainder, Operand::Zero());
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}

void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DwVfpRegister addend = ToDoubleRegister(instr->addend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ vmla(addend, multiplier, multiplicand);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(minuend.is(ToDoubleRegister(instr->result())));

  __ vmls(minuend, multiplier, multiplicand);
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ rsb(result, dividend, Operand::Zero(), SetCC);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ mov(result, Operand(result, ASR, shift));
    return;
  }

  __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
  __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ rsb(result, result, Operand::Zero());
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ cmp(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());
  __ sub(result, result, Operand(1));
  __ bind(&done);
}

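// The adjustment path above relies on the identity (shown for divisor > 0
// and dividend < 0; the mirrored case is symmetric):
//
//   floor(n / d) == trunc((n + 1) / d) - 1
//
// e.g. n = -7, d = 2: trunc(-6 / 2) - 1 == -3 - 1 == -4 == floor(-3.5).
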
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register left = ToRegister(instr->dividend());
  Register right = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
      // Do the test only if it hadn't been done above.
      __ cmp(right, Operand::Zero());
    }
    __ b(pl, &positive);
    __ cmp(left, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    __ bind(&positive);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      (!CpuFeatures::IsSupported(SUDIV) ||
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
    // We don't need to check for overflow when truncating with sdiv
    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
    __ cmp(left, Operand(kMinInt));
    __ cmp(right, Operand(-1), eq);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, left, right);
  } else {
    DoubleRegister vleft = ToDoubleRegister(instr->temp());
    DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), left);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), right);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());
  }

  Label done;
  Register remainder = scratch0();
  __ Mls(remainder, result, right, left);
  __ cmp(remainder, Operand::Zero());
  __ b(eq, &done);
  __ eor(remainder, remainder, Operand(right));
  __ add(result, result, Operand(remainder, ASR, 31));
  __ bind(&done);
}

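// The eor/add tail above decrements the truncated quotient exactly when the
// remainder is nonzero and dividend and divisor have opposite signs, turning
// truncating division into flooring division. Sketch:
//
//   if (remainder != 0) result += (remainder ^ right) >> 31;
//
// (remainder ^ right) >> 31 is -1 iff the signs differ, 0 otherwise.
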
void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If constant is negative and left is zero, the result should be -0.
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ rsb(result, left, Operand::Zero(), SetCC);
          DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
        } else {
          __ rsb(result, left, Operand::Zero());
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          __ cmp(left, Operand::Zero());
          DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
        }
        __ mov(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ mov(result, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ add(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ rsb(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);
        }
    }
  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      Register scratch = scratch0();
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ smull(result, scratch, result, right);
      } else {
        __ smull(result, scratch, left, right);
      }
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mul(result, result, right);
      } else {
        __ mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ teq(left, Operand(right));
      __ b(pl, &done);
      // Bail out if the result is minus zero.
      __ cmp(result, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}

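// Two tricks used above, in sketch form. The mask pair is a branch-free
// absolute value in two's complement:
//
//   int32_t mask = constant >> 31;                      // 0 or -1
//   uint32_t constant_abs = (constant + mask) ^ mask;   // |constant|
//
// and multiplication is strength-reduced when |c|, |c| - 1 or |c| + 1 is a
// power of two, e.g. x * 9 == x + (x << 3) and x * 7 == (x << 3) - x.
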
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      break;
    case Token::BIT_OR:
      __ orr(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ mvn(result, Operand(left));
      } else {
        __ eor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        __ mov(result, Operand(left, ROR, scratch));
        break;
      case Token::SAR:
        __ mov(result, Operand(left, ASR, scratch));
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          __ mov(result, Operand(left, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
        } else {
          __ mov(result, Operand(left, LSR, scratch));
        }
        break;
      case Token::SHL:
        __ mov(result, Operand(left, LSL, scratch));
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ROR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ASR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSR, shift_count));
        } else {
          if (instr->can_deopt()) {
            __ tst(left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ mov(result, Operand(left, LSL, shift_count - 1));
              __ SmiTag(result, result, SetCC);
            } else {
              __ SmiTag(result, left, SetCC);
            }
            DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
          } else {
            __ mov(result, Operand(left, LSL, shift_count));
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
}

void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
}


void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}

void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DwVfpRegister result = ToDoubleRegister(instr->result());
#if V8_HOST_ARCH_IA32
  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
  // builds.
  uint64_t bits = instr->bits();
  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
      V8_UINT64_C(0x7FF0000000000000)) {
    uint32_t lo = static_cast<uint32_t>(bits);
    uint32_t hi = static_cast<uint32_t>(bits >> 32);
    __ mov(ip, Operand(lo));
    __ mov(scratch0(), Operand(hi));
    __ vmov(result, ip, scratch0());
    return;
  }
#endif
  double v = instr->value();
  __ Vmov(result, v, scratch0());
}

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}

1880 void LCodeGen::DoDateField(LDateField* instr) {
1881 Register object = ToRegister(instr->date());
1882 Register result = ToRegister(instr->result());
1883 Register scratch = ToRegister(instr->temp());
1884 Smi* index = instr->index();
1885 DCHECK(object.is(result));
1886 DCHECK(object.is(r0));
1887 DCHECK(!scratch.is(scratch0()));
1888 DCHECK(!scratch.is(object));
1890 if (index->value() == 0) {
1891 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1892 } else {
1893 Label runtime, done;
1894 if (index->value() < JSDate::kFirstUncachedField) {
1895 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1896 __ mov(scratch, Operand(stamp));
1897 __ ldr(scratch, MemOperand(scratch));
1898 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1899 __ cmp(scratch, scratch0());
1900 __ b(ne, &runtime);
1901 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1902 kPointerSize * index->value()));
1903 __ b(&done);
1904 }
1905 __ bind(&runtime);
1906 __ PrepareCallCFunction(2, scratch);
1907 __ mov(r1, Operand(index));
1908 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1909 __ bind(&done);
1910 }
1914 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1915 LOperand* index,
1916 String::Encoding encoding) {
1917 if (index->IsConstantOperand()) {
1918 int offset = ToInteger32(LConstantOperand::cast(index));
1919 if (encoding == String::TWO_BYTE_ENCODING) {
1920 offset *= kUC16Size;
1921 }
1922 STATIC_ASSERT(kCharSize == 1);
1923 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1925 Register scratch = scratch0();
1926 DCHECK(!scratch.is(string));
1927 DCHECK(!scratch.is(ToRegister(index)));
1928 if (encoding == String::ONE_BYTE_ENCODING) {
1929 __ add(scratch, string, Operand(ToRegister(index)));
1930 } else {
1931 STATIC_ASSERT(kUC16Size == 2);
1932 __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
1934 return FieldMemOperand(scratch, SeqString::kHeaderSize);
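// Illustrative sketch (not from the original source): for a constant index
// the operand above reduces to header size plus index scaled by character
// width (kCharSize == 1 for one-byte, kUC16Size == 2 for two-byte strings).
// E.g. index 3 of a two-byte string sits at SeqString::kHeaderSize + 6:
static inline int SeqStringByteOffset(int index, bool two_byte,
                                      int header_size) {
  return header_size + index * (two_byte ? 2 : 1);
}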
1938 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1939 String::Encoding encoding = instr->hydrogen()->encoding();
1940 Register string = ToRegister(instr->string());
1941 Register result = ToRegister(instr->result());
1943 if (FLAG_debug_code) {
1944 Register scratch = scratch0();
1945 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1946 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1948 __ and_(scratch, scratch,
1949 Operand(kStringRepresentationMask | kStringEncodingMask));
1950 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1951 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1952 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1953 ? one_byte_seq_type : two_byte_seq_type));
1954 __ Check(eq, kUnexpectedStringType);
1957 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1958 if (encoding == String::ONE_BYTE_ENCODING) {
1959 __ ldrb(result, operand);
1960 } else {
1961 __ ldrh(result, operand);
1962 }
1966 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1967 String::Encoding encoding = instr->hydrogen()->encoding();
1968 Register string = ToRegister(instr->string());
1969 Register value = ToRegister(instr->value());
1971 if (FLAG_debug_code) {
1972 Register index = ToRegister(instr->index());
1973 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1974 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1975 static const uint32_t encoding_mask =
1976 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1977 ? one_byte_seq_type : two_byte_seq_type;
1978 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1981 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1982 if (encoding == String::ONE_BYTE_ENCODING) {
1983 __ strb(value, operand);
1984 } else {
1985 __ strh(value, operand);
1986 }
1990 void LCodeGen::DoAddI(LAddI* instr) {
1991 LOperand* left = instr->left();
1992 LOperand* right = instr->right();
1993 LOperand* result = instr->result();
1994 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1995 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1997 if (right->IsStackSlot()) {
1998 Register right_reg = EmitLoadRegister(right, ip);
1999 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
2000 } else {
2001 DCHECK(right->IsRegister() || right->IsConstantOperand());
2002 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
2003 }
2005 if (can_overflow) {
2006 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
2007 }
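// Illustrative sketch (not from the original source): the SetCC + vs pattern
// used by DoAddI/DoSubI above is a flags-based version of a checked 32-bit
// add. An equivalent portable predicate:
#include <cstdint>
static inline bool AddWouldOverflow(int32_t a, int32_t b) {
  int64_t wide = static_cast<int64_t>(a) + b;  // matches the ARM V flag
  return wide < INT32_MIN || wide > INT32_MAX;
}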
2011 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2012 LOperand* left = instr->left();
2013 LOperand* right = instr->right();
2014 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2015 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2016 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
2017 Register left_reg = ToRegister(left);
2018 Operand right_op = (right->IsRegister() || right->IsConstantOperand())
2019 ? ToOperand(right)
2020 : Operand(EmitLoadRegister(right, ip));
2021 Register result_reg = ToRegister(instr->result());
2022 __ cmp(left_reg, right_op);
2023 __ Move(result_reg, left_reg, condition);
2024 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
2026 DCHECK(instr->hydrogen()->representation().IsDouble());
2027 DwVfpRegister left_reg = ToDoubleRegister(left);
2028 DwVfpRegister right_reg = ToDoubleRegister(right);
2029 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
2030 Label result_is_nan, return_left, return_right, check_zero, done;
2031 __ VFPCompareAndSetFlags(left_reg, right_reg);
2032 if (operation == HMathMinMax::kMathMin) {
2033 __ b(mi, &return_left);
2034 __ b(gt, &return_right);
2035 } else {
2036 __ b(mi, &return_right);
2037 __ b(gt, &return_left);
2039 __ b(vs, &result_is_nan);
2040 // Left equals right => check for -0.
2041 __ VFPCompareAndSetFlags(left_reg, 0.0);
2042 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2043 __ b(ne, &done); // left == right != 0.
2044 } else {
2045 __ b(ne, &return_left); // left == right != 0.
2046 }
2047 // At this point, both left and right are either 0 or -0.
2048 if (operation == HMathMinMax::kMathMin) {
2049 // We could use a single 'vorr' instruction here if we had NEON support.
2050 __ vneg(left_reg, left_reg);
2051 __ vsub(result_reg, left_reg, right_reg);
2052 __ vneg(result_reg, result_reg);
2053 } else {
2054 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2055 // the decision for vadd is easy because vand is a NEON instruction.
2056 __ vadd(result_reg, left_reg, right_reg);
2057 }
2058 __ b(&done);
2060 __ bind(&result_is_nan);
2061 __ vadd(result_reg, left_reg, right_reg);
2062 __ b(&done);
2064 __ bind(&return_right);
2065 __ Move(result_reg, right_reg);
2066 if (!left_reg.is(result_reg)) {
2067 __ b(&done);
2068 }
2070 __ bind(&return_left);
2071 __ Move(result_reg, left_reg);
2073 __ bind(&done);
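// Illustrative sketch (not from the original source): when left == right the
// operands can only be +0 and/or -0. IEEE-754 addition of two zeros yields
// +0 unless both are -0, so vadd implements max. For min, the vneg/vsub/vneg
// sequence computes -((-l) - r); e.g. l = +0.0, r = -0.0 gives
// -((-0.0) - (-0.0)) = -(+0.0) = -0.0, the correct minimum.
static inline double SignedZeroMin(double l, double r) {
  return -((-l) - r);  // only valid when both inputs are +/-0.0
}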
2078 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2079 DwVfpRegister left = ToDoubleRegister(instr->left());
2080 DwVfpRegister right = ToDoubleRegister(instr->right());
2081 DwVfpRegister result = ToDoubleRegister(instr->result());
2082 switch (instr->op()) {
2083 case Token::ADD:
2084 __ vadd(result, left, right);
2085 break;
2086 case Token::SUB:
2087 __ vsub(result, left, right);
2088 break;
2089 case Token::MUL:
2090 __ vmul(result, left, right);
2091 break;
2092 case Token::DIV:
2093 __ vdiv(result, left, right);
2094 break;
2095 case Token::MOD: {
2096 __ PrepareCallCFunction(0, 2, scratch0());
2097 __ MovToFloatParameters(left, right);
2098 __ CallCFunction(
2099 ExternalReference::mod_two_doubles_operation(isolate()),
2100 0, 2);
2101 // Move the result into the double result register.
2102 __ MovFromFloatResult(result);
2103 break;
2104 }
2105 default:
2106 UNREACHABLE();
2107 break;
2108 }
2112 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2113 DCHECK(ToRegister(instr->context()).is(cp));
2114 DCHECK(ToRegister(instr->left()).is(r1));
2115 DCHECK(ToRegister(instr->right()).is(r0));
2116 DCHECK(ToRegister(instr->result()).is(r0));
2118 Handle<Code> code =
2119 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
2120 // Block literal pool emission to ensure nop indicating no inlined smi code
2121 // is in the correct position.
2122 Assembler::BlockConstPoolScope block_const_pool(masm());
2123 CallCode(code, RelocInfo::CODE_TARGET, instr);
2127 template<class InstrType>
2128 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
2129 int left_block = instr->TrueDestination(chunk_);
2130 int right_block = instr->FalseDestination(chunk_);
2132 int next_block = GetNextEmittedBlock();
2134 if (right_block == left_block || condition == al) {
2135 EmitGoto(left_block);
2136 } else if (left_block == next_block) {
2137 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
2138 } else if (right_block == next_block) {
2139 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2140 } else {
2141 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2142 __ b(chunk_->GetAssemblyLabel(right_block));
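// Illustrative sketch (not from the original source): EmitBranch exploits
// block layout so that at most one conditional branch is usually emitted.
// Assuming block ids as plain ints, the decision reduces to:
static inline int MaxBranchesEmitted(int true_block, int false_block,
                                     int next_block, bool unconditional) {
  // Same target (or condition al): EmitGoto, which is elided entirely when
  // the target is the fall-through block.
  if (true_block == false_block || unconditional) return 1;
  if (true_block == next_block) return 1;   // negated branch to false block
  if (false_block == next_block) return 1;  // branch to true block
  return 2;  // conditional branch to true + unconditional branch to false
}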
2147 template<class InstrType>
2148 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
2149 int false_block = instr->FalseDestination(chunk_);
2150 __ b(condition, chunk_->GetAssemblyLabel(false_block));
2154 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2155 __ stop("LDebugBreak");
2156 }
2159 void LCodeGen::DoBranch(LBranch* instr) {
2160 Representation r = instr->hydrogen()->value()->representation();
2161 if (r.IsInteger32() || r.IsSmi()) {
2162 DCHECK(!info()->IsStub());
2163 Register reg = ToRegister(instr->value());
2164 __ cmp(reg, Operand::Zero());
2165 EmitBranch(instr, ne);
2166 } else if (r.IsDouble()) {
2167 DCHECK(!info()->IsStub());
2168 DwVfpRegister reg = ToDoubleRegister(instr->value());
2169 // Test the double value. Zero and NaN are false.
2170 __ VFPCompareAndSetFlags(reg, 0.0);
2171 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2172 EmitBranch(instr, ne);
2173 } else {
2174 DCHECK(r.IsTagged());
2175 Register reg = ToRegister(instr->value());
2176 HType type = instr->hydrogen()->value()->type();
2177 if (type.IsBoolean()) {
2178 DCHECK(!info()->IsStub());
2179 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2180 EmitBranch(instr, eq);
2181 } else if (type.IsSmi()) {
2182 DCHECK(!info()->IsStub());
2183 __ cmp(reg, Operand::Zero());
2184 EmitBranch(instr, ne);
2185 } else if (type.IsJSArray()) {
2186 DCHECK(!info()->IsStub());
2187 EmitBranch(instr, al);
2188 } else if (type.IsHeapNumber()) {
2189 DCHECK(!info()->IsStub());
2190 DwVfpRegister dbl_scratch = double_scratch0();
2191 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2192 // Test the double value. Zero and NaN are false.
2193 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2194 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2195 EmitBranch(instr, ne);
2196 } else if (type.IsString()) {
2197 DCHECK(!info()->IsStub());
2198 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2199 __ cmp(ip, Operand::Zero());
2200 EmitBranch(instr, ne);
2201 } else {
2202 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2203 // Avoid deopts in the case where we've never executed this path before.
2204 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2206 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2207 // undefined -> false.
2208 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2209 __ b(eq, instr->FalseLabel(chunk_));
2211 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2212 // Boolean -> its value.
2213 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2214 __ b(eq, instr->TrueLabel(chunk_));
2215 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2216 __ b(eq, instr->FalseLabel(chunk_));
2218 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2220 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2221 __ b(eq, instr->FalseLabel(chunk_));
2224 if (expected.Contains(ToBooleanStub::SMI)) {
2225 // Smis: 0 -> false, all other -> true.
2226 __ cmp(reg, Operand::Zero());
2227 __ b(eq, instr->FalseLabel(chunk_));
2228 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2229 } else if (expected.NeedsMap()) {
2230 // If we need a map later and have a Smi -> deopt.
2231 __ SmiTst(reg);
2232 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
2235 const Register map = scratch0();
2236 if (expected.NeedsMap()) {
2237 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2239 if (expected.CanBeUndetectable()) {
2240 // Undetectable -> false.
2241 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2242 __ tst(ip, Operand(1 << Map::kIsUndetectable));
2243 __ b(ne, instr->FalseLabel(chunk_));
2247 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2248 // spec object -> true.
2249 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2250 __ b(ge, instr->TrueLabel(chunk_));
2253 if (expected.Contains(ToBooleanStub::STRING)) {
2254 // String value -> false iff empty.
2255 Label not_string;
2256 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2257 __ b(ge, &not_string);
2258 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2259 __ cmp(ip, Operand::Zero());
2260 __ b(ne, instr->TrueLabel(chunk_));
2261 __ b(instr->FalseLabel(chunk_));
2262 __ bind(&not_string);
2265 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2266 // Symbol value -> true.
2267 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2268 __ b(eq, instr->TrueLabel(chunk_));
2271 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2272 // SIMD value -> true.
2273 __ CompareInstanceType(map, ip, FLOAT32X4_TYPE);
2274 __ b(eq, instr->TrueLabel(chunk_));
2277 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2278 // heap number -> false iff +0, -0, or NaN.
2279 DwVfpRegister dbl_scratch = double_scratch0();
2280 Label not_heap_number;
2281 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2282 __ b(ne, &not_heap_number);
2283 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2284 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2285 __ cmp(r0, r0, vs); // NaN -> false.
2286 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
2287 __ b(instr->TrueLabel(chunk_));
2288 __ bind(&not_heap_number);
2291 if (!expected.IsGeneric()) {
2292 // We've seen something for the first time -> deopt.
2293 // This can only happen if we are not generic already.
2294 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
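// Illustrative sketch (not from the original source): the checks above
// mirror ES ToBoolean over the expected input types. A simplified C++ model
// using a hypothetical tagged-value struct:
#include <cmath>
#include <cstddef>
enum class ValueKind { kUndefined, kNull, kBoolean, kSmi, kHeapNumber,
                       kString, kOther };
struct TaggedValue { ValueKind kind; bool flag; double num; size_t length; };
static inline bool ToBooleanModel(const TaggedValue& v) {
  switch (v.kind) {
    case ValueKind::kUndefined:
    case ValueKind::kNull:       return false;
    case ValueKind::kBoolean:    return v.flag;
    case ValueKind::kSmi:        return v.num != 0;
    case ValueKind::kHeapNumber: return v.num != 0 && !std::isnan(v.num);
    case ValueKind::kString:     return v.length != 0;  // false iff empty
    default:                     return true;  // spec objects, symbols, SIMD
  }
}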
2301 void LCodeGen::EmitGoto(int block) {
2302 if (!IsNextEmittedBlock(block)) {
2303 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2308 void LCodeGen::DoGoto(LGoto* instr) {
2309 EmitGoto(instr->block_id());
2313 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2314 Condition cond = kNoCondition;
2315 switch (op) {
2316 case Token::EQ:
2317 case Token::EQ_STRICT:
2318 cond = eq;
2319 break;
2320 case Token::NE:
2321 case Token::NE_STRICT:
2322 cond = ne;
2323 break;
2324 case Token::LT:
2325 cond = is_unsigned ? lo : lt;
2326 break;
2327 case Token::GT:
2328 cond = is_unsigned ? hi : gt;
2329 break;
2330 case Token::LTE:
2331 cond = is_unsigned ? ls : le;
2332 break;
2333 case Token::GTE:
2334 cond = is_unsigned ? hs : ge;
2335 break;
2336 case Token::IN:
2337 case Token::INSTANCEOF:
2338 default:
2339 UNREACHABLE();
2340 }
2341 return cond;
2345 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2346 LOperand* left = instr->left();
2347 LOperand* right = instr->right();
2348 bool is_unsigned =
2349 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2350 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2351 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2353 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2354 // We can statically evaluate the comparison.
2355 double left_val = ToDouble(LConstantOperand::cast(left));
2356 double right_val = ToDouble(LConstantOperand::cast(right));
2357 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2358 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2359 EmitGoto(next_block);
2361 if (instr->is_double()) {
2362 // Compare left and right operands as doubles and load the
2363 // resulting flags into the normal status register.
2364 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2365 // If a NaN is involved, i.e. the result is unordered (V set),
2366 // jump to false block label.
2367 __ b(vs, instr->FalseLabel(chunk_));
2368 } else {
2369 if (right->IsConstantOperand()) {
2370 int32_t value = ToInteger32(LConstantOperand::cast(right));
2371 if (instr->hydrogen_value()->representation().IsSmi()) {
2372 __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
2373 } else {
2374 __ cmp(ToRegister(left), Operand(value));
2375 }
2376 } else if (left->IsConstantOperand()) {
2377 int32_t value = ToInteger32(LConstantOperand::cast(left));
2378 if (instr->hydrogen_value()->representation().IsSmi()) {
2379 __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
2380 } else {
2381 __ cmp(ToRegister(right), Operand(value));
2382 }
2383 // We commuted the operands, so commute the condition.
2384 cond = CommuteCondition(cond);
2385 } else {
2386 __ cmp(ToRegister(left), ToRegister(right));
2387 }
2388 }
2389 EmitBranch(instr, cond);
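// Illustrative sketch (not from the original source): when the constant ends
// up on the left, the emitted cmp tests the commuted relation, so the
// condition is commuted rather than negated: lt <-> gt, le <-> ge, eq and ne
// unchanged. E.g. "5 < x" is emitted as cmp(x, 5) with condition gt.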
2394 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2395 Register left = ToRegister(instr->left());
2396 Register right = ToRegister(instr->right());
2398 __ cmp(left, Operand(right));
2399 EmitBranch(instr, eq);
2403 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2404 if (instr->hydrogen()->representation().IsTagged()) {
2405 Register input_reg = ToRegister(instr->object());
2406 __ mov(ip, Operand(factory()->the_hole_value()));
2407 __ cmp(input_reg, ip);
2408 EmitBranch(instr, eq);
2409 return;
2410 }
2412 DwVfpRegister input_reg = ToDoubleRegister(instr->object());
2413 __ VFPCompareAndSetFlags(input_reg, input_reg);
2414 EmitFalseBranch(instr, vc);
2416 Register scratch = scratch0();
2417 __ VmovHigh(scratch, input_reg);
2418 __ cmp(scratch, Operand(kHoleNanUpper32));
2419 EmitBranch(instr, eq);
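// Illustrative sketch (not from the original source): the hole is a NaN with
// a distinguished upper word (kHoleNanUpper32). Comparing a register with
// itself is unordered only for NaNs, which is what the vc false-branch above
// filters on. Equivalent C++:
#include <cstdint>
#include <cstring>
static inline bool IsHoleNaN(double d, uint32_t hole_upper32) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);  // type-pun the double safely
  return d != d && static_cast<uint32_t>(bits >> 32) == hole_upper32;
}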
2423 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2424 Representation rep = instr->hydrogen()->value()->representation();
2425 DCHECK(!rep.IsInteger32());
2426 Register scratch = ToRegister(instr->temp());
2428 if (rep.IsDouble()) {
2429 DwVfpRegister value = ToDoubleRegister(instr->value());
2430 __ VFPCompareAndSetFlags(value, 0.0);
2431 EmitFalseBranch(instr, ne);
2432 __ VmovHigh(scratch, value);
2433 __ cmp(scratch, Operand(0x80000000));
2434 } else {
2435 Register value = ToRegister(instr->value());
2436 __ CheckMap(value,
2437 scratch,
2438 Heap::kHeapNumberMapRootIndex,
2439 instr->FalseLabel(chunk()),
2440 DO_SMI_CHECK);
2441 __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2442 __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2443 __ cmp(scratch, Operand(0x80000000));
2444 __ cmp(ip, Operand(0x00000000), eq);
2446 EmitBranch(instr, eq);
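// Illustrative sketch (not from the original source): -0.0 compares equal to
// +0.0, so the code above distinguishes them via the sign bit of the upper
// word (0x80000000 with a zero mantissa word). The standard-library
// equivalent:
#include <cmath>
static inline bool IsMinusZero(double d) {
  return d == 0.0 && std::signbit(d);
}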
2450 Condition LCodeGen::EmitIsObject(Register input,
2451 Register temp1,
2452 Label* is_not_object,
2453 Label* is_object) {
2454 Register temp2 = scratch0();
2455 __ JumpIfSmi(input, is_not_object);
2457 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2458 __ cmp(input, temp2);
2459 __ b(eq, is_object);
2462 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2463 // Undetectable objects behave like undefined.
2464 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2465 __ tst(temp2, Operand(1 << Map::kIsUndetectable));
2466 __ b(ne, is_not_object);
2468 // Load instance type and check that it is in object type range.
2469 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2470 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2471 __ b(lt, is_not_object);
2472 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2473 return le;
2474 }
2477 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2478 Register reg = ToRegister(instr->value());
2479 Register temp1 = ToRegister(instr->temp());
2481 Condition true_cond =
2482 EmitIsObject(reg, temp1,
2483 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2485 EmitBranch(instr, true_cond);
2489 Condition LCodeGen::EmitIsString(Register input,
2490 Register temp1,
2491 Label* is_not_string,
2492 SmiCheck check_needed = INLINE_SMI_CHECK) {
2493 if (check_needed == INLINE_SMI_CHECK) {
2494 __ JumpIfSmi(input, is_not_string);
2496 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2497 return lt;
2498 }
2502 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2503 Register reg = ToRegister(instr->value());
2504 Register temp1 = ToRegister(instr->temp());
2506 SmiCheck check_needed =
2507 instr->hydrogen()->value()->type().IsHeapObject()
2508 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2509 Condition true_cond =
2510 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2512 EmitBranch(instr, true_cond);
2516 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2517 Register input_reg = EmitLoadRegister(instr->value(), ip);
2518 __ SmiTst(input_reg);
2519 EmitBranch(instr, eq);
2523 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2524 Register input = ToRegister(instr->value());
2525 Register temp = ToRegister(instr->temp());
2527 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2528 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2530 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2531 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2532 __ tst(temp, Operand(1 << Map::kIsUndetectable));
2533 EmitBranch(instr, ne);
2537 static Condition ComputeCompareCondition(Token::Value op) {
2538 switch (op) {
2539 case Token::EQ_STRICT:
2540 case Token::EQ:
2541 return eq;
2542 case Token::LT:
2543 return lt;
2544 case Token::GT:
2545 return gt;
2546 case Token::LTE:
2547 return le;
2548 case Token::GTE:
2549 return ge;
2550 default:
2551 UNREACHABLE();
2552 return kNoCondition;
2557 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2558 DCHECK(ToRegister(instr->context()).is(cp));
2559 Token::Value op = instr->op();
2561 Handle<Code> ic =
2562 CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
2563 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2564 // This instruction also signals no smi code inlined.
2565 __ cmp(r0, Operand::Zero());
2567 Condition condition = ComputeCompareCondition(op);
2569 EmitBranch(instr, condition);
2573 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2574 InstanceType from = instr->from();
2575 InstanceType to = instr->to();
2576 if (from == FIRST_TYPE) return to;
2577 DCHECK(from == to || to == LAST_TYPE);
2578 return from;
2579 }
2582 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2583 InstanceType from = instr->from();
2584 InstanceType to = instr->to();
2585 if (from == to) return eq;
2586 if (to == LAST_TYPE) return hs;
2587 if (from == FIRST_TYPE) return ls;
2588 UNREACHABLE();
2589 return eq;
2590 }
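// Illustrative sketch (not from the original source): a [from, to] instance
// type interval needs only one unsigned compare when an endpoint is open:
// from == FIRST_TYPE checks type <= to (ls), to == LAST_TYPE checks
// type >= from (hs), and from == to checks equality (eq).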
2593 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2594 Register scratch = scratch0();
2595 Register input = ToRegister(instr->value());
2597 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2598 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2601 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2602 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2606 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2607 Register input = ToRegister(instr->value());
2608 Register result = ToRegister(instr->result());
2610 __ AssertString(input);
2612 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2613 __ IndexFromHash(result, result);
2617 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2618 LHasCachedArrayIndexAndBranch* instr) {
2619 Register input = ToRegister(instr->value());
2620 Register scratch = scratch0();
2622 __ ldr(scratch,
2623 FieldMemOperand(input, String::kHashFieldOffset));
2624 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2625 EmitBranch(instr, eq);
2629 // Branches to a label or falls through with the answer in flags. Trashes
2630 // the temp registers, but not the input.
2631 void LCodeGen::EmitClassOfTest(Label* is_true,
2632 Label* is_false,
2633 Handle<String> class_name,
2634 Register input,
2635 Register temp,
2636 Register temp2) {
2637 DCHECK(!input.is(temp));
2638 DCHECK(!input.is(temp2));
2639 DCHECK(!temp.is(temp2));
2641 __ JumpIfSmi(input, is_false);
2643 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2644 // Assuming the following assertions, we can use the same compares to test
2645 // for both being a function type and being in the object type range.
2646 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2647 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2648 FIRST_SPEC_OBJECT_TYPE + 1);
2649 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2650 LAST_SPEC_OBJECT_TYPE - 1);
2651 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2652 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2653 __ b(lt, is_false);
2654 __ b(eq, is_true);
2655 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2656 __ b(eq, is_true);
2657 } else {
2658 // Faster code path to avoid two compares: subtract lower bound from the
2659 // actual type and do a signed compare with the width of the type range.
2660 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2661 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2662 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2663 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2664 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2665 __ b(gt, is_false);
2666 }
2668 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2669 // Check if the constructor in the map is a function.
2670 Register instance_type = ip;
2671 __ GetMapConstructor(temp, temp, temp2, instance_type);
2673 // Objects with a non-function constructor have class 'Object'.
2674 __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
2675 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
2676 __ b(ne, is_true);
2677 } else {
2678 __ b(ne, is_false);
2679 }
2681 // temp now contains the constructor function. Grab the
2682 // instance class name from there.
2683 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2684 __ ldr(temp, FieldMemOperand(temp,
2685 SharedFunctionInfo::kInstanceClassNameOffset));
2686 // The class name we are testing against is internalized since it's a literal.
2687 // The name in the constructor is internalized because of the way the context
2688 // is booted. This routine isn't expected to work for random API-created
2689 // classes and it doesn't have to because you can't access it with natives
2690 // syntax. Since both sides are internalized it is sufficient to use an
2691 // identity comparison.
2692 __ cmp(temp, Operand(class_name));
2693 // End with the answer in flags.
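// Illustrative sketch (not from the original source): the sub + unsigned cmp
// pair above is the classic one-compare range check; values below 'first'
// wrap around to large unsigned numbers and fail the comparison:
static inline bool InstanceTypeInRange(unsigned type, unsigned first,
                                       unsigned last) {
  return (type - first) <= (last - first);
}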
2697 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2698 Register input = ToRegister(instr->value());
2699 Register temp = scratch0();
2700 Register temp2 = ToRegister(instr->temp());
2701 Handle<String> class_name = instr->hydrogen()->class_name();
2703 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2704 class_name, input, temp, temp2);
2706 EmitBranch(instr, eq);
2710 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2711 Register reg = ToRegister(instr->value());
2712 Register temp = ToRegister(instr->temp());
2714 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2715 __ cmp(temp, Operand(instr->map()));
2716 EmitBranch(instr, eq);
2720 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2721 DCHECK(ToRegister(instr->context()).is(cp));
2722 DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0.
2723 DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1.
2725 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2726 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2728 __ cmp(r0, Operand::Zero());
2729 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2730 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2734 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2735 class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
2737 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2738 LInstanceOfKnownGlobal* instr)
2739 : LDeferredCode(codegen), instr_(instr) { }
2740 void Generate() override {
2741 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
2742 &load_bool_);
2743 }
2744 LInstruction* instr() override { return instr_; }
2745 Label* map_check() { return &map_check_; }
2746 Label* load_bool() { return &load_bool_; }
2748 private:
2749 LInstanceOfKnownGlobal* instr_;
2750 Label map_check_;
2751 Label load_bool_;
2752 };
2754 DeferredInstanceOfKnownGlobal* deferred;
2755 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2757 Label done, false_result;
2758 Register object = ToRegister(instr->value());
2759 Register temp = ToRegister(instr->temp());
2760 Register result = ToRegister(instr->result());
2762 // A Smi is not instance of anything.
2763 __ JumpIfSmi(object, &false_result);
2765 // This is the inlined call site instanceof cache. The two occurrences of the
2766 // hole value will be patched to the last map/result pair generated by the
2767 // instanceof stub.
2768 Label cache_miss;
2769 Register map = temp;
2770 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2772 // Block constant pool emission to ensure the positions of instructions are
2773 // as expected by the patcher. See InstanceofStub::Generate().
2774 Assembler::BlockConstPoolScope block_const_pool(masm());
2775 __ bind(deferred->map_check()); // Label for calculating code patching.
2776 // We use Factory::the_hole_value() on purpose instead of loading from the
2777 // root array to force relocation to be able to later patch with
2779 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2780 __ mov(ip, Operand(cell));
2781 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
2782 __ cmp(map, Operand(ip));
2783 __ b(ne, &cache_miss);
2784 __ bind(deferred->load_bool()); // Label for calculating code patching.
2785 // We use Factory::the_hole_value() on purpose instead of loading from the
2786 // root array to force relocation to be able to later patch
2787 // with true or false.
2788 __ mov(result, Operand(factory()->the_hole_value()));
2789 __ b(&done);
2790 }
2792 // The inlined call site cache did not match. Check null and string before
2793 // calling the deferred code.
2794 __ bind(&cache_miss);
2795 // Null is not instance of anything.
2796 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2797 __ cmp(object, Operand(ip));
2798 __ b(eq, &false_result);
2800 // String values are not instances of anything.
2801 Condition is_string = masm_->IsObjectStringType(object, temp);
2802 __ b(is_string, &false_result);
2804 // Go to the deferred code.
2805 __ b(deferred->entry());
2807 __ bind(&false_result);
2808 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2810 // Here result has either true or false. Deferred code also produces true or
2811 // false object.
2812 __ bind(deferred->exit());
2813 __ bind(&done);
2817 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2818 Label* map_check,
2819 Label* bool_load) {
2820 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2821 flags = static_cast<InstanceofStub::Flags>(
2822 flags | InstanceofStub::kArgsInRegisters);
2823 flags = static_cast<InstanceofStub::Flags>(
2824 flags | InstanceofStub::kCallSiteInlineCheck);
2825 flags = static_cast<InstanceofStub::Flags>(
2826 flags | InstanceofStub::kReturnTrueFalseObject);
2827 InstanceofStub stub(isolate(), flags);
2829 PushSafepointRegistersScope scope(this);
2830 LoadContextFromDeferred(instr->context());
2832 __ Move(InstanceofStub::right(), instr->function());
2834 int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
2835 int additional_delta = (call_size / Assembler::kInstrSize) + 4;
2837 // Make sure that code size is predictable, since we use specific constant
2838 // offsets in the code to find embedded values.
2839 PredictableCodeSizeScope predictable(
2840 masm_, additional_delta * Assembler::kInstrSize);
2841 // The labels must already be bound since the code has predictable size up
2842 // to the call instruction.
2843 DCHECK(map_check->is_bound());
2844 DCHECK(bool_load->is_bound());
2845 // Make sure we don't emit any additional entries in the constant pool
2846 // before the call to ensure that the CallCodeSize() calculated the
2847 // correct number of instructions for the constant pool load.
2849 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
2850 int map_check_delta =
2851 masm_->InstructionsGeneratedSince(map_check) + additional_delta;
2852 int bool_load_delta =
2853 masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
2854 Label before_push_delta;
2855 __ bind(&before_push_delta);
2856 __ BlockConstPoolFor(additional_delta);
2857 // r5 is used to communicate the offset to the location of the map check.
2858 __ mov(r5, Operand(map_check_delta * kPointerSize));
2859 // r6 is used to communicate the offset to the location of the bool load.
2860 __ mov(r6, Operand(bool_load_delta * kPointerSize));
2861 // The mov above can generate one or two instructions. The delta was
2862 // computed for two instructions, so we need to pad here in case of one
2863 // instruction.
2864 while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
2865 __ nop();
2866 }
2868 CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
2869 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2871 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2872 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2873 // Put the result value (r0) into the result register slot and
2874 // restore all registers.
2875 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
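// Illustrative sketch (not from the original source): assuming a call
// sequence of three instructions (call_size == 12, Assembler::kInstrSize ==
// 4), additional_delta above is 12 / 4 + 4 = 7, so the stub receives in r5
// an offset of (InstructionsGeneratedSince(map_check) + 7) * kPointerSize
// bytes back to the patchable map check, and analogously in r6 for the
// patchable bool load.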
2879 void LCodeGen::DoCmpT(LCmpT* instr) {
2880 DCHECK(ToRegister(instr->context()).is(cp));
2881 Token::Value op = instr->op();
2883 Handle<Code> ic =
2884 CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
2885 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2886 // This instruction also signals no smi code inlined.
2887 __ cmp(r0, Operand::Zero());
2889 Condition condition = ComputeCompareCondition(op);
2890 __ LoadRoot(ToRegister(instr->result()),
2891 Heap::kTrueValueRootIndex,
2892 condition);
2893 __ LoadRoot(ToRegister(instr->result()),
2894 Heap::kFalseValueRootIndex,
2895 NegateCondition(condition));
2899 void LCodeGen::DoReturn(LReturn* instr) {
2900 if (FLAG_trace && info()->IsOptimizing()) {
2901 // Push the return value on the stack as the parameter.
2902 // Runtime::TraceExit returns its parameter in r0. We're leaving the code
2903 // managed by the register allocator and tearing down the frame, so it's
2904 // safe to write to the context register.
2905 __ push(r0);
2906 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2907 __ CallRuntime(Runtime::kTraceExit, 1);
2909 if (info()->saves_caller_doubles()) {
2910 RestoreCallerDoubles();
2912 int no_frame_start = -1;
2913 if (NeedsEagerFrame()) {
2914 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2916 { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
2917 if (instr->has_constant_parameter_count()) {
2918 int parameter_count = ToInteger32(instr->constant_parameter_count());
2919 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2920 if (sp_delta != 0) {
2921 __ add(sp, sp, Operand(sp_delta));
2922 }
2923 } else {
2924 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2925 Register reg = ToRegister(instr->parameter_count());
2926 // The argument count parameter is a smi.
2927 __ SmiUntag(reg);
2928 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2929 }
2930 __ Jump(lr);
2931 }
2933 if (no_frame_start != -1) {
2934 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2940 template <class T>
2941 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2942 Register vector_register = ToRegister(instr->temp_vector());
2943 Register slot_register = LoadDescriptor::SlotRegister();
2944 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2945 DCHECK(slot_register.is(r0));
2947 AllowDeferredHandleDereference vector_structure_check;
2948 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2949 __ Move(vector_register, vector);
2950 // No need to allocate this register.
2951 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2952 int index = vector->GetIndex(slot);
2953 __ mov(slot_register, Operand(Smi::FromInt(index)));
2957 template <class T>
2958 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2959 Register vector_register = ToRegister(instr->temp_vector());
2960 Register slot_register = ToRegister(instr->temp_slot());
2962 AllowDeferredHandleDereference vector_structure_check;
2963 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2964 __ Move(vector_register, vector);
2965 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2966 int index = vector->GetIndex(slot);
2967 __ mov(slot_register, Operand(Smi::FromInt(index)));
2971 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2972 DCHECK(ToRegister(instr->context()).is(cp));
2973 DCHECK(ToRegister(instr->global_object())
2974 .is(LoadDescriptor::ReceiverRegister()));
2975 DCHECK(ToRegister(instr->result()).is(r0));
2977 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2978 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2979 Handle<Code> ic =
2980 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
2981 SLOPPY, PREMONOMORPHIC).code();
2982 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2986 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2987 Register context = ToRegister(instr->context());
2988 Register result = ToRegister(instr->result());
2989 __ ldr(result, ContextOperand(context, instr->slot_index()));
2990 if (instr->hydrogen()->RequiresHoleCheck()) {
2991 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2992 __ cmp(result, ip);
2993 if (instr->hydrogen()->DeoptimizesOnHole()) {
2994 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2995 } else {
2996 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
2997 }
3002 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3003 Register context = ToRegister(instr->context());
3004 Register value = ToRegister(instr->value());
3005 Register scratch = scratch0();
3006 MemOperand target = ContextOperand(context, instr->slot_index());
3008 Label skip_assignment;
3010 if (instr->hydrogen()->RequiresHoleCheck()) {
3011 __ ldr(scratch, target);
3012 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3013 __ cmp(scratch, ip);
3014 if (instr->hydrogen()->DeoptimizesOnHole()) {
3015 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3016 } else {
3017 __ b(ne, &skip_assignment);
3018 }
3021 __ str(value, target);
3022 if (instr->hydrogen()->NeedsWriteBarrier()) {
3023 SmiCheck check_needed =
3024 instr->hydrogen()->value()->type().IsHeapObject()
3025 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3026 __ RecordWriteContextSlot(context,
3027 Operand(Context::SlotOffset(instr->slot_index())),
3028 value,
3029 scratch,
3030 GetLinkRegisterState(),
3031 kSaveFPRegs,
3032 EMIT_REMEMBERED_SET,
3033 check_needed);
3036 __ bind(&skip_assignment);
3040 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3041 HObjectAccess access = instr->hydrogen()->access();
3042 int offset = access.offset();
3043 Register object = ToRegister(instr->object());
3045 if (access.IsExternalMemory()) {
3046 Register result = ToRegister(instr->result());
3047 MemOperand operand = MemOperand(object, offset);
3048 __ Load(result, operand, access.representation());
3049 return;
3050 }
3052 if (instr->hydrogen()->representation().IsDouble()) {
3053 DwVfpRegister result = ToDoubleRegister(instr->result());
3054 __ vldr(result, FieldMemOperand(object, offset));
3055 return;
3056 }
3058 Register result = ToRegister(instr->result());
3059 if (!access.IsInobject()) {
3060 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3061 object = result;
3062 }
3063 MemOperand operand = FieldMemOperand(object, offset);
3064 __ Load(result, operand, access.representation());
3068 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3069 DCHECK(ToRegister(instr->context()).is(cp));
3070 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3071 DCHECK(ToRegister(instr->result()).is(r0));
3073 // Name is always in r2.
3074 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3075 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3076 Handle<Code> ic =
3077 CodeFactory::LoadICInOptimizedCode(
3078 isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
3079 instr->hydrogen()->initialization_state()).code();
3080 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3084 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3085 Register scratch = scratch0();
3086 Register function = ToRegister(instr->function());
3087 Register result = ToRegister(instr->result());
3089 // Get the prototype or initial map from the function.
3090 __ ldr(result,
3091 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3093 // Check that the function has a prototype or an initial map.
3094 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3095 __ cmp(result, ip);
3096 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3098 // If the function does not have an initial map, we're done.
3099 Label done;
3100 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3101 __ b(ne, &done);
3103 // Get the prototype from the initial map.
3104 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3106 // All done.
3107 __ bind(&done);
3111 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3112 Register result = ToRegister(instr->result());
3113 __ LoadRoot(result, instr->index());
3117 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3118 Register arguments = ToRegister(instr->arguments());
3119 Register result = ToRegister(instr->result());
3120 // There are two words between the frame pointer and the last argument.
3121 // Subtracting from length accounts for one of them; add one more.
3122 if (instr->length()->IsConstantOperand()) {
3123 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3124 if (instr->index()->IsConstantOperand()) {
3125 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3126 int index = (const_length - const_index) + 1;
3127 __ ldr(result, MemOperand(arguments, index * kPointerSize));
3129 Register index = ToRegister(instr->index());
3130 __ rsb(result, index, Operand(const_length + 1));
3131 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3133 } else if (instr->index()->IsConstantOperand()) {
3134 Register length = ToRegister(instr->length());
3135 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3136 int loc = const_index - 1;
3137 if (loc != 0) {
3138 __ sub(result, length, Operand(loc));
3139 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3140 } else {
3141 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3142 }
3143 } else {
3144 Register length = ToRegister(instr->length());
3145 Register index = ToRegister(instr->index());
3146 __ sub(result, length, index);
3147 __ add(result, result, Operand(1));
3148 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
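// Illustrative sketch (not from the original source): with untagged length
// and index, the element loaded above sits (length - index + 1) words above
// the arguments pointer. E.g. length == 3, index == 0 (the first argument)
// loads from arguments + 4 * kPointerSize, which skips one of the two words
// between the frame pointer and the last argument.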
3153 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3154 Register external_pointer = ToRegister(instr->elements());
3155 Register key = no_reg;
3156 ElementsKind elements_kind = instr->elements_kind();
3157 bool key_is_constant = instr->key()->IsConstantOperand();
3158 int constant_key = 0;
3159 if (key_is_constant) {
3160 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3161 if (constant_key & 0xF0000000) {
3162 Abort(kArrayIndexConstantValueTooBig);
3163 }
3164 } else {
3165 key = ToRegister(instr->key());
3166 }
3167 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3168 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3169 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3170 int base_offset = instr->base_offset();
3172 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3173 elements_kind == FLOAT32_ELEMENTS ||
3174 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3175 elements_kind == FLOAT64_ELEMENTS) {
3176 DwVfpRegister result = ToDoubleRegister(instr->result());
3177 Operand operand = key_is_constant
3178 ? Operand(constant_key << element_size_shift)
3179 : Operand(key, LSL, shift_size);
3180 __ add(scratch0(), external_pointer, operand);
3181 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3182 elements_kind == FLOAT32_ELEMENTS) {
3183 __ vldr(double_scratch0().low(), scratch0(), base_offset);
3184 __ vcvt_f64_f32(result, double_scratch0().low());
3185 } else { // i.e. one of the FLOAT64 element kinds
3186 __ vldr(result, scratch0(), base_offset);
3189 Register result = ToRegister(instr->result());
3190 MemOperand mem_operand = PrepareKeyedOperand(
3191 key, external_pointer, key_is_constant, constant_key,
3192 element_size_shift, shift_size, base_offset);
3193 switch (elements_kind) {
3194 case EXTERNAL_INT8_ELEMENTS:
3195 case INT8_ELEMENTS:
3196 __ ldrsb(result, mem_operand);
3197 break;
3198 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3199 case EXTERNAL_UINT8_ELEMENTS:
3200 case UINT8_ELEMENTS:
3201 case UINT8_CLAMPED_ELEMENTS:
3202 __ ldrb(result, mem_operand);
3203 break;
3204 case EXTERNAL_INT16_ELEMENTS:
3205 case INT16_ELEMENTS:
3206 __ ldrsh(result, mem_operand);
3207 break;
3208 case EXTERNAL_UINT16_ELEMENTS:
3209 case UINT16_ELEMENTS:
3210 __ ldrh(result, mem_operand);
3211 break;
3212 case EXTERNAL_INT32_ELEMENTS:
3213 case INT32_ELEMENTS:
3214 __ ldr(result, mem_operand);
3215 break;
3216 case EXTERNAL_UINT32_ELEMENTS:
3217 case UINT32_ELEMENTS:
3218 __ ldr(result, mem_operand);
3219 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3220 __ cmp(result, Operand(0x80000000));
3221 DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
3222 }
3223 break;
3224 case FLOAT32_ELEMENTS:
3225 case FLOAT64_ELEMENTS:
3226 case EXTERNAL_FLOAT32_ELEMENTS:
3227 case EXTERNAL_FLOAT64_ELEMENTS:
3228 case FAST_HOLEY_DOUBLE_ELEMENTS:
3229 case FAST_HOLEY_ELEMENTS:
3230 case FAST_HOLEY_SMI_ELEMENTS:
3231 case FAST_DOUBLE_ELEMENTS:
3232 case FAST_ELEMENTS:
3233 case FAST_SMI_ELEMENTS:
3234 case DICTIONARY_ELEMENTS:
3235 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3236 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3237 UNREACHABLE();
3238 break;
3239 }
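// Illustrative sketch (not from the original source): the uint32 deopt above
// guards consumers that expect a signed int32; any value with bit 31 set is
// unrepresentable as int32:
#include <cstdint>
static inline bool Uint32FitsInt32(uint32_t v) {
  return v < 0x80000000u;  // cmp result, #0x80000000 ; deopt on cs
}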
3244 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3245 Register elements = ToRegister(instr->elements());
3246 bool key_is_constant = instr->key()->IsConstantOperand();
3247 Register key = no_reg;
3248 DwVfpRegister result = ToDoubleRegister(instr->result());
3249 Register scratch = scratch0();
3251 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3253 int base_offset = instr->base_offset();
3254 if (key_is_constant) {
3255 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3256 if (constant_key & 0xF0000000) {
3257 Abort(kArrayIndexConstantValueTooBig);
3258 }
3259 base_offset += constant_key * kDoubleSize;
3260 }
3261 __ add(scratch, elements, Operand(base_offset));
3263 if (!key_is_constant) {
3264 key = ToRegister(instr->key());
3265 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3266 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3267 __ add(scratch, scratch, Operand(key, LSL, shift_size));
3270 __ vldr(result, scratch, 0);
3272 if (instr->hydrogen()->RequiresHoleCheck()) {
3273 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3274 __ cmp(scratch, Operand(kHoleNanUpper32));
3275 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3280 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3281 Register elements = ToRegister(instr->elements());
3282 Register result = ToRegister(instr->result());
3283 Register scratch = scratch0();
3284 Register store_base = scratch;
3285 int offset = instr->base_offset();
3287 if (instr->key()->IsConstantOperand()) {
3288 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3289 offset += ToInteger32(const_operand) * kPointerSize;
3290 store_base = elements;
3291 } else {
3292 Register key = ToRegister(instr->key());
3293 // Even though the HLoadKeyed instruction forces the input
3294 // representation for the key to be an integer, the input gets replaced
3295 // during bound check elimination with the index argument to the bounds
3296 // check, which can be tagged, so that case must be handled here, too.
3297 if (instr->hydrogen()->key()->representation().IsSmi()) {
3298 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
3299 } else {
3300 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3301 }
3302 }
3303 __ ldr(result, MemOperand(store_base, offset));
3305 // Check for the hole value.
3306 if (instr->hydrogen()->RequiresHoleCheck()) {
3307 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3308 __ SmiTst(result);
3309 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
3310 } else {
3311 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3312 __ cmp(result, scratch);
3313 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3315 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3316 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3317 Label done;
3318 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3319 __ cmp(result, scratch);
3320 __ b(ne, &done);
3321 if (info()->IsStub()) {
3322 // A stub can safely convert the hole to undefined only if the array
3323 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3324 // it needs to bail out.
3325 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3326 __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3327 __ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
3328 DeoptimizeIf(ne, instr, Deoptimizer::kHole);
3330 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3331 __ bind(&done);
3336 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3337 if (instr->is_typed_elements()) {
3338 DoLoadKeyedExternalArray(instr);
3339 } else if (instr->hydrogen()->representation().IsDouble()) {
3340 DoLoadKeyedFixedDoubleArray(instr);
3341 } else {
3342 DoLoadKeyedFixedArray(instr);
3343 }
3347 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3348 Register base,
3349 bool key_is_constant,
3350 int constant_key,
3351 int element_size,
3352 int shift_size,
3353 int base_offset) {
3354 if (key_is_constant) {
3355 return MemOperand(base, (constant_key << element_size) + base_offset);
3358 if (base_offset == 0) {
3359 if (shift_size >= 0) {
3360 return MemOperand(base, key, LSL, shift_size);
3361 } else {
3362 DCHECK_EQ(-1, shift_size);
3363 return MemOperand(base, key, LSR, 1);
3367 if (shift_size >= 0) {
3368 __ add(scratch0(), base, Operand(key, LSL, shift_size));
3369 return MemOperand(scratch0(), base_offset);
3370 } else {
3371 DCHECK_EQ(-1, shift_size);
3372 __ add(scratch0(), base, Operand(key, ASR, 1));
3373 return MemOperand(scratch0(), base_offset);
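// Illustrative sketch (not from the original source): shift_size is
// element_size - kSmiTagSize for smi keys, so for byte-sized elements it is
// -1 and the key must be shifted right by one to untag it, i.e.
//   address = base + (smi_key >> 1) + base_offset.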
3378 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3379 DCHECK(ToRegister(instr->context()).is(cp));
3380 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3381 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3383 if (instr->hydrogen()->HasVectorAndSlot()) {
3384 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3387 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3388 isolate(), instr->hydrogen()->language_mode(),
3389 instr->hydrogen()->initialization_state()).code();
3390 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3394 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3395 Register scratch = scratch0();
3396 Register result = ToRegister(instr->result());
3398 if (instr->hydrogen()->from_inlined()) {
3399 __ sub(result, sp, Operand(2 * kPointerSize));
3400 } else {
3401 // Check if the calling frame is an arguments adaptor frame.
3402 Label done, adapted;
3403 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3404 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3405 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3407 // Result is the frame pointer for the frame if not adapted and for the real
3408 // frame below the adaptor frame if adapted.
3409 __ mov(result, fp, LeaveCC, ne);
3410 __ mov(result, scratch, LeaveCC, eq);
3415 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3416 Register elem = ToRegister(instr->elements());
3417 Register result = ToRegister(instr->result());
3419 Label done;
3421 // If no arguments adaptor frame, the number of arguments is fixed.
3422 __ cmp(fp, elem);
3423 __ mov(result, Operand(scope()->num_parameters()));
3424 __ b(eq, &done);
3426 // Arguments adaptor frame present. Get argument length from there.
3427 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3428 __ ldr(result,
3429 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3430 __ SmiUntag(result);
3432 // Argument length is in result register.
3433 __ bind(&done);
3437 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3438 Register receiver = ToRegister(instr->receiver());
3439 Register function = ToRegister(instr->function());
3440 Register result = ToRegister(instr->result());
3441 Register scratch = scratch0();
3443 // If the receiver is null or undefined, we have to pass the global
3444 // object as a receiver to normal functions. Values have to be
3445 // passed unchanged to builtins and strict-mode functions.
3446 Label global_object, result_in_receiver;
3448 if (!instr->hydrogen()->known_function()) {
3449 // Do not transform the receiver to object for strict mode
3450 // functions.
3451 __ ldr(scratch,
3452 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3453 __ ldr(scratch,
3454 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3455 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3456 __ tst(scratch, Operand(mask));
3457 __ b(ne, &result_in_receiver);
3459 // Do not transform the receiver to object for builtins.
3460 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3461 __ b(ne, &result_in_receiver);
3464 // Normal function. Replace undefined or null with global receiver.
3465 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3466 __ cmp(receiver, scratch);
3467 __ b(eq, &global_object);
3468 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3469 __ cmp(receiver, scratch);
3470 __ b(eq, &global_object);
3472 // Deoptimize if the receiver is not a JS object.
3473 __ SmiTst(receiver);
3474 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
3475 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3476 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3478 __ b(&result_in_receiver);
3479 __ bind(&global_object);
3480 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
3481 __ ldr(result,
3482 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3483 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3485 if (result.is(receiver)) {
3486 __ bind(&result_in_receiver);
3487 } else {
3488 Label result_ok;
3489 __ b(&result_ok);
3490 __ bind(&result_in_receiver);
3491 __ mov(result, receiver);
3492 __ bind(&result_ok);
3497 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3498 Register receiver = ToRegister(instr->receiver());
3499 Register function = ToRegister(instr->function());
3500 Register length = ToRegister(instr->length());
3501 Register elements = ToRegister(instr->elements());
3502 Register scratch = scratch0();
3503 DCHECK(receiver.is(r0)); // Used for parameter count.
3504 DCHECK(function.is(r1)); // Required by InvokeFunction.
3505 DCHECK(ToRegister(instr->result()).is(r0));
3507 // Copy the arguments to this function possibly from the
3508 // adaptor frame below it.
3509 const uint32_t kArgumentsLimit = 1 * KB;
3510 __ cmp(length, Operand(kArgumentsLimit));
3511 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
3513 // Push the receiver and use the register to keep the original
3514 // number of arguments.
3515 __ push(receiver);
3516 __ mov(receiver, length);
3517 // The arguments are at a one pointer size offset from elements.
3518 __ add(elements, elements, Operand(1 * kPointerSize));
3520 // Loop through the arguments pushing them onto the execution
3521 // stack.
3522 Label invoke, loop;
3523 // length is a small non-negative integer, due to the test above.
3524 __ cmp(length, Operand::Zero());
3525 __ b(eq, &invoke);
3526 __ bind(&loop);
3527 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3528 __ push(scratch);
3529 __ sub(length, length, Operand(1), SetCC);
3530 __ b(ne, &loop);
3532 __ bind(&invoke);
3533 DCHECK(instr->HasPointerMap());
3534 LPointerMap* pointers = instr->pointer_map();
3535 SafepointGenerator safepoint_generator(
3536 this, pointers, Safepoint::kLazyDeopt);
3537 // The number of arguments is stored in receiver which is r0, as expected
3538 // by InvokeFunction.
3539 ParameterCount actual(receiver);
3540 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3544 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3545 LOperand* argument = instr->value();
3546 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3547 Abort(kDoPushArgumentNotImplementedForDoubleType);
3549 Register argument_reg = EmitLoadRegister(argument, ip);
3550 __ push(argument_reg);
3555 void LCodeGen::DoDrop(LDrop* instr) {
3556 __ Drop(instr->count());
3560 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3561 Register result = ToRegister(instr->result());
3562 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3566 void LCodeGen::DoContext(LContext* instr) {
3567 // If there is a non-return use, the context must be moved to a register.
3568 Register result = ToRegister(instr->result());
3569 if (info()->IsOptimizing()) {
3570 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3572 // If there is no frame, the context must be in cp.
3573 DCHECK(result.is(cp));
3578 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3579 DCHECK(ToRegister(instr->context()).is(cp));
3580 __ push(cp); // The context is the first argument.
3581 __ Move(scratch0(), instr->hydrogen()->pairs());
3582 __ push(scratch0());
3583 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3584 __ push(scratch0());
3585 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3589 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3590 int formal_parameter_count, int arity,
3591 LInstruction* instr) {
3592 bool dont_adapt_arguments =
3593 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3594 bool can_invoke_directly =
3595 dont_adapt_arguments || formal_parameter_count == arity;
3597 Register function_reg = r1;
3599 LPointerMap* pointers = instr->pointer_map();
3601 if (can_invoke_directly) {
3603 __ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3605 // Set r0 to arguments count if adaptation is not needed. Assumes that r0
3606 // is available to write to at this point.
3607 if (dont_adapt_arguments) {
3608 __ mov(r0, Operand(arity));
3612 __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3613 __ Call(ip);
3615 // Set up deoptimization.
3616 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3617 } else {
3618 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3619 ParameterCount count(arity);
3620 ParameterCount expected(formal_parameter_count);
3621 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ tst(exponent, Operand(HeapNumber::kSignMask));
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ b(eq, &done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r1) ? r0 : r1;
    Register tmp2 = input.is(r2) ? r0 : r2;
    Register tmp3 = input.is(r3) ? r0 : r3;
    Register tmp4 = input.is(r4) ? r0 : r4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}

void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ cmp(input, Operand::Zero());
  __ Move(result, input, pl);
  // We can make rsb conditional because the previous cmp instruction
  // will clear the V (overflow) flag and rsb won't set this flag
  // if input is positive.
  __ rsb(result, input, Operand::Zero(), SetCC, mi);
  // Deoptimize on overflow.
  DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}

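// Math.abs dispatches on the input representation: doubles use vabs,
// smis/int32s use the integer path above, and tagged values take the
// deferred heap-number path.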
void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }
   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DwVfpRegister input = ToDoubleRegister(instr->value());
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vabs(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}

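// Math.floor on a double. Deoptimizes when the result is not representable
// as an int32 (NaN, infinity, out of range) or, if required, when the
// result would be -0.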
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmp(result, Operand::Zero());
    __ b(ne, &done);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }
  __ bind(&done);
}

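// Math.round rounds ties towards +infinity: inputs in [-0.5, +0.5[ are
// handled specially below, everything else is computed as
// floor(input + 0.5). For example, Math.round(0.5) == 1 and
// Math.round(-0.5) == -0.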
void LCodeGen::DoMathRound(LMathRound* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DwVfpRegister input_plus_dot_five = double_scratch1;
  Register input_high = scratch0();
  DwVfpRegister dot_five = double_scratch0();
  Label convert, done;

  __ Vmov(dot_five, 0.5, scratch0());
  __ vabs(double_scratch1, input);
  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ b(hi, &convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ VmovHigh(input_high, input);
    __ cmp(input_high, Operand::Zero());
    // [-0.5, -0].
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }
  __ VFPCompareAndSetFlags(input, dot_five);
  __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
  // flag kBailoutOnMinusZero.
  __ mov(result, Operand::Zero(), LeaveCC, ne);
  __ b(&done);

  __ bind(&convert);
  __ vadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
                   &done, &done);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
  __ bind(&done);
}

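// Math.fround: round to float32 precision by converting the double down to
// single precision and back up again.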
void LCodeGen::DoMathFround(LMathFround* instr) {
  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
  DwVfpRegister output_reg = ToDoubleRegister(instr->result());
  LowDwVfpRegister scratch = double_scratch0();
  __ vcvt_f32_f64(scratch.low(), input_reg);
  __ vcvt_f64_f32(output_reg, scratch.low());
}

void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  __ vsqrt(result, input);
}

void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister temp = double_scratch0();

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ vmov(temp, -V8_INFINITY, scratch0());
  __ VFPCompareAndSetFlags(input, temp);
  __ vneg(result, temp, eq);
  __ b(&done, eq);

  // Add +0 to convert -0 to +0.
  __ vadd(result, input, kDoubleRegZero);
  __ vsqrt(result, result);
  __ bind(&done);
}

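// Math.pow. This instruction is marked as a call, so the inputs must
// already sit in the fixed registers the MathPowStub expects (see the
// DCHECKs below); only the exponent's representation selects the stub kind.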
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d0));
  DCHECK(ToDoubleRegister(instr->result()).is(d2));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!r6.is(tagged_exponent));
    __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r6, Operand(ip));
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

void LCodeGen::DoMathExp(LMathExp* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DwVfpRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}

void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}

void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ clz(result, input);
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r1));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}

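// Calls a code object described by an interface descriptor, either as a
// tail call (a jump, after tearing down our frame) or as a regular call
// with a lazy-deopt safepoint recorded afterwards.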
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(r0));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      // Make sure we don't emit any additional entries in the constant pool
      // before the call to ensure that the CallCodeSize() calculated the
      // correct number of instructions for the constant pool load.
      {
        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Jump(target);
      }
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      PlatformInterfaceDescriptor* call_descriptor =
          instr->descriptor().platform_specific_descriptor();
      if (call_descriptor != NULL) {
        __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
                call_descriptor->storage_mode());
      } else {
        __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al);
      }
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      // Make sure we don't emit any additional entries in the constant pool
      // before the call to ensure that the CallCodeSize() calculated the
      // correct number of instructions for the constant pool load.
      {
        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(target);
      }
    }
    generator.AfterCall();
  }
}

void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(r0, Operand(instr->arity()));
  }

  // Change context.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Load the code entry address.
  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  __ Call(ip);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(r3));
    DCHECK(vector_register.is(r2));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Move(vector_register, vector);
    __ mov(slot_register, Operand(Smi::FromInt(index)));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}

void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  // No cell in r2 for construct type feedback in optimized code.
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case we have a length argument.
    // The case may bail out to the runtime, which will determine the correct
    // elements kind with the site.
    __ Move(r2, instr->hydrogen()->site());
  } else {
    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  }
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need to create a holey array; look at the first argument.
      __ ldr(r5, MemOperand(sp, 0));
      __ cmp(r5, Operand::Zero());
      __ b(eq, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}

void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}

void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(code_object,
         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}

void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ add(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ add(result, base, offset);
  }
}

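// Stores to a named field. Handles, in order: external (untagged) memory,
// unboxed double fields, a map transition (with its own write barrier), and
// finally the tagged store itself, either in-object or in the properties
// backing store, emitting a write barrier when needed.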
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    DwVfpRegister value = ToDoubleRegister(instr->value());
    __ vstr(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ mov(scratch, Operand(transition));
    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch,
                           temp,
                           GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  } else {
    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  }
}

void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }

  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}

void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand(instr->index());
    Register length = ToRegister(instr->length());
    __ cmp(length, index);
    cc = CommuteCondition(cc);
  } else {
    Register index = ToRegister(instr->index());
    Operand length = ToOperand(instr->length());
    __ cmp(index, length);
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(cc), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}

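// Keyed store into a typed/external array: computes the element address
// from the key and element size, then emits the store matching the
// elements kind (strb/strh/str for integers, vstr for floats and doubles).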
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    DwVfpRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ add(address, external_pointer,
               Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ add(address, external_pointer, Operand(key, LSL, shift_size));
    }
    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ vcvt_f32_f64(double_scratch0().low(), value);
      __ vstr(double_scratch0().low(), address, base_offset);
    } else {  // Storing doubles, not floats.
      __ vstr(value, address, base_offset);
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        base_offset);
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ strb(value, mem_operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ strh(value, mem_operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ str(value, mem_operand);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DwVfpRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DwVfpRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int base_offset = instr->base_offset();

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ add(scratch, elements,
           Operand((constant_key << element_size_shift) + base_offset));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, elements, Operand(base_offset));
    __ add(scratch, scratch,
           Operand(ToRegister(instr->key()), LSL, shift_size));
  }

  if (instr->NeedsCanonicalization()) {
    // Force a canonical NaN.
    if (masm()->emit_debug_code()) {
      __ vmrs(ip);
      __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
      __ Assert(ne, kDefaultNaNModeNotSet);
    }
    __ VFPCanonicalizeNaN(double_scratch, value);
    __ vstr(double_scratch, scratch, 0);
  } else {
    __ vstr(value, scratch, 0);
  }
}

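// Keyed store into a FixedArray of tagged values. A constant key folds into
// the immediate offset; otherwise the address is computed from the (possibly
// smi-tagged) key register. A write barrier is emitted when needed.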
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
  }
  __ str(value, MemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ add(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetLinkRegisterState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}

void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external, fast double, fast tagged.
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}

void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}

void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = r0;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ cmp(ToRegister(current_capacity), Operand(constant_key));
    __ b(le, deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ cmp(ToRegister(key), Operand(constant_capacity));
    __ b(ge, deferred->entry());
  } else {
    __ cmp(ToRegister(key), ToRegister(current_capacity));
    __ b(ge, deferred->entry());
  }

  if (instr->elements()->IsRegister()) {
    __ Move(result, ToRegister(instr->elements()));
  } else {
    __ ldr(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = r0;
  __ mov(result, Operand::Zero());

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ ldr(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
    } else {
      __ Move(r3, ToRegister(key));
      __ SmiTag(r3);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ SmiTst(result);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
}

void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(from_map));
  __ b(ne, &not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(cp));
    DCHECK(object_reg.is(r0));
    PushSafepointRegistersScope scope(this);
    __ Move(r1, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}

void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}

void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r1));
  DCHECK(ToRegister(instr->right()).is(r0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ mov(scratch, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(r0);
  __ SmiUntag(r0);
  __ StoreToSafepointRegisterSlot(r0, result);
}

void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
  __ b(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ b(eq, deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}

void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  SwVfpRegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ldr(scratch, ToMemOperand(input));
    __ vmov(single_scratch, scratch);
  } else {
    __ vmov(single_scratch, ToRegister(input));
  }
  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
}

void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  SwVfpRegister flt_scratch = double_scratch0().low();
  __ vmov(flt_scratch, ToRegister(input));
  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
}

void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTag(dst, src, SetCC);
  __ b(vs, deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmp(input, Operand(Smi::kMaxValue));
  __ b(hi, deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}

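// Deferred code for NumberTagI/NumberTagU: boxes an int32/uint32 that does
// not fit in a smi into a freshly allocated heap number, falling back to
// the runtime when inline allocation fails.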
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  LowDwVfpRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ eor(src, src, Operand(0x80000000));
    }
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
  } else {
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, Operand::Zero());

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ sub(r0, r0, Operand(kHeapObjectTag));
    __ StoreToSafepointRegisterSlot(r0, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
  __ add(dst, dst, Operand(kHeapObjectTag));
}

void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
  // Now that we have finished with the object's real address, tag it.
  __ add(reg, reg, Operand(kHeapObjectTag));
}

void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ sub(r0, r0, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(r0, reg);
}

void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ tst(input, Operand(0xc0000000));
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTag(output, input, SetCC);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ SmiTag(output, input);
  }
}

void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(result, input, SetCC);
    DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
  } else {
    __ SmiUntag(result, input);
  }
}

void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DwVfpRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  SwVfpRegister flt_scratch = double_scratch0().low();
  DCHECK(!result_reg.is(double_scratch0()));
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(scratch, Operand(ip));
    if (can_convert_undefined_to_nan) {
      __ b(ne, &convert);
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    }
    // Load heap number.
    __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
    if (deoptimize_on_minus_zero) {
      __ VmovLow(scratch, result_reg);
      __ cmp(scratch, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch, result_reg);
      __ cmp(scratch, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, Operand(ip));
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
      __ jmp(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ vmov(flt_scratch, scratch);
  __ vcvt_f64_s32(result_reg, flt_scratch);
  __ bind(&done);
}

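// Deferred code for TaggedToI: the input was not a smi, so convert the heap
// number (or, for truncating conversions, undefined and booleans as well)
// to an int32, deoptimizing when the value cannot be converted.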
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  LowDwVfpRegister double_scratch = double_scratch0();
  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input was optimistically untagged; revert it.
  // The carry flag is set when we reach this deferred code as we just executed
  // SmiUntag(heap_object, SetCC)
  STATIC_ASSERT(kHeapObjectTag == 1);
  __ adc(scratch2, input_reg, Operand(input_reg));

  // Heap number map check.
  __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, Operand(ip));

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ b(ne, &no_heap_number);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_bools);
    __ mov(input_reg, Operand::Zero());
    __ b(&done);

    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_false);
    __ mov(input_reg, Operand(1));
    __ b(&done);

    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ mov(input_reg, Operand::Zero());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

    __ sub(ip, scratch2, Operand(kHeapObjectTag));
    __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(input_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_scratch2);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
    }
  }
  __ bind(&done);
}

void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(input_reg, SetCC);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ b(cs, deferred->entry());
    __ bind(deferred->exit());
  }
}

void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DwVfpRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}

void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}

void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, SetCC);
  DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}

void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input));
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
}

void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input));
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
  }
}

void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
}

void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmp(scratch, Operand(last));
        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
    } else {
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}

void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(cell));
    __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}

void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ mov(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r0, scratch0());
  }
  __ tst(scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
}

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ b(eq, &success);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ b(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}

void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}

void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number.
  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(factory()->heap_number_map()));
  __ b(eq, &heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ mov(result_reg, Operand::Zero());
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ jmp(&done);

  // Smi: the untagged value is already in result_reg.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}


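// Moves the requested 32-bit half of a double's IEEE-754 bit pattern into a
// core register.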
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ VmovHigh(result_reg, value_reg);
  } else {
    __ VmovLow(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
  __ VmovHigh(result_reg, hi_reg);
  __ VmovLow(result_reg, lo_reg);
}


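// Inline allocation with a deferred runtime fallback: the fast path bumps the
// allocation top in generated code and branches to deferred code when the
// inline attempt fails or the requested size is too large.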
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    // Fill the freshly allocated object with one-pointer filler maps so the
    // heap stays iterable until the object is fully initialized.
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
    __ str(scratch2, MemOperand(result, scratch));
    __ b(ge, &loop);
  }
}


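// Deferred slow path for DoAllocate: passes the smi-tagged size and the
// encoded allocation flags to the runtime, then stores the result back into
// the safepoint slot of the result register.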
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ Push(Smi::FromInt(size));
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r6 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2-5 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r6, instr->hydrogen()->literals());
  __ ldr(r1, FieldMemOperand(r6, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create regexp literal using runtime function
  // Result will be in r0.
  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r4, Operand(instr->hydrogen()->pattern()));
  __ mov(r3, Operand(instr->hydrogen()->flags()));
  __ Push(r6, r5, r4, r3);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


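// typeof: smis are answered inline with "number"; all other values go
// through the TypeofStub.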
void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(r3));
  DCHECK(ToRegister(instr->result()).is(r0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(r0, Operand(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    // Exclude undetectable objects: they do not answer "string".
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ b(eq, true_label);
    __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ b(eq, true_label);
    __ CheckObjectTypeRange(input,
                            map,
                            FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            false_label);
    // Check for undetectable objects => false.
    __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->float32x4_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FLOAT32X4_TYPE);
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}


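// Branches on whether the current frame was entered via a construct call,
// skipping over an arguments adaptor frame if one is present.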
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists. The ldr executes
  // conditionally (eq), only when the preceding cmp found an adaptor frame.
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for duration of padding.
      Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


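// Stack checks come in two flavors: at function entry the StackCheck builtin
// is called directly when the limit is exceeded, while at backwards branches
// the runtime call lives in deferred code off the fast path.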
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
    PredictableCodeSizeScope predictable(masm(),
        CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


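// Prepares the for-in enumerable in r0: deoptimizes on smis, proxies, and
// other non-spec objects, uses the enum cache when the whole prototype chain
// supports it, and otherwise calls the runtime to compute the property names.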
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ SmiTst(r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(null_value, &call_runtime);

  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


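// LLoadFieldByIndex encodes the field index as a smi: the low payload bit
// marks a mutable heap-number field that must be loaded via the runtime
// (handled here in deferred code), and a negative index selects the
// out-of-object properties backing store.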
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ mov(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  // The low bit of the smi payload flags a mutable-double field, which is
  // loaded in deferred code; shifting right by one removes the flag.
  __ tst(index, Operand(Smi::FromInt(1)));
  __ b(ne, deferred->entry());
  __ mov(index, Operand(index, ASR, 1));

  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

}  // namespace internal
}  // namespace v8